The dataset schema (one row per source file; ⌀ marks nullable columns):

| Column | Type | Values / Range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count ⌀ | int64 | 1 to 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | string | length 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string | length 24 |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count ⌀ | int64 | 1 to 116k |
| max_issues_repo_issues_event_min_datetime ⌀ | string | length 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string | length 24 |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count ⌀ | int64 | 1 to 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string | length 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string | length 24 |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
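Rows with this schema can be consumed programmatically; a minimal sketch with the Hugging Face `datasets` library, assuming a dataset published under this schema (the dataset name below is a placeholder, not taken from this dump):

```python
from datasets import load_dataset

# Stream rows lazily instead of downloading the full dump.
ds = load_dataset("some-org/source-code-dump", split="train", streaming=True)
for row in ds.take(1):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```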
---
**hexsha:** 786057b639f3a0b5b3e99837cc5b24131f77a024 · **size:** 135 · **ext:** py · **lang:** Python
**path:** conan/tools/meson/__init__.py · **repo:** Erlkoenig90/conan @ da8de69618d75da5ac7d77e0eb7a38ee4d564eb9 (same for stars/issues/forks) · **licenses:** MIT · **counts:** null

```python
# noinspection PyUnresolvedReferences
from conan.tools.meson.toolchain import MesonToolchain
from conan.tools.meson.meson import Meson
```
**avg_line_length:** 33.75 · **max_line_length:** 54 · **alphanum_fraction:** 0.866667
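The module above only re-exports the two Meson helpers; a minimal sketch of how they are typically consumed from a conanfile (the `ConanFile` subclass below is illustrative, following the Conan 2.x API, and is not part of the original snippet):

```python
from conan import ConanFile
from conan.tools.meson import Meson, MesonToolchain

class PkgConan(ConanFile):
    settings = "os", "arch", "compiler", "build_type"

    def generate(self):
        # Writes the Meson native/cross file for the current configuration
        MesonToolchain(self).generate()

    def build(self):
        meson = Meson(self)
        meson.configure()
        meson.build()
```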
---
**hexsha:** 25e5b124b6b979be3bacc05f98eae96c6e33dd1c · **size:** 2,419 · **ext:** py · **lang:** Python
**path:** Software/src/liv/iotConnectors/devicehive/devicehive/utils.py · **repo:** nadaol/Weather_Station @ 5bfb31c2974227fcc8d912e3911f356d4e3fb187 (same for stars/issues/forks) · **licenses:** MIT · **counts:** null

```python
# -*- coding: utf-8 -*-
# vim:set et tabstop=4 shiftwidth=4 nu nowrap fileencoding=utf-8:
import json
from datetime import datetime
from urlparse import urlsplit, urljoin
from twisted.internet.protocol import Protocol
from twisted.internet.defer import Deferred, succeed, fail
from twisted.web.iweb import IBodyProducer
from zope.interface import implements
__all__ = ['parse_url', 'parse_date', 'url_path', 'TextDataConsumer', 'EmptyDataProducer']
def parse_url(device_hive_url):
if not device_hive_url.endswith('/'):
device_hive_url += '/'
url = urlsplit(device_hive_url)
netloc_split = url.netloc.split(':')
port = 80
host = netloc_split[0]
if url.scheme == 'https':
port = 443
if len(netloc_split) == 2:
port = int(netloc_split[1], 10)
return (device_hive_url, host, port)
def url_path(base_uri, api_uri):
uri = urlsplit(urljoin(base_uri, api_uri))
path = uri.path
    if len(uri.query) > 0:
        path += '?' + uri.query
    return path
def parse_date(date_str):
"""
Converts a date-time string into a datetime object.
"""
if len(date_str) > 19:
return datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S.%f')
    else:
return datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S')
class TextDataConsumer(Protocol):
"""
Converts a text input into a C{str}.
"""
def __init__(self, deferred):
self.deferred = deferred
self.text = ''
def dataReceived(self, data):
self.text += data
def connectionLost(self, reason):
self.deferred.callback(self.text)
class JsonDataConsumer(Protocol):
"""
JsonDataConsumer receives JSON data as an input and
then converts it into C{dict} type.
"""
def __init__(self, deferred):
self.deferred = deferred
self.data = []
def dataReceived(self, data):
self.data.append(data)
def connectionLost(self, reason):
data = json.loads(''.join(self.data))
self.deferred.callback(data)
class EmptyDataProducer(object):
implements(IBodyProducer)
def __init__(self):
self.length = 0
def startProducing(self, consumer):
try:
consumer.write('')
return succeed(None)
except Exception, error:
return fail(error)
def stopProducing(self):
pass
```
**avg_line_length:** 24.683673 · **max_line_length:** 90 · **alphanum_fraction:** 0.628359
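A quick sketch of the two parsing helpers above (Python 2, matching the module's `urlparse` import; expected outputs shown as comments):

```python
print(parse_url("http://devicehive.example.com:8080/api"))
# ('http://devicehive.example.com:8080/api/', 'devicehive.example.com', 8080)
print(parse_date("2014-02-10T13:20:14.123456"))
# datetime.datetime(2014, 2, 10, 13, 20, 14, 123456)
```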
---
**hexsha:** b0eeb3b14eb48e44ed155665a50a5a04284a9d78 · **size:** 478 · **ext:** py · **lang:** Python
**path:** leetcode-problems/0238_product_of_array_except_self.py · **repo:** shikhalakra22/Algorithms @ 5932b162214add40f4fd3ca217e16217a36823fe (same for stars/issues/forks) · **licenses:** MIT · **counts:** null

```python
from typing import List


class Solution:
def productExceptSelf(self, nums: List[int]) -> List[int]:
n = len(nums)
left = [0]*n
right = [0]*n
prod = [0]*n
left[0] = 1
right[n-1] = 1
for i in range(1, n):
left[i] = nums[i - 1] * left[i - 1]
for i in range(n-2, -1, -1):
right[i] = nums[i + 1] * right[i + 1]
for i in range(n):
prod[i] = left[i] * right[i]
return prod
```
**avg_line_length:** 23.9 · **max_line_length:** 62 · **alphanum_fraction:** 0.414226
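A worked example of the prefix/suffix trick: for `nums = [1, 2, 3, 4]` the code builds `left = [1, 1, 2, 6]` and `right = [24, 12, 4, 1]`, whose elementwise product is the answer:

```python
print(Solution().productExceptSelf([1, 2, 3, 4]))  # [24, 12, 8, 6]
```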
---
**hexsha:** e340e8a293a6f085cc11a83f60f04e733bb259a4 · **size:** 6,478 · **ext:** py · **lang:** Python
**path:** maestrowf/conductor.py · **repo:** usnistgov/corr-maestrowf @ 49d67a71a8c116754712048d51cf0bf65d6f7c42 (same for stars/issues/forks) · **licenses:** MIT · **counts:** null

```python
###############################################################################
# Copyright (c) 2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory
# Written by Francesco Di Natale, dinatale3@llnl.gov.
#
# LLNL-CODE-734340
# All rights reserved.
# This file is part of MaestroWF, Version: 1.0.0.
#
# For details, see https://github.com/LLNL/maestrowf.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
from argparse import ArgumentParser, RawTextHelpFormatter
from datetime import datetime
import glob
import inspect
import logging
import os
import sys
from time import sleep
from maestrowf.datastructures.core import ExecutionGraph
from maestrowf.utils import create_parentdir
# Logger instantiation
rootlogger = logging.getLogger(inspect.getmodule(__name__))
logger = logging.getLogger(__name__)
# Formatting of logger.
LFORMAT = "%(asctime)s - %(name)s:%(funcName)s:%(lineno)s - " \
"%(levelname)s - %(message)s"
def setup_argparser():
"""
Method for setting up the program's argument parser.
"""
    parser = ArgumentParser(prog="ExecutionManager",
                            description="An application for checking and "
                                        "managing an ExecutionDAG within an "
                                        "executing study.",
                            formatter_class=RawTextHelpFormatter)
    parser.add_argument("directory", type=str, help="The directory where "
                        "a study has been set up and where a pickle file"
                        " of an ExecutionGraph is stored.")
parser.add_argument("-s", "--status", action="store_true",
help="Check the status of the ExecutionGraph "
"located as specified by the 'directory' "
"argument.")
parser.add_argument("-l", "--logpath", type=str,
help="Alternate path to store program logging.")
parser.add_argument("-d", "--debug_lvl", type=int, default=2,
help="Level of logging messages to be output:\n"
"5 - Critical\n"
"4 - Error\n"
"3 - Warning\n"
"2 - Info (Default)\n"
"1 - Debug")
parser.add_argument("-c", "--logstdout", action="store_true",
help="Output logging to stdout in addition to a file.")
parser.add_argument("-t", "--sleeptime", type=int, default=60,
help="Amount of time (in seconds) for the manager to "
"wait between job status checks.")
return parser
def setup_logging(args, name):
"""
Method for setting up logging in the Main class.
:param args: A Namespace object created by a parsed ArgumentParser.
:param name: The name of the log file.
"""
# Check if the user has specified a custom log path.
if args.logpath:
logger.info("Log path overwritten by command line -- %s",
args.logpath)
log_path = args.logpath
else:
log_path = os.path.join(args.directory, "logs")
loglevel = args.debug_lvl * 10
# Attempt to create the logging directory.
create_parentdir(log_path)
formatter = logging.Formatter(LFORMAT)
rootlogger.setLevel(loglevel)
# Set up handlers
if args.logstdout:
handler = logging.StreamHandler()
handler.setFormatter(formatter)
rootlogger.addHandler(handler)
log_file = os.path.join(log_path, "{}.log".format(name))
handler = logging.FileHandler(log_file)
handler.setFormatter(formatter)
rootlogger.addHandler(handler)
rootlogger.setLevel(loglevel)
# Print the level of logging.
logger.info("INFO Logging Level -- Enabled")
logger.warning("WARNING Logging Level -- Enabled")
logger.critical("CRITICAL Logging Level -- Enabled")
logger.debug("DEBUG Logging Level -- Enabled")
def main():
# Set up and parse the ArgumentParser
parser = setup_argparser()
args = parser.parse_args()
# Unpickle the ExecutionGraph
study_pkl = glob.glob(os.path.join(args.directory, "*.pkl"))
# We expect only a single pickle file.
if len(study_pkl) == 1:
dag = ExecutionGraph.unpickle(study_pkl[0])
else:
if len(study_pkl) > 1:
msg = "More than one pickle found. Expected only one. Aborting."
status = 2
else:
msg = "No pickle found. Aborting."
status = 1
sys.stderr.write(msg)
sys.exit(status)
# Set up logging
setup_logging(args, dag.name)
# Use ExecutionGraph API to determine next jobs to be launched.
logger.info("Checking the ExecutionGraph for study '%s' located in "
"%s...", dag.name, study_pkl[0])
logger.info("Study Description: %s", dag.description)
study_complete = False
while not study_complete:
logger.info("Checking DAG status at %s", str(datetime.now()))
# Execute steps that are ready
study_complete = dag.execute_ready_steps()
# Re-pickle the ExecutionGraph.
dag.pickle(study_pkl[0])
# Sleep for SLEEPTIME in args
sleep(args.sleeptime)
# Explicitly return a 0 status.
sys.exit(0)
if __name__ == "__main__":
main()
```
**avg_line_length:** 37.883041 · **max_line_length:** 79 · **alphanum_fraction:** 0.628435
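A hypothetical invocation of the conductor's argument parser, assuming a study directory `./sample_study` containing one ExecutionGraph pickle:

```python
parser = setup_argparser()
args = parser.parse_args(["./sample_study", "--status", "--debug_lvl", "1"])
print(args.directory, args.status, args.debug_lvl)  # ./sample_study True 1
```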
---
**hexsha:** f76809be6510d2224726358af250aea09c174ecb · **size:** 1,471 · **ext:** py · **lang:** Python
**path:** src/sendMail.py · **repo:** smrnjeet222/CertificateGenerator @ 7567e3fc2b46c529d76e68e1be12921d8582fc35 (same for stars/issues/forks) · **licenses:** MIT · **counts:** stars null · issues null · forks 2 (2020-07-26T17:03:02.000Z to 2021-05-02T20:09:42.000Z)

```python
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import src.config as con
FROM = con.MAIL
def sendMail(name, toaddr):
# instance of MIMEMultipart
filename = name.replace(" ", "_").lower()+'.pdf'
file_path = f"output/{filename}"
msg = MIMEMultipart()
# storing the senders email address
msg['From'] = FROM
# storing the receivers email address
msg['To'] = toaddr
msg['Subject'] = con.SUBJECT
body = con.body(name)
# attach the body with the msg instance
msg.attach(MIMEText(body, 'plain'))
    # instance of MIMEBase and named as p
    p = MIMEBase('application', 'octet-stream')
    # To change the payload into encoded form
    with open(file_path, "rb") as attachment:
        p.set_payload(attachment.read())
# encode into base64
encoders.encode_base64(p)
p.add_header('Content-Disposition', f"attachment; filename= {filename}")
# attach the instance 'p' to instance 'msg'
msg.attach(p)
# creates SMTP session
s = smtplib.SMTP('smtp.gmail.com', 587)
# start TLS for security
s.starttls()
# Authentication
s.login(FROM, con.PASS)
# Converts the Multipart msg into a string
text = msg.as_string()
# sending the mail
s.sendmail(FROM, toaddr, text)
print(f"Mail Sent to {name} : {toaddr}\n")
# terminating the session
s.quit()
```
**avg_line_length:** 22.984375 · **max_line_length:** 76 · **alphanum_fraction:** 0.665534
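`sendMail` assumes a `src/config.py` exposing `MAIL`, `PASS`, `SUBJECT` and `body()`; a minimal sketch of that module (placeholder values, not from the original repo):

```python
# src/config.py (illustrative)
MAIL = "sender@example.com"   # address used as the sender
PASS = "app-password"         # app-specific password for SMTP login
SUBJECT = "Your certificate"

def body(name):
    return "Hi %s, please find your certificate attached." % name
```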
---
**hexsha:** b842effa8e3dfe5899abe828cdf2d55f4e8a903f · **size:** 4,308 · **ext:** py · **lang:** Python
**path:** lib/h8s/service.py · **repo:** enigmata/h8s-proto-py @ 4fa8a14bb80f0ac0fedc11d401412c15d2d033d7 (same for stars/issues/forks) · **licenses:** MIT · **counts:** null

```python
import os
import importlib
import re
class Service():
"""
A Service class is the generic representation of a service, which is
responsible for carrying out required functions of the service, and
maintaining its own lifecycle.
    The attributes of a Service:
    Name: Single word uniquely identifying the service's responsibility
    Description: One-line English description of the service
    Version: Triple: <version>.<release>.<modification>
    Commands: Behaviours of the service, actions to change state of the service
    State: State of the service as it progresses through its lifecycle
NOTE: You do not instantiate the Service class. A service will specialize
the Service class as a subclass which is then instantiated.
"""
def __init__(self, path, name, description, version):
self.name = name
self.description = description
self.version = version
self.state = None # TBD
self.commands = {}
self.COMMANDS_IDX_MOD = 0 # command's python module
self.COMMANDS_IDX_OBJ = 1 # command's python object
self.COMMANDS_IDX_VER = 2 # version of the command (from filename)
self.COMMANDS_IDX_SAW = 3 # highest cmd version found in file system
self.re_command_fname = re.compile(r'^(([^_.].*)_v(\d+))\.py$')
self.root_dir = path
self.cmd_dir = os.path.join(self.root_dir, 'commands')
self.service_name = os.path.basename(self.root_dir)
def getName(self):
return self.name
def getServiceName(self):
return self.service_name
def getDescription(self):
return self.description
def getVersion(self):
return self.version
def getState(self):
return self.state
def getPath(self):
return self.root_dir
def load_commands(self):
for command_fname in os.listdir(self.cmd_dir):
m = self.re_command_fname.match(command_fname)
if m and os.path.isfile(os.path.join(self.cmd_dir, command_fname)):
command_name = m.group(2)
command_ver = int(m.group(3))
if command_name in self.commands and command_ver > self.commands[command_name][self.COMMANDS_IDX_VER]:
del self.commands[command_name][self.COMMANDS_IDX_OBJ]
del self.commands[command_name][self.COMMANDS_IDX_MOD]
del self.commands[command_name]
if command_name not in self.commands:
command_fullname = m.group(1)
command_mod = importlib.import_module('services.'+self.service_name+'.commands.'+command_fullname)
command_obj = getattr( command_mod, command_name.capitalize() )(self, command_ver)
self.commands[command_name] = [ command_mod, command_obj, command_ver, command_ver ]
else:
if command_ver > self.commands[command_name][self.COMMANDS_IDX_SAW]:
self.commands[command_name][self.COMMANDS_IDX_SAW] = command_ver
        for command_name in list(self.commands):
if self.commands[command_name][self.COMMANDS_IDX_SAW] == self.commands[command_name][self.COMMANDS_IDX_VER]:
self.commands[command_name][self.COMMANDS_IDX_SAW] = 0
else:
command_ver = self.commands[command_name][self.COMMANDS_IDX_SAW]
del self.commands[command_name][self.COMMANDS_IDX_OBJ]
del self.commands[command_name][self.COMMANDS_IDX_MOD]
del self.commands[command_name]
if command_ver > 0:
                    command_mod = importlib.import_module('services.'+self.service_name+'.commands.'+command_name+'_v'+str(command_ver))
command_obj = getattr( command_mod, command_name.capitalize() )(self, command_ver)
self.commands[command_name] = [ command_mod, command_obj, command_ver, 0 ]
def command_interfaces(self):
for command in self.commands:
yield self.commands[command][self.COMMANDS_IDX_OBJ].getInterface()
def execute_command(self, command, args):
return self.commands[command][self.COMMANDS_IDX_OBJ].execute(args)
```
**avg_line_length:** 43.959184 · **max_line_length:** 131 · **alphanum_fraction:** 0.645311
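`load_commands()` relies on the filename convention `commands/<name>_v<version>.py`; a minimal check of the regex it uses:

```python
import re

m = re.compile(r'^(([^_.].*)_v(\d+))\.py$').match('status_v3.py')
print(m.group(1), m.group(2), int(m.group(3)))  # status_v3 status 3
```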
---
**hexsha:** 4a479896f1d1459a4ff434d914f4af1881e24e9e · **size:** 1,390 · **ext:** py · **lang:** Python
**stars:** dependencies/panda/Panda3D-1.10.0-x64/direct/interval/MopathInterval.py · CrankySupertoon01/Toontown-2 @ 60893d104528a8e7eb4aced5d0015f22e203466d · MIT · 3 stars (2018-03-09T12:07:29.000Z to 2021-02-25T06:50:25.000Z)
**issues:** direct/src/interval/MopathInterval.py · Sinkay/panda3d @ 16bfd3750f726a8831771b81649d18d087917fd5 · PHP-3.01, PHP-3.0 · 1 issue (2018-07-28T20:07:04.000Z to 2018-07-30T18:28:34.000Z)
**forks:** direct/src/interval/MopathInterval.py · Sinkay/panda3d @ 16bfd3750f726a8831771b81649d18d087917fd5 · PHP-3.01, PHP-3.0 · 2 forks (2019-12-02T01:39:10.000Z to 2021-02-13T22:41:00.000Z)

```python
"""MopathInterval module: contains the MopathInterval class"""
__all__ = ['MopathInterval']
from . import LerpInterval
from panda3d.core import *
from panda3d.direct import *
from direct.directnotify.DirectNotifyGlobal import *
# import Mopath
class MopathInterval(LerpInterval.LerpFunctionInterval):
# Name counter
mopathNum = 1
# create MopathInterval DirectNotify category
notify = directNotify.newCategory('MopathInterval')
# Class methods
    def __init__(self, mopath, node, fromT = 0, toT = None,
                 duration = None, blendType = 'noBlend', name = None):
        if toT is None:
            toT = mopath.getMaxT()
        if duration is None:
            duration = abs(toT - fromT)
        # Generate unique name if necessary
        if name is None:
            name = 'Mopath-%d' % MopathInterval.mopathNum
            MopathInterval.mopathNum += 1
LerpInterval.LerpFunctionInterval.__init__(
self, self.__doMopath, fromData = fromT, toData = toT,
duration = duration, blendType = blendType,
name = name)
self.mopath = mopath
self.node = node
def destroy(self):
"""Cleanup to avoid a garbage cycle."""
self.function = None
def __doMopath(self, t):
"""
Go to time t
"""
self.mopath.goTo(self.node, t)
```
**avg_line_length:** 28.367347 · **max_line_length:** 70 · **alphanum_fraction:** 0.607194
---
**hexsha:** cb92ced5c698a78273441d98397b8dbb18557e7e · **size:** 6,300 · **ext:** py · **lang:** Python
**stars:** frappe/utils/redis_wrapper.py · vinhnguyent090/frappe @ 636d9442e2ccf0cfac2d5aace427d8de05f63bd9 · MIT · 5 stars (2017-09-12T15:56:31.000Z to 2022-03-09T13:50:21.000Z)
**issues:** frappe/utils/redis_wrapper.py · alexbow2008/frappe @ ce592a40b4c5e80a9c6cbdc541105218bf98c966 · MIT · 7 issues (2020-03-24T18:15:04.000Z to 2022-02-10T20:52:45.000Z)
**forks:** frappe/utils/redis_wrapper.py · alexbow2008/frappe @ ce592a40b4c5e80a9c6cbdc541105218bf98c966 · MIT · 8 forks (2019-04-21T07:49:50.000Z to 2021-12-24T20:20:38.000Z)

```python
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import redis, frappe, re
from six.moves import cPickle as pickle
from frappe.utils import cstr
from six import iteritems
class RedisWrapper(redis.Redis):
"""Redis client that will automatically prefix conf.db_name"""
def connected(self):
try:
self.ping()
return True
except redis.exceptions.ConnectionError:
return False
def make_key(self, key, user=None, shared=False):
if shared:
return key
if user:
if user == True:
user = frappe.session.user
key = "user:{0}:{1}".format(user, key)
return "{0}|{1}".format(frappe.conf.db_name, key).encode('utf-8')
def set_value(self, key, val, user=None, expires_in_sec=None):
"""Sets cache value.
:param key: Cache key
:param val: Value to be cached
:param user: Prepends key with User
:param expires_in_sec: Expire value of this key in X seconds
"""
key = self.make_key(key, user)
if not expires_in_sec:
frappe.local.cache[key] = val
try:
if expires_in_sec:
self.setex(key, pickle.dumps(val), expires_in_sec)
else:
self.set(key, pickle.dumps(val))
except redis.exceptions.ConnectionError:
return None
def get_value(self, key, generator=None, user=None, expires=False):
"""Returns cache value. If not found and generator function is
given, it will call the generator.
:param key: Cache key.
:param generator: Function to be called to generate a value if `None` is returned.
:param expires: If the key is supposed to be with an expiry, don't store it in frappe.local
"""
original_key = key
key = self.make_key(key, user)
if key in frappe.local.cache:
val = frappe.local.cache[key]
else:
val = None
try:
val = self.get(key)
except redis.exceptions.ConnectionError:
pass
if val is not None:
val = pickle.loads(val)
if not expires:
if val is None and generator:
val = generator()
self.set_value(original_key, val, user=user)
else:
frappe.local.cache[key] = val
return val
	def get_all(self, key):
		ret = {}
		for k in self.get_keys(key):
			ret[k] = self.get_value(k)
		return ret
def get_keys(self, key):
"""Return keys starting with `key`."""
try:
key = self.make_key(key + "*")
return self.keys(key)
except redis.exceptions.ConnectionError:
			regex = re.compile(cstr(key).replace("|", r"\|").replace("*", r"[\w]*"))
return [k for k in list(frappe.local.cache) if regex.match(k.decode())]
def delete_keys(self, key):
"""Delete keys with wildcard `*`."""
try:
self.delete_value(self.get_keys(key), make_keys=False)
except redis.exceptions.ConnectionError:
pass
def delete_key(self, *args, **kwargs):
self.delete_value(*args, **kwargs)
def delete_value(self, keys, user=None, make_keys=True, shared=False):
"""Delete value, list of values."""
if not isinstance(keys, (list, tuple)):
keys = (keys, )
for key in keys:
if make_keys:
key = self.make_key(key, shared=shared)
if key in frappe.local.cache:
del frappe.local.cache[key]
try:
self.delete(key)
except redis.exceptions.ConnectionError:
pass
def lpush(self, key, value):
super(RedisWrapper, self).lpush(self.make_key(key), value)
def rpush(self, key, value):
super(RedisWrapper, self).rpush(self.make_key(key), value)
def lpop(self, key):
return super(RedisWrapper, self).lpop(self.make_key(key))
def llen(self, key):
return super(RedisWrapper, self).llen(self.make_key(key))
def hset(self, name, key, value, shared=False):
_name = self.make_key(name, shared=shared)
# set in local
if not _name in frappe.local.cache:
frappe.local.cache[_name] = {}
frappe.local.cache[_name][key] = value
# set in redis
try:
super(RedisWrapper, self).hset(_name,
key, pickle.dumps(value))
except redis.exceptions.ConnectionError:
pass
def hgetall(self, name):
return {key: pickle.loads(value) for key, value in
iteritems(super(RedisWrapper, self).hgetall(self.make_key(name)))}
def hget(self, name, key, generator=None, shared=False):
_name = self.make_key(name, shared=shared)
if not _name in frappe.local.cache:
frappe.local.cache[_name] = {}
if key in frappe.local.cache[_name]:
return frappe.local.cache[_name][key]
value = None
try:
value = super(RedisWrapper, self).hget(_name, key)
except redis.exceptions.ConnectionError:
pass
if value:
value = pickle.loads(value)
frappe.local.cache[_name][key] = value
elif generator:
value = generator()
try:
self.hset(name, key, value)
except redis.exceptions.ConnectionError:
pass
return value
def hdel(self, name, key, shared=False):
_name = self.make_key(name, shared=shared)
if _name in frappe.local.cache:
if key in frappe.local.cache[_name]:
del frappe.local.cache[_name][key]
try:
super(RedisWrapper, self).hdel(_name, key)
except redis.exceptions.ConnectionError:
pass
def hdel_keys(self, name_starts_with, key):
"""Delete hash names with wildcard `*` and key"""
for name in frappe.cache().get_keys(name_starts_with):
name = name.split("|", 1)[1]
self.hdel(name, key)
def hkeys(self, name):
try:
return super(RedisWrapper, self).hkeys(self.make_key(name))
except redis.exceptions.ConnectionError:
return []
def sadd(self, name, *values):
"""Add a member/members to a given set"""
super(RedisWrapper, self).sadd(self.make_key(name), *values)
def srem(self, name, *values):
"""Remove a specific member/list of members from the set"""
super(RedisWrapper, self).srem(self.make_key(name), *values)
def sismember(self, name, value):
"""Returns True or False based on if a given value is present in the set"""
return super(RedisWrapper, self).sismember(self.make_key(name), value)
def spop(self, name):
"""Removes and returns a random member from the set"""
return super(RedisWrapper, self).spop(self.make_key(name))
def srandmember(self, name, count=None):
"""Returns a random member from the set"""
return super(RedisWrapper, self).srandmember(self.make_key(name))
def smembers(self, name):
"""Return all members of the set"""
return super(RedisWrapper, self).smembers(self.make_key(name))
```
**avg_line_length:** 26.923077 · **max_line_length:** 93 · **alphanum_fraction:** 0.694921
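A sketch of how `make_key()` namespaces cache keys, assuming a running Frappe context with `frappe.conf.db_name = "testdb"` (`frappe.cache()` returns a `RedisWrapper`, as used in `hdel_keys` above):

```python
cache = frappe.cache()
cache.make_key("doc_count", shared=True)   # "doc_count" (no prefix)
cache.make_key("doc_count")                # b"testdb|doc_count"
cache.make_key("prefs", user="alice")      # b"testdb|user:alice:prefs"
```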
---
**hexsha:** 92d092c059fea3857092e35472c3865c7ccc5572 · **size:** 8,767 · **ext:** py · **lang:** Python
**path:** graph.py · **repo:** Goffayoh/py_graphpath @ aed73eea19daa860ad365703fa2bfeaba5f0eefe (same for stars/issues/forks) · **licenses:** CNRI-Python · **counts:** null

```python
# coding: utf-8
import numpy as np
# Definition of the Graph class: holds the structure of a graph as well as the
# functions implementing the two rendezvous algorithms.
class Graph:
    # Graph structure
def __init__(self, data):
self.size = data["nbNoeuds"]
self.sommetsList = list(data["nomSommets"])
self.rdvList = list(data["nomRdv"])
self.sommetsIniList = list(data["nomSommetsInitiaux"])
self.arcs = list(data["arcs"])
self.error = 0
if self.sommetsList.__len__() != data["nbNoeuds"]:
self.error = 1
if self.rdvList.__len__() != data["nbLieuxRdv"]:
self.error = 2
        # These attributes are used during the recursive graph traversal in algorithm 2
        # path stores the current path
        self.path = np.zeros(self.size ** 2)
        # bool tracks the vertices we have already visited
        self.bool = np.zeros(self.size ** 2)
        # target is the rendezvous point currently being tested
        self.target = 0
        # min stores the length of the shortest path found so far
        self.min = np.inf
        # res holds the paths of length min
self.res = []
    def get_error(self):
        return self.error
    # Returns the distance matrix of the graph
def mat_graph(self):
mat = np.full((self.size, self.size), np.inf)
for arc in self.arcs:
mat[self.pos_sommet(arc["sommetInitial"])
, self.pos_sommet(arc["sommetTerminal"])] = arc["duree"]
return mat
    # Returns the integer index of a graph vertex, used to walk the matrices
def pos_sommet(self, char):
return self.sommetsList.index(char)
    # Returns the distance matrix of a graph whose vertices are the pairs of vertices of the
    # initial graph and whose arcs come from the arcs of the initial graph, such that for a,b,i in S:
    # (a,b) -> ((a,i),(b,i)) and (a,b) -> ((i,a),(i,b))
def transform(self, mat):
size_pair = self.size * self.size
mat_pair = np.full((size_pair, size_pair), np.inf)
for i in range(self.size):
for j in range(self.size):
for k in range(self.size):
for l in range(self.size):
if i == k and j == l :
mat_pair[i * self.size + j, k * self.size + l] \
= np.inf
elif i == k :
mat_pair[i * self.size + j, k * self.size + l] \
= mat[j, l]
elif j == l :
mat_pair[i * self.size + j, k * self.size + l] \
= mat[i, k]
return mat_pair
    # First algorithm: determines the optimal rendezvous point by comparing the shortest
    # distance between the initial vertex (a,b) (where a and b are the two starting points)
    # and every possible rendezvous point (i,i) (where i ranges over the arrival points)
def rdv_optimal(self):
mat_pcd = self.mat_pcd(self.transform(self.mat_graph()))
        # initial vertex
        init = self.pos_sommet(self.sommetsIniList[0]) * self.size + self.pos_sommet(self.sommetsIniList[1])
        # build a list of the possible rendezvous points
        rdv = []
        for c in self.rdvList:
            rdv.append(self.pos_sommet(c) * self.size + self.pos_sommet(c))
        res = np.inf
        fin = np.inf
        # search for the shortest distance
for i in range(len(rdv)):
if mat_pcd[init, rdv[i]] < res:
res = mat_pcd[init, rdv[i]]
fin = i
if fin != np.inf:
return str(self.rdvList[fin])
else:
return ""
    # Takes a distance matrix as input and returns the corresponding matrix of shortest
    # distances, computed with the Floyd-Warshall algorithm
def mat_pcd(self, mat):
size_pair = self.size * self.size
for i in range(size_pair):
mat[i, i] = 0
for k in range(size_pair):
for i in range(size_pair):
for j in range(size_pair):
mat[i, j] = min(mat[i, j], mat[i, k] + mat[k, j])
return mat
    # Recursive function that walks the graph depth-first to collect all possible paths
    # between a starting point and a target. The depth is bounded by the length of a minimal
    # path already found, to limit the number of recursive calls
def explore(self, mat, position, depth):
if depth > self.min:
return
self.path[depth] = position
if position == self.target:
if self.min == depth:
                # If a path of the same length was already found, also save the new path,
                # since it may have a shorter duration
                self.res.append(np.copy(self.path[0:depth + 1]).astype(int).tolist())
            if self.min > depth:
                # If a new minimal path is found, overwrite the result with this new path
                # and update the minimal number of steps
self.min = depth
self.res = [np.copy(self.path[0:depth + 1]).astype(int).tolist()]
return
        # Mark this vertex: we have passed through it once
self.bool[position] = 1
for i in range(self.size ** 2):
            # If there is no arc to this vertex, or it was already visited, skip it
if mat[position][i] == np.inf or self.bool[i] == 1:
continue
            # Otherwise explore through this vertex
self.explore(mat, i, depth + 1)
        # remove the mark
self.bool[position] = 0
return
    # Implements algorithm 2
def rdv_optimal2(self):
size_pair = self.size * self.size
mat = self.transform(self.mat_graph())
        # Initial vertex
        init = self.pos_sommet(self.sommetsIniList[0]) * self.size + self.pos_sommet(self.sommetsIniList[1])
        # Build a list of the possible rendezvous points
rdv = []
for c in self.rdvList:
rdv.append(self.pos_sommet(c) * self.size + self.pos_sommet(c))
roadres = []
sizemin = np.inf
        # For each rendezvous point, build a list of minimal-length paths from the starting
        # point to that rendezvous point. Meanwhile, keep the length of the shortest path found in sizemin
for i in range(len(rdv)):
            # Before each exploration, reset the state variables and pick the right target
self.bool = np.zeros(self.size ** 2)
self.path = np.zeros(self.size ** 2)
self.min = np.inf
self.target = rdv[i]
self.explore(mat, init, 0)
if len(self.res[0]) < sizemin:
sizemin = len(self.res[0])
roadres.append(self.res)
candidat = []
        # Keep only the rendezvous points whose paths have minimal length
for i in range(len(roadres)):
if (len(roadres[i][0])) == sizemin:
candidat.append(roadres[i])
distmin = np.inf
resfinal = np.inf
        # Check how many candidates we obtained
if len(candidat) > 1:
            # When there are several candidates, look for the one whose path is shortest in
            # terms of duration among those of minimal length
for i in range(len(candidat)):
dist = 0
for u in range(len(candidat[i])):
for j in range(sizemin - 1):
                        # compute the duration of the path
dist = dist + mat[candidat[i][u][j]][candidat[i][u][j + 1]]
if dist < distmin:
                    # If a new minimal duration is found, save it along with the rendezvous
                    # point at the end of the path
resfinal = candidat[i][0][sizemin - 1]
distmin = dist
else:
            # If there is a single candidate, retrieve it by taking the rendezvous point,
            # which is the last element of the saved paths
resfinal = candidat[0][0][sizemin - 1]
return str(self.sommetsList[(resfinal // (self.size+1))])
```
**avg_line_length:** 47.389189 · **max_line_length:** 120 · **alphanum_fraction:** 0.570549
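The Graph class flattens a pair of vertices (a, b) of an n-vertex graph to the single index a * n + b, so a rendezvous vertex i corresponds to the diagonal pair index i * (n + 1); a tiny check of that encoding:

```python
n = 4
a, b = 1, 3
pair = a * n + b                      # 7
assert (pair // n, pair % n) == (a, b)
i = 2
assert i * n + i == i * (n + 1)       # why rdv_optimal2 divides by size + 1
```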
---
**hexsha:** e8addd2e5900990c979cf594c5b698b208713bd2 · **size:** 66,692 · **ext:** py · **lang:** Python
**path:** px.py · **repo:** bedla/px @ d6670f27babfc948bb03141d0e95ce26865ba461 (same for stars/issues/forks) · **licenses:** MIT · **counts:** 1 star (2020-08-06T12:36:44.000Z to 2020-08-06T12:36:44.000Z) · issues null · forks null

```python
"Px is an HTTP proxy server to automatically authenticate through an NTLM proxy"
from __future__ import print_function
__version__ = "0.4.0"
import base64
import ctypes
import ctypes.wintypes
import multiprocessing
import os
import select
import signal
import socket
import sys
import threading
import time
import traceback
# Print if possible
def pprint(*objs):
try:
print(*objs)
except:
pass
# Dependencies
try:
import concurrent.futures
except ImportError:
pprint("Requires module futures")
sys.exit()
try:
import netaddr
except ImportError:
pprint("Requires module netaddr")
sys.exit()
try:
import psutil
except ImportError:
pprint("Requires module psutil")
sys.exit()
try:
import pywintypes
import sspi
except ImportError:
pprint("Requires module pywin32")
sys.exit()
try:
import winkerberos
except ImportError:
pprint("Requires module winkerberos")
sys.exit()
try:
import ntlm_auth.ntlm
except ImportError:
pprint("Requires module ntlm-auth")
sys.exit()
try:
import keyring
import keyring.backends.Windows
keyring.set_keyring(keyring.backends.Windows.WinVaultKeyring())
except ImportError:
pprint("Requires module keyring")
sys.exit()
# Python 2.x vs 3.x support
try:
import configparser
import http.server as httpserver
import socketserver
import urllib.parse as urlparse
import winreg
except ImportError:
import ConfigParser as configparser
import SimpleHTTPServer as httpserver
import SocketServer as socketserver
import urlparse
import _winreg as winreg
os.getppid = psutil.Process().ppid
PermissionError = WindowsError
HELP = """Px v%s
An HTTP proxy server to automatically authenticate through an NTLM proxy
Usage:
px [FLAGS]
python px.py [FLAGS]
Actions:
--save
Save configuration to px.ini or file specified with --config
Allows setting up Px config directly from command line
Values specified on CLI override any values in existing config file
Values not specified on CLI or config file are set to defaults
--install
Add Px to the Windows registry to run on startup
--uninstall
Remove Px from the Windows registry
--quit
Quit a running instance of Px.exe
Configuration:
--config=
Specify config file. Valid file path, default: px.ini in working directory
--proxy= --server= proxy:server= in INI file
NTLM server(s) to connect through. IP:port, hostname:port
Multiple proxies can be specified comma separated. Px will iterate through
and use the one that works. Required field unless --noproxy is defined. If
remote server is not in noproxy list and proxy is undefined, Px will reject
the request
--pac= proxy:pac=
PAC file to use to connect
Use in place of server if PAC file should be loaded from a custom URL or
file location instead of from Internet Options
--listen= proxy:listen=
IP interface to listen on. Valid IP address, default: 127.0.0.1
--port= proxy:port=
Port to run this proxy. Valid port number, default: 3128
--gateway proxy:gateway=
Allow remote machines to use proxy. 0 or 1, default: 0
Overrides 'listen' and binds to all interfaces
--hostonly proxy:hostonly=
Allow only local interfaces to use proxy. 0 or 1, default: 0
Px allows all IP addresses assigned to local interfaces to use the service.
This allows local apps as well as VM or container apps to use Px when in a
NAT config. Px does this by listening on all interfaces and overriding the
allow list.
--allow= proxy:allow=
Allow connection from specific subnets. Comma separated, default: *.*.*.*
Whitelist which IPs can use the proxy. --hostonly overrides any definitions
unless --gateway mode is also specified
127.0.0.1 - specific ip
192.168.0.* - wildcards
192.168.0.1-192.168.0.255 - ranges
192.168.0.1/24 - CIDR
--noproxy= proxy:noproxy=
Direct connect to specific subnets like a regular proxy. Comma separated
Skip the NTLM proxy for connections to these subnets
127.0.0.1 - specific ip
192.168.0.* - wildcards
192.168.0.1-192.168.0.255 - ranges
192.168.0.1/24 - CIDR
--useragent= proxy:useragent=
Override or send User-Agent header on client's behalf
--username= proxy:username=
Authentication to use when SSPI is unavailable. Format is domain\\username
Service name "Px" and this username are used to retrieve the password using
Python keyring. Px only retrieves credentials and storage should be done
directly in the keyring backend.
  On Windows, Credential Manager is the backend and can be accessed from
Control Panel > User Accounts > Credential Manager > Windows Credentials.
Create a generic credential with Px as the network address, this username
and corresponding password.
--auth= proxy:auth=
Force instead of discovering upstream proxy type
By default, Px will attempt to discover the upstream proxy type and either
use pywin32/ntlm-auth for NTLM auth or winkerberos for Kerberos or Negotiate
auth. This option will force either NTLM, Kerberos or Basic and not query the
upstream proxy type.
--workers= settings:workers=
Number of parallel workers (processes). Valid integer, default: 2
--threads= settings:threads=
Number of parallel threads per worker (process). Valid integer, default: 5
--idle= settings:idle=
Idle timeout in seconds for HTTP connect sessions. Valid integer, default: 30
--socktimeout= settings:socktimeout=
Timeout in seconds for connections before giving up. Valid float, default: 20
--proxyreload= settings:proxyreload=
Time interval in seconds before refreshing proxy info. Valid int, default: 60
Proxy info reloaded from a PAC file found via WPAD or AutoConfig URL, or
manual proxy info defined in Internet Options
--foreground settings:foreground=
Run in foreground when frozen or with pythonw.exe. 0 or 1, default: 0
Px will attach to the console and write to it even though the prompt is
available for further commands. CTRL-C in the console will exit Px
--debug settings:log=
Enable debug logging. default: 0
Logs are written to working directory and over-written on startup
A log is automatically created if Px crashes for some reason
--uniqlog
Generate unique log file names
Prevents logs from being overwritten on subsequent runs. Also useful if
running multiple instances of Px""" % __version__
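# For illustration only: a minimal px.ini matching the section:key names
# documented in HELP above (all values are placeholders):
#
#   [proxy]
#   server = proxy.example.com:8080
#   listen = 127.0.0.1
#   port = 3128
#
#   [settings]
#   workers = 2
#   threads = 5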
# Windows version
# 6.1 = Windows 7
# 6.2 = Windows 8
# 6.3 = Windows 8.1
# 10.0 = Windows 10
WIN_VERSION = float(
str(sys.getwindowsversion().major) + "." +
str(sys.getwindowsversion().minor))
# Proxy modes - source of proxy info
MODE_NONE = 0
MODE_CONFIG = 1
MODE_AUTO = 2
MODE_PAC = 3
MODE_MANUAL = 4
MODE_CONFIG_PAC = 5
class State(object):
allow = netaddr.IPGlob("*.*.*.*")
config = None
domain = ""
exit = False
hostonly = False
logger = None
noproxy = netaddr.IPSet([])
noproxy_hosts = []
pac = ""
proxy_mode = MODE_NONE
proxy_refresh = None
proxy_server = []
proxy_type = {}
stdout = None
useragent = ""
username = ""
auth = None
ini = "px.ini"
max_disconnect = 3
max_line = 65536 + 1
    # Locks for thread synchronization;
    # multiprocess sync isn't necessary because the State object is only
    # shared by threads; every process has its own State object
proxy_type_lock = threading.Lock()
proxy_mode_lock = threading.Lock()
class Response(object):
__slots__ = ["code", "length", "headers", "data", "body", "chunked", "close"]
def __init__(self, code=503):
self.code = code
self.length = 0
self.headers = []
self.data = None
self.body = False
self.chunked = False
self.close = False
class Log(object):
def __init__(self, name, mode):
self.file = open(name, mode)
self.stdout = sys.stdout
self.stderr = sys.stderr
sys.stdout = self
sys.stderr = self
def close(self):
sys.stdout = self.stdout
sys.stderr = self.stderr
self.file.close()
def write(self, data):
try:
self.file.write(data)
except:
pass
if self.stdout is not None:
self.stdout.write(data)
self.flush()
def flush(self):
self.file.flush()
os.fsync(self.file.fileno())
if self.stdout is not None:
self.stdout.flush()
def dprint(msg):
if State.logger is not None:
        # Build the output as a single string and write it in one call so that
        # output from different threads doesn't interleave
sys.stdout.write(
multiprocessing.current_process().name + ": " +
threading.current_thread().name + ": " + str(int(time.time())) +
": " + sys._getframe(1).f_code.co_name + ": " + msg + "\n")
def dfile():
name = multiprocessing.current_process().name
if "--quit" in sys.argv:
name = "quit"
if "--uniqlog" in sys.argv:
name = "%s-%f" % (name, time.time())
logfile = os.path.join(os.path.dirname(get_script_path()),
"debug-%s.log" % name)
return logfile
def reopen_stdout():
clrstr = "\r" + " " * 80 + "\r"
if State.logger is None:
State.stdout = sys.stdout
sys.stdout = open("CONOUT$", "w")
sys.stdout.write(clrstr)
else:
State.stdout = State.logger.stdout
State.logger.stdout = open("CONOUT$", "w")
State.logger.stdout.write(clrstr)
def restore_stdout():
if State.logger is None:
sys.stdout.close()
sys.stdout = State.stdout
else:
State.logger.stdout.close()
State.logger.stdout = State.stdout
###
# Auth support
def b64decode(val):
try:
return base64.decodebytes(val.encode("utf-8"))
except AttributeError:
return base64.decodestring(val)
def b64encode(val):
try:
return base64.encodebytes(val.encode("utf-8"))
except AttributeError:
return base64.encodestring(val)
class AuthMessageGenerator:
def __init__(self, proxy_type, proxy_server_address):
pwd = ""
if State.username:
key = State.username
if State.domain != "":
key = State.domain + "\\" + State.username
pwd = keyring.get_password("Px", key)
if proxy_type == "NTLM":
if not pwd:
self.ctx = sspi.ClientAuth("NTLM",
os.environ.get("USERNAME"), scflags=0)
self.get_response = self.get_response_sspi
else:
self.ctx = ntlm_auth.ntlm.NtlmContext(
State.username, pwd, State.domain, "", ntlm_compatibility=3)
self.get_response = self.get_response_ntlm
elif proxy_type == "BASIC":
if not State.username:
dprint("No username configured for Basic authentication")
elif not pwd:
dprint("No password configured for Basic authentication")
else:
# Colons are forbidden in usernames and passwords for basic auth
# but since this can happen very easily, we make a special check
# just for colons so people immediately understand that and don't
# have to look up other resources.
if ":" in State.username or ":" in pwd:
dprint("Credentials contain invalid colon character")
else:
# Additionally check for invalid control characters as per
# RFC5234 Appendix B.1 (section CTL)
illegal_control_characters = "".join(
chr(i) for i in range(0x20)) + "\u007F"
if any(char in State.username or char in pwd
for char in illegal_control_characters):
dprint("Credentials contain invalid characters: %s" % ", ".join("0x" + "%x" % ord(char) for char in illegal_control_characters))
else:
# Remove newline appended by base64 function
self.ctx = b64encode(
"%s:%s" % (State.username, pwd))[:-1].decode()
self.get_response = self.get_response_basic
else:
principal = None
if pwd:
if State.domain:
principal = (urlparse.quote(State.username) + "@" +
urlparse.quote(State.domain) + ":" + urlparse.quote(pwd))
else:
principal = (urlparse.quote(State.username) + ":" +
urlparse.quote(pwd))
_, self.ctx = winkerberos.authGSSClientInit("HTTP@" +
proxy_server_address, principal=principal, gssflags=0,
mech_oid=winkerberos.GSS_MECH_OID_SPNEGO)
self.get_response = self.get_response_wkb
def get_response_sspi(self, challenge=None):
dprint("pywin32 SSPI")
if challenge:
challenge = b64decode(challenge)
output_buffer = None
try:
error_msg, output_buffer = self.ctx.authorize(challenge)
except pywintypes.error:
traceback.print_exc(file=sys.stdout)
return None
response_msg = b64encode(output_buffer[0].Buffer)
response_msg = response_msg.decode("utf-8").replace('\012', '')
return response_msg
def get_response_wkb(self, challenge=""):
dprint("winkerberos SSPI")
try:
winkerberos.authGSSClientStep(self.ctx, challenge)
auth_req = winkerberos.authGSSClientResponse(self.ctx)
except winkerberos.GSSError:
traceback.print_exc(file=sys.stdout)
return None
return auth_req
def get_response_ntlm(self, challenge=""):
dprint("ntlm-auth")
if challenge:
challenge = b64decode(challenge)
response_msg = b64encode(self.ctx.step(challenge))
response_msg = response_msg.decode("utf-8").replace('\012', '')
return response_msg
def get_response_basic(self, challenge=""):
dprint("basic")
return self.ctx
###
# Proxy handler
class Proxy(httpserver.SimpleHTTPRequestHandler):
protocol_version = "HTTP/1.1"
# Contains the proxy servers responsible for the url this Proxy instance
# (aka thread) serves
proxy_servers = []
proxy_socket = None
def handle_one_request(self):
try:
httpserver.SimpleHTTPRequestHandler.handle_one_request(self)
except socket.error as e:
dprint("Socket error: %s" % e)
if not hasattr(self, "_host_disconnected"):
self._host_disconnected = 1
dprint("Host disconnected")
elif self._host_disconnected < State.max_disconnect:
self._host_disconnected += 1
dprint("Host disconnected: %d" % self._host_disconnected)
else:
dprint("Closed connection to avoid infinite loop")
self.close_connection = True
def address_string(self):
host, port = self.client_address[:2]
#return socket.getfqdn(host)
return host
def log_message(self, format, *args):
dprint(format % args)
def do_socket_connect(self, destination=None):
# Already connected?
if self.proxy_socket is not None:
return True
dests = list(self.proxy_servers) if destination is None else [
destination]
for dest in dests:
dprint("New connection: " + str(dest))
proxy_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
proxy_socket.connect(dest)
self.proxy_address = dest
self.proxy_socket = proxy_socket
break
except Exception as e:
dprint("Connect failed: %s" % e)
                # move an unreachable proxy to the end of the proxy list;
                if len(self.proxy_servers) > 1:
                    # append first and then remove; this should ensure thread
                    # safety with manually configured proxies (in this case
                    # self.proxy_servers references the shared
                    # State.proxy_server)
self.proxy_servers.append(dest)
self.proxy_servers.remove(dest)
if self.proxy_socket is not None:
return True
return False
def do_socket(self, xheaders={}, destination=None):
dprint("Entering")
# Connect to proxy or destination
if not self.do_socket_connect(destination):
return Response(408)
# No chit chat on SSL
if destination is not None and self.command == "CONNECT":
return Response(200)
cl = 0
chk = False
expect = False
keepalive = False
ua = False
cmdstr = "%s %s %s\r\n" % (self.command, self.path, self.request_version)
self.proxy_socket.sendall(cmdstr.encode("utf-8"))
dprint(cmdstr.strip())
for header in self.headers:
hlower = header.lower()
if hlower == "user-agent" and State.useragent != "":
ua = True
h = "%s: %s\r\n" % (header, State.useragent)
else:
h = "%s: %s\r\n" % (header, self.headers[header])
self.proxy_socket.sendall(h.encode("utf-8"))
if hlower != "authorization":
dprint("Sending %s" % h.strip())
else:
dprint("Sending %s: sanitized len(%d)" % (
header, len(self.headers[header])))
if hlower == "content-length":
cl = int(self.headers[header])
elif (hlower == "expect" and
self.headers[header].lower() == "100-continue"):
expect = True
elif hlower == "proxy-connection":
keepalive = True
elif (hlower == "transfer-encoding" and
self.headers[header].lower() == "chunked"):
dprint("CHUNKED data")
chk = True
if not keepalive and self.request_version.lower() == "http/1.0":
xheaders["Proxy-Connection"] = "keep-alive"
if not ua and State.useragent != "":
xheaders["User-Agent"] = State.useragent
for header in xheaders:
h = ("%s: %s\r\n" % (header, xheaders[header])).encode("utf-8")
self.proxy_socket.sendall(h)
if header.lower() != "proxy-authorization":
dprint("Sending extra %s" % h.strip())
else:
dprint("Sending extra %s: sanitized len(%d)" % (
header, len(xheaders[header])))
self.proxy_socket.sendall(b"\r\n")
if self.command in ["POST", "PUT", "PATCH"]:
if not hasattr(self, "body"):
dprint("Getting body for POST/PUT/PATCH")
if cl:
self.body = self.rfile.read(cl)
else:
self.body = self.rfile.read()
dprint("Sending body for POST/PUT/PATCH: %d = %d" % (
cl or -1, len(self.body)))
self.proxy_socket.sendall(self.body)
self.proxy_fp = self.proxy_socket.makefile("rb")
resp = Response()
if self.command != "HEAD":
resp.body = True
# Response code
for i in range(2):
dprint("Reading response code")
line = self.proxy_fp.readline(State.max_line)
if line == b"\r\n":
line = self.proxy_fp.readline(State.max_line)
try:
resp.code = int(line.split()[1])
except (ValueError, IndexError):
dprint("Bad response %s" % line)
if line == b"":
dprint("Client closed connection")
return Response(444)
if (b"connection established" in line.lower() or
resp.code == 204 or resp.code == 304):
resp.body = False
dprint("Response code: %d " % resp.code + str(resp.body))
# Get response again if 100-Continue
if not (expect and resp.code == 100):
break
# Headers
dprint("Reading response headers")
while not State.exit:
            line = self.proxy_fp.readline(State.max_line).decode("utf-8")
            if line == "":
if self.proxy_socket:
self.proxy_socket.shutdown(socket.SHUT_WR)
self.proxy_socket.close()
self.proxy_socket = None
dprint("Proxy closed connection: %s" % resp.code)
return Response(444)
if line == "\r\n":
break
nv = line.split(":", 1)
if len(nv) != 2:
dprint("Bad header =>%s<=" % line)
continue
name = nv[0].strip()
value = nv[1].strip()
resp.headers.append((name, value))
if name.lower() != "proxy-authenticate":
dprint("Received %s: %s" % (name, value))
else:
dprint("Received %s: sanitized (%d)" % (name, len(value)))
if name.lower() == "content-length":
resp.length = int(value)
if not resp.length:
resp.body = False
elif (name.lower() == "transfer-encoding" and
value.lower() == "chunked"):
resp.chunked = True
resp.body = True
elif (name.lower() in ["proxy-connection", "connection"] and
value.lower() == "close"):
resp.close = True
return resp
def do_proxy_type(self):
# Connect to proxy
if not hasattr(self, "proxy_address"):
if not self.do_socket_connect():
return Response(408), None
State.proxy_type_lock.acquire()
try:
# Read State.proxy_type only once and use value for function return
# if it is not None; State.proxy_type should only be read here to
# avoid getting None after successfully identifying the proxy type
# if another thread clears it with load_proxy
proxy_type = State.proxy_type.get(self.proxy_address, State.auth)
if proxy_type is None:
# New proxy, don't know type yet
dprint("Searching proxy type")
resp = self.do_socket()
proxy_auth = ""
for header in resp.headers:
if header[0].lower() == "proxy-authenticate":
proxy_auth += header[1] + " "
for auth in proxy_auth.split():
auth = auth.upper()
if auth in ["NTLM", "KERBEROS", "NEGOTIATE", "BASIC"]:
proxy_type = auth
break
if proxy_type is not None:
# Writing State.proxy_type only once but use local variable
# as return value to avoid losing the query result (for the
# current request) by clearing State.proxy_type in load_proxy
State.proxy_type[self.proxy_address] = proxy_type
dprint("Auth mechanisms: " + proxy_auth)
dprint("Selected: " + str(self.proxy_address) + ": " +
str(proxy_type))
return resp, proxy_type
return Response(407), proxy_type
finally:
State.proxy_type_lock.release()
def do_transaction(self):
dprint("Entering")
ipport = self.get_destination()
if ipport not in [False, True]:
dprint("Skipping auth proxying")
resp = self.do_socket(destination=ipport)
elif ipport:
# Get proxy type directly from do_proxy_type instead by accessing
# State.proxy_type do avoid a race condition with clearing
# State.proxy_type in load_proxy which sometimes led to a proxy type
# of None (clearing State.proxy_type in one thread was done after
# another thread's do_proxy_type but before accessing
# State.proxy_type in the second thread)
resp, proxy_type = self.do_proxy_type()
if resp.code == 407:
# Unknown auth mechanism
if proxy_type is None:
dprint("Unknown auth mechanism expected")
return resp
# Generate auth message
ntlm = AuthMessageGenerator(proxy_type, self.proxy_address[0])
ntlm_resp = ntlm.get_response()
if ntlm_resp is None:
dprint("Bad auth response")
return Response(503)
self.fwd_data(resp, flush=True)
hconnection = ""
for i in ["connection", "Connection"]:
if i in self.headers:
hconnection = self.headers[i]
del self.headers[i]
dprint("Remove header %s: %s" % (i, hconnection))
# Send auth message
resp = self.do_socket({
"Proxy-Authorization": "%s %s" % (proxy_type, ntlm_resp),
"Proxy-Connection": "Keep-Alive"
})
if resp.code == 407:
dprint("Auth required")
ntlm_challenge = ""
for header in resp.headers:
if (header[0].lower() == "proxy-authenticate" and
proxy_type in header[1].upper()):
h = header[1].split()
if len(h) == 2:
ntlm_challenge = h[1]
break
if ntlm_challenge:
dprint("Challenged")
ntlm_resp = ntlm.get_response(ntlm_challenge)
if ntlm_resp is None:
dprint("Bad auth response")
return Response(503)
self.fwd_data(resp, flush=True)
if hconnection != "":
self.headers["Connection"] = hconnection
dprint("Restore header Connection: " + hconnection)
# Reply to challenge
resp = self.do_socket({
"Proxy-Authorization": "%s %s" % (
proxy_type, ntlm_resp)
})
else:
dprint("Didn't get challenge, auth didn't work")
else:
dprint("No auth required cached")
else:
dprint("No auth required")
else:
dprint("No proxy server specified and not in noproxy list")
return Response(501)
return resp
def do_HEAD(self):
dprint("Entering")
self.do_GET()
dprint("Done")
def do_PAC(self):
resp = Response(404)
if State.proxy_mode in [MODE_PAC, MODE_CONFIG_PAC]:
pac = State.pac
if "file://" in State.pac:
pac = file_url_to_local_path(State.pac)
dprint(pac)
try:
resp.code = 200
with open(pac) as p:
resp.data = p.read().encode("utf-8")
resp.body = True
resp.headers = [
("Content-Length", len(resp.data)),
("Content-Type", "application/x-ns-proxy-autoconfig")
]
except:
traceback.print_exc(file=sys.stdout)
return resp
def do_GET(self):
dprint("Entering")
dprint("Path = " + self.path)
if "/PxPACFile.pac" in self.path:
resp = self.do_PAC()
else:
resp = self.do_transaction()
if resp.code >= 400:
dprint("Error %d" % resp.code)
self.fwd_resp(resp)
dprint("Done")
def do_POST(self):
dprint("Entering")
self.do_GET()
dprint("Done")
def do_PUT(self):
dprint("Entering")
self.do_GET()
dprint("Done")
def do_DELETE(self):
dprint("Entering")
self.do_GET()
dprint("Done")
def do_PATCH(self):
dprint("Entering")
self.do_GET()
dprint("Done")
def do_CONNECT(self):
dprint("Entering")
for i in ["connection", "Connection"]:
if i in self.headers:
del self.headers[i]
dprint("Remove header " + i)
cl = 0
cs = 0
resp = self.do_transaction()
if resp.code >= 400:
dprint("Error %d" % resp.code)
self.fwd_resp(resp)
else:
# Proxy connection may be already closed due to header
# (Proxy-)Connection: close received from proxy -> forward this to
# the client
if self.proxy_socket is None:
dprint("Proxy connection closed")
self.send_response(200, "True")
self.send_header("Proxy-Connection", "close")
self.end_headers()
else:
dprint("Tunneling through proxy")
self.send_response(200, "Connection established")
self.send_header("Proxy-Agent", self.version_string())
self.end_headers()
# sockets will be removed from these lists, when they are
# detected as closed by remote host; wlist contains sockets
# only when data has to be written
rlist = [self.connection, self.proxy_socket]
wlist = []
# data to be written to client connection and proxy socket
cdata = []
sdata = []
idle = State.config.getint("settings", "idle")
max_idle = time.time() + idle
while not State.exit and (rlist or wlist):
(ins, outs, exs) = select.select(rlist, wlist, rlist, idle)
if exs:
break
if ins:
for i in ins:
if i is self.proxy_socket:
out = self.connection
wdata = cdata
source = "proxy"
else:
out = self.proxy_socket
wdata = sdata
source = "client"
data = i.recv(4096)
if data:
cl += len(data)
# Prepare data to send it later in outs section
wdata.append(data)
if out not in outs:
outs.append(out)
max_idle = time.time() + idle
else:
# No data means connection closed by remote host
dprint("Connection closed by %s" % source)
# Because tunnel is closed on one end there is
# no need to read from both ends
del rlist[:]
# Do not write anymore to the closed end
if i in wlist:
wlist.remove(i)
if i in outs:
outs.remove(i)
if outs:
for o in outs:
if o is self.proxy_socket:
wdata = sdata
else:
wdata = cdata
data = wdata[0]
                        # socket.send() may send only part of the data (as the
                        # documentation says), so track how much was sent to
                        # ensure all of it eventually goes out
bsnt = o.send(data)
if bsnt > 0:
if bsnt < len(data):
                                # Not all data was sent; store the unsent
                                # part and ensure select() gets it when the
                                # socket can be written again
wdata[0] = data[bsnt:]
if o not in wlist:
wlist.append(o)
else:
wdata.pop(0)
if not data and o in wlist:
wlist.remove(o)
cs += bsnt
else:
dprint("No data sent")
max_idle = time.time() + idle
if max_idle < time.time():
# No data in timeout seconds
dprint("Proxy connection timeout")
break
        # After serving the proxy tunnel it cannot be used for anything else.
        # A proxy doesn't really know when a proxy tunnel isn't needed any
        # more (there is no content length for the data), so serving ends
        # either after the configured idle timeout without data transfer or
        # when at least one side closes the connection. Close both the proxy
        # and client connections if still open.
if self.proxy_socket is not None:
dprint("Cleanup proxy connection")
self.proxy_socket.shutdown(socket.SHUT_WR)
self.proxy_socket.close()
self.proxy_socket = None
self.close_connection = True
dprint("%d bytes read, %d bytes written" % (cl, cs))
dprint("Done")
def fwd_data(self, resp, flush=False):
cl = resp.length
dprint("Reading response data")
if resp.body:
if cl:
dprint("Content length %d" % cl)
while cl > 0:
if cl > 4096:
l = 4096
cl -= l
else:
l = cl
cl = 0
d = self.proxy_fp.read(l)
if not flush:
self.wfile.write(d)
elif resp.chunked:
dprint("Chunked encoding")
while not State.exit:
line = self.proxy_fp.readline(State.max_line)
if not flush:
self.wfile.write(line)
line = line.decode("utf-8").strip()
if not len(line):
dprint("Blank chunk size")
break
else:
try:
csize = int(line, 16) + 2
dprint("Chunk of size %d" % csize)
except ValueError:
dprint("Bad chunk size '%s'" % line)
continue
d = self.proxy_fp.read(csize)
if not flush:
self.wfile.write(d)
if csize == 2:
dprint("No more chunks")
break
if len(d) < csize:
dprint("Chunk size doesn't match data")
break
elif resp.data is not None:
dprint("Sending data string")
if not flush:
self.wfile.write(resp.data)
else:
dprint("Not sure how much")
while not State.exit:
time.sleep(0.1)
d = self.proxy_fp.read(1024)
if not flush:
self.wfile.write(d)
if len(d) < 1024:
break
if resp.close and self.proxy_socket:
dprint("Close proxy connection per header")
self.proxy_socket.close()
self.proxy_socket = None
def fwd_resp(self, resp):
dprint("Entering")
self.send_response(resp.code)
for header in resp.headers:
dprint("Returning %s: %s" % (header[0], header[1]))
self.send_header(header[0], header[1])
self.end_headers()
self.fwd_data(resp)
dprint("Done")
def get_destination(self):
netloc = self.path
path = "/"
if self.command != "CONNECT":
parse = urlparse.urlparse(self.path, allow_fragments=False)
if parse.netloc:
netloc = parse.netloc
if ":" not in netloc:
port = parse.port
if not port:
if parse.scheme == "http":
port = 80
elif parse.scheme == "https":
port = 443
elif parse.scheme == "ftp":
port = 21
netloc = netloc + ":" + str(port)
path = parse.path or "/"
if parse.params:
path = path + ";" + parse.params
if parse.query:
path = path + "?" + parse.query
dprint(netloc)
# Check destination for noproxy first, before doing any expensive stuff
# possibly involving connections
if State.noproxy.size:
addr = []
spl = netloc.split(":", 1)
try:
addr = socket.getaddrinfo(spl[0], int(spl[1]))
except socket.gaierror:
# Couldn't resolve, let parent proxy try, #18
dprint("Couldn't resolve host")
if len(addr) and len(addr[0]) == 5:
ipport = addr[0][4]
dprint("%s => %s + %s" % (self.path, ipport, path))
if ipport[0] in State.noproxy:
dprint("Direct connection from noproxy configuration")
self.path = path
return ipport
# Get proxy mode and servers straight from load_proxy to avoid
# threading issues
(proxy_mode, self.proxy_servers) = load_proxy()
if proxy_mode in [MODE_AUTO, MODE_PAC, MODE_CONFIG_PAC]:
proxy_str = find_proxy_for_url(
("https://" if "://" not in self.path else "") + self.path)
if proxy_str == "DIRECT":
ipport = netloc.split(":")
ipport[1] = int(ipport[1])
dprint("Direct connection from PAC")
self.path = path
return tuple(ipport)
if proxy_str:
dprint("Proxy from PAC = " + str(proxy_str))
            # parse_proxy does not modify State.proxy_server any more; it
            # returns the proxy server tuples instead, because proxy_str
            # contains only the proxy servers for the URL served by this thread
self.proxy_servers = parse_proxy(proxy_str)
return True if self.proxy_servers else False
###
# Multi-processing and multi-threading
def get_host_ips():
localips = [ip[4][0] for ip in socket.getaddrinfo(
socket.gethostname(), 80, socket.AF_INET)]
localips.insert(0, "127.0.0.1")
return localips
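# Illustrative result (a sketch; depends on the host's interfaces):
#   get_host_ips() -> ["127.0.0.1", "192.168.1.23", ...]
# i.e. loopback first, then the IPv4 addresses resolved for the hostname.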
class PoolMixIn(socketserver.ThreadingMixIn):
def process_request(self, request, client_address):
self.pool.submit(self.process_request_thread, request, client_address)
def verify_request(self, request, client_address):
dprint("Client address: %s" % client_address[0])
if client_address[0] in State.allow:
return True
if State.hostonly and client_address[0] in get_host_ips():
dprint("Host-only IP allowed")
return True
dprint("Client not allowed: %s" % client_address[0])
return False
class ThreadedTCPServer(PoolMixIn, socketserver.TCPServer):
daemon_threads = True
allow_reuse_address = True
def __init__(self, server_address, RequestHandlerClass,
bind_and_activate=True):
socketserver.TCPServer.__init__(self, server_address,
RequestHandlerClass, bind_and_activate)
try:
# Workaround bad thread naming code in Python 3.6+, fixed in master
self.pool = concurrent.futures.ThreadPoolExecutor(
max_workers=State.config.getint("settings", "threads"),
thread_name_prefix="Thread")
except:
self.pool = concurrent.futures.ThreadPoolExecutor(
max_workers=State.config.getint("settings", "threads"))
def print_banner():
pprint("Serving at %s:%d proc %s" % (
State.config.get("proxy", "listen").strip(),
State.config.getint("proxy", "port"),
multiprocessing.current_process().name)
)
if getattr(sys, "frozen", False) != False or "pythonw.exe" in sys.executable:
if State.config.getint("settings", "foreground") == 0:
detach_console()
for section in State.config.sections():
for option in State.config.options(section):
dprint(section + ":" + option + " = " + State.config.get(
section, option))
def serve_forever(httpd):
signal.signal(signal.SIGINT, signal.SIG_DFL)
try:
httpd.serve_forever()
except KeyboardInterrupt:
dprint("Exiting")
State.exit = True
httpd.shutdown()
def start_worker(pipeout):
parse_config()
httpd = ThreadedTCPServer((
State.config.get("proxy", "listen").strip(),
State.config.getint("proxy", "port")), Proxy, bind_and_activate=False)
mainsock = socket.fromshare(pipeout.recv())
httpd.socket = mainsock
print_banner()
serve_forever(httpd)
def run_pool():
try:
httpd = ThreadedTCPServer((State.config.get("proxy", "listen").strip(),
State.config.getint("proxy", "port")), Proxy)
except OSError as exc:
if "attempt was made" in str(exc):
print("Px failed to start - port in use")
else:
pprint(exc)
return
mainsock = httpd.socket
print_banner()
if hasattr(socket, "fromshare"):
workers = State.config.getint("settings", "workers")
for i in range(workers-1):
(pipeout, pipein) = multiprocessing.Pipe()
p = multiprocessing.Process(target=start_worker, args=(pipeout,))
p.daemon = True
p.start()
while p.pid is None:
time.sleep(1)
pipein.send(mainsock.share(p.pid))
serve_forever(httpd)
###
# Proxy detection
class WINHTTP_CURRENT_USER_IE_PROXY_CONFIG(ctypes.Structure):
_fields_ = [("fAutoDetect", ctypes.wintypes.BOOL),
# "Automatically detect settings"
("lpszAutoConfigUrl", ctypes.wintypes.LPWSTR),
# "Use automatic configuration script, Address"
("lpszProxy", ctypes.wintypes.LPWSTR),
# "1.2.3.4:5" if "Use the same proxy server for all protocols",
# else advanced
# "ftp=1.2.3.4:5;http=1.2.3.4:5;https=1.2.3.4:5;socks=1.2.3.4:5"
("lpszProxyBypass", ctypes.wintypes.LPWSTR),
# ";"-separated list
# "Bypass proxy server for local addresses" adds "<local>"
]
class WINHTTP_AUTOPROXY_OPTIONS(ctypes.Structure):
_fields_ = [("dwFlags", ctypes.wintypes.DWORD),
("dwAutoDetectFlags", ctypes.wintypes.DWORD),
("lpszAutoConfigUrl", ctypes.wintypes.LPCWSTR),
("lpvReserved", ctypes.c_void_p),
("dwReserved", ctypes.wintypes.DWORD),
("fAutoLogonIfChallenged", ctypes.wintypes.BOOL), ]
class WINHTTP_PROXY_INFO(ctypes.Structure):
_fields_ = [("dwAccessType", ctypes.wintypes.DWORD),
("lpszProxy", ctypes.wintypes.LPCWSTR),
("lpszProxyBypass", ctypes.wintypes.LPCWSTR), ]
# Parameters for WinHttpOpen, http://msdn.microsoft.com/en-us/library/aa384098(VS.85).aspx
WINHTTP_NO_PROXY_NAME = 0
WINHTTP_NO_PROXY_BYPASS = 0
WINHTTP_FLAG_ASYNC = 0x10000000
# dwFlags values
WINHTTP_AUTOPROXY_AUTO_DETECT = 0x00000001
WINHTTP_AUTOPROXY_CONFIG_URL = 0x00000002
# dwAutoDetectFlags values
WINHTTP_AUTO_DETECT_TYPE_DHCP = 0x00000001
WINHTTP_AUTO_DETECT_TYPE_DNS_A = 0x00000002
# dwAccessType values
WINHTTP_ACCESS_TYPE_DEFAULT_PROXY = 0
WINHTTP_ACCESS_TYPE_NO_PROXY = 1
WINHTTP_ACCESS_TYPE_NAMED_PROXY = 3
WINHTTP_ACCESS_TYPE_AUTOMATIC_PROXY = 4
# Error messages
WINHTTP_ERROR_WINHTTP_UNABLE_TO_DOWNLOAD_SCRIPT = 12167
def winhttp_find_proxy_for_url(
url, autodetect=False, pac_url=None, autologon=True):
# Fix issue #51
ACCESS_TYPE = WINHTTP_ACCESS_TYPE_AUTOMATIC_PROXY
if WIN_VERSION < 6.3:
ACCESS_TYPE = WINHTTP_ACCESS_TYPE_DEFAULT_PROXY
ctypes.windll.winhttp.WinHttpOpen.restype = ctypes.c_void_p
hInternet = ctypes.windll.winhttp.WinHttpOpen(
ctypes.wintypes.LPCWSTR("Px"),
ACCESS_TYPE, WINHTTP_NO_PROXY_NAME,
WINHTTP_NO_PROXY_BYPASS, WINHTTP_FLAG_ASYNC)
if not hInternet:
dprint("WinHttpOpen failed: " + str(ctypes.GetLastError()))
return ""
autoproxy_options = WINHTTP_AUTOPROXY_OPTIONS()
if pac_url:
autoproxy_options.dwFlags = WINHTTP_AUTOPROXY_CONFIG_URL
autoproxy_options.dwAutoDetectFlags = 0
autoproxy_options.lpszAutoConfigUrl = pac_url
elif autodetect:
autoproxy_options.dwFlags = WINHTTP_AUTOPROXY_AUTO_DETECT
autoproxy_options.dwAutoDetectFlags = (
WINHTTP_AUTO_DETECT_TYPE_DHCP | WINHTTP_AUTO_DETECT_TYPE_DNS_A)
autoproxy_options.lpszAutoConfigUrl = 0
else:
return ""
autoproxy_options.fAutoLogonIfChallenged = autologon
proxy_info = WINHTTP_PROXY_INFO()
# Fix issue #43
ctypes.windll.winhttp.WinHttpGetProxyForUrl.argtypes = [ctypes.c_void_p,
ctypes.wintypes.LPCWSTR, ctypes.POINTER(WINHTTP_AUTOPROXY_OPTIONS),
ctypes.POINTER(WINHTTP_PROXY_INFO)]
ok = ctypes.windll.winhttp.WinHttpGetProxyForUrl(
hInternet, ctypes.wintypes.LPCWSTR(url),
ctypes.byref(autoproxy_options), ctypes.byref(proxy_info))
if not ok:
error = ctypes.GetLastError()
dprint("WinHttpGetProxyForUrl error %s" % error)
if error == WINHTTP_ERROR_WINHTTP_UNABLE_TO_DOWNLOAD_SCRIPT:
dprint("Could not download PAC file, trying DIRECT instead")
return "DIRECT"
return ""
if proxy_info.dwAccessType == WINHTTP_ACCESS_TYPE_NAMED_PROXY:
# Note: proxy_info.lpszProxyBypass makes no sense here!
if not proxy_info.lpszProxy:
dprint('WinHttpGetProxyForUrl named proxy without name')
return ""
return proxy_info.lpszProxy.replace(" ", ",").replace(";", ",").replace(
",DIRECT", "") # Note: We only see the first!
if proxy_info.dwAccessType == WINHTTP_ACCESS_TYPE_NO_PROXY:
return "DIRECT"
# WinHttpCloseHandle()
dprint("WinHttpGetProxyForUrl accesstype %s" % (proxy_info.dwAccessType,))
return ""
def file_url_to_local_path(file_url):
parts = urlparse.urlparse(file_url)
path = urlparse.unquote(parts.path)
if path.startswith('/') and not path.startswith('//'):
if len(parts.netloc) == 2 and parts.netloc[1] == ':':
return parts.netloc + path
return 'C:' + path
if len(path) > 2 and path[1] == ':':
return path
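# Illustrative behavior (a sketch, not exhaustive):
#   file_url_to_local_path("file://C:/proxy.pac")   -> "C:/proxy.pac"
#   file_url_to_local_path("file:///etc/proxy.pac") -> "C:/etc/proxy.pac"
# i.e. a missing drive letter is assumed to be C:.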
def load_proxy(quiet=False):
# Return if proxies specified in Px config
if State.proxy_mode in [MODE_CONFIG, MODE_CONFIG_PAC]:
return (State.proxy_mode, State.proxy_server)
# Do locking to avoid updating globally shared State object by multiple
# threads simultaneously
State.proxy_mode_lock.acquire()
try:
proxy_mode = State.proxy_mode
proxy_servers = State.proxy_server
# Check if need to refresh
if (State.proxy_refresh is not None and
time.time() - State.proxy_refresh <
State.config.getint("settings", "proxyreload")):
if not quiet:
dprint("Skip proxy refresh")
return (proxy_mode, proxy_servers)
# Start with clean proxy mode and server list
proxy_mode = MODE_NONE
proxy_servers = []
# Get proxy info from Internet Options
ie_proxy_config = WINHTTP_CURRENT_USER_IE_PROXY_CONFIG()
ok = ctypes.windll.winhttp.WinHttpGetIEProxyConfigForCurrentUser(
ctypes.byref(ie_proxy_config))
if not ok:
if not quiet:
dprint(ctypes.GetLastError())
else:
if ie_proxy_config.fAutoDetect:
proxy_mode = MODE_AUTO
elif ie_proxy_config.lpszAutoConfigUrl:
State.pac = ie_proxy_config.lpszAutoConfigUrl
proxy_mode = MODE_PAC
if not quiet:
dprint("AutoConfigURL = " + State.pac)
else:
# Manual proxy
proxies = []
proxies_str = ie_proxy_config.lpszProxy or ""
for proxy_str in proxies_str.lower().replace(
' ', ';').split(';'):
if '=' in proxy_str:
scheme, proxy = proxy_str.split('=', 1)
if scheme.strip() != "ftp":
proxies.append(proxy)
elif proxy_str:
proxies.append(proxy_str)
if proxies:
proxy_servers = parse_proxy(",".join(proxies))
proxy_mode = MODE_MANUAL
# Proxy exceptions into noproxy
bypass_str = ie_proxy_config.lpszProxyBypass or "" # FIXME: Handle "<local>"
bypasses = [h.strip() for h in bypass_str.lower().replace(
' ', ';').split(';')]
for bypass in bypasses:
try:
ipns = netaddr.IPGlob(bypass)
State.noproxy.add(ipns)
if not quiet:
dprint("Noproxy += " + bypass)
except:
State.noproxy_hosts.append(bypass)
if not quiet:
dprint("Noproxy hostname += " + bypass)
State.proxy_refresh = time.time()
if not quiet:
dprint("Proxy mode = " + str(proxy_mode))
State.proxy_mode = proxy_mode
State.proxy_server = proxy_servers
# Clear proxy types on proxy server update
State.proxy_type = {}
finally:
State.proxy_mode_lock.release()
return (proxy_mode, proxy_servers)
def find_proxy_for_url(url):
proxy_str = ""
if State.proxy_mode == MODE_AUTO:
proxy_str = winhttp_find_proxy_for_url(url, autodetect=True)
elif State.proxy_mode in [MODE_PAC, MODE_CONFIG_PAC]:
pac = State.pac
if "file://" in State.pac or not State.pac.startswith("http"):
host = State.config.get("proxy", "listen") or "localhost"
port = State.config.getint("proxy", "port")
pac = "http://%s:%d/PxPACFile.pac" % (host, port)
dprint("PAC URL is local: " + pac)
proxy_str = winhttp_find_proxy_for_url(url, pac_url=pac)
    # Handle the edge case where the result is a list that starts with DIRECT.
    # Assume everything should be direct, since the exact string DIRECT is
    # tested explicitly in get_destination
if proxy_str.startswith("DIRECT,"):
proxy_str = "DIRECT"
    # If proxy_str is still empty at this point, then no proxy is
    # configured. Try a direct connection.
if proxy_str == "":
proxy_str = "DIRECT"
dprint("Proxy found: " + proxy_str)
return proxy_str
###
# Parse settings and command line
def parse_proxy(proxystrs):
if not proxystrs:
return []
servers = []
for proxystr in [i.strip() for i in proxystrs.split(",")]:
pserver = [i.strip() for i in proxystr.split(":")]
if len(pserver) == 1:
pserver.append(80)
elif len(pserver) == 2:
try:
pserver[1] = int(pserver[1])
except ValueError:
pprint("Bad proxy server port: " + pserver[1])
sys.exit()
else:
pprint("Bad proxy server definition: " + proxystr)
sys.exit()
if tuple(pserver) not in servers:
servers.append(tuple(pserver))
return servers
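# Illustrative usage (a sketch): a comma-separated host[:port] list becomes
# unique (host, port) tuples, with the port defaulting to 80:
#   parse_proxy("proxy1:8080,proxy2") -> [("proxy1", 8080), ("proxy2", 80)]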
def parse_ip_ranges(iprangesconfig):
ipranges = netaddr.IPSet([])
iprangessplit = [i.strip() for i in iprangesconfig.split(",")]
for iprange in iprangessplit:
if not iprange:
continue
try:
if "-" in iprange:
spl = iprange.split("-", 1)
ipns = netaddr.IPRange(spl[0], spl[1])
elif "*" in iprange:
ipns = netaddr.IPGlob(iprange)
else:
ipns = netaddr.IPNetwork(iprange)
ipranges.add(ipns)
except:
pprint("Bad IP definition: %s" % iprangesconfig)
sys.exit()
return ipranges
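# Illustrative usage (a sketch): ranges, globs and CIDR blocks may be mixed,
# comma-separated:
#   parse_ip_ranges("127.0.0.1,10.0.0.1-10.0.0.9,192.168.0.*,172.16.0.0/12")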
def parse_allow(allow):
State.allow = parse_ip_ranges(allow)
def parse_noproxy(noproxy):
State.noproxy = parse_ip_ranges(noproxy)
def set_useragent(useragent):
State.useragent = useragent
def set_username(username):
ud = username.split("\\")
if len(ud) == 2:
State.username = ud[1]
State.domain = ud[0]
else:
State.username = username
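# Illustrative behavior (a sketch): "CORP\\alice" sets State.domain = "CORP"
# and State.username = "alice"; a plain "alice" sets only State.username.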
def set_pac(pac):
if pac == "":
return
pacproxy = False
if pac.startswith("http"):
pacproxy = True
elif pac.startswith("file"):
pac = file_url_to_local_path(pac)
if os.path.exists(pac):
pacproxy = True
if pacproxy:
State.pac = pac
else:
pprint("Unsupported PAC location or file not found: %s" % pac)
sys.exit()
def set_auth(auth):
if auth.upper() not in ["NTLM", "KERBEROS", "BASIC", ""]:
pprint("Bad proxy auth type: %s" % auth)
sys.exit()
if auth != "":
State.auth = auth
def cfg_int_init(section, name, default, override=False):
val = default
if not override:
try:
val = State.config.get(section, name).strip()
except configparser.NoOptionError:
pass
try:
val = int(val)
except ValueError:
pprint("Invalid integer value for " + section + ":" + name)
State.config.set(section, name, str(val))
def cfg_float_init(section, name, default, override=False):
val = default
if not override:
try:
val = State.config.get(section, name).strip()
except configparser.NoOptionError:
pass
try:
val = float(val)
except ValueError:
pprint("Invalid float value for " + section + ":" + name)
State.config.set(section, name, str(val))
def cfg_str_init(section, name, default, proc=None, override=False):
val = default
if not override:
try:
val = State.config.get(section, name).strip()
except configparser.NoOptionError:
pass
State.config.set(section, name, val)
if proc != None:
proc(val)
def save():
with open(State.ini, "w") as cfgfile:
State.config.write(cfgfile)
pprint("Saved config to " + State.ini + "\n")
with open(State.ini, "r") as cfgfile:
sys.stdout.write(cfgfile.read())
sys.exit()
def parse_config():
if "--debug" in sys.argv:
State.logger = Log(dfile(), "w")
if getattr(sys, "frozen", False) != False or "pythonw.exe" in sys.executable:
attach_console()
if "-h" in sys.argv or "--help" in sys.argv:
pprint(HELP)
sys.exit()
# Load configuration file
State.config = configparser.ConfigParser()
State.ini = os.path.join(os.path.dirname(get_script_path()), State.ini)
for i in range(len(sys.argv)):
if "=" in sys.argv[i]:
val = sys.argv[i].split("=")[1]
if "--config=" in sys.argv[i]:
State.ini = val
if not os.path.exists(val) and "--save" not in sys.argv:
pprint("Could not find config file: " + val)
sys.exit()
if os.path.exists(State.ini):
State.config.read(State.ini)
# [proxy] section
if "proxy" not in State.config.sections():
State.config.add_section("proxy")
cfg_str_init("proxy", "server", "")
cfg_str_init("proxy", "pac", "", set_pac)
cfg_int_init("proxy", "port", "3128")
cfg_str_init("proxy", "listen", "127.0.0.1")
cfg_str_init("proxy", "allow", "*.*.*.*", parse_allow)
cfg_int_init("proxy", "gateway", "0")
cfg_int_init("proxy", "hostonly", "0")
cfg_str_init("proxy", "noproxy", "", parse_noproxy)
cfg_str_init("proxy", "useragent", "", set_useragent)
cfg_str_init("proxy", "username", "", set_username)
cfg_str_init("proxy", "auth", "", set_auth)
# [settings] section
if "settings" not in State.config.sections():
State.config.add_section("settings")
cfg_int_init("settings", "workers", "2")
cfg_int_init("settings", "threads", "5")
cfg_int_init("settings", "idle", "30")
cfg_float_init("settings", "socktimeout", "20.0")
cfg_int_init("settings", "proxyreload", "60")
cfg_int_init("settings", "foreground", "0")
cfg_int_init("settings", "log", "0" if State.logger is None else "1")
if State.config.get("settings", "log") == "1" and State.logger is None:
State.logger = Log(dfile(), "w")
# Command line flags
for i in range(len(sys.argv)):
if "=" in sys.argv[i]:
val = sys.argv[i].split("=")[1]
if "--proxy=" in sys.argv[i] or "--server=" in sys.argv[i]:
cfg_str_init("proxy", "server", val, None, True)
elif "--pac=" in sys.argv[i]:
cfg_str_init("proxy", "pac", val, set_pac, True)
elif "--listen=" in sys.argv[i]:
cfg_str_init("proxy", "listen", val, None, True)
elif "--port=" in sys.argv[i]:
cfg_int_init("proxy", "port", val, True)
elif "--allow=" in sys.argv[i]:
cfg_str_init("proxy", "allow", val, parse_allow, True)
elif "--noproxy=" in sys.argv[i]:
cfg_str_init("proxy", "noproxy", val, parse_noproxy, True)
elif "--useragent=" in sys.argv[i]:
cfg_str_init("proxy", "useragent", val, set_useragent, True)
elif "--username=" in sys.argv[i]:
cfg_str_init("proxy", "username", val, set_username, True)
elif "--auth=" in sys.argv[i]:
cfg_str_init("proxy", "auth", val, set_auth, True)
else:
for j in ["workers", "threads", "idle", "proxyreload"]:
if "--" + j + "=" in sys.argv[i]:
cfg_int_init("settings", j, val, True)
for j in ["socktimeout"]:
if "--" + j + "=" in sys.argv[i]:
cfg_float_init("settings", j, val, True)
if "--gateway" in sys.argv:
cfg_int_init("proxy", "gateway", "1", True)
if "--hostonly" in sys.argv:
cfg_int_init("proxy", "hostonly", "1", True)
if "--foreground" in sys.argv:
cfg_int_init("settings", "foreground", "1", True)
###
# Dependency propagation
# If gateway mode
if State.config.getint("proxy", "gateway") == 1:
# Listen on all interfaces
cfg_str_init("proxy", "listen", "", None, True)
# If hostonly mode
if State.config.getint("proxy", "hostonly") == 1:
State.hostonly = True
# Listen on all interfaces
cfg_str_init("proxy", "listen", "", None, True)
# If not gateway mode or gateway with default allow rules
if (State.config.getint("proxy", "gateway") == 0 or
(State.config.getint("proxy", "gateway") == 1 and
State.config.get("proxy", "allow") in [
"*.*.*.*", "0.0.0.0/0"])):
# Purge allow rules
cfg_str_init("proxy", "allow", "", parse_allow, True)
State.proxy_server = parse_proxy(State.config.get("proxy", "server"))
if "--install" in sys.argv:
install()
elif "--uninstall" in sys.argv:
uninstall()
elif "--quit" in sys.argv:
quit()
elif "--save" in sys.argv:
save()
if State.proxy_server:
State.proxy_mode = MODE_CONFIG
elif State.pac:
State.proxy_mode = MODE_CONFIG_PAC
else:
load_proxy(quiet=True)
if State.proxy_mode == MODE_NONE and not State.config.get(
"proxy", "noproxy"):
pprint("No proxy server or noproxy list defined")
sys.exit()
socket.setdefaulttimeout(State.config.getfloat("settings", "socktimeout"))
###
# Exit related
def quit(force=False):
count = 0
mypids = [os.getpid(), os.getppid()]
for pid in sorted(psutil.pids(), reverse=True):
if pid in mypids:
continue
try:
p = psutil.Process(pid)
if p.exe().lower() == sys.executable.lower():
count += 1
if force:
p.kill()
else:
p.send_signal(signal.CTRL_C_EVENT)
except (psutil.AccessDenied, psutil.NoSuchProcess, PermissionError, SystemError):
pass
except:
traceback.print_exc(file=sys.stdout)
if count != 0:
if force:
sys.stdout.write(".")
else:
sys.stdout.write("Quitting Px ..")
time.sleep(4)
sys.stdout.flush()
quit(True)
else:
if force:
pprint(" DONE")
else:
pprint("Px is not running")
sys.exit()
def handle_exceptions(extype, value, tb):
# Create traceback log
lst = (traceback.format_tb(tb, None) +
traceback.format_exception_only(extype, value))
tracelog = '\nTraceback (most recent call last):\n' + "%-20s%s\n" % (
"".join(lst[:-1]), lst[-1])
if State.logger != None:
pprint(tracelog)
else:
sys.stderr.write(tracelog)
# Save to debug.log
dbg = open(dfile(), 'w')
dbg.write(tracelog)
dbg.close()
###
# Install Px to startup
def get_script_path():
if getattr(sys, "frozen", False) is False:
# Script mode
return os.path.normpath(os.path.join(os.getcwd(), sys.argv[0]))
# Frozen mode
return sys.executable
def get_script_cmd():
spath = get_script_path()
if os.path.splitext(spath)[1].lower() == ".py":
return sys.executable + ' "%s"' % spath
return spath
def check_installed():
ret = True
runkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Run", 0, winreg.KEY_READ)
try:
winreg.QueryValueEx(runkey, "Px")
except:
ret = False
winreg.CloseKey(runkey)
return ret
def install():
if check_installed() is False:
runkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Run", 0,
winreg.KEY_WRITE)
winreg.SetValueEx(runkey, "Px", 0, winreg.REG_EXPAND_SZ,
get_script_cmd())
winreg.CloseKey(runkey)
pprint("Px installed successfully")
else:
pprint("Px already installed")
sys.exit()
def uninstall():
if check_installed() is True:
runkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Run", 0,
winreg.KEY_WRITE)
winreg.DeleteValue(runkey, "Px")
winreg.CloseKey(runkey)
pprint("Px uninstalled successfully")
else:
pprint("Px is not installed")
sys.exit()
###
# Attach/detach console
def attach_console():
if ctypes.windll.kernel32.GetConsoleWindow() != 0:
dprint("Already attached to a console")
return
    # Find the parent cmd.exe or powershell.exe if one exists
pid = os.getpid()
while True:
try:
p = psutil.Process(pid)
except psutil.NoSuchProcess:
# No such parent - started without console
pid = -1
break
if os.path.basename(p.name()).lower() in [
"cmd", "cmd.exe", "powershell", "powershell.exe"]:
# Found it
break
# Search parent
pid = p.ppid()
# Not found, started without console
if pid == -1:
dprint("No parent console to attach to")
return
dprint("Attaching to console " + str(pid))
if ctypes.windll.kernel32.AttachConsole(pid) == 0:
dprint("Attach failed with error " +
str(ctypes.windll.kernel32.GetLastError()))
return
if ctypes.windll.kernel32.GetConsoleWindow() == 0:
dprint("Not a console window")
return
reopen_stdout()
def detach_console():
if ctypes.windll.kernel32.GetConsoleWindow() == 0:
return
restore_stdout()
if not ctypes.windll.kernel32.FreeConsole():
dprint("Free console failed with error " +
str(ctypes.windll.kernel32.GetLastError()))
else:
dprint("Freed console successfully")
###
# Startup
def main():
multiprocessing.freeze_support()
sys.excepthook = handle_exceptions
parse_config()
run_pool()
if __name__ == "__main__":
main()
| 34.430563
| 152
| 0.556783
|
3fdc2a647a3512bccff693996e87dcf02307d79b
| 471
|
py
|
Python
|
app/test_health.py
|
chrisguest75/banner_service
|
ea0475e4159a84913c4d5a67a054126e82f3e440
|
[
"MIT"
] | 1
|
2021-02-23T21:59:26.000Z
|
2021-02-23T21:59:26.000Z
|
app/test_health.py
|
chrisguest75/banner_service
|
ea0475e4159a84913c4d5a67a054126e82f3e440
|
[
"MIT"
] | null | null | null |
app/test_health.py
|
chrisguest75/banner_service
|
ea0475e4159a84913c4d5a67a054126e82f3e440
|
[
"MIT"
] | null | null | null |
import pytest
import connexion
flask_app = connexion.FlaskApp(__name__, specification_dir='./openapi')
flask_app.add_api('./swagger.yaml')
flask_app.testing = True
@pytest.fixture(scope='module')
def client():
with flask_app.app.test_client() as c:
yield c
def test_health(client):
""" Test health endpoint returns 200
"""
response = client.get('/api/health')
assert response.json == "Healthy"
assert response.status_code == 200
| 21.409091
| 71
| 0.696391
|
979aae0a1b9e6111bf28f4943a9c3915918dd782
| 7,867
|
py
|
Python
|
preview_generator/utils.py
|
asweeney86/preview-generator
|
354cbac1c131ebbb81cd9cfd9b4bc0c184d10103
|
[
"MIT"
] | null | null | null |
preview_generator/utils.py
|
asweeney86/preview-generator
|
354cbac1c131ebbb81cd9cfd9b4bc0c184d10103
|
[
"MIT"
] | null | null | null |
preview_generator/utils.py
|
asweeney86/preview-generator
|
354cbac1c131ebbb81cd9cfd9b4bc0c184d10103
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from datetime import date
from datetime import datetime
from json import JSONEncoder
import os
import shutil
from subprocess import check_call
import tempfile
import typing
from PyPDF2 import PdfFileReader
from wand.version import formats as wand_supported_format
from preview_generator.extension import mimetypes_storage
LOGGER_NAME = "PreviewGenerator"
BLACKLISTED_IMAGEMAGICK_MIME = [
"image/svg+xml",
"image/svg",
"application/pdf",
"application/x-silverlight",
]
LOCKFILE_EXTENSION = ".lock"
# INFO - G.M - 2020-07-03 - if another preview is being created for the same
# file, this is the default time the preview manager allows waiting for the
# other preview to be generated.
LOCK_DEFAULT_TIMEOUT = 20
def get_subclasses_recursively(_class: type, _seen: set = None) -> typing.Generator:
"""
itersubclasses(cls)
Generator over all subclasses of a given class, in depth first order.
>>> list(get_subclasses_recursively(int)) == [bool]
True
>>> class A(object): pass
>>> class B(A): pass
>>> class C(A): pass
>>> class D(B,C): pass
>>> class E(D): pass
>>>
>>> for cls in get_subclasses_recursively(A):
... print(cls.__name__)
B
D
E
C
>>> # get ALL (new-style) classes currently defined
>>> [cls.__name__ for cls in get_subclasses_recursively(object)] # doctest: +ELLIPSIS
['type', ...'tuple', ...]
"""
if not isinstance(_class, type):
raise TypeError(
"itersubclasses must be called with " "new-style classes, not %.100r" % _class
)
if _seen is None:
_seen = set()
try:
subs = _class.__subclasses__()
except TypeError: # fails only when cls is type
subs = _class.__subclasses__(_class) # type: ignore
for sub in subs:
if sub not in _seen:
_seen.add(sub)
yield sub
for sub in get_subclasses_recursively(sub, _seen):
yield sub
class ImgDims(object):
def __init__(self, width: int, height: int) -> None:
self.width = width
self.height = height
def ratio(self) -> float:
return self.width / self.height
def __str__(self) -> str:
return "{}x{}".format(self.width, self.height)
class MimetypeMapping(object):
def __init__(self, mimetype: str, file_extension: str) -> None:
self.mimetype = mimetype
self.file_extension = file_extension
def __str__(self) -> str:
return "MimetypeMapping:{}:{}".format(self.mimetype, self.file_extension)
class CropDims(object):
def __init__(self, left: int, top: int, right: int, bottom: int) -> None:
self.left = left
self.top = top
self.right = right
self.bottom = bottom
def __str__(self) -> str:
return "({},{}) x ({},{})".format(self.left, self.top, self.right, self.bottom)
def compute_resize_dims(dims_in: ImgDims, dims_out: ImgDims) -> ImgDims:
"""
    Compute the resize dimensions for transforming the input image into the
    output format. This is related to a crop operation which allows
    transforming the input image's aspect ratio into the target ratio.
    :param dims_in: dimensions of the source image
    :param dims_out: target dimensions
    :return: resize dimensions preserving the source aspect ratio
"""
img_ratio_in = dims_in.width / dims_in.height
img_ratio_out = dims_out.width / dims_out.height
if img_ratio_in > img_ratio_out:
size_ratio = dims_out.width / dims_in.width
else:
size_ratio = dims_out.height / dims_in.height
return ImgDims(
width=round(dims_in.width * size_ratio), height=round(dims_in.height * size_ratio)
)
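# Worked example (illustrative): compute_resize_dims(ImgDims(400, 200),
# ImgDims(100, 100)) -> the input ratio (2.0) exceeds the output ratio (1.0),
# so size_ratio = 100 / 400 = 0.25 and the result is ImgDims(100, 50).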
def compute_crop_dims(dims_in: ImgDims, dims_out: ImgDims) -> CropDims:
left = round((dims_in.width / 2) - (dims_out.width / 2))
upper = round((dims_in.height / 2) - (dims_out.height / 2))
right = left + dims_out.width
lower = upper + dims_out.height
return CropDims(left=left, top=upper, right=right, bottom=lower)
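# Worked example (illustrative): compute_crop_dims(ImgDims(200, 150),
# ImgDims(100, 100)) centers the crop box and returns
# CropDims(left=50, top=25, right=150, bottom=125).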
def executable_is_available(
executable_name: typing.Union[str, typing.List[str], typing.Tuple[str]]
) -> bool:
"""Check if an executable is available in execution environment.
:param executable_name: List or Tuple, or single command name
    :return: `True` if the executable is found, `False` otherwise
"""
if isinstance(executable_name, (list, tuple)):
for _exec_name in executable_name:
print("_exec_name =", _exec_name)
if shutil.which(_exec_name) is not None:
return True
return False
return shutil.which(executable_name) is not None
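# Illustrative usage (a sketch): both forms return True as soon as
# shutil.which() finds a candidate on PATH:
#   executable_is_available("qpdf")
#   executable_is_available(["inkscape", "inkscape.exe"])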
class PreviewGeneratorJsonEncoder(JSONEncoder):
def default(self, obj: typing.Any) -> str:
if isinstance(obj, bytes):
try:
return obj.decode("ascii")
except: # noqa: E722
return ""
if isinstance(obj, (datetime, date)):
serial = obj.isoformat()
return serial
return JSONEncoder.default(self, obj)
def get_decrypted_pdf(
stream: typing.BinaryIO,
strict: bool = True,
warndest: typing.TextIO = None,
overwriteWarnings: bool = True,
) -> PdfFileReader:
"""
Return a PdfFileReader object decrypted with default empty key (if file is encrypted)
The signature is taken from PdfFileReader.__init__
See https://github.com/algoo/preview-generator/issues/52
which is related to https://github.com/mstamy2/PyPDF2/issues/51
:param stream:
:param strict:
:param warndest:
:param overwriteWarnings:
:return:
"""
pdf = PdfFileReader(stream, strict, warndest, overwriteWarnings)
if pdf.isEncrypted:
# TODO - D.A. - 2018-11-08 - manage password protected PDFs
password = ""
try:
pdf.decrypt(password)
except NotImplementedError:
# If not supported, try and use qpdf to decrypt with '' first.
# See https://github.com/mstamy2/PyPDF2/issues/378
# Workaround for the "NotImplementedError: only algorithm code 1 and 2 are supported" issue.
tf = tempfile.NamedTemporaryFile(
prefix="preview-generator-", suffix=".pdf", delete=False
)
tfoname = tf.name + "_decrypted.pdf"
stream.seek(0)
tf.write(stream.read())
tf.close()
if password:
check_call(["qpdf", "--password=" + password, "--decrypt", tf.name, tfoname])
else:
check_call(["qpdf", "--decrypt", tf.name, tfoname])
pdf = PdfFileReader(tfoname, strict, warndest, overwriteWarnings)
os.unlink(tf.name)
os.unlink(tfoname)
return pdf
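# Illustrative usage (a sketch; the decryption fallback assumes qpdf is on
# PATH):
#   with open("document.pdf", "rb") as stream:
#       pdf = get_decrypted_pdf(stream)
#       num_pages = pdf.getNumPages()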
def imagemagick_supported_mimes() -> typing.List[str]:
all_supported = wand_supported_format("*")
valid_mime = [] # type: typing.List[str]
all_imagemagick_mime_supported = [] # type: typing.List[str]
for supported in all_supported:
fake_url = "./FILE.{0}".format(supported) # Fake a url
mime, enc = mimetypes_storage.guess_type(fake_url)
if mime and mime not in all_imagemagick_mime_supported:
all_imagemagick_mime_supported.append(mime)
for mime in all_imagemagick_mime_supported:
        # INFO - G.M - 2019-11-15 - we drop text file format support (not working correctly)
if mime.startswith("text/"):
continue
        # INFO - G.M - 2019-11-15 - we drop video file format support (not working correctly either)
if mime.startswith("video/"):
continue
        # HACK - G.M - 2019-11-15 - check whether some "chemical" files can be
        # processed as images; for now, consider them as not supported.
if mime.startswith("chemical/"):
continue
if mime in BLACKLISTED_IMAGEMAGICK_MIME:
continue
valid_mime.append(mime)
return valid_mime
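# Illustrative result (depends entirely on the local ImageMagick build):
#   imagemagick_supported_mimes() -> ["image/png", "image/jpeg", ...]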
| 32.643154
| 104
| 0.638744
|
e7936cc0535ab5e6e8a86881a995a539784d9a35
| 6,230
|
py
|
Python
|
homeassistant/components/sensor/openweathermap.py
|
gwendalg/home-assistant
|
fb94aaa5a1a1e125dafb681e50c18be45dfe1b19
|
[
"MIT"
] | 1
|
2017-02-12T18:44:18.000Z
|
2017-02-12T18:44:18.000Z
|
homeassistant/components/sensor/openweathermap.py
|
gwendalg/home-assistant
|
fb94aaa5a1a1e125dafb681e50c18be45dfe1b19
|
[
"MIT"
] | null | null | null |
homeassistant/components/sensor/openweathermap.py
|
gwendalg/home-assistant
|
fb94aaa5a1a1e125dafb681e50c18be45dfe1b19
|
[
"MIT"
] | 1
|
2021-02-25T10:32:08.000Z
|
2021-02-25T10:32:08.000Z
|
"""
Support for the OpenWeatherMap (OWM) service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.openweathermap/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_API_KEY, CONF_NAME, TEMP_CELSIUS, TEMP_FAHRENHEIT,
CONF_MONITORED_CONDITIONS)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
REQUIREMENTS = ['pyowm==2.4.0']
_LOGGER = logging.getLogger(__name__)
CONF_FORECAST = 'forecast'
DEFAULT_NAME = 'OWM'
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=120)
SENSOR_TYPES = {
'weather': ['Condition', None],
'temperature': ['Temperature', None],
'wind_speed': ['Wind speed', 'm/s'],
'humidity': ['Humidity', '%'],
'pressure': ['Pressure', 'mbar'],
'clouds': ['Cloud coverage', '%'],
'rain': ['Rain', 'mm'],
'snow': ['Snow', 'mm']
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS, default=[]):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_FORECAST, default=False): cv.boolean
})
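# Illustrative configuration.yaml entry (a sketch; values are placeholders):
#   sensor:
#     - platform: openweathermap
#       api_key: YOUR_API_KEY
#       forecast: true
#       monitored_conditions:
#         - temperature
#         - humidity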
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the OpenWeatherMap sensor."""
if None in (hass.config.latitude, hass.config.longitude):
_LOGGER.error("Latitude or longitude not set in Home Assistant config")
return False
from pyowm import OWM
SENSOR_TYPES['temperature'][1] = hass.config.units.temperature_unit
name = config.get(CONF_NAME)
forecast = config.get(CONF_FORECAST)
owm = OWM(config.get(CONF_API_KEY))
if not owm:
_LOGGER.error(
"Connection error "
"Please check your settings for OpenWeatherMap")
return False
data = WeatherData(owm, forecast, hass.config.latitude,
hass.config.longitude)
dev = []
for variable in config[CONF_MONITORED_CONDITIONS]:
dev.append(OpenWeatherMapSensor(
name, data, variable, SENSOR_TYPES[variable][1]))
if forecast:
SENSOR_TYPES['forecast'] = ['Forecast', None]
dev.append(OpenWeatherMapSensor(
name, data, 'forecast', SENSOR_TYPES['temperature'][1]))
add_devices(dev)
# pylint: disable=too-few-public-methods
class OpenWeatherMapSensor(Entity):
"""Implementation of an OpenWeatherMap sensor."""
def __init__(self, name, weather_data, sensor_type, temp_unit):
"""Initialize the sensor."""
self.client_name = name
self._name = SENSOR_TYPES[sensor_type][0]
self.owa_client = weather_data
self.temp_unit = temp_unit
self.type = sensor_type
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self.update()
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format(self.client_name, self._name)
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
# pylint: disable=too-many-branches
def update(self):
"""Get the latest data from OWM and updates the states."""
self.owa_client.update()
data = self.owa_client.data
fc_data = self.owa_client.fc_data
if self.type == 'weather':
self._state = data.get_detailed_status()
elif self.type == 'temperature':
if self.temp_unit == TEMP_CELSIUS:
self._state = round(data.get_temperature('celsius')['temp'],
1)
elif self.temp_unit == TEMP_FAHRENHEIT:
self._state = round(data.get_temperature('fahrenheit')['temp'],
1)
else:
self._state = round(data.get_temperature()['temp'], 1)
elif self.type == 'wind_speed':
self._state = round(data.get_wind()['speed'], 1)
elif self.type == 'humidity':
self._state = round(data.get_humidity(), 1)
elif self.type == 'pressure':
self._state = round(data.get_pressure()['press'], 0)
elif self.type == 'clouds':
self._state = data.get_clouds()
elif self.type == 'rain':
if data.get_rain():
self._state = round(data.get_rain()['3h'], 0)
self._unit_of_measurement = 'mm'
else:
self._state = 'not raining'
self._unit_of_measurement = ''
elif self.type == 'snow':
if data.get_snow():
self._state = round(data.get_snow(), 0)
self._unit_of_measurement = 'mm'
else:
self._state = 'not snowing'
self._unit_of_measurement = ''
elif self.type == 'forecast':
self._state = fc_data.get_weathers()[0].get_status()
class WeatherData(object):
"""Get the latest data from OpenWeatherMap."""
def __init__(self, owm, forecast, latitude, longitude):
"""Initialize the data object."""
self.owm = owm
self.forecast = forecast
self.latitude = latitude
self.longitude = longitude
self.data = None
self.fc_data = None
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from OpenWeatherMap."""
obs = self.owm.weather_at_coords(self.latitude, self.longitude)
if obs is None:
_LOGGER.warning('Failed to fetch data from OWM')
return
self.data = obs.get_weather()
if self.forecast == 1:
obs = self.owm.three_hours_forecast_at_coords(self.latitude,
self.longitude)
self.fc_data = obs.get_forecast()
| 33.494624
| 79
| 0.617496
|
a254617bd948f167093fcdf391f123345660ae35
| 1,247
|
py
|
Python
|
jp.atcoder/abc060/arc073_b/8237346.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-09T03:06:25.000Z
|
2022-02-09T03:06:25.000Z
|
jp.atcoder/abc060/arc073_b/8237346.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-05T22:53:18.000Z
|
2022-02-09T01:29:30.000Z
|
jp.atcoder/abc060/arc073_b/8237346.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | null | null | null |
n, W = [int(x) for x in input().split()]
wv = [[int(x) for x in input().split()] for _ in range(n)] # weight, value
min_w = wv[0][0]
for i in range(n):
wv[i].insert(0, wv[i][1] / wv[i][0])
wv.sort(reverse=1) # sort in descending order of value per unit weight
sw = [0] * n # cumulative sum of weights
sw[0] = wv[0][1]
sv = [0] * n # cumulative sum of values
sv[0] = wv[0][2]
for i in range(n - 1):
sw[i + 1] = sw[i] + wv[i + 1][1]
sv[i + 1] = sv[i] + wv[i + 1][2]
ans = 0
for i in range(n):
if sw[i] < W:
        ans = sv[i] # keep updating while still under capacity
continue
elif sw[i] == W:
ans = sv[i]
break
else:
if i > 0:
d = W - sw[i - 1]
for j in range(d, min_w - 1, -1): # if d >= min_w
for k in range(i + 1, n):
if j == wv[k][1]:
ans += wv[k][2]
print(ans)
exit()
else:
for j in range(W, min_w - 1, -1):
for k in range(1, n):
if j == wv[k][1]:
ans += wv[k][2]
print(ans)
exit()
print(ans)
| 27.711111
| 76
| 0.38733
|
dcc358e323d4da561d37ecaac25b7775ee87c28f
| 148
|
py
|
Python
|
docs_src/path_operation_advanced_configuration/tutorial003.py
|
Aryabhata-Rootspring/fastapi
|
f6237ad05a8468ac19c591181adad38d75372c46
|
[
"MIT"
] | 53,007
|
2018-12-08T10:05:29.000Z
|
2022-03-31T23:30:02.000Z
|
docs_src/path_operation_advanced_configuration/tutorial003.py
|
Aryabhata-Rootspring/fastapi
|
f6237ad05a8468ac19c591181adad38d75372c46
|
[
"MIT"
] | 4,155
|
2019-01-05T05:07:49.000Z
|
2022-03-31T21:25:38.000Z
|
docs_src/path_operation_advanced_configuration/tutorial003.py
|
Aryabhata-Rootspring/fastapi
|
f6237ad05a8468ac19c591181adad38d75372c46
|
[
"MIT"
] | 4,092
|
2018-12-09T16:21:00.000Z
|
2022-03-31T07:59:45.000Z
|
from fastapi import FastAPI
app = FastAPI()
@app.get("/items/", include_in_schema=False)
async def read_items():
return [{"item_id": "Foo"}]
| 16.444444
| 44
| 0.682432
|
b8cfe6e8a7df5a671babb7f0f2bfe2f2a2544940
| 13,392
|
py
|
Python
|
tests/www/views/test_views_log.py
|
jayantsande25/airflow
|
d04aa135268b8e0230be3af6598a3b18e8614c3c
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2
|
2021-07-30T17:22:53.000Z
|
2021-08-03T13:51:15.000Z
|
tests/www/views/test_views_log.py
|
jayantsande25/airflow
|
d04aa135268b8e0230be3af6598a3b18e8614c3c
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/www/views/test_views_log.py
|
jayantsande25/airflow
|
d04aa135268b8e0230be3af6598a3b18e8614c3c
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2021-08-28T09:47:31.000Z
|
2021-08-28T09:47:31.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import copy
import logging.config
import pathlib
import sys
import tempfile
import unittest.mock
import urllib.parse
import pytest
from airflow import settings
from airflow.config_templates.airflow_local_settings import DEFAULT_LOGGING_CONFIG
from airflow.models import DAG, DagBag, TaskInstance
from airflow.operators.dummy import DummyOperator
from airflow.utils import timezone
from airflow.utils.log.logging_mixin import ExternalLoggingMixin
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.www.app import create_app
from tests.test_utils.config import conf_vars
from tests.test_utils.decorators import dont_initialize_flask_app_submodules
from tests.test_utils.www import client_with_login
DAG_ID = 'dag_for_testing_log_view'
DAG_ID_REMOVED = 'removed_dag_for_testing_log_view'
TASK_ID = 'task_for_testing_log_view'
DEFAULT_DATE = timezone.datetime(2017, 9, 1)
ENDPOINT = f'log?dag_id={DAG_ID}&task_id={TASK_ID}&execution_date={DEFAULT_DATE}'
@pytest.fixture(scope="module", autouse=True)
def backup_modules():
"""Make sure that the configure_logging is not cached."""
return dict(sys.modules)
@pytest.fixture(scope="module")
def log_app(backup_modules):
@dont_initialize_flask_app_submodules(
skip_all_except=["init_appbuilder", "init_jinja_globals", "init_appbuilder_views"]
)
@conf_vars({('logging', 'logging_config_class'): 'airflow_local_settings.LOGGING_CONFIG'})
def factory():
app = create_app(testing=True)
app.config["WTF_CSRF_ENABLED"] = False
settings.configure_orm()
security_manager = app.appbuilder.sm
if not security_manager.find_user(username='test'):
security_manager.add_user(
username='test',
first_name='test',
last_name='test',
email='test@fab.org',
role=security_manager.find_role('Admin'),
password='test',
)
return app
# Create a custom logging configuration
logging_config = copy.deepcopy(DEFAULT_LOGGING_CONFIG)
logging_config['handlers']['task']['base_log_folder'] = str(
pathlib.Path(__file__, "..", "..", "test_logs").resolve(),
)
logging_config['handlers']['task'][
'filename_template'
] = '{{ ti.dag_id }}/{{ ti.task_id }}/{{ ts | replace(":", ".") }}/{{ try_number }}.log'
with tempfile.TemporaryDirectory() as settings_dir:
local_settings = pathlib.Path(settings_dir, "airflow_local_settings.py")
local_settings.write_text(f"LOGGING_CONFIG = {logging_config!r}")
sys.path.append(settings_dir)
yield factory()
sys.path.remove(settings_dir)
logging.config.dictConfig(DEFAULT_LOGGING_CONFIG)
@pytest.fixture(autouse=True)
def reset_modules_after_every_test(backup_modules):
yield
# Remove any new modules imported during the test run.
# This lets us import the same source files for more than one test.
for mod in [m for m in sys.modules if m not in backup_modules]:
del sys.modules[mod]
@pytest.fixture(autouse=True)
def dags(log_app):
dag = DAG(DAG_ID, start_date=DEFAULT_DATE)
dag_removed = DAG(DAG_ID_REMOVED, start_date=DEFAULT_DATE)
bag = DagBag(include_examples=False)
bag.bag_dag(dag=dag, root_dag=dag)
bag.bag_dag(dag=dag_removed, root_dag=dag_removed)
    # We don't want to store the code for the DAG defined in this file, so
    # patch out DagCode.bulk_sync_to_db
with unittest.mock.patch('airflow.models.dag.DagCode.bulk_sync_to_db'):
dag.sync_to_db()
dag_removed.sync_to_db()
bag.sync_to_db()
log_app.dag_bag = bag
return dag, dag_removed
@pytest.fixture(autouse=True)
def tis(dags):
dag, dag_removed = dags
ti = TaskInstance(
task=DummyOperator(task_id=TASK_ID, dag=dag),
execution_date=DEFAULT_DATE,
)
ti.try_number = 1
ti_removed_dag = TaskInstance(
task=DummyOperator(task_id=TASK_ID, dag=dag_removed),
execution_date=DEFAULT_DATE,
)
ti_removed_dag.try_number = 1
with create_session() as session:
session.merge(ti)
session.merge(ti_removed_dag)
yield ti, ti_removed_dag
with create_session() as session:
session.query(TaskInstance).delete()
@pytest.fixture()
def log_admin_client(log_app):
return client_with_login(log_app, username="test", password="test")
@pytest.mark.parametrize(
"state, try_number, num_logs",
[
(State.NONE, 0, 0),
(State.UP_FOR_RETRY, 2, 2),
(State.UP_FOR_RESCHEDULE, 0, 1),
(State.UP_FOR_RESCHEDULE, 1, 2),
(State.RUNNING, 1, 1),
(State.SUCCESS, 1, 1),
(State.FAILED, 3, 3),
],
ids=[
"none",
"up-for-retry",
"up-for-reschedule-0",
"up-for-reschedule-1",
"running",
"success",
"failed",
],
)
def test_get_file_task_log(log_admin_client, tis, state, try_number, num_logs):
ti, _ = tis
with create_session() as session:
ti.state = state
ti.try_number = try_number
session.merge(ti)
response = log_admin_client.get(
ENDPOINT,
data={"username": "test", "password": "test"},
follow_redirects=True,
)
assert response.status_code == 200
data = response.data.decode()
assert 'Log by attempts' in data
for num in range(1, num_logs + 1):
assert f'log-group-{num}' in data
assert 'log-group-0' not in data
assert f'log-group-{num_logs + 1}' not in data
def test_get_logs_with_metadata_as_download_file(log_admin_client):
url_template = (
"get_logs_with_metadata?dag_id={}&"
"task_id={}&execution_date={}&"
"try_number={}&metadata={}&format=file"
)
try_number = 1
url = url_template.format(
DAG_ID,
TASK_ID,
urllib.parse.quote_plus(DEFAULT_DATE.isoformat()),
try_number,
"{}",
)
response = log_admin_client.get(url)
content_disposition = response.headers['Content-Disposition']
assert content_disposition.startswith('attachment')
assert f'{DAG_ID}/{TASK_ID}/{DEFAULT_DATE.isoformat()}/{try_number}.log' in content_disposition
assert 200 == response.status_code
assert 'Log for testing.' in response.data.decode('utf-8')
@unittest.mock.patch(
"airflow.utils.log.file_task_handler.FileTaskHandler.read",
side_effect=[
([[('default_log', '1st line')]], [{}]),
([[('default_log', '2nd line')]], [{'end_of_log': False}]),
([[('default_log', '3rd line')]], [{'end_of_log': True}]),
([[('default_log', 'should never be read')]], [{'end_of_log': True}]),
],
)
def test_get_logs_with_metadata_as_download_large_file(_, log_admin_client):
url_template = (
"get_logs_with_metadata?dag_id={}&"
"task_id={}&execution_date={}&"
"try_number={}&metadata={}&format=file"
)
try_number = 1
url = url_template.format(
DAG_ID,
TASK_ID,
urllib.parse.quote_plus(DEFAULT_DATE.isoformat()),
try_number,
"{}",
)
response = log_admin_client.get(url)
data = response.data.decode()
assert '1st line' in data
assert '2nd line' in data
assert '3rd line' in data
assert 'should never be read' not in data
@pytest.mark.parametrize("metadata", ["null", "{}"])
def test_get_logs_with_metadata(log_admin_client, metadata):
url_template = "get_logs_with_metadata?dag_id={}&task_id={}&execution_date={}&try_number={}&metadata={}"
response = log_admin_client.get(
url_template.format(
DAG_ID,
TASK_ID,
urllib.parse.quote_plus(DEFAULT_DATE.isoformat()),
1,
metadata,
),
data={"username": "test", "password": "test"},
follow_redirects=True,
)
assert 200 == response.status_code
data = response.data.decode()
assert '"message":' in data
assert '"metadata":' in data
assert 'Log for testing.' in data
@unittest.mock.patch(
"airflow.utils.log.file_task_handler.FileTaskHandler.read",
return_value=(['airflow log line'], [{'end_of_log': True}]),
)
def test_get_logs_with_metadata_for_removed_dag(_, log_admin_client):
url_template = "get_logs_with_metadata?dag_id={}&task_id={}&execution_date={}&try_number={}&metadata={}"
response = log_admin_client.get(
url_template.format(
DAG_ID_REMOVED,
TASK_ID,
urllib.parse.quote_plus(DEFAULT_DATE.isoformat()),
1,
"{}",
),
data={"username": "test", "password": "test"},
follow_redirects=True,
)
assert 200 == response.status_code
data = response.data.decode()
assert '"message":' in data
assert '"metadata":' in data
assert 'airflow log line' in data
def test_get_logs_response_with_ti_equal_to_none(log_admin_client):
url_template = (
"get_logs_with_metadata?dag_id={}&"
"task_id={}&execution_date={}&"
"try_number={}&metadata={}&format=file"
)
try_number = 1
url = url_template.format(
DAG_ID,
'Non_Existing_ID',
urllib.parse.quote_plus(DEFAULT_DATE.isoformat()),
try_number,
"{}",
)
response = log_admin_client.get(url)
data = response.json
assert 'message' in data
assert 'error' in data
assert "*** Task instance did not exist in the DB\n" == data['message']
def test_get_logs_with_json_response_format(log_admin_client):
url_template = (
"get_logs_with_metadata?dag_id={}&"
"task_id={}&execution_date={}&"
"try_number={}&metadata={}&format=json"
)
try_number = 1
url = url_template.format(
DAG_ID,
TASK_ID,
urllib.parse.quote_plus(DEFAULT_DATE.isoformat()),
try_number,
"{}",
)
response = log_admin_client.get(url)
assert 200 == response.status_code
assert 'message' in response.json
assert 'metadata' in response.json
assert 'Log for testing.' in response.json['message'][0][1]
@unittest.mock.patch("airflow.www.views.TaskLogReader")
def test_get_logs_for_handler_without_read_method(mock_reader, log_admin_client):
type(mock_reader.return_value).supports_read = unittest.mock.PropertyMock(return_value=False)
url_template = (
"get_logs_with_metadata?dag_id={}&"
"task_id={}&execution_date={}&"
"try_number={}&metadata={}&format=json"
)
try_number = 1
url = url_template.format(
DAG_ID,
TASK_ID,
urllib.parse.quote_plus(DEFAULT_DATE.isoformat()),
try_number,
"{}",
)
response = log_admin_client.get(url)
assert 200 == response.status_code
data = response.json
assert 'message' in data
assert 'metadata' in data
assert 'Task log handler does not support read logs.' in data['message']
@pytest.mark.parametrize("task_id", ['inexistent', TASK_ID])
def test_redirect_to_external_log_with_local_log_handler(log_admin_client, task_id):
"""Redirect to home if TI does not exist or if log handler is local"""
url_template = "redirect_to_external_log?dag_id={}&task_id={}&execution_date={}&try_number={}"
try_number = 1
url = url_template.format(
DAG_ID,
task_id,
urllib.parse.quote_plus(DEFAULT_DATE.isoformat()),
try_number,
)
response = log_admin_client.get(url)
assert 302 == response.status_code
assert 'http://localhost/home' == response.headers['Location']
class _ExternalHandler(ExternalLoggingMixin):
EXTERNAL_URL = 'http://external-service.com'
@property
def log_name(self) -> str:
return 'ExternalLog'
def get_external_log_url(self, *args, **kwargs) -> str:
return self.EXTERNAL_URL
@property
def supports_external_link(self) -> bool:
return True
@unittest.mock.patch(
'airflow.utils.log.log_reader.TaskLogReader.log_handler',
new_callable=unittest.mock.PropertyMock,
return_value=_ExternalHandler(),
)
def test_redirect_to_external_log_with_external_log_handler(_, log_admin_client):
url_template = "redirect_to_external_log?dag_id={}&task_id={}&execution_date={}&try_number={}"
try_number = 1
url = url_template.format(
DAG_ID,
TASK_ID,
urllib.parse.quote_plus(DEFAULT_DATE.isoformat()),
try_number,
)
response = log_admin_client.get(url)
assert 302 == response.status_code
assert _ExternalHandler.EXTERNAL_URL == response.headers['Location']
| 32.347826
| 108
| 0.671744
|
f847ae3ae08d4ce7b42ad8a1d6cfd2db782e029f
| 1,259
|
py
|
Python
|
models/difference.py
|
ChaokunChang/SVAS
|
61af6eb39269edff8ea5147311628b3200c3a3d2
|
[
"Apache-2.0"
] | null | null | null |
models/difference.py
|
ChaokunChang/SVAS
|
61af6eb39269edff8ea5147311628b3200c3a3d2
|
[
"Apache-2.0"
] | null | null | null |
models/difference.py
|
ChaokunChang/SVAS
|
61af6eb39269edff8ea5147311628b3200c3a3d2
|
[
"Apache-2.0"
] | null | null | null |
from numpy.core.numeric import Inf
import torch
class SimpleDiff():
def __init__(self, diff_thresh=30.0, delay=30) -> None:
self.diff_thresh = diff_thresh
self.delay = delay
def infer(self, frames, delay=None):
if delay is None:
delay = self.delay
num_frames = len(frames)
results = [Inf] + [-Inf]*(min(delay, num_frames)-1)
prev_key_id = len(results) // 2
ref_frame = frames[prev_key_id]
for start_id in range(delay, num_frames, delay):
end_id = min(start_id + delay, num_frames)
batch_results = [-Inf for _ in range(start_id, end_id)]
if len(batch_results) == 0:
break
cur_key_id = (start_id + end_id) // 2
cur_frame = frames[cur_key_id]
diff = ((cur_frame - ref_frame.unsqueeze(0))**2).view(len(cur_frame), -1).mean(-1)
score = torch.sum(diff).cpu()
# print("DEBUG-DIFF: ", diff)
if score > self.diff_thresh:
batch_results[0] = Inf
ref_frame = cur_frame
results.extend(batch_results)
return results
def __call__(self, frames, delay=None):
return self.infer(frames, delay)
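# Illustrative usage (a sketch; `frames` is assumed to be an indexable batch
# of frame tensors, e.g. shape [N, C, H, W]):
#   detector = SimpleDiff(diff_thresh=30.0, delay=30)
#   scores = detector(frames)  # Inf marks chosen key frames, -Inf the rest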
| 35.971429
| 94
| 0.570294
|
f7dbc2e9901dc1323049db23230fd23f2b3b5ede
| 191
|
py
|
Python
|
expense/admin.py
|
ShyamSundhar1411/Expense-Manager
|
8ecc4196619f1666f43fa74f1140c72a1e14b800
|
[
"MIT"
] | 1
|
2021-11-10T15:29:22.000Z
|
2021-11-10T15:29:22.000Z
|
expense/admin.py
|
ShyamSundhar1411/Expense-Manager
|
8ecc4196619f1666f43fa74f1140c72a1e14b800
|
[
"MIT"
] | null | null | null |
expense/admin.py
|
ShyamSundhar1411/Expense-Manager
|
8ecc4196619f1666f43fa74f1140c72a1e14b800
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Expense,Budget,Profile
# Register your models here.
admin.site.register(Expense)
admin.site.register(Budget)
admin.site.register(Profile)
| 27.285714
| 42
| 0.82199
|
2c2c0776a9592d048b2a2656481cdaa527d11ec2
| 444
|
py
|
Python
|
api/views.py
|
julianarchila/music_controller
|
166936ff9b03fbf7dc0d27db923638ac284636bd
|
[
"MIT"
] | 1
|
2021-08-07T08:03:52.000Z
|
2021-08-07T08:03:52.000Z
|
api/views.py
|
julianarchila/music_controller
|
166936ff9b03fbf7dc0d27db923638ac284636bd
|
[
"MIT"
] | null | null | null |
api/views.py
|
julianarchila/music_controller
|
166936ff9b03fbf7dc0d27db923638ac284636bd
|
[
"MIT"
] | null | null | null |
""" Api views. """
# Django REST Framework
from rest_framework.viewsets import GenericViewSet
from rest_framework import mixins
# Serializers
from api.serializers import RoomModelSerializer
# Models
from api.models import Room
class RoomViewSet(
mixins.CreateModelMixin,
mixins.ListModelMixin,
GenericViewSet):
""" Room view set. """
serializer_class = RoomModelSerializer
queryset = Room.objects.all()
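# Usage sketch (illustrative, not part of the original file): the view set is
# typically wired to URLs with a DRF router, e.g. in urls.py:
#
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register(r'rooms', RoomViewSet, basename='room')
#   urlpatterns = router.urls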
| 21.142857
| 50
| 0.734234
|
77ae80a24d3a27b944e22b681f4072ec80a5295a
| 169
|
py
|
Python
|
src/domain/use_cases/divide.py
|
panda-coder/py-clean-flask
|
e7b8af5056178cd1dc6161f52a909f8043dc4b66
|
[
"MIT"
] | null | null | null |
src/domain/use_cases/divide.py
|
panda-coder/py-clean-flask
|
e7b8af5056178cd1dc6161f52a909f8043dc4b66
|
[
"MIT"
] | null | null | null |
src/domain/use_cases/divide.py
|
panda-coder/py-clean-flask
|
e7b8af5056178cd1dc6161f52a909f8043dc4b66
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from domain.dtos import DivideDTO
class Divide(ABC):
@abstractmethod
def calculate(self, params: DivideDTO):
pass
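# Usage sketch (illustrative): a concrete use case subclasses the port and
# implements calculate(). The DivideDTO field names below are assumptions:
#
#   class DivideTwoNumbers(Divide):
#       def calculate(self, params: DivideDTO):
#           if params.divisor == 0:
#               raise ValueError("division by zero")
#           return params.dividend / params.divisor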
| 16.9
| 43
| 0.721893
|
e0c35b43e304e504503490a3a2636c90f41bacd4
| 2,851
|
py
|
Python
|
MultiEsenNIC/pycocoevalcap/tokenizer/ptbtokenizer.py
|
CSnode/Global-Local-Captioning
|
7eb420b1217391d03732e220d92e680f0aadb38e
|
[
"MIT"
] | null | null | null |
MultiEsenNIC/pycocoevalcap/tokenizer/ptbtokenizer.py
|
CSnode/Global-Local-Captioning
|
7eb420b1217391d03732e220d92e680f0aadb38e
|
[
"MIT"
] | null | null | null |
MultiEsenNIC/pycocoevalcap/tokenizer/ptbtokenizer.py
|
CSnode/Global-Local-Captioning
|
7eb420b1217391d03732e220d92e680f0aadb38e
|
[
"MIT"
] | null | null | null |
#-*- coding:utf-8 -*-
#!/usr/bin/env python
#
# File Name : ptbtokenizer.py
#
# Description : Do the PTB Tokenization and remove punctuations.
#
# Creation Date : 29-12-2014
# Last Modified : Thu Mar 19 09:53:35 2015
# Authors : Hao Fang <hfang@uw.edu> and Tsung-Yi Lin <tl483@cornell.edu>
import os
import sys
import subprocess
import tempfile
import itertools
# path to the stanford corenlp jar
STANFORD_CORENLP_3_4_1_JAR = 'stanford-corenlp-3.4.1.jar'
# punctuations to be removed from the sentences
PUNCTUATIONS = ["''", "'", "``", "`", "-LRB-", "-RRB-", "-LCB-", "-RCB-", \
".", "?", "!", ",", ":", "-", "--", "...", ";", "。"]
class PTBTokenizer:
"""Python wrapper of Stanford PTBTokenizer"""
def tokenize(self, captions_for_image):
cmd = ['java', '-cp', STANFORD_CORENLP_3_4_1_JAR, \
'edu.stanford.nlp.process.PTBTokenizer', \
'-preserveLines', '-lowerCase']
# ======================================================
# prepare data for PTB Tokenizer
# ======================================================
final_tokenized_captions_for_image = {}
image_id = [k for k, v in captions_for_image.items() for _ in range(len(v))]
sentences = '\n'.join([c['caption'].replace('\n', ' ') for k, v in captions_for_image.items() for c in v])
# ======================================================
# save sentences to temporary file
# ======================================================
path_to_jar_dirname=os.path.dirname(os.path.abspath(__file__))
tmp_file = tempfile.NamedTemporaryFile(delete=False, dir=path_to_jar_dirname)
tmp_file.write(sentences.encode('utf-8'))
tmp_file.close()
# ======================================================
# tokenize sentence
# ======================================================
cmd.append(os.path.basename(tmp_file.name))
p_tokenizer = subprocess.Popen(cmd, cwd=path_to_jar_dirname, \
stdout=subprocess.PIPE)
        token_lines = p_tokenizer.communicate(input=sentences.rstrip())[0]
        token_lines = token_lines.decode('utf-8')  # subprocess returns bytes under Python 3
        lines = token_lines.split('\n')
# remove temp file
os.remove(tmp_file.name)
# ======================================================
# create dictionary for tokenized captions
# ======================================================
for k, line in zip(image_id, lines):
            if k not in final_tokenized_captions_for_image:
final_tokenized_captions_for_image[k] = []
tokenized_caption = ' '.join([w for w in line.rstrip().split(' ') \
if w not in PUNCTUATIONS])
final_tokenized_captions_for_image[k].append(tokenized_caption)
return final_tokenized_captions_for_image
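# Usage sketch (illustrative; running it requires stanford-corenlp-3.4.1.jar
# next to this file and a Java runtime on the PATH, so it is left as a comment):
#
#   tokenizer = PTBTokenizer()
#   captions = {42: [{'caption': 'A man riding a horse.'}]}
#   tokenizer.tokenize(captions)
#   # -> {42: ['a man riding a horse']}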
| 40.728571
| 114
| 0.519467
|
99ce67861db52925fd7c7e46be30807ffb2a19d0
| 1,679
|
py
|
Python
|
samples/carpedrestian/car_pedrestian_api/api/config.py
|
Risvil/CarPedrestianDetectorAPI
|
19beb31c991304115279fef5f231d0408be37989
|
[
"MIT"
] | null | null | null |
samples/carpedrestian/car_pedrestian_api/api/config.py
|
Risvil/CarPedrestianDetectorAPI
|
19beb31c991304115279fef5f231d0408be37989
|
[
"MIT"
] | null | null | null |
samples/carpedrestian/car_pedrestian_api/api/config.py
|
Risvil/CarPedrestianDetectorAPI
|
19beb31c991304115279fef5f231d0408be37989
|
[
"MIT"
] | null | null | null |
import pathlib
import logging
from logging.handlers import TimedRotatingFileHandler
import os
import sys
PACKAGE_ROOT = pathlib.Path(__file__).resolve().parent.parent
FORMATTER = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s -"
"%(funcName)s:%(lineno)d - %(message)s"
)
LOG_DIR = PACKAGE_ROOT / 'logs'
LOG_DIR.mkdir(exist_ok=True)
LOG_FILE = LOG_DIR / 'car_pedrestian_api.log'
UPLOAD_FOLDER = PACKAGE_ROOT / 'uploads'
UPLOAD_FOLDER.mkdir(exist_ok=True)
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
def get_console_handler():
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(FORMATTER)
return console_handler
def get_file_handler():
file_handler = TimedRotatingFileHandler(
LOG_FILE, when='midnight'
)
file_handler.setFormatter(FORMATTER)
file_handler.setLevel(logging.WARNING)
return file_handler
def get_logger(logger_name):
"""Get logger with prepared handlers."""
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
logger.addHandler(get_console_handler())
logger.addHandler(get_file_handler())
logger.propagate = False
return logger
class Config:
DEBUG = False
TESTING = False
CSRF_ENABLED = True
SECRET_KEY = 'this-really-needs-to-be-changed'
SERVER_PORT = 5000
UPLOAD_FOLDER = UPLOAD_FOLDER
class ProductionConfig(Config):
DEBUG = False
    SERVER_ADDRESS = os.environ.get('SERVER_ADDRESS', '0.0.0.0')
    SERVER_PORT = int(os.environ.get('SERVER_PORT', '5000'))
class DevelopmentConfig(Config):
DEVELOPMENT = True
DEBUG = True
class TestingConfig(Config):
TESTING = True
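# Usage sketch (illustrative): modules obtain a logger through get_logger, and
# the application factory would pick one of the Config subclasses:
#
#   logger = get_logger(__name__)
#   logger.info("running on port %s", DevelopmentConfig.SERVER_PORT)
#   app.config.from_object(DevelopmentConfig)  # `app` is a hypothetical Flask app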
| 23.319444
| 64
| 0.724241
|
66e619e99524835511454a826a9afa19c968806b
| 109,839
|
py
|
Python
|
devices/mikrotik/mikrotik_routeros.py
|
ericorain/netscud
|
b791ee25c2e60b10f7269c306c4b153914149b22
|
[
"Apache-2.0"
] | null | null | null |
devices/mikrotik/mikrotik_routeros.py
|
ericorain/netscud
|
b791ee25c2e60b10f7269c306c4b153914149b22
|
[
"Apache-2.0"
] | null | null | null |
devices/mikrotik/mikrotik_routeros.py
|
ericorain/netscud
|
b791ee25c2e60b10f7269c306c4b153914149b22
|
[
"Apache-2.0"
] | null | null | null |
# Python library import
from netscud.base_connection import NetworkDevice, log
import asyncio, asyncssh
# Declaration of constant values
# Max data to read in read function
MAX_BUFFER_DATA = 65535
class MikrotikRouterOS(NetworkDevice):
"""
Class for Mikrotik RouterOS devices
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.interface_mode = {
"access": "admit-only-untagged-and-priority-tagged",
"trunk": "admit-only-vlan-tagged",
"hybrid": "admit-all",
}
# Remove useless escape data using the user login
self.username = self.username + "+cte"
# self._connect_first_ending_prompt = ["> \x1b[K"]
self._connect_first_ending_prompt = "\x1b\x5b\x4b"
self._connect_second_ending_prompt = "> "
self.list_of_possible_ending_prompts = [
"] > ",
]
self._carriage_return_for_send_command = "\r\n"
self._telnet_connect_login = "Login: "
self._telnet_connect_password = "Password: "
self._telnet_connect_authentication_fail_prompt = [
"Login: ",
"Login failed, incorrect username or password",
]
self._telnet_connect_first_ending_prompt = ["] > "]
# General commands
        # Paging cannot be disabled globally on Mikrotik RouterOS, so add
        # "without-paging" at the end of your commands
self.cmd_disable_paging = None
self.cmd_exit_config_mode = "/"
self.cmd_get_version = "system resource print without-paging"
self.cmd_get_hostname = "system identity print without-paging"
self.cmd_get_model = "system resource print without-paging"
self.cmd_get_serial_number = "system routerboard print without-paging"
self.cmd_get_config = "export"
# No command to save the config. So it is always saved after "Enter"
self.cmd_save_config = ""
# Layer 1 commands
# Commands for status, duplex/speed, mode
# self.cmd_get_interfaces = [
# "interface ethernet print terse without-paging",
# "foreach i in=([/interface ethernet find]) do={/interface ethernet monitor $i once without-paging}",
# "interface bridge vlan print terse",
# ]
self.cmd_get_interfaces = [
"interface ethernet print terse without-paging",
"foreach i in=([/interface ethernet find]) do={/interface ethernet monitor $i once without-paging}",
"interface bridge port print terse without-paging",
]
self.cmd_set_interface = [
"interface ethernet enable <INTERFACE>",
"interface ethernet disable <INTERFACE>",
'interface ethernet comment <INTERFACE> "<COMMENT>"',
"interface ethernet set l2mtu=<MAXIMUMFRAMESIZE> <INTERFACE>",
"interface bridge port set frame-types=<MODE> ingress-filtering=<FILTERINGVLAN> [find interface=<INTERFACE>]",
]
# Layer 2 commands
self.cmd_get_mac_address_table = "interface bridge host print without-paging"
self.cmd_get_arp = "ip arp print terse without-paging"
self.cmd_get_lldp_neighbors = "ip neighbor print terse without-paging"
self.cmd_get_vlans = "interface bridge vlan print terse without-paging"
self.cmd_get_bridges = (
"interface bridge print terse without-paging" # Specific to Mikrotik
)
self.cmd_add_vlan = 'interface bridge vlan add vlan-ids=<VLAN> comment="<VLAN_NAME>" bridge=<BRIDGE>'
self.cmd_remove_vlan = "interface bridge vlan remove [find vlan-ids=<VLAN>]"
self.cmd_add_interface_to_vlan = [
"interface bridge vlan print terse",
"interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>",
"interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>",
"interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>",
]
self.cmd_remove_interface_from_vlan = [
"interface bridge vlan print terse",
"interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>",
"interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>",
"interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>",
]
# Layer 3 commands
self.cmd_get_routing_table = "ip route print without-paging terse"
self.cmd_get_interfaces_ip = "ip address print terse without-paging"
self.cmd_add_static_route = "ip route add dst-address=<NETWORK>/<PREFIXLENGTH> gateway=<DESTINATION> distance=<METRIC>"
self.cmd_remove_static_route = (
"ip route remove [find dst-address=<NETWORK>/<PREFIXLENGTH>]"
)
async def connectSSH(self):
"""
Async method used for connecting a device using SSH protocol
Mikrotik has a special prompt which is difficult to manage. Here
is an example of the SSH prompt of Mikrotik switch:
"[admin@myswitch] >
[admin@myswitch] >
[admin@myswitch] > [K
[admin@myswitch] >"
So this method is special to Mikrotik devices.
"""
# Display info message
log.info("connectSSH")
# Parameters of the connection
generator = asyncssh.connect(
self.ip,
username=self.username,
password=self.password,
known_hosts=None,
# encryption_algs="*", # Parameter that includes all encryption algorithms (even the old ones disabled by default)
encryption_algs=[
algs.decode("utf-8") for algs in asyncssh.encryption._enc_algs
], # Parameter that includes all encryption algorithms (even the old ones disabled by default)
)
# Trying to connect to the device
try:
self.conn = await asyncio.wait_for(generator, timeout=self.timeout)
except asyncio.exceptions.TimeoutError as error:
# Timeout
# Display error message
log.error(f"connectSSH: connection failed: {self.ip} timeout: '{error}'")
# Exception propagation
raise asyncio.exceptions.TimeoutError(
"Connection failed: connection timed out."
)
except Exception as error:
# Connection failed
# Display error message
log.error(f"connectSSH: connection failed: {self.ip} '{error}'")
# Exception propagation
raise
# Display info message
log.info("connectSSH: connection success")
# Create a session
self.stdinx, self.stdoutx, _ = await self.conn.open_session(term_type="netscud")
# Display info message
log.info("connectSSH: open_session success")
# By default no data has been read
data = ""
# By default no prompt found
prompt_not_found = True
try:
# Read data
while prompt_not_found:
# Display info message
log.info("connectSSH: beginning of the loop")
# Read the prompt
data += await asyncio.wait_for(
self.stdoutx.read(MAX_BUFFER_DATA), timeout=self.timeout
)
# Display info message
log.info(f"connectSSH: data: '{str(data)}'")
# Display info message
log.info(f"connectSSH: data: hex:'{data.encode('utf-8').hex()}'")
# Check if the first part of the expected prompt is found
if self._connect_first_ending_prompt in data:
# Found
# Second (ending) prompt found?
if data.endswith(self._connect_second_ending_prompt):
# Yes
# Display info message
log.info(f"connectSSH: ending of prompt found")
# A ending prompt has been found
prompt_not_found = False
# Leave the loop
break
# Display info message
log.info("connectSSH: end of loop")
except Exception as error:
# Fail while reading the prompt
# Display error message
log.error(
f"connectSSH: timeout while reading the prompt: {self.ip} '{error}'"
)
# Exception propagation
raise
# Display info message
log.info(f"connectSSH: end of prompt loop")
# # Remove possible escape sequence
# data = self.remove_ansi_escape_sequence(data)
# # Find prompt
# self.prompt = self.find_prompt(str(data))
# # Display info message
# log.info(f"connectSSH: prompt found: '{self.prompt}'")
# # Display info message
# log.info(f"connectSSH: prompt found size: '{len(self.prompt)}'")
# # Disable paging command available?
# if self.cmd_disable_paging:
# # Yes
# # Disable paging
# await self.disable_paging()
async def connectTelnet(self):
"""
Async method used for connecting a device using Telnet protocol
Mikrotik has a special prompt which is difficult to manage. Here
is an example of the Telnet prompt of Mikrotik switch:
"\r\r\r\r\r\r[admin@myswitch] > \r[admin@myswitch] > "
So this method is special to Mikrotik devices.
"""
# Display info message
log.info("connectTelnet")
try:
# Prepare connection with Telnet
conn = asyncio.open_connection(self.ip, self.port)
except Exception as error:
# Preparation to the connection failed
# Display error message
log.error(f"connectTelnet: preparation to the connection failed: '{error}'")
# Exception propagation
raise
# Display info message
log.info("connectTelnet: preparation to the connection success")
try:
# Connection with Telnet
self._reader, self._writer = await asyncio.wait_for(
conn, timeout=self.timeout
)
except asyncio.TimeoutError:
# Time out during connection
# Display error message
log.error("connectTelnet: connection: timeout")
# Exception propagation
raise
# Display info message
log.info("connectTelnet: connection success")
# Get prompt for the login
prompt = self._telnet_connect_login
# Get prompt for the password
prompt_password = self._telnet_connect_password
# By default a login is expected
use_login = True
# Temporary string variable
output = ""
# Temporary bytes variable
byte_data = b""
# Read the telnet information and first prompt (for login but a password prompt can be found for IOS for instance)
while True:
# Display info message
log.info(f"connectTelnet: read data for prompt")
# await asyncio.sleep(2)
# Read returned prompt
byte_data += await asyncio.wait_for(
self._reader.read(MAX_BUFFER_DATA), timeout=self.timeout
)
# Display info message
log.info(f"connectTelnet: byte_data: {byte_data}")
            # Temporary conversion to string. This string has the following form: "b'....'"
output = str(byte_data)
# Display info message
log.info(f"connectTelnet: output: {output}")
# Prompt for the username found?
if prompt in output:
# Yes
# Leave the loop
break
# Prompt for the password found?
elif prompt_password in output:
# Yes
# That means only password is required
use_login = False
# Leave the loop
break
            # A special Telnet string sent at first connection?
elif b"\xff\xfd\x18\xff\xfd \xff\xfd#\xff\xfd" in byte_data:
# Yes
# Display info message
log.info(f"connectTelnet: telnet_init_message")
# chr(0xFF).chr(0xFB).chr(0x1F).chr(0xFF).chr(0xFB).chr(0x20).chr(0xFF).chr(0xFB).chr(0x18).chr(0xFF).chr(0xFB).chr(0x27).chr(0xFF).chr(0xFD).chr(0x01).chr(0xFF).chr(0xFB).chr(0x03).chr(0xFF).chr(0xFD).chr(0x03).chr(0xFF).chr(0xFC).chr(0x23).chr(0xFF).chr(0xFC).chr(0x24).chr(0xFF).chr(0xFA).chr(0x1F).chr(0x00).chr(0x50).chr(0x00).chr(0x18).chr(0xFF).chr(0xF0).chr(0xFF).chr(0xFA).chr(0x20).chr(0x00).chr(0x33).chr(0x38).chr(0x34).chr(0x30).chr(0x30).chr(0x2C).chr(0x33).chr(0x38).chr(0x34).chr(0x30).chr(0x30).chr(0xFF).chr(0xF0).chr(0xFF).chr(0xFA).chr(0x27).chr(0x00).chr(0xFF).chr(0xF0).chr(0xFF).chr(0xFA).chr(0x18).chr(0x00).chr(0x41).chr(0x4E).chr(0x53).chr(0x49).chr(0xFF).chr(0xF0);
# Messages in Telnet format
cmd = b"\xff\xfb\x1f\xff\xfb\x20\xff\xfb\x18\xff\xfb\x27\xff\xfd\x01\xff\xfb\x03\xff\xfd\x03\xff\xfc\x23\xff\xfc\x24\xff\xfa\x1f\x00\x50\x00\x18\xff\xf0\xff\xfa\x20\x00\x33\x38\x34\x30\x30\x2c\x33\x38\x34\x30\x30\xff\xf0\xff\xfa\x27\x00\xff\xf0\xff\xfa\x18\x00\x41\x4e\x53\x49\xff\xf0"
cmd += b"\xff\xfc\x01\xff\xfc\x22\xff\xfe\x05\xff\xfc\x21"
# Display info message
log.info(f"connectTelnet: telnet_init_message: send: {cmd}")
# Display info message
log.debug(f"connectTelnet: telnet_init_message: send: '{cmd.hex()}'")
# Sending command
self._writer.write(cmd)
# Temporary bytes variable cleared
byte_data = b""
# Display info message
log.info(f"connectTelnet: login prompt: '{output}'")
# Login to use?
if use_login:
# Yes
# Display info message
log.info("connectTelnet: sending login")
try:
# Send login
# await self.send_command(self.username, prompt_password)
# Sending command
cmd = self.username + "\r\n"
self._writer.write(cmd.encode())
# Display info message
log.info("connectTelnet: login sent")
except Exception:
# Problem with the login
# Propagate the exception
raise
# Display info message
log.info("connectTelnet: sending password")
try:
# Send password
output = await self.telnet_send_command_with_unexpected_pattern(
self.password,
self._telnet_connect_first_ending_prompt,
self._telnet_connect_authentication_fail_prompt,
)
except Exception:
# Problem with the password
# Propagate the exception
raise
# Display info message
log.info("connectTelnet: password sent")
async def send_commandSSH(self, cmd, pattern=None, timeout=None):
"""
Async method used to send data to a device
:param cmd: command to send
:type cmd: str
:param pattern: optional, a pattern replacing the prompt when the prompt is not expected
:type pattern: str
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: float
:return: the output of command
:rtype: str
"""
# Debug info message
log.info("send_commandSSH")
# Default value of timeout variable
if timeout is None:
timeout = self.timeout
# Debug info message
log.info(f"send_commandSSH: cmd = '{cmd}'")
# Sending command
# Add carriage return at the end of the command (mandatory to send the command)
self.stdinx.write(cmd + self._carriage_return_for_send_command)
# Display message
log.info("send_commandSSH: command sent")
# Variable used to gather data
output = ""
# Variable used for leaving loop (necessary since there is a "while" with a "for" and a "break" command)
stay_in_loop = True
# Reading data
while stay_in_loop:
# Read the data received
output += await asyncio.wait_for(
self.stdoutx.read(MAX_BUFFER_DATA), timeout=timeout
)
# Debug info message
log.debug(f"send_commandSSH: output hex: '{output.encode('utf-8').hex()}'")
# Remove ANSI escape sequence
output = self.remove_ansi_escape_sequence(output)
# Remove possible "\r"
output = output.replace("\r", "")
# Debug info message
log.info(f"send_commandSSH: output: '{output}'")
            # Is a pattern used?
if pattern:
# Use pattern instead of prompt
if pattern in output:
# Yes
# Leave the loop
break
else:
# Check if prompt is found
for prompt in self.list_of_possible_ending_prompts:
# A pattern found twice (or more)?
if output.count(prompt) >= 2:
# Yes
# Display info message
log.info(
f"send_commandSSH: prompt found twice or more: '{prompt}'"
)
# Will leave the while loop
stay_in_loop = False
# Leave the loop
break
# Debug info message
log.debug(
f"send_commandSSH: raw output: '{output}'\nsend_commandSSH: raw output (hex): '{output.encode().hex()}'"
)
# # Remove the command sent from the result of the command
# output = self.remove_command_in_output(output, str(cmd))
# # Remove the carriage return of the output
# output = self.remove_starting_carriage_return_in_output(output)
# # Remove the ending prompt of the output
# output = self.remove_ending_prompt_in_output(output)
# Remove the command sent from the result of the command
# output = self.remove_command_in_output(output, str(cmd))
# For Mikrotik just remove the first line (complicated otherwise)
output = output.split("\n", 1)[1]
# Remove the carriage return of the output
# output = self.remove_starting_carriage_return_in_output(output)
# Remove the ending prompt of the output
# 2 lines?
if "\n" in output:
# Yes
# For Mikrotik just remove the last line (complicated otherwise)
output = output[: output.rfind("\n")]
else:
# No. There is just the prompt
# Empty string is returned
output = ""
# Debug info message
log.debug(
f"send_commandSSH: cleaned output: '{output}'\nsend_commandSSH: cleaned output (hex): '{output.encode().hex()}'"
)
# Return the result of the command
return output
async def send_commandTelnet(self, cmd, pattern=None, timeout=None):
"""
Async method used to send data to a device
:param cmd: command to send
:type cmd: str
:param pattern: optional, a pattern replacing the prompt when the prompt is not expected
:type pattern: str
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: float
:return: the output of command
:rtype: str
"""
# Debug info message
log.info("send_commandTelnet")
# Default value of timeout variable
if timeout is None:
timeout = self.timeout
# Add carriage return at the end of the command (mandatory to send the command)
cmd = cmd + self._carriage_return_for_send_command
# Sending command
self._writer.write(cmd.encode())
# Temporary string variable
output = ""
# Temporary bytes variable
byte_data = b""
# Variable used for leaving loop (necessary since there is a "while" with a "for" and a "break" command)
stay_in_loop = True
try:
# Read data
while stay_in_loop:
# Read returned prompt
byte_data += await asyncio.wait_for(
self._reader.read(MAX_BUFFER_DATA), timeout=timeout
)
# Display info message
log.info(f"send_commandTelnet: byte_data: '{byte_data}'")
                # Temporary conversion to string. This string has the following form: "b'....'"
output = str(byte_data)
# Display info message
log.info(f"send_commandTelnet: output: '{output}'")
                # Is a pattern used?
if pattern:
# Use pattern instead of prompt
if pattern in output:
# Yes
# Leave the loop
break
else:
# Check if prompt is found
for prompt in self.list_of_possible_ending_prompts:
# A pattern found twice (or more)?
if output.count(prompt) >= 2:
# Yes
# Display info message
log.info(
f"send_commandTelnet: prompt found twice or more: '{prompt}'"
)
# Will leave the while loop
stay_in_loop = False
# Leave the loop
break
except asyncio.TimeoutError:
            # Timeout while reading the prompt
# Display error message
log.error("send_commandTelnet: connection: timeout")
# Exception propagation
raise
except Exception as error:
            # Error while reading the prompt
# Display error message
log.error(f"send_commandTelnet: error: {error}")
# Exception propagation
raise
# Convert data (bytes) into string
output = byte_data.decode("utf-8", "ignore")
# Debug info message
log.debug(
f"send_commandTelnet: raw output: '{output}'\nsend_commandTelnet: raw output (hex): '{output.encode().hex()}'"
)
# Remove the command sent from the result of the command
# output = self.remove_command_in_output(output, str(cmd))
# For Mikrotik just remove the first line (complicated otherwise)
output = output.split("\n\r", 1)[1]
# Remove the carriage return of the output
# output = self.remove_starting_carriage_return_in_output(output)
# Remove the ending prompt of the output
# For Mikrotik just remove the last line (complicated otherwise)
output = output[: output.rfind("\n")]
# Debug info message
log.debug(
f"send_commandTelnet: cleaned output: '{output}'\nsend_commandTelnet: cleaned output (hex): '{output.encode().hex()}'"
)
# Return the result of the command
return output
async def telnet_send_command_with_unexpected_pattern(
self, cmd, pattern, error_pattern=None, timeout=None
):
"""
Async method used to send command for Telnet connection to a device with possible unexpected patterns
        send_command can wait until it times out if the login and password are
        wrong. This method speeds up the returned error message when an
        authentication failure is identified.
        This method is limited to authentication when a password is required
:param cmd: command to send
:type cmd: str
        :param pattern: optional, a list of patterns expected at the very end of the returned string. Can be
            used to define a custom or unexpected prompt at the end of a string
        :type pattern: list
        :param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: float
        :param error_pattern: optional, a list of failed prompts found when the login and password are not correct
        :type error_pattern: list
:return: the output of command
:rtype: str
"""
# Debug info message
log.info("telnet_send_command_with_unexpected_pattern")
# Default value of timeout variable
if timeout is None:
timeout = self.timeout
# Add carriage return at the end of the command (mandatory to send the command)
cmd = cmd + "\n"
# Sending command
self._writer.write(cmd.encode())
# Temporary string variable
output = ""
# Temporary bytes variable
byte_data = b""
# By default pattern is not found
pattern_not_found = True
try:
# Read data
while pattern_not_found:
# Read returned prompt
byte_data += await asyncio.wait_for(
self._reader.read(MAX_BUFFER_DATA), timeout=timeout
)
# Display info message
log.info(
f"telnet_send_command_with_unexpected_pattern: byte_data: '{byte_data}'"
)
# Display debug message
log.debug(
f"telnet_send_command_with_unexpected_pattern: byte_data: hex: '{byte_data.hex()}'"
)
                # Temporary conversion to string. This string has the following form: "b'....'"
output = str(byte_data)
# Display info message
log.info(
f"telnet_send_command_with_unexpected_pattern: output: '{output}'"
)
# Is a pattern used?
if pattern:
# Check all pattern of prompt in the output
for prompt in pattern:
# Display info message
log.info(
f"telnet_send_command_with_unexpected_pattern: checking prompt: '{prompt}'"
)
# A pattern found twice (or more)?
if output.count(prompt) >= 2:
# if prompt in output:
# Yes
# A pattern is found. The main loop can be stopped
pattern_not_found = False
# Display info message
log.info(
f"telnet_send_command_with_unexpected_pattern: prompt found: '{prompt}'"
)
# Leave the loop
break
# Is an unexpected pattern used?
if error_pattern and pattern_not_found:
# Check all unexpected pattern of prompt in the output
for bad_prompt in error_pattern:
# Display info message
log.info(
f"telnet_send_command_with_unexpected_pattern: checking unexpected prompt: '{bad_prompt}'"
)
# An error_pattern pattern found?
if bad_prompt in output:
# Yes
# Display error message
log.error(
"telnet_send_command_with_unexpected_pattern: authentication failed"
)
# Raise exception
raise Exception(
"telnet_send_command_with_unexpected_pattern: authentication failed"
)
# Leave the loop
# break
except asyncio.TimeoutError:
            # Timeout while reading the prompt
# Close the connection in order to not display RuntimeError
await self.disconnect()
# Display error message
log.error(
"telnet_send_command_with_unexpected_pattern: reading prompt: timeout"
)
# Exception propagation
raise
except Exception as error:
            # Error while reading the prompt
# Close the connection in order to not display RuntimeError
await self.disconnect()
# Display error message
log.error(
f"telnet_send_command_with_unexpected_pattern: reading prompt: error: {error}"
)
# Exception propagation
raise
# Convert data (bytes) into string
output = byte_data.decode("utf-8", "ignore")
# Debug info message
log.debug(
f"telnet_send_command_with_unexpected_pattern: raw output: '{output}'\ntelnet_send_command_with_unexpected_pattern: raw output (hex): '{output.encode().hex()}'"
)
# Remove the command sent from the result of the command
# output = self.remove_command_in_output(output, str(cmd))
output = output.split("\n\r", 1)[1]
# Remove the carriage return of the output
output = self.remove_starting_carriage_return_in_output(output)
# Remove the ending prompt of the output
# For Mikrotik just remove the last line (complicated otherwise)
output = output[: output.rfind("\n")]
# Debug info message
log.debug(
f"telnet_send_command_with_unexpected_pattern: cleaned output: '{output}'\ntelnet_send_command_with_unexpected_pattern: cleaned output (hex): '{output.encode().hex()}'"
)
# Return the result of the command
return output
async def send_config_set(self, cmds=None, timeout=None):
"""
        Async method used to send commands in config mode
        There is no configuration mode on Mikrotik RouterOS switches,
        so this method just runs a group of commands
:param cmds: The commands to the device
:type cmds: str or list
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
:type timeout: str
:return: the results of the commands sent
        :rtype: str
"""
# Display info message
log.info("send_config_set")
# Default value of timeout variable
if timeout is None:
timeout = self.timeout
# By default there is no output
output = ""
# Optional carriage return
carriage_return = ""
# Check if cmds is a string
if isinstance(cmds, str):
# A string
# Convert the string into a list
cmds = [cmds]
# A list?
elif not isinstance(cmds, list):
# Not a list (and not a string)
# Display error message
log.error(
"send_config_set: parameter cmds used in send_config_set is neither a string nor a list"
)
# Leave the method
return output
# Run each command
for cmd in cmds:
# Add carriage return if needed (first time no carriage return)
output += carriage_return
# Send a command
output += await self.send_command(cmd)
# Set carriage return for next commands
carriage_return = "\n"
# Return the commands sent
return output
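    # Usage sketch (illustrative; the commands are plain RouterOS CLI strings,
    # since RouterOS has no separate configuration mode):
    #
    #   output = await device.send_config_set([
    #       "interface bridge add name=bridge1",
    #       "interface bridge port add bridge=bridge1 interface=ether2",
    #   ])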
#########################################################
#
# List of API
#
#########################################################
async def get_version(self):
"""
        Async method used to get the version of the software of the device
:return: Version of the software of the device
:rtype: str
"""
# Display info message
log.info("get_version")
# By default empty string
version = ""
# Run get version on the device
output = await self.send_command(self.cmd_get_version)
# Seek data to get the version in the returned output
version = output.split("version: ")[1].split()[0]
# Display info message
log.info(f"get_version: version: {version}")
# Return the version of the software of the device
return version
async def get_hostname(self):
"""
        Async method used to get the name of the device
:return: Name of the device
:rtype: str
"""
# Display info message
log.info("get_hostname")
# Get hostname
output = await self.send_command(self.cmd_get_hostname)
# Display info message
log.info(f"get_hostname: output: '{output}'")
# Remove the useless information in the returned string
output = output.split()[1]
# Display info message
log.info(f"get_hostname: hostname found: '{output}'")
# Return the name of the device
return output
async def get_model(self):
"""
        Async method used to get the model of the device
:return: Model of the device
:rtype: str
"""
# Display info message
log.info("get_model")
# Get model
output = await self.send_command(self.cmd_get_model)
# Display info message
log.info(f"get_model: output: '{output}'")
# Remove the useless information in the returned string
output = output.split("board-name: ")[1].split()[0]
# Display info message
log.info(f"get_model: model found: '{output}'")
# Return the model of the device
return output
async def get_serial_number(self):
"""
Get serial number of the switch or the serial number of the first switch of a stack
:return: Serial number of the device
:rtype: str
"""
# Display info message
log.info("get_serial_number")
# Get model
output = await self.send_command(self.cmd_get_serial_number)
# Display info message
log.info(f"get_serial_number: output: '{output}'")
# Remove the useless information in the returned string
output = output.split("serial-number: ")[1].split()[0]
# Display info message
log.info(f"get_hostname: hostname found: '{output}'")
# Return the serial number of the device
return output
async def get_config(self, timeout=None):
"""
        Async method used to get the configuration of the device
        :param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: float
:return: Configuration of the device
:rtype: str
"""
# Display info message
log.info("get_config")
# Default value of timeout variable
if timeout is None:
timeout = self.timeout
# Get config
output = await self.send_command(self.cmd_get_config, timeout=timeout)
        # Return the configuration of the device
return output
async def save_config(self):
"""
        Async method used to save the current configuration on the device
:return: Commands of the configuration saving process
:rtype: str
"""
# Display info message
log.info("save_config")
        # No need to send a command
output = ""
# Return the commands of the configuration saving process
return output
async def get_mac_address_table(self):
"""
        Async method used to get the MAC address table of the device
:return: MAC address table of the device
:rtype: list of dict
"""
# Display info message
log.info("get_mac_address_table")
# By default nothing is returned
returned_output = []
# Send a command
output = await self.send_command(self.cmd_get_mac_address_table)
        # Convert the string into a list of strings and remove the first 2 lines
lines = output.splitlines()[2:]
# Read each line
for line in lines:
# Set default values for variables
mac_type = None
mac_address = None
vlan = None
interface = None
# If the MAC address is dynamic AND local then it is self (its own MAC address)
# Get the type of MAC address (dynamic, static or self)
if len(line) > 6:
if line[6].lower() == "l":
# Self MAC address
mac_type = "self"
# Get the type of MAC address (dynamic, static or self)
elif line[5].lower() == "d":
# Dynamic MAC address
mac_type = "dynamic"
else:
# Static MAC address
mac_type = "static"
# Get MAC address
if len(line) > 26:
mac_address = line[9:26]
# Convert MAC address into lower case
mac_address = mac_address.lower()
# Get VLAN
if len(line) > 31:
vlan = int(line[27:31].strip())
# Get interface
if len(line) > 32:
interface = line[32:].split()[0]
# Create a dictionary
mac_dict = {
"mac_type": mac_type,
"mac_address": mac_address,
"vlan": vlan,
"interface": interface,
}
# Add the MAC information to the list
if mac_address:
returned_output.append(mac_dict)
# Return data
return returned_output
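    # Shape of one entry of the returned list (illustrative values):
    #
    #   {"mac_type": "dynamic", "mac_address": "aa:bb:cc:dd:ee:ff",
    #    "vlan": 10, "interface": "ether2"}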
async def get_arp_table(self):
"""
        Async method used to get the ARP table of the device
:return: ARP table of the device
:rtype: list of dict
"""
# Display info message
log.info("get_arp_table")
# By default nothing is returned
returned_output = []
# Send a command
output = await self.send_command(self.cmd_get_arp)
# Display info message
log.info(f"get_arp:\n'{output}'")
# Convert a string into a list of strings
lines = output.splitlines()
# Read each line
for line in lines:
# Set default values for variables
address = None
mac_address = None
interface = None
# Get IP address
if " address=" in line:
address = line.split(" address=")[-1].split()[0]
# Get MAC address
if " mac-address=" in line:
mac_address = line.split(" mac-address=")[-1].split()[0]
# Get interface
if " interface=" in line:
interface = line.split(" interface=")[-1].split()[0]
# Create a dictionary
returned_dict = {
"address": address,
"mac_address": mac_address,
"interface": interface,
}
# Add the information to the list
if address:
returned_output.append(returned_dict)
# Return data
return returned_output
async def get_lldp_neighbors(self):
"""
        Async method used to get the LLDP information from the device
        The problem with the LLDP implementation on RouterOS is that the
        command used to get LLDP information can return data even though no
        LLDP service is running on the neighbour device. Thus the interface
        and MAC address fields can be filled with data even without an LLDP
        neighbour. Data is considered LLDP information only if fields other
        than the interface and MAC address are found.
:return: LLDP information of the device
:rtype: dict of list of dict
"""
# Display info message
log.info("get_lldp_neighbors")
# By default nothing is returned
returned_output = {}
# Send a command
output = await self.send_command(self.cmd_get_lldp_neighbors)
# Display info message
log.info(f"get_lldp_neighbors:\n'{output}'")
# Convert a string into a list of strings
lines = output.splitlines()
# Read each line
for line in lines:
# Default value for local interface (no interface)
local_interface = None
# Initialize potential LLDP data with default values
chassis_id = ""
port_id = ""
ttl = None
port_description = ""
system_name = ""
system_description = ""
system_capabilities = []
management_address = ""
# Get local interface
if " interface=" in line:
local_interface = line.split(" interface=")[-1].split()[0].split(",")[0]
# Display info message
log.info(f"get_lldp_neighbors: local_interface: {local_interface}")
# Get Chassis ID - TLV type 1
if " mac-address=" in line:
chassis_id = line.split(" mac-address=")[-1].split()[0]
# Convert the MAC address of the Chassis ID into a lower case string
chassis_id = chassis_id.lower()
# Display info message
log.info(f"get_lldp_neighbors: chassis_id: {chassis_id}")
# Get Port ID - TLV type 2
if " interface-name=" in line:
port_id = (
line.split(" interface-name=")[-1].split("=")[0].rsplit(" ", 1)[0]
)
# Display info message
log.info(f"get_lldp_neighbors: port_id: {port_id}")
# Get Time To Live - TLV type 3
# Not available on RouterOS. "age" parameter is a decreasing counter
# Get Port description - TLV type 4
# Not available on RouterOS.
# Get System name - TLV type 5
if " identity=" in line:
system_name = line.split(" identity=")[-1].split()[0]
# Check if return value is a string "" (just double quotes which means empty data)
if system_name == '""':
# Yes, empty string
system_name = ""
# Display info message
log.info(f"get_lldp_neighbors: system_name: {system_name}")
# Get System description - TLV type 6
if " system-description=" in line:
system_description = (
line.split(" system-description=")[-1]
.split("=")[0]
.rsplit(" ", 1)[0]
)
# Display info message
log.info(
f"get_lldp_neighbors: system_description: {system_description}"
)
# Get System capabilities - TLV type 7
if " system-caps=" in line:
                # First get the capabilities as a string separated by commas
# e.g.: 'bridge,wlan-ap,router,station-only'
string_capability = line.split(" system-caps=")[-1].split()[0]
# Then convert them into a list of characters
# Code Capability
# B Bridge (Switch)
# C DOCSIS Cable Device
# O Other
# P Repeater
# R Router
# S Station
# T Telephone
# W WLAN Access Point
# Read each capability
for capability in string_capability.split(","):
# Check if string is not null
if len(capability) > 0:
# Get the first letter of the capability, convert this character in uppercase
# and add it to a list
system_capabilities.append(capability[0].upper())
# Display info message
log.info(
f"get_lldp_neighbors: system_capabilities: {system_capabilities}"
)
# Get Management address - TLV type 8
if " address=" in line:
management_address = line.split(" address=")[-1].split()[0]
# LLDP TLV Type 9 to 127 are currently not supported by this method
# Check if data can be considered as LLDP
if local_interface and (
port_id or system_name or system_description or management_address
):
# Probably LLDP
# Create a dictionary
returned_dict = {
"chassis_id": chassis_id,
"port_id": port_id,
"ttl": ttl,
"port_description": port_description,
"system_name": system_name,
"system_description": system_description,
"system_capabilities": system_capabilities,
"management_address": management_address,
}
# Add the information to the dict
# Each interface can get several returned_dict in a list
returned_output[local_interface] = returned_output.get(
local_interface, []
) + [returned_dict]
# Return data
return returned_output
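    # Shape of the returned data (illustrative values), keyed by local
    # interface; each interface can carry several neighbour entries:
    #
    #   {"ether1": [{"chassis_id": "aa:bb:cc:dd:ee:ff", "port_id": "ether5",
    #                "system_name": "switch2", "system_capabilities": ["B", "R"],
    #                "management_address": "192.168.88.1", ...}]}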
async def get_interfaces(self):
"""
        Async method used to get the information of ALL the interfaces of the device
        Several commands are used to collect the interface data:
- one for status
- one for duplex/speed
- one for mode (access / trunk / hybrid)
:return: Interfaces of the device
:rtype: dict of dict
"""
# Display info message
log.info("get_interfaces")
# By default nothing is returned
returned_output = {}
# Command for the status of the interfaces
# Send a command
output_status = await self.send_command(self.cmd_get_interfaces[0])
# Display info message
log.info(f"get_interfaces: status command\n'{output_status}'")
# Command for the speed and the duplex mode of the interfaces
# Send a command
output_bitrate = await self.send_command(self.cmd_get_interfaces[1])
# Display info message
log.info(f"get_interfaces: speed duplex command\n'{output_bitrate}'")
# Command for the mode of the interfaces (access or trunk)
# Send a command
output_mode = await self.send_command(self.cmd_get_interfaces[2])
# Display info message
log.info(f"get_interfaces: mode command\n'{output_mode}'")
# Convert a string into a list of strings (status)
lines = output_status.splitlines()
# Convert a string into a list of block of strings (duplex/speed)
block_of_strings_bitrate = output_bitrate.split("\n\n")
# Convert a string into a list of block of strings (mode)
block_of_strings_mode = output_mode.splitlines()
# By default there is no trunk interface
dict_trunk_interface = {}
# Read all tagged interfaces line by line
for line in block_of_strings_mode:
# Check if a " frame-types=" is inside the string
if " frame-types=" in line:
# Yes
# Save the string with the name of the interfaces separated with a comma
frame_types = line.split(" frame-types=")[-1].split()[0]
# Mikrotik devices have 3 modes:
# access, trunk or hybrid
# (FrameTypes ::= admit-all | admit-only-untagged-and-priority-tagged | admit-only-vlan-tagged)
#
# self.interface_mode = {
# "access": "admit-only-untagged-and-priority-tagged",
# "trunk": "admit-only-vlan-tagged",
# "hybrid": "admit-all",
# }
# Check all modes an interface can get
for mode in self.interface_mode:
                    # Is this interface in the current mode?
if frame_types == self.interface_mode[mode]:
# Yes
# Display info message
log.info(
f"get_interfaces: frame-types: mode found: '{frame_types}'"
)
# Get the name of the interface
interface_trunk = line.split(" interface=")[-1].split()[0]
# Display info message
log.info(
f"get_interfaces: frame-types: interface: '{interface_trunk}'"
)
# So save the interface mode with a conventional name
dict_trunk_interface[interface_trunk] = mode
# Leave the loop
break
# # Check if value is not empty
# if tagged_interfaces != '""':
# # Not empty
# # Read all trunk interfaces found and separate them
# for interface_trunk in tagged_interfaces.split(","):
# # Save the trunk interface
# dict_trunk_interface[interface_trunk] = True
# Read each line
for line in lines:
# Initialize data with default values
interface_name = ""
operational = False
admin_state = False
maximum_frame_size = 0
full_duplex = False
speed = 0 # speed is in Mbit/s
mode = "access"
description = ""
# Get interface name
if " name=" in line:
interface_name = line.split(" name=")[-1].split()[0]
# Display info message
log.info(f"get_interfaces: interface_name: {interface_name}")
# Get operational and admin_state status
if len(line) > 3:
data = line[3].upper()
# operational + admin_state = "up"?
if data == "R":
# Yes
operational = True
admin_state = True
# operational = "down" and admin_state = "up"?
elif data == " ":
# Yes
admin_state = True
# operational + admin_state = "down" means data == "X"
# No need to compare since default values are already fine
# Display info message
log.info(f"get_interfaces: operational: {operational}, admin_state")
# Get maximum frame size
if " l2mtu=" in line:
maximum_frame_size = int(line.split(" l2mtu=")[-1].split()[0])
# Display info message
log.info(
f"get_interfaces: maximum_frame_size : {maximum_frame_size}"
)
# Get speed and duplex information
for index, data_block in enumerate(block_of_strings_bitrate):
# Display info message
log.info(
f"get_interfaces: get_speed: index: {index} [{len(block_of_strings_bitrate)}]"
)
# Is the name of interface found in the block of strings?
if f"name: {interface_name}" in data_block:
# Yes, so this block of strings has information on the interface
# Display info message
log.info(f"get_interfaces: get_speed: index found: {index}")
# " rate: " field found in the block of strings? (speed)
if " rate: " in data_block:
# Yes
# Then extract the string data
rate_string = (
data_block.split(" rate: ")[-1].split()[0].lower()
)
                        # Is it Mbps?
if "mbps" in rate_string:
# Yes
# Then speed is saved
speed = int(float(rate_string.split("mbps")[0]))
                        # Is it Gbps?
elif "gbps" in rate_string:
# Yes
                            # Then speed is saved in Mbps
speed = int(float(rate_string.split("gbps")[0]) * 1000)
                        # Is it Tbps? (not seen on current Mikrotik products; for future use)
elif "tbps" in rate_string:
# Yes
                            # Then speed is saved in Mbps
speed = int(
float(rate_string.split("tbps")[0]) * 1000000
)
# Display info message
log.info(
f"get_interfaces: get_speed: rate found: {rate_string}, rate: {speed} mbps"
)
# " full-duplex: yes" field found in the block of strings? (full_duplex)
if " full-duplex: yes" in data_block:
# Yes
# Display info message
log.info(
f"get_interfaces: get_duplex: {interface_name} is in full duplex mode"
)
                        # Then the interface is in full duplex mode
full_duplex = True
# Remove current interface information from the block of data
                    # (to speed up the search through the data)
del block_of_strings_bitrate[index]
# Leave the loop
break
# Get interface mode (access, trunk or hybrid)
# Check if the interface is one of the trunk interface
if interface_name in dict_trunk_interface:
# Yes
# Set interface mode
mode = dict_trunk_interface[interface_name]
# Display info message
log.info(f"get_interfaces: mode: {mode}")
# # Check if the interface is one of the trunk interface
# if interface_name in dict_trunk_interface:
# # Yes
# # Set trunk mode
# mode = "trunk"
# # Display info message
# log.info(f"get_interfaces: mode: {mode}")
# # Get input erros, FCS errors, input packets anf output packets
# for index, data_stats in enumerate(block_of_strings_stats):
# # Display info message
# log.info(
# f"get_interfaces: get_stats: index: {index} [{len(block_of_strings_stats)}]"
# )
# # Is the name of interface found in the block of strings?
# if f"name: {interface_name}" in data_stats:
# # Yes, so this block of strings has information on the interface
# # Display info message
# log.info(f"get_interfaces: get_stats: index found: {index}")
# # " rx-fcs-error=" filed found in the block of strings? (speed)
# if " rx-fcs-error=" in data_stats:
# # Yes
# # Save the line with the data of FCS errors
# line_split = data_stats.split("rx-fcs-error=")[-1].split("=")[0]
# # By default no string gathered
# fcs_string = ""
# # Check each character till a non-numeric character
# for character in line_split:
# # Display info message
# log.info(
# f"get_interfaces: get_stats: fcs errors: char = {character}"
# )
# # Is it a numeric characer ("0" to "9")?
# if character >= "0" and character <= "9":
# # Yes
# # So the character is added to a string
# fcs_string += character
# # Is the character different than " " (which can be used for separator)?
# elif character != " ":
# # Yes, this is not a space
# # Leave the loop then since this is the beginning of another word
# break
# log.info(
# f"get_interfaces: get_stats: fcs errors: fcs_string: {fcs_string}"
# )
# # String not empty?
# if fcs_string:
# # Yes
# # Then save the result in integer
# fcs_error = int(fcs_string)
# Get description
if " comment=" in line:
description = (
line.split(" comment=")[-1].split("=")[0].rsplit(" ", 1)[0]
)
# Display info message
log.info(f"get_interfaces: comment: {description}")
# Create a dictionary
returned_dict = {
"operational": operational,
"admin_state": admin_state,
"maximum_frame_size": maximum_frame_size,
"full_duplex": full_duplex,
"speed": speed,
"mode": mode,
"description": description,
}
# Add the information to the dict
if interface_name:
returned_output[interface_name] = returned_dict
# Return data
return returned_output
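    # Shape of the returned data (illustrative values), keyed by interface name:
    #
    #   {"ether1": {"operational": True, "admin_state": True,
    #               "maximum_frame_size": 1598, "full_duplex": True,
    #               "speed": 1000, "mode": "access", "description": ""}}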
async def get_vlans(self):
"""
        Async method used to get the VLAN information from the device
:return: VLANs of the device
:rtype: dict
"""
# Display info message
log.info("get_vlans")
# By default nothing is returned
returned_output = {}
# Send a command
output = await self.send_command(self.cmd_get_vlans)
# Display info message
log.info(f"get_vlans:\n'{output}'")
# Convert a string into a list of strings
lines = output.splitlines()
# Read each line
for line in lines:
# Initialize data with default values
name = ""
vlan_id = 0
extra = None
# extra = {
# "bridge": "",
# }
# Get VLAN name
if " comment=" in line:
name = line.split(" comment=")[-1].split("=")[0].rsplit(" ", 1)[0]
# Display info message
log.info(f"get_vlans: name: {name}")
# Get VLAN ID
if " vlan-ids=" in line:
vlan_id = int(line.split(" vlan-ids=")[-1].split()[0])
# Display info message
log.info(f"get_vlans: vlan_id: {vlan_id}")
# Get bridge (special Mikrotik)
if " bridge=" in line:
bridge = line.split(" bridge=")[-1].split("=")[0].rsplit(" ", 1)[0]
# Display info message
log.info(f"get_vlans: bridge: {bridge}")
# Save bridge information into
extra = {
"bridge": bridge,
}
# Create a dictionary
returned_dict = {
"name": name,
"extra": extra,
}
            # Is the VLAN ID non-zero?
if vlan_id:
# Yes
# Add the information to the dict
returned_output[vlan_id] = returned_dict
# Return data
return returned_output
async def get_routing_table(self):
"""
        Async method used to get the routing table of the device
:return: Routing table of the device
:rtype: dict
"""
# Display info message
log.info("get_routing_table")
# By default nothing is returned
returned_output = {}
# Send a command
output = await self.send_command(self.cmd_get_routing_table)
# Display info message
log.info(f"get_routing_table:\n'{output}'")
# Convert a string into a list of strings
lines = output.splitlines()
# Read each line
for line in lines:
# Initialize data with default values
network = ""
address = ""
prefix = 0
protocol = "unknown"
administrative_distance = 0
gateway = ""
active = False
protocol_attributes = None
# Get network, address and prefix
if " dst-address=" in line:
network = line.split(" dst-address=")[-1].split()[0]
address = network.split("/")[0]
prefix = int(network.split("/")[1])
# Get protocol
# Save char with protocol letter
if len(line) > 5:
protocol_char = line[5]
if protocol_char == "C":
# Connected
protocol = "connected"
elif protocol_char == "S":
# Static
protocol = "static"
elif protocol_char == "r":
# RIP
protocol = "rip"
elif protocol_char == "b":
# BGP
protocol = "bgp"
elif protocol_char == "o":
# OSPF
protocol = "ospf"
elif protocol_char == "m":
# MME
protocol = "mme"
# Get administrative distance
if " distance=" in line:
administrative_distance = int(line.split(" distance=")[-1].split()[0])
# Get gateway
if " gateway=" in line:
gateway = line.split(" gateway=")[-1].split()[0]
# Get active status
if len(line) > 3:
if line[3] == "A":
active = True
# Create a dictionary
returned_dict = {
"address": address,
"prefix": prefix,
"protocol": protocol,
"administrative_distance": administrative_distance,
"gateway": gateway,
"active": active,
"protocol_attributes": protocol_attributes,
}
# Is a network found?
if network:
# Yes
# Add the information to the dict
returned_output[network] = returned_dict
# Return data
return returned_output
async def get_bridges(self):
"""
        Async method used to get bridges from the device
:return: A dictionary with the bridge information
:rtype: dict of dict
"""
# Display info message
log.info("get_bridges")
# By default nothing is returned
returned_output = {}
# Send a command
output = await self.send_command(self.cmd_get_bridges)
# Display info message
log.info(f"get_bridges:\n'{output}'")
# Convert a string into a list of strings
lines = output.splitlines()
# Read each line
for line in lines:
# Initialize data with default values
index = None
name = ""
status = False
mac_address = None
spanning_tree = None
igmp_snooping = False
vlan_filtering = False
multicast_querier = False
# Get index
# Line has enough characters?
if len(line) > 1:
# Yes
                # Get the first 2 characters (100 bridges max should be ok)
index_string = line[:2]
# Convert characters into a integer
try:
index = int(index_string)
# Display info message
log.info(f"get_bridges: index: {index}")
                except ValueError:
                    # Conversion failed
                    pass
# Get name
if " name=" in line:
name = line.split(" name=")[-1].split("=")[0].rsplit(" ", 1)[0]
# Display info message
log.info(f"get_bridges: name: {name}")
# Get status
line_words = line.split()
# Enough words?
if len(line_words) > 1:
# Running?
if line_words[1] == "R":
# Yes
# So the bridge is enabled
status = True
# Display info message
log.info(f"get_bridges: status: {status}")
# Get MAC ADDRESS
if " mac-address=" in line:
mac_address = (
line.split(" mac-address=")[-1].split("=")[0].rsplit(" ", 1)[0]
)
# Display info message
log.info(f"get_bridges: mac_address: {mac_address}")
# Get Spanning Tree mode
if " protocol-mode=" in line:
spanning_tree = (
line.split(" protocol-mode=")[-1].split("=")[0].rsplit(" ", 1)[0]
)
# Display info message
log.info(f"get_bridges: spanning_tree: {spanning_tree}")
# Get IGMP SNOOPING status
if " igmp-snooping=" in line:
# Value "yes" for IGMP SNOOPING?
if (
line.split(" igmp-snooping=")[-1].split("=")[0].rsplit(" ", 1)[0]
== "yes"
):
# Yes
# IGMP SNOOPING is enabled
igmp_snooping = True
# Display info message
log.info(f"get_bridges: igmp_snooping: {igmp_snooping}")
# Get VLAN filtering status
if " vlan-filtering=" in line:
# Value "yes" for VLAN filtering?
if (
line.split(" vlan-filtering=")[-1].split("=")[0].rsplit(" ", 1)[0]
== "yes"
):
# Yes
# VLAN filtering is enabled
vlan_filtering = True
# Display info message
log.info(f"get_bridges: vlan_filtering: {vlan_filtering}")
# Get multicast querier status
if " multicast-querier=" in line:
# Value "yes"?
if (
line.split(" multicast-querier=")[-1]
.split("=")[0]
.rsplit(" ", 1)[0]
== "yes"
):
# Yes
                    # Multicast querier is enabled
multicast_querier = True
# Display info message
log.info(f"get_bridges: multicast_querier: {multicast_querier}")
# Create a dictionary
returned_dict = {
"name": name,
"status": status,
"mac_address": mac_address,
"spanning_tree": spanning_tree,
"igmp_snooping": igmp_snooping,
"vlan_filtering": vlan_filtering,
"multicast_querier": multicast_querier,
}
# Is there a value?
if index is not None:
# Yes
# Add the information to the dict
returned_output[index] = returned_dict
# Return data
return returned_output
async def add_vlan(self, vland_id, vlan_name="", **kwargs):
"""
        Async method used to add a VLAN to a bridge of the device
        Assigning a VLAN directly to an interface is not supported
:param vland_id: VLAN ID
:type vland_id: int
:param vlan_name: optional, name of the VLAN
:type vlan_name: str
:param kwargs: mandatory, must contain "bridge_name" (str) to specify
which bridge to use (specific to Mikrotik)
:type kwargs: str
:return: Status. True = no error, False = error
:rtype: bool
"""
# Display info message
log.info("add_vlan")
# Default parameters value
bridge_name = None
# By default result status is having an error
return_status = False
# Get parameters
# "bridge_name" found?
if "bridge_name" not in kwargs:
# No
# So the VLAN cannot be added
# Return status
return return_status
# Save "bridge" parameter
bridge_name = kwargs["bridge_name"]
# Display info message
log.info(f"add_vlan: bridge_name found: '{bridge_name}'")
# Adapt the command line
# self.cmd_add_vlan = "interface bridge vlan add vlan-ids=<VLAN> comment=\"<VLAN_NAME>\" bridge=<BRIDGE>"
cmd_add_vlan = self.cmd_add_vlan
# Replace <VLAN> with the VLAN number
cmd_add_vlan = cmd_add_vlan.replace("<VLAN>", str(vland_id))
# Replace <BRIDGE> with the bridge name
cmd_add_vlan = cmd_add_vlan.replace("<BRIDGE>", bridge_name)
# Replace <VLAN_NAME> with the VLAN name
cmd_add_vlan = cmd_add_vlan.replace("<VLAN_NAME>", vlan_name)
# Display info message
log.info(f"add_vlan: cmd_add_vlan: '{cmd_add_vlan}'")
# Add VLAN
output = await self.send_command(cmd_add_vlan)
# Display info message
log.info(f"add_vlan: output: '{output}'")
# Check if an error happened
# "failure: vlan already added"
if "failure" not in output:
# No error
return_status = True
# Return status
return return_status
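    # Hedged usage sketch (assumes an established async session `device` of
    # this class; the bridge name "bridge1" is hypothetical):
    #
    #     ok = await device.add_vlan(100, vlan_name="users", bridge_name="bridge1")
    #     if not ok:
    #         log.error("could not add VLAN 100")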
async def remove_vlan(self, vland_id):
"""
        Async method used to remove a VLAN from a bridge of the device
VLAN to interface is not supported
:param vland_id: VLAN ID
:type vland_id: int
:return: Status. True = no error, False = error
:rtype: bool
"""
# Display info message
log.info("remove_vlan")
# By default result status is having an error
return_status = False
# Adapt the command line
# Replace <VLAN> with the VLAN number
cmd_remove_vlan = self.cmd_remove_vlan.replace("<VLAN>", str(vland_id))
# Display info message
log.info(f"remove_vlan: cmd_remove_vlan: '{cmd_remove_vlan}'")
# Add VLAN
output = await self.send_command(cmd_remove_vlan)
# Display info message
log.info(f"remove_vlan: output: '{output}'")
# No error?
if "no such item" not in output:
# No error
return_status = True
# Sadly "no such item" or any error message cannot be returned
# with "[find ...]" command
# Return status
return return_status
async def set_interface(
self,
interface=None,
admin_state=None,
description=None,
maximum_frame_size=None,
mode=None,
**kwargs,
):
"""
        Async method used to set the state of an interface of the device
:param interface: the name of the interface
:type interface: str
:param admin_state: optional, "up" or "down" status of the interface
:type admin_state: bool
:param description: optional, a description for the interface
:type description: str
:param maximum_frame_size: optional, L2 MTU for packets
:type maximum_frame_size: int
:param mode: optional, set the mode (access, trunk, hybrid) of the interface
:type mode: str
:param kwargs: not used
:type kwargs: dict
:return: Status. True = no error, False = error
:rtype: bool
"""
# Display info message
log.info("set_interface")
# By default result status is having an error
return_status = False
# Display info message
log.info(f"set_interface: input: interface: {interface}")
log.info(f"set_interface: input: admin_state: {admin_state}")
log.info(f"set_interface: input: description: {description}")
log.info(f"set_interface: input: maximum_frame_size: {maximum_frame_size}")
log.info(f"set_interface: input: mode: {mode}")
# Get parameters
# "interface" found?
        if interface is None:
# No
# So no action can be performed
# Display info message
log.info("set_interface: no interface specified")
# Return status
return return_status
# "admin_state" found?
        if admin_state is not None:
# Yes
# So admin state of the interface can be changed
# Display info message
log.info("set_interface: admin_state")
# "up" or "down"? (True of False)
if admin_state:
# "up"
# ["interface ethernet enable <INTERFACE>", "interface ethernet disable <INTERFACE>"]
# Get the command
cmd = self.cmd_set_interface[0]
else:
# "down"
# Get the command
cmd = self.cmd_set_interface[1]
# Adapt the command line
# Replace <INTERFACE> with the interface name
cmd = cmd.replace("<INTERFACE>", interface)
# Display info message
log.info(f"set_interface: admin_state: cmd: {cmd}")
# Change the state of the interface
await self.send_command(cmd)
# "description" found?
        if description is not None:
# Yes
# So description of the interface can be changed
# Display info message
log.info("set_interface: description")
# Adapt the command line
# 'interface ethernet comment <INTERFACE> "<COMMENT>"',
# Replace <INTERFACE> with the interface name
cmd = self.cmd_set_interface[2].replace("<INTERFACE>", interface)
# Replace <COMMENT> with the description
cmd = cmd.replace("<COMMENT>", description)
# Display info message
log.info(f"set_interface: description: cmd: {cmd}")
# Change the description of the interface
await self.send_command(cmd)
# "maximum_frame_size" found?
        if maximum_frame_size is not None:
# Yes
# So the Maximum Frame Size can be changed
# Display info message
log.info("set_interface: maximum_frame_size")
# Adapt the command line
# "interface ethernet set l2mtu=<MAXIMUMFRAMESIZE> <INTERFACE>",
# Replace <INTERFACE> with the interface name
cmd = self.cmd_set_interface[3].replace("<INTERFACE>", interface)
# Replace <MAXIMUMFRAMESIZE> with the size of the frame
cmd = cmd.replace("<MAXIMUMFRAMESIZE>", str(maximum_frame_size))
# Display info message
log.info(f"set_interface: maximum_frame_size: cmd: {cmd}")
# Change the Maximum Frame Size of the interface
output = await self.send_command(cmd)
# Check if there is an error
# "value of l2mtu out of range (0..65536)"
if "out of range" in output:
# Error with the Maximum Frame Size value
# Display info message
log.error(f"set_interface: maximum_frame_size: output: {output}")
# Return an error
return return_status
# "mode" found?
        if mode is not None:
# Yes
# So the mode (access, trunk, hybrid) of the interface can be changed
# Note that it affects an interface inside a bridge
# Display info message
log.info("set_interface: mode")
# Adapt the command line
# "interface bridge port set frame-types=<MODE> ingress-filtering=<FILTERINGVLAN> [find interface=<INTERFACE>]",
# Replace <INTERFACE> with the interface name
cmd = self.cmd_set_interface[4].replace("<INTERFACE>", interface)
# Replace <FILTERINGVLAN> with "yes" (hybrid, trunk) or "no" (access)
cmd = cmd.replace("<FILTERINGVLAN>", "no" if mode == "access" else "yes")
# By default port is in trunk mode
interface_mode = "admit-only-vlan-tagged"
# Access?
if mode == "access":
# Yes
interface_mode = "admit-only-untagged-and-priority-tagged"
# Hybrid?
elif mode == "hybrid":
# Yes
interface_mode = "admit-all"
# Replace <MODE> with:
# "admit-all": for "hybrid"
# "admit-only-untagged-and-priority-tagged": for "access"
# "admit-only-vlan-tagged": for "trunk"
cmd = cmd.replace("<MODE>", interface_mode)
# Display info message
log.info(f"set_interface: mode: cmd: {cmd}")
# Change the mode of the interface
await self.send_command(cmd)
# No error
return_status = True
# Return status
return return_status
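    # Hedged usage sketch (the interface name "ether1" is hypothetical;
    # `device` is an established session of this class):
    #
    #     ok = await device.set_interface(interface="ether1", admin_state=True,
    #                                     description="uplink",
    #                                     maximum_frame_size=1522, mode="trunk")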
async def add_interface_to_vlan(
self,
interface=None,
mode=None,
vlan=None,
**kwargs,
):
"""
        Async method used to add an interface to a VLAN of the device
:param interface: the name of the interface
:type interface: str
:param mode: mode of the interface (access, trunk, hybrid)
:type mode: str
:param vlan: VLAN number
:type vlan: int
:param kwargs: not used
:type kwargs: dict
:return: Status. True = no error, False = error
:rtype: bool
"""
# Display info message
log.info("add_interface_to_vlan")
# By default result status is having an error
return_status = False
# Display info message
log.info(f"add_interface_to_vlan: input: interface: {interface}")
log.info(f"add_interface_to_vlan: input: mode: {mode}")
log.info(f"add_interface_to_vlan: input: vlan: {vlan}")
# Get parameters
# "interface" found?
        if interface is None:
# No
# So no action can be performed
# Display info message
log.info("add_interface_to_vlan: no interface specified")
# Return status
return return_status
# "mode" found?
        if mode is None:
# No
# So no action can be performed
# Display info message
log.info("add_interface_to_vlan: no mode specified")
# Return status
return return_status
# "vlan" found?
        if vlan is None:
# No
# So no action can be performed
# Display info message
log.info("add_interface_to_vlan: no vlan specified")
# Return status
return return_status
# Convert VLAN (integer) to string
vlan_string = str(vlan)
# Get all VLAN IDs
# Get command
cmd = self.cmd_add_interface_to_vlan[0]
# Display info message
log.info(f"add_interface_to_vlan: get VLAN IDs: cmd: {cmd}")
# Change the VLAN of the interface (in VLAN config of a bridge)
output = await self.send_command(cmd)
# Display info message
log.info(f"add_interface_to_vlan: get VLAN IDs: output: {output}")
# Convert a string into a list of strings
lines = output.splitlines()
# By default no VLAN found
vlan_found = False
# Check each line
for line in lines:
# VLAN IDs in the line?
if "vlan-ids=" in line:
# Yes
# Get VLAN IDs
list_of_vlans_in_one_line = (
line.split(" vlan-ids=")[-1].split()[0].split(",")
)
# Something returned?
if list_of_vlans_in_one_line:
# Yes
# Is the first element empty?
if list_of_vlans_in_one_line[0] != '""':
# No it is not empty
# Check if the current VLAN is the one we look for
if vlan_string in list_of_vlans_in_one_line:
# That is the VLAN
# Display info message
log.info(
f"add_interface_to_vlan: get VLAN IDs: VLAN found: {vlan}"
)
# Get tagged list of interfaces
tagged_list_of_interfaces = (
line.split(" tagged=")[-1].split()[0].split(",")
)
# Get untagged list of interfaces
untagged_list_of_interfaces = (
line.split(" untagged=")[-1].split()[0].split(",")
)
# VLAN found
vlan_found = True
# Leave the loop
break
# VLAN found?
if not vlan_found:
# No VLAN found
# So it is impossible to add interface to a non-existing VLAN
# Display info message
log.info("add_interface_to_vlan: get VLAN IDs: no VLAN found")
return False
# Display info message
log.info(
f"add_interface_to_vlan: get VLAN IDs: tagged_list_of_interfaces: {tagged_list_of_interfaces}"
)
# Display info message
log.info(
f"add_interface_to_vlan: get VLAN IDs: untagged_list_of_interfaces: {untagged_list_of_interfaces}"
)
# Check if tagged and untagged list have a value ['""']
# Check if tagged_list_of_interfaces has just one element
if len(tagged_list_of_interfaces) == 1:
# Yes just one
# Check if that element is ""
if tagged_list_of_interfaces[0] == '""':
# Yes it is
# So the value is removed
tagged_list_of_interfaces = []
# Check if untagged_list_of_interfaces has just one element
if len(untagged_list_of_interfaces) == 1:
# Yes just one
# Check if that element is ""
if untagged_list_of_interfaces[0] == '""':
# Yes it is
# So the value is removed
untagged_list_of_interfaces = []
# Display info message
log.info(
f'add_interface_to_vlan: get VLAN IDs: after removing "": tagged_list_of_interfaces: {tagged_list_of_interfaces}'
)
# Display info message
log.info(
f'add_interface_to_vlan: get VLAN IDs: after removing "": untagged_list_of_interfaces: {untagged_list_of_interfaces}'
)
# Check if mode is "access"
if mode == "access":
# Access mode interface
# Add the interface to the list of all the untagged interfaces
untagged_list_of_interfaces.append(interface)
            # String with all interfaces separated with comma
all_untagged_list_of_interfaces = ",".join(untagged_list_of_interfaces)
# "interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>",
# Replace <INTERFACE> with the interface name
cmd = self.cmd_add_interface_to_vlan[1].replace(
"<INTERFACE>", all_untagged_list_of_interfaces
)
# Replace <VLAN> with the VLAN value
cmd = cmd.replace("<VLAN>", vlan_string)
# Display info message
log.info(f"add_interface_to_vlan: mode access: vlan: cmd: {cmd}")
# Change the VLAN of the interface (in VLAN config of a bridge)
output = await self.send_command(cmd)
# Check if there is an error
# "failure: interface cannot be in tagged and untagged at the same time"
# "failure: each interface can appear only once"
if "failure" in output:
# Error with the VLAN value
# Display info message
log.error(f"add_interface_to_vlan: mode access: vlan: output: {output}")
# Return an error
return return_status
# "interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>",
# Replace <INTERFACE> with the interface name
cmd = self.cmd_add_interface_to_vlan[3].replace("<INTERFACE>", interface)
# Replace <VLAN> with the VLAN value
cmd = cmd.replace("<VLAN>", vlan_string)
# Display info message
log.info(f"add_interface_to_vlan: mode access: port: cmd: {cmd}")
# Change the VLAN of the interface (in Port config of a bridge)
output = await self.send_command(cmd)
# Check if there is an error
# "value of pvid out of range (1..4094)"
if "out of range" in output:
# Error with the VLAN value
# Display info message
log.error(f"add_interface_to_vlan: mode access: port: output: {output}")
# Return an error
return return_status
else:
# trunk or hybrid mode
# Add the interface to the list of all the tagged interfaces
tagged_list_of_interfaces.append(interface)
            # String with all interfaces separated with comma
all_tagged_list_of_interfaces = ",".join(tagged_list_of_interfaces)
# "interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>",
# Replace <INTERFACE> with the interface name
cmd = self.cmd_add_interface_to_vlan[2].replace(
"<INTERFACE>", all_tagged_list_of_interfaces
)
# Replace <VLAN> with the VLAN value
cmd = cmd.replace("<VLAN>", vlan_string)
# Display info message
log.info(f"add_interface_to_vlan: mode trunk or hybrid: cmd: {cmd}")
# Change the description of the interface
output = await self.send_command(cmd)
# Check if there is an error
# "failure: interface cannot be in tagged and untagged at the same time"
# "failure: can not change dynamic"
# "failure: each interface can appear only once"
if "failure" in output:
# Error with the VLAN value
# Display info message
log.error(
f"add_interface_to_vlan: mode trunk/hybrid: port: output: {output}"
)
# Return an error
return return_status
# No error
return_status = True
# Return status
return return_status
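    # Hedged usage sketch (names are hypothetical); the VLAN must already
    # exist on the bridge, e.g. created with add_vlan() above:
    #
    #     ok = await device.add_interface_to_vlan(interface="ether2",
    #                                             mode="access", vlan=100)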
async def remove_interface_from_vlan(
self,
interface=None,
mode=None,
vlan=None,
**kwargs,
):
"""
        Async method used to remove an interface from a VLAN of the device
:param interface: the name of the interface
:type interface: str
:param mode: mode of the interface (access, trunk, hybrid)
:type mode: str
:param vlan: VLAN number
:type vlan: int
:param kwargs: not used
:type kwargs: dict
:return: Status. True = no error, False = error
:rtype: bool
"""
# Display info message
log.info("remove_interface_from_vlan")
# By default result status is having an error
return_status = False
# Display info message
log.info(f"remove_interface_from_vlan: input: interface: {interface}")
log.info(f"remove_interface_from_vlan: input: mode: {mode}")
log.info(f"remove_interface_from_vlan: input: vlan: {vlan}")
# Get parameters
# "interface" found?
        if interface is None:
# No
# So no action can be performed
# Display info message
log.info("remove_interface_from_vlan: no interface specified")
# Return status
return return_status
# "mode" found?
        if mode is None:
# No
# So no action can be performed
# Display info message
log.info("remove_interface_from_vlan: no mode specified")
# Return status
return return_status
# "vlan" found?
        if vlan is None:
# No
# So no action can be performed
# Display info message
log.info("remove_interface_from_vlan: no vlan specified")
# Return status
return return_status
# Convert VLAN (integer) to string
vlan_string = str(vlan)
# Get command
cmd = self.cmd_remove_interface_from_vlan[0]
# Display info message
log.info(f"remove_interface_from_vlan: get VLAN IDs: cmd: {cmd}")
# Change the VLAN of the interface (in VLAN config of a bridge)
output = await self.send_command(cmd)
# Display info message
log.info(f"remove_interface_from_vlan: get VLAN IDs: output: {output}")
# Convert a string into a list of strings
lines = output.splitlines()
# By default no VLAN found
vlan_found = False
# Check each line
for line in lines:
# VLAN IDs in the line?
if "vlan-ids=" in line:
# Yes
# Get VLAN IDs
list_of_vlans_in_one_line = (
line.split(" vlan-ids=")[-1].split()[0].split(",")
)
# Something returned?
if list_of_vlans_in_one_line:
# Yes
# Is the first element empty?
if list_of_vlans_in_one_line[0] != '""':
# No it is not empty
# Check if the current VLAN is the one we look for
if vlan_string in list_of_vlans_in_one_line:
# That is the VLAN
# Display info message
log.info(
f"remove_interface_from_vlan: get VLAN IDs: VLAN found: {vlan}"
)
# Get tagged list of interfaces
tagged_list_of_interfaces = (
line.split(" tagged=")[-1].split()[0].split(",")
)
# Get untagged list of interfaces
untagged_list_of_interfaces = (
line.split(" untagged=")[-1].split()[0].split(",")
)
# VLAN found
vlan_found = True
# Leave the loop
break
# VLAN found?
if not vlan_found:
# No VLAN found
# So it is impossible to remove interface from a non-existing VLAN
# Display info message
log.info("remove_interface_from_vlan: get VLAN IDs: no VLAN found")
return False
# Display info message
log.info(
f"remove_interface_from_vlan: get VLAN IDs: tagged_list_of_interfaces: {tagged_list_of_interfaces}"
)
# Display info message
log.info(
f"remove_interface_from_vlan: get VLAN IDs: untagged_list_of_interfaces: {untagged_list_of_interfaces}"
)
# Check if tagged and untagged list have a value ['""']
# Check if tagged_list_of_interfaces has just one element
if len(tagged_list_of_interfaces) == 1:
# Yes just one
# Check if that element is ""
if tagged_list_of_interfaces[0] == '""':
# Yes it is
# So the value is removed
tagged_list_of_interfaces = []
# Check if untagged_list_of_interfaces has just one element
if len(untagged_list_of_interfaces) == 1:
# Yes just one
# Check if that element is ""
if untagged_list_of_interfaces[0] == '""':
# Yes it is
# So the value is removed
untagged_list_of_interfaces = []
# Display info message
log.info(
f'remove_interface_from_vlan: get VLAN IDs: after removing "": tagged_list_of_interfaces: {tagged_list_of_interfaces}'
)
# Display info message
log.info(
f'remove_interface_from_vlan: get VLAN IDs: after removing "": untagged_list_of_interfaces: {untagged_list_of_interfaces}'
)
# Check if mode is "access"
if mode == "access":
# Access mode interface
            # Check if the interface is in the list of untagged interfaces
if interface not in untagged_list_of_interfaces:
# The interface is not in the list of interfaces of the VLAN
# Display info message
log.error(
f"remove_interface_from_vlan: access: interface '{interface}' does not belong to VLAN {vlan_string}"
)
# Return an error
return return_status
# Remove the interface to the list of all the untagged interfaces
untagged_list_of_interfaces.remove(interface)
            # String with all interfaces separated with comma
all_untagged_list_of_interfaces = ",".join(untagged_list_of_interfaces)
# Empty string?
if all_untagged_list_of_interfaces == "":
# Yes
# Give an empty string (Mikrotik format)
all_untagged_list_of_interfaces = '""'
# "interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>",
# Replace <INTERFACE> with the interface name
cmd = self.cmd_remove_interface_from_vlan[1].replace(
"<INTERFACE>", all_untagged_list_of_interfaces
)
# Replace <VLAN> with the VLAN value
cmd = cmd.replace("<VLAN>", vlan_string)
# Display info message
log.info(f"remove_interface_from_vlan: mode access: vlan: cmd: {cmd}")
# Change the VLAN of the interface (in VLAN config of a bridge)
output = await self.send_command(cmd)
# Check if there is an error
# "failure: interface cannot be in tagged and untagged at the same time"
# "failure: each interface can appear only once"
if "failure" in output:
# Error with the VLAN value
# Display info message
log.error(
f"remove_interface_from_vlan: mode access: vlan: output: {output}"
)
# Return an error
return return_status
# "interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>",
# Replace <INTERFACE> with the interface name
cmd = self.cmd_remove_interface_from_vlan[3].replace(
"<INTERFACE>", interface
)
# Replace <VLAN> with the default VLAN value
cmd = cmd.replace("<VLAN>", "1")
# Display info message
log.info(f"remove_interface_from_vlan: mode access: port: cmd: {cmd}")
# Change the VLAN of the interface (in Port config of a bridge)
output = await self.send_command(cmd)
# Check if there is an error
# "value of pvid out of range (1..4094)"
if "out of range" in output:
# Error with the VLAN value
# Display info message
log.error(
f"cmd_remove_interface_from_vlan: mode access: port: output: {output}"
)
# Return an error
return return_status
else:
# trunk or hybrid mode
# Check if the interface is in the list of tagged interfaces
if interface not in tagged_list_of_interfaces:
# The interface is not in the list of interfaces of the VLAN
# Display info message
log.error(
f"remove_interface_from_vlan: trunk/hybrid: interface '{interface}' does not belong to VLAN {vlan_string}"
)
# Return an error
return return_status
# Remove the interface from the list of all the tagged interfaces
tagged_list_of_interfaces.remove(interface)
            # String with all interfaces separated with comma
all_tagged_list_of_interfaces = ",".join(tagged_list_of_interfaces)
# Empty string?
if all_tagged_list_of_interfaces == "":
# Yes
# Give an empty string (Mikrotik format)
all_tagged_list_of_interfaces = '""'
# "interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>",
# Replace <INTERFACE> with the interface name
cmd = self.cmd_remove_interface_from_vlan[2].replace(
"<INTERFACE>", all_tagged_list_of_interfaces
)
# Replace <VLAN> with the VLAN value
cmd = cmd.replace("<VLAN>", vlan_string)
# Display info message
log.info(f"remove_interface_from_vlan: mode trunk or hybrid: cmd: {cmd}")
# Change the description of the interface
output = await self.send_command(cmd)
# Check if there is an error
# "failure: interface cannot be in tagged and untagged at the same time"
# "failure: can not change dynamic"
# "failure: each interface can appear only once"
if "failure" in output:
# Error with the VLAN value
# Display info message
log.error(
f"remove_interface_from_vlan: mode trunk/hybrid: port: output: {output}"
)
# Return an error
return return_status
# No error
return_status = True
# Return status
return return_status
async def get_interfaces_ip(self):
"""
        Async method used to get IP addresses of the interfaces of the device
Only IPv4 is supported
:return: the interfaces and their IP addresses
:rtype: dict of dict
"""
# Display info message
log.info("get_interfaces_ip")
# Get command
cmd = self.cmd_get_interfaces_ip
# Sending command
output = await self.send_command(cmd)
# Display info message
log.info(f"get_interfaces_ip: output: '{output}'")
# By default the dictionary returned is empty
returned_dict = {}
# Convert a string into a list of strings
lines = output.splitlines()
# Read each line
for line in lines:
# Set default values for variables
interface = None
address = None
prefix = None
# Get interface
if " interface=" in line:
interface = line.split(" interface=")[-1].split()[0]
# Get IP address and prefix
if " address=" in line:
full_address = line.split(" address=")[-1].split()[0]
# Separate IP address from prefix
(address, prefix_string) = full_address.split("/")
# Convert prefix into a number
prefix = int(prefix_string)
# An interface found?
if interface:
# Yes
# So the information can be saved into the returned dictionary
# Is it a new interface?
if interface in returned_dict:
# No
# Save another IP address for the same interface
returned_dict[interface]["ipv4"][address] = {"prefix_length": 24}
else:
# Yes
# So the new interface is saved into the dictionary
returned_dict[interface] = {
"ipv4": {address: {"prefix_length": prefix}}
}
# Return data
return returned_dict
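    # Hedged illustration of the structure built above (addresses invented):
    #
    #     {"bridge1": {"ipv4": {"192.168.88.1": {"prefix_length": 24}}},
    #      "ether1": {"ipv4": {"10.0.0.1": {"prefix_length": 8}}}}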
async def add_static_route(
self,
network_ip=None,
prefix_length=None,
destination_ip=None,
metric=1,
**kwargs,
):
"""
        Async method used to add a static route to the routing table of the device
Only IPv4 is supported
EXPERIMENTAL (not tested)
:param network_ip: the network to add to the route
:type network_ip: str
:param prefix_length: length of the network mask (32, 31, 30 ... for /32, /31, /30 ...)
:type prefix_length: int
:param destination_ip: IP address as a destination
:type destination_ip: str
:param metric: optional, the metric to specify to the route. Default value is 1
:type metric: int
:param kwargs: not used
:type kwargs: dict
:return: Status. True = no error, False = error
:rtype: bool
"""
# Display info message
log.info("add_static_route")
# By default result status is having an error
return_status = False
# Check if a network has been specified
if not network_ip:
# No
# Display info message
log.error(f"add_static_route: no network specified: {network_ip}")
# Return an error
return return_status
# Check if a prefix_length has been specified
if not prefix_length:
# No
# Display info message
log.error(f"add_static_route: no prefix_length specified: {prefix_length}")
# Return an error
return return_status
# Check if the prefix_length is between 1 and 32
if prefix_length < 1 or prefix_length > 32:
# No
# Display info message
log.error(
f"add_static_route: prefix_length incorrect value (1...32): {prefix_length}"
)
# Return an error
return return_status
# Check if a destination_ip has been specified
if not destination_ip:
# No
# Display info message
log.error(
f"add_static_route: no destination_ip specified: {destination_ip}"
)
# Return an error
return return_status
# Check if a metric has been specified
if not metric:
# No
# Display info message
log.error(f"add_static_route: no metric specified: {metric}")
# Return an error
return return_status
# self.cmd_add_static_route = "ip route add dst-address=<NETWORK>/<PREFIXLENGTH> gateway=<DESTINATION> distance=<METRIC>"
# Replace <NETWORK> with the network value
cmd = self.cmd_add_static_route.replace("<NETWORK>", network_ip)
# Replace <PREFIXLENGTH> with the prefix_length value
cmd = cmd.replace("<PREFIXLENGTH>", str(prefix_length))
# Replace <DESTINATION> with the destination value
cmd = cmd.replace("<DESTINATION>", destination_ip)
# Replace <METRIC> with the metric value
cmd = cmd.replace("<METRIC>", str(metric))
# Display info message
log.info(f"add_static_route: cmd: {cmd}")
# Sending command
await self.send_command(cmd)
# No error
return_status = True
# Return status
return return_status
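    # Hedged usage sketch (addresses invented): route 192.168.10.0/24 via
    # gateway 192.168.88.254 with the default metric of 1:
    #
    #     ok = await device.add_static_route(network_ip="192.168.10.0",
    #                                        prefix_length=24,
    #                                        destination_ip="192.168.88.254")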
async def remove_static_route(
self,
network_ip=None,
prefix_length=None,
destination_ip=None,
**kwargs,
):
"""
        Async method used to remove a static route from the routing table of the device
        Only IPv4 is supported
        EXPERIMENTAL (not tested)
        :param network_ip: the network to remove from the route
:type network_ip: str
:param prefix_length: length of the network mask (32, 31, 30 ... for /32, /31, /30 ...)
:type prefix_length: int
:param destination_ip: not used
:type destination_ip: str
:param kwargs: not used
:type kwargs: dict
:return: Status. True = no error, False = error
:rtype: bool
"""
# Display info message
log.info("remove_static_route")
# By default result status is having an error
return_status = False
# Check if a network has been specified
if not network_ip:
# No
# Display info message
log.error(f"remove_static_route: no network specified: {network_ip}")
# Return an error
return return_status
# Check if a prefix_length has been specified
if not prefix_length:
# No
# Display info message
log.error(
f"remove_static_route: no prefix_length specified: {prefix_length}"
)
# Return an error
return return_status
# Check if the prefix_length is between 1 and 32
if prefix_length < 1 or prefix_length > 32:
# No
# Display info message
log.error(
f"remove_static_route: prefix_length incorrect value (1...32): {prefix_length}"
)
# Return an error
return return_status
# self.cmd_remove_static_route = "ip route remove [find dst-address=<NETWORK>/<PREFIXLENGTH>]"
# Replace <NETWORK> with the network value
cmd = self.cmd_remove_static_route.replace("<NETWORK>", network_ip)
# Replace <PREFIXLENGTH> with the prefix_length value
cmd = cmd.replace("<PREFIXLENGTH>", str(prefix_length))
# Display info message
log.info(f"remove_static_route: cmd: {cmd}")
# Sending command
await self.send_command(cmd)
# No error
return_status = True
# Return status
return return_status
| 31.572003
| 708
| 0.532033
|
cd3e8ded2d7a9f19b9301172ece1f38fba4de2ca
| 22,060
|
py
|
Python
|
source/segment/nnmf_segment.py
|
lucori/NN-MitralSeg
|
d20a7348f309db5c86d3f53c3e13c3cfa98a64a6
|
[
"MIT"
] | 2
|
2021-09-23T06:35:57.000Z
|
2021-12-07T18:04:28.000Z
|
source/segment/nnmf_segment.py
|
lucori/NN-MitralSeg
|
d20a7348f309db5c86d3f53c3e13c3cfa98a64a6
|
[
"MIT"
] | null | null | null |
source/segment/nnmf_segment.py
|
lucori/NN-MitralSeg
|
d20a7348f309db5c86d3f53c3e13c3cfa98a64a6
|
[
"MIT"
] | 1
|
2021-05-06T09:07:45.000Z
|
2021-05-06T09:07:45.000Z
|
import torch
import torch.optim as optim
import torch.nn as nn
from torch.autograd import Variable
import os
from segment.nnmf import NNMF
from evaluation import get_scores
from .pytorch_utils import load_dataset, EarlyStopping
from .segment_class import MitralSeg
from sklearn.decomposition import NMF
from utils import window_detection
import numpy as np
from utils import animate, colorize, refactor, softplus
import matplotlib
matplotlib.use('agg')  # select the non-interactive backend before importing pyplot
import matplotlib.pyplot as plt
from torch.utils.tensorboard import SummaryWriter
import time
dir_path = os.path.dirname(os.path.realpath(__file__))
class SegNNMF(MitralSeg):
def __init__(self, l1_mult, l21_mult, embedding_mult, epochs, n_steps, learning_rate, mlp_size, gmf_size, batchsize,
num_workers, device, embedding_nmf_init, gmf_net_init, mlp_layers, threshold_layers, window_size,
save_data_every, save_tensorboard_summary_every, search_window_size, opt_flow_window_size,
train_test_split, patience, min_delta, early_stopping, connected_struct, morph_op, option,
threshold_mv, threshold_wd, spat_temp_mult):
super(SegNNMF, self).__init__()
self.mlp_size = mlp_size
self.gmf_size = gmf_size
self.l1_mult = l1_mult
self.l21_mult = l21_mult
self.embedding_mult = embedding_mult
self.spat_temp_mult = spat_temp_mult
self.epochs = epochs
self.n_steps = n_steps
self.lr = learning_rate
self.batchsize = batchsize
self.num_workers = num_workers
self.device = device
self.embedding_nmf_init = embedding_nmf_init
self.gmf_net_init = gmf_net_init
self.mlp_layers = mlp_layers
self.threshold_layers = threshold_layers
self.option = option
self.nnmf = NNMF(self.gmf_size, self.mlp_size, self.mlp_layers, self.threshold_layers)
self.window_size = window_size
self.search_window_size = search_window_size
self.opt_flow_window_size = float(opt_flow_window_size)
self.connected_struct = connected_struct
self.morph_op = morph_op
self.train_test_split = train_test_split
self.patience = patience
self.min_delta = min_delta
self.early_stopping = early_stopping
self.save_data_every = save_data_every
self.save_tensorboard_summary_every = save_tensorboard_summary_every
self.x_hat = None
self.s = None
self.s_reshape = None
self.train_loader = None
self.val_loader = None
self.embedding_params = None
self.embedding_opt = None
self.neu_mf_opt = None
self.threshold_opt = None
self.threshold_mv = threshold_mv
self.threshold_wd = threshold_wd
def l1_loss(self, s_out):
loss = torch.mean(torch.abs(s_out))
return loss
def l21_loss(self, s):
return torch.mean(torch.norm(s, dim=1) / np.sqrt(s.shape[1]))
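    # Hedged note (added comment, not in the original file): this is a
    # row-normalized L2,1 penalty, mean_i(||s_i||_2 / sqrt(n)), which drives
    # whole rows of the sparse matrix S towards zero rather than isolated
    # entries.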
def set_x(self, matrix3d):
super(SegNNMF, self).set_x(matrix3d)
self.x_hat = torch.empty_like(torch.from_numpy(self.matrix2d), dtype=torch.float32)
self.s = torch.empty_like(torch.from_numpy(self.matrix2d), dtype=torch.float32)
embedding_nmf_init = self.initialize_embedding_gmf()
self.nnmf.set_matrix(self.matrix2d, embedding_nmf_init)
# create data loader
print('loading dataset')
if self.n_steps:
self.epochs = int(self.n_steps / (matrix3d.size / self.batchsize))
(self.train_loader, self.val_loader) = load_dataset(self.matrix2d, batch_size=self.batchsize,
num_workers=self.num_workers,
train_test_split=self.train_test_split)
# optimizers for mlp and latent features
self.embedding_params = self.nnmf.embedding_parameters()
self.embedding_opt = optim.Adam(self.embedding_params, lr=self.lr)
self.neu_mf_opt = optim.Adam(list(self.nnmf.mlp.parameters()) + list(self.nnmf.neu_mf.parameters()), lr=self.lr)
self.threshold_opt = optim.Adam(self.nnmf.threshold_mlp.parameters(), lr=self.lr)
def initialize_embedding_gmf(self):
if self.embedding_nmf_init:
model = NMF(n_components=self.gmf_size, init='random', random_state=0, max_iter=200, tol=0.0001)
w = model.fit_transform(self.matrix2d)
h = model.components_.transpose()
nmf_par = (w, h)
else:
nmf_par = None
return nmf_par
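    # Hedged standalone sketch of the initialization above: scikit-learn's NMF
    # factorizes a non-negative matrix X ~= W @ H, and (W, H^T) seed the
    # pixel/frame embeddings. Toy shapes, not the real echo data:
    #
    #     from sklearn.decomposition import NMF
    #     import numpy as np
    #     X = np.random.rand(16, 8)                  # non-negative toy matrix
    #     model = NMF(n_components=4, init='random', random_state=0)
    #     w = model.fit_transform(X)                 # (16, 4)
    #     h = model.components_.transpose()          # (8, 4)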
def train_threshold(self, mse_xs_loss, l1_loss, l21_loss):
self.threshold_opt.zero_grad()
loss_threshold = mse_xs_loss + self.l1_mult * l1_loss + self.l21_mult * l21_loss
loss_threshold.backward(retain_graph=True)
self.threshold_opt.step()
def train_neu_mf(self, mse_x_loss):
self.neu_mf_opt.zero_grad()
loss_neu_mf = mse_x_loss
loss_neu_mf.backward(retain_graph=True)
self.neu_mf_opt.step()
    def train_embedding(self, mse_x_loss, embedding_reg, spatial_reg, temporal_reg, valve=None):
        self.embedding_opt.zero_grad()
        embedding_loss = mse_x_loss + self.embedding_mult * embedding_reg + \
                         self.spat_temp_mult * (spatial_reg + temporal_reg)
embedding_loss.backward(retain_graph=True)
if valve:
valve_frames = [int(list(v.keys())[0])-1 for v in valve]
mid = int((valve_frames[-1]+valve_frames[-2])/2)
for par in self.embedding_params:
if par.shape[0] == self.m:
par.grad[mid:, ...] = 0
self.embedding_opt.step()
def train(self, save_location=None):
# initialize epochs
self.nnmf.init_params(gmf_net_init=self.gmf_net_init)
self.nnmf.to(self.device)
self.x_hat = self.x_hat.to(self.device)
self.s = self.s.to(self.device)
train_writer = SummaryWriter(log_dir=save_location + '/train')
if self.val_loader:
val_writer = SummaryWriter(log_dir=save_location + '/val')
print('beginning training')
ep = 0
global_step = 0
self.save_tensorboard_summary(train_writer, initialization=True)
eval_dict = {}
if self.early_stopping:
self.early_stopping = EarlyStopping(patience=self.patience, mode='min', percentage=True,
min_delta=self.min_delta)
training_time = 0
while ep < self.epochs:
print("Epoch {} of {}".format(ep, self.epochs - 1))
start_time_epoch = time.time()
time_detach = 0
self.nnmf.train()
if self.early_stopping:
cum_mse_xs_loss = 0
cum_mse_x_loss = 0
cum_l1_loss = 0
cum_embedding_reg = 0
for batch_id, batch in enumerate(self.train_loader, 0):
pixel = Variable(batch[0])
frame = Variable(batch[1])
target = Variable(batch[2])
# send x_hat and s to gpu
pixel = pixel.to(self.device)
frame = frame.to(self.device)
target = target.to(self.device).float()
target = torch.reshape(target, shape=(target.shape[0], 1))
self.batchsize_eff = pixel.shape[0]
# forward pass
x_out, s_out = self.nnmf.forward(pixel, frame, target)
self.s[pixel, frame] = torch.squeeze(s_out)
# compute losses
mse_xs_loss = nn.functional.mse_loss(target, x_out + s_out)
mse_x_loss = nn.functional.mse_loss(target, x_out)
l1_loss = self.l1_loss(s_out)
l21_loss = 0 # self.l21_loss(self.s)
embedding_reg = self.nnmf.embedding_regularization(pixel, frame)
spatial_reg = self.nnmf.spatial_regularization(self.device)
temporal_reg = self.nnmf.temporal_regularization(self.device)
# backward and step
self.train_neu_mf(mse_x_loss)
self.train_embedding(mse_x_loss, embedding_reg, spatial_reg, temporal_reg)
self.train_threshold(mse_xs_loss, l1_loss, l21_loss)
# update x_hat and s
start_time = time.time()
self.x_hat[pixel, frame] = torch.squeeze(x_out.detach())
self.s = self.s.detach()
                time_detach += time.time() - start_time
if global_step % (self.epochs*5) == 0:
data_dict = {'mse_x': mse_x_loss,
'mse_xs': mse_xs_loss,
'embedding_regularization': embedding_reg,
'l1_loss': l1_loss,
'l21_loss': l21_loss}
                    # Print training progress periodically
print(data_dict)
self.save_scalar_summary(data_dict, train_writer, global_step)
self.s_reshape = np.reshape(self.s.cpu().numpy(), newshape=(self.vert, self.horz, self.m))
self.myocardium = np.reshape(self.x_hat.cpu().numpy(), newshape=(self.vert, self.horz, self.m))
if self.early_stopping:
cum_mse_xs_loss += mse_xs_loss.detach() / len(self.train_loader)
cum_mse_x_loss += mse_x_loss.detach() / len(self.train_loader)
cum_l1_loss += l1_loss.detach() / len(self.train_loader)
cum_embedding_reg += embedding_reg.detach() / len(self.train_loader)
# batches
                global_step = global_step + 1
            # Accumulate the epoch's wall-clock time once, after the batch loop
            training_time += time.time() - start_time_epoch
if ep == 0 or (ep % self.save_data_every == 0 or ep == self.epochs - 1):
print('extracting tensors for segmentation')
start_time = time.time()
self.s_reshape = np.reshape(self.s.cpu().numpy(), newshape=(self.vert, self.horz, self.m))
self.myocardium = np.reshape(self.x_hat.cpu().numpy(), newshape=(self.vert, self.horz, self.m))
print('finish extracting in ', time.time() - start_time, "seconds")
print("window detection...")
start_time = time.time()
# detect window
win, _, _ = window_detection(tensor=self.s_reshape, option=self.option,
time_series=self.nnmf.gmf_v.weight.detach().cpu().numpy(),
window_size=self.window_size,
search_window_size=self.search_window_size,
opt_flow_window_size=self.opt_flow_window_size,
threshold=self.threshold_wd,
stride=2)
self.mask = win[0]
print('finish window detection in ', time.time() - start_time, "seconds")
start_time = time.time()
self.valve = self.get_valve(self.s_reshape, self.mask, threshold=self.threshold_mv)
print('finish valve segmentation in ', time.time() - start_time, "seconds")
# saving segmentation and predicted window as well the embedding and sparse matrix
data_dict_save = self.create_dict(ep)
if ep != 0:
self.save_data(data_dict_save, save_location=save_location)
# get evaluation scores
start_time = time.time()
if self.valve_gt is not None and self.mask_gt is not None:
eval_dict = get_scores(self.mask, self.valve, self.mask_gt, self.valve_gt)
self.save_scalar_summary(eval_dict, train_writer, global_step)
print('finish scalar eval summary in ', time.time() - start_time, "seconds")
if self.early_stopping:
if self.early_stopping.step(cum_mse_x_loss):
self.save_tensorboard_summary(train_writer, initialization=False, global_step=global_step)
break
start_time = time.time()
            if ep % self.save_tensorboard_summary_every == 0 or ep == self.epochs - 1:
                self.save_tensorboard_summary(train_writer, initialization=False, global_step=global_step)
print('finish image summary in ', time.time() - start_time, "seconds")
if self.val_loader:
with torch.no_grad():
self.nnmf.eval()
mse_xs_loss = 0
mse_x_loss = 0
l1_loss = 0
embedding_reg = 0
spatial_reg = 0
temporal_reg = 0
for batch_id, batch in enumerate(self.val_loader, 0):
pixel, frame, target = Variable(batch[0]), Variable(batch[1]), Variable(batch[2])
# send x_hat and s to gpu
pixel = pixel.to(self.device)
frame = frame.to(self.device)
target = target.to(self.device).float()
target = torch.reshape(target, shape=(target.shape[0], 1))
self.batchsize_eff = pixel.shape[0]
# forward pass
x_out, s_out = self.nnmf.forward(pixel, frame, target)
# compute losses
mse_xs_loss += nn.functional.mse_loss(target, x_out + s_out)
mse_x_loss += nn.functional.mse_loss(target, x_out)
l1_loss += self.l1_loss(s_out)
embedding_reg += self.nnmf.embedding_regularization(pixel,
frame) * self.embedding_mult / self.batchsize_eff
spatial_reg += self.nnmf.spatial_regularization(self.device)
temporal_reg += self.nnmf.temporal_regularization(self.device)
mse_xs_loss = mse_xs_loss / len(self.val_loader)
mse_x_loss = mse_x_loss / len(self.val_loader)
l1_loss = l1_loss / len(self.val_loader)
embedding_reg = embedding_reg / len(self.val_loader)
spatial_reg = spatial_reg / len(self.val_loader)
temporal_reg = temporal_reg / len(self.val_loader)
data_dict = {'mse_x': mse_x_loss,
'mse_xs': mse_xs_loss,
'embedding_regularization': embedding_reg,
'l1_loss': l1_loss,
'spatial_reg': spatial_reg,
'temporal_reg': temporal_reg}
self.save_scalar_summary(data_dict, val_writer, global_step)
ep = ep + 1
eval_dict.update({'time': training_time})
return eval_dict
    def save_scalar_summary(self, scalars, writer, global_step):
        for name, loss in scalars.items():
            writer.add_scalar(name, loss, global_step=global_step)
def save_plots(self, data_dict, ep):
animate(self.matrix3d, data_dict['valve.npy'], self.dir + 'valve' + str(ep) + '.mp4')
animate(self.matrix3d, data_dict['myocardium.npy'], self.dir + 'myocardium' + str(ep) + '.mp4')
animate(self.matrix3d, data_dict['s.npy'], self.dir + 's' + str(ep) + '.mp4')
fig, ax = plt.subplots(figsize=(8, 8), nrows=2, ncols=2)
U = data_dict['U'].reshape((self.vert, self.horz, self.d))
ax[0, 0].imshow(U[:, :, 0], cmap='binary')
ax[0, 1].imshow(U[:, :, 1], cmap='binary')
plt.savefig(self.dir + 'U' + str(ep) + '.jpg')
plt.plot(data_dict['V'])
plt.savefig(self.dir + 'V' + str(ep) + '.jpg')
def save_tensorboard_embeddings(self, u, v, embedding_dim, name_u, name_v, writer, global_step, matrix_bin):
u = softplus(u.weight.detach().cpu().numpy().reshape((self.vert, self.horz, embedding_dim)))
u = np.expand_dims(np.stack([u, u, u], axis=0), axis=0)
writer.add_images(name_u, refactor(u), global_step=global_step)
v = softplus(v.weight.detach().cpu().numpy())
fig = plt.figure()
plt.plot(v)
writer.add_figure(name_v, fig, global_step=global_step)
dot_product = np.dot(u, v.T)[0, 0, ...]
myocardium_dot_prod = self.get_video(dot_product, matrix_bin, cmap='rainbow')
writer.add_video(name_u + '_' + name_v + '_dotprod', myocardium_dot_prod, global_step=global_step)
def save_tensorboard_summary(self, writer, initialization=True, global_step=0):
matrix = np.transpose(self.matrix3d, axes=(2, 0, 1))
matrix_bin = colorize(matrix, cmap='binary')
for j, l in enumerate(self.nnmf.neu_mf.parameters()):
writer.add_histogram('weights_neu_mf' + str(j), l.data, global_step=global_step)
inp = np.expand_dims(np.linspace(-1, 1), axis=1)
out = self.nnmf.threshold_mlp.forward(torch.from_numpy(inp).to(self.device).float()).detach().cpu().numpy()
fig = plt.figure()
plt.plot(inp, out)
writer.add_figure('threshold_fun', fig, global_step=global_step)
if self.mlp_size > 0:
#for j, l in enumerate(self.nnmf.mlp.parameters()):
# writer.add_histogram('weights_' + str(j), l.data, global_step=global_step)
self.save_tensorboard_embeddings(self.nnmf.mlp_u, self.nnmf.mlp_v, self.mlp_size, 'mlp_u', 'mlp_v', writer,
global_step, matrix_bin)
if self.gmf_size > 0:
self.save_tensorboard_embeddings(self.nnmf.gmf_u, self.nnmf.gmf_v, self.gmf_size, 'gmf_u', 'gmf_v', writer,
global_step, matrix_bin)
# for first time
if not initialization:
            myocardium = self.get_video(self.myocardium, matrix_bin, cmap='rainbow')
            sparse = self.get_video(self.s_reshape, matrix_bin, cmap='rainbow')
writer.add_video('myocardium', myocardium, global_step=global_step)
writer.add_video('sparse', sparse, global_step=global_step)
valve = self.get_video(self.valve, matrix_bin, cmap='rainbow')
writer.add_video('valve', valve, global_step=global_step)
# predicted and ground truth valves
if self.valve_gt is not None:
fig, axs = plt.subplots(1, 3)
fig.set_size_inches(12, 4)
fig.suptitle('')
for i in range(len(self.valve_gt)):
axs[i].imshow(self.get_valve_image(i, initialization))
writer.add_figure('segmentation', fig, global_step=global_step)
# predicted and ground truth window
if self.mask_gt is not None:
fig = plt.figure()
frame = np.squeeze(self.matrix3d[..., 0])
if initialization:
mask = np.zeros(shape=self.mask_gt.shape)
else:
if len(self.mask.shape) == 3:
mask = np.squeeze(self.mask[..., 0])
else:
mask = self.mask
color_image = np.clip(np.dstack([0.75 * frame + mask, 0.75 * frame, 0.75 * frame + self.mask_gt]), a_min=0,
a_max=1)
plt.imshow(color_image)
writer.add_figure('window', fig, global_step=global_step)
def get_video(self, tensor, matrix_bin, cmap='binary'):
tensor = np.transpose(tensor, axes=(2, 0, 1))
tensor_col = colorize(tensor, cmap=cmap)
tensor = np.where(np.stack([tensor for _ in range(4)], axis=-1), tensor_col, matrix_bin)
tensor = np.transpose(tensor, axes=(0, 3, 1, 2))
tensor = np.expand_dims(tensor, axis=0)
return tensor
def create_dict(self, ep):
ep = str(ep)
get_name = lambda x: x + ep + '.npy'
data_dict = {get_name('valve'): self.valve,
get_name('myocardium'): self.myocardium,
get_name('mask'): self.mask,
get_name('s'): self.s_reshape}
if self.mlp_size != 0:
data_dict.update({get_name('mlp_u'): self.nnmf.mlp_u.weight.detach().cpu().numpy(),
get_name('mlp_v'): self.nnmf.mlp_v.weight.detach().cpu().numpy()})
if self.gmf_size != 0:
data_dict.update({get_name('gmf_u'): self.nnmf.gmf_u.weight.detach().cpu().numpy(),
get_name('gmf_v'): self.nnmf.gmf_v.weight.detach().cpu().numpy()})
inp = np.expand_dims(np.linspace(-1, 1), axis=1)
out = self.nnmf.threshold_mlp.forward(torch.from_numpy(inp).to(self.device).float()).detach().cpu().numpy()
data_dict.update({'threshold_fun' + ep + '.npy': out})
return data_dict
def get_valve_image(self, idx, initialization):
valve_idx = int(list(self.valve_gt[idx].keys())[0]) - 1
valve_values = list(self.valve_gt[idx].values())[0]
frame = np.squeeze(self.matrix3d[..., valve_idx])
if initialization:
valve_pred = np.zeros(shape=valve_values.shape)
else:
valve_pred = np.squeeze(self.valve[..., valve_idx])
valve_image = np.clip(np.dstack([0.75 * frame + valve_pred,
0.75 * frame,
0.75 * frame + valve_values]), a_min=0, a_max=1)
return valve_image
| 48.590308
| 125
| 0.582185
|
f240604d68729dbbbab7bf9c7338ae8223c95211
| 891
|
py
|
Python
|
challenges/print_level_order/test_print_level_order.py
|
seattlechem/data-structures-and-algorithms
|
376e465c0a5529ea7c5c4e972a9852b6340251ff
|
[
"MIT"
] | null | null | null |
challenges/print_level_order/test_print_level_order.py
|
seattlechem/data-structures-and-algorithms
|
376e465c0a5529ea7c5c4e972a9852b6340251ff
|
[
"MIT"
] | null | null | null |
challenges/print_level_order/test_print_level_order.py
|
seattlechem/data-structures-and-algorithms
|
376e465c0a5529ea7c5c4e972a9852b6340251ff
|
[
"MIT"
] | null | null | null |
"""Print level order test."""
from .print_level_order import print_level_order
import pytest
def test_print_level_order_true(small_tree):
"""Confirm if true for level order print."""
result = print_level_order(small_tree)
assert result == '5 \n 9 3 \n '
def test_level_print_large_tree(large_tree):
"""Confirm the result of level_order_print of large_tree."""
result = print_level_order(large_tree)
assert result == '12 \n 9 2 11 \n 1 3 99 \n 13 14 \n '
def test_type_error():
"""Confirm if error is thrown if input is not K-ary Tree."""
with pytest.raises(TypeError) as err:
print_level_order(34)
    assert 'Input must be K-ary Tree.' in str(err.value)
def test_value_error():
"""Confirm if error is thrown if input is missing."""
with pytest.raises(ValueError) as err:
print_level_order()
    assert 'Input is missing.' in str(err.value)
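def test_type_error_string_input():
    """Hedged extra check (not in the original suite): a plain string is not a
    K-ary tree either, so it should also raise TypeError."""
    with pytest.raises(TypeError):
        print_level_order('not a tree')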
| 29.7
| 64
| 0.690236
|
ab323493cebe8b6bd7431270a57b1a15b093040f
| 11,369
|
py
|
Python
|
bitstream/decoder/cb_translate.py
|
mfkiwl/NEM-Relay-CGRA
|
1ad59b83b9a61ee56da43e5491a95d2f4e6c2ac4
|
[
"BSD-3-Clause"
] | 2
|
2021-03-28T08:03:01.000Z
|
2021-09-19T08:10:02.000Z
|
bitstream/decoder/cb_translate.py
|
mfkiwl/NEM-Relay-CGRA
|
1ad59b83b9a61ee56da43e5491a95d2f4e6c2ac4
|
[
"BSD-3-Clause"
] | null | null | null |
bitstream/decoder/cb_translate.py
|
mfkiwl/NEM-Relay-CGRA
|
1ad59b83b9a61ee56da43e5491a95d2f4e6c2ac4
|
[
"BSD-3-Clause"
] | 2
|
2021-03-28T08:02:57.000Z
|
2021-11-06T05:14:17.000Z
|
#!/usr/bin/python
import sys
import re
import traceback
VERBOSE = False
TEST = False
# Test of pass by reference
# def modify(foo):
# foo["bar"] = 'baz'
#
# foo = {}
# modify(foo)
# print foo
# sys.exit(0)
def main():
if TEST:
VERBOSE = True
test_cbfix()
sys.exit(0)
# inputstream = sys.stdin; input_lines = inputstream; # for line in inputstream:
# input_lines = (); input_lines = sys.stdin;
input_lines = [] # for line in sys.stdin: input_lines.append(line)
for line in sys.stdin: input_lines.append(line)
print "################################################################"
print "# PRE-PASS to build cb, sb dictionaries"
print "################################################################"
# cb_config and cb_annote are dictionaries of strings
# containing cb config and annote strings e.g.
# cb_config["00020003"] = "01230456"
# cb_annote["00020003"] = "# connect a to foo"
# sb_config and sb_annote are dictionaries of lists of strings
# containing sb config and annote strings
# sb_config["00050007"] = ("02020000","00001100")
# sb_annote["00050007"] = ("# annote1...", "# annote2...")
cb_config = {}; cb_annote = {}
sb_config = {}; sb_annote = {}
for line in input_lines:
# print 'foo'+line
fix_cb(line,
cb_config, cb_annote,
sb_config, sb_annote,
DBG=1)
print "################################################################"
print "# Now use cb, sb dictionaries to translate v0 => v1"
print "################################################################"
for line in input_lines:
line = line.strip() # YES
# (reg,feature,tile,data) = parse_bs_line(line)
(addr, data) = parse_bs_line(line)
(reg,feature,tile) = parse_bs_addr(addr)
if addr in cb_config:
print "# %s" % line
print "# Found addr '%s' in CB %s" % (addr, cb_config)
print "%s %s" % (addr, cb_config[addr])
print "# "
del cb_config[addr]
elif addr in sb_config:
print "# %s" % line
# print "# Oh boy get to merge addr '%s' into SB %s" % (addr, sb_config)
print "# Oh boy found existing sb route(s) %s" % sb_config[addr]
newdata= merge_sbroutes( [data] + sb_config[addr])
print "%s %s" % (addr, newdata)
print "# "
del sb_config[addr]
else:
print line
print ""
print "# LEFTOVERS"
# print "CB: %s" % cb_config
print "# SB: %s" % sb_config
print "# "
for addr in sb_config:
newdata= merge_sbroutes(sb_config[addr])
print "%s %s" % (addr, newdata)
def merge_sbroutes(sb_list):
sb_merged = 0
for rte in sb_list:
sb_merged = sb_merged | int(rte,16)
sb_merged = "%08X" % sb_merged
print "# %s merged to %s" % (sb_list, sb_merged)
return sb_merged
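# Hedged worked example of the OR-merge above (route words invented):
#   merge_sbroutes(["00010000", "00000400"]) returns "00010400"
#   because 0x00010000 | 0x00000400 == 0x00010400.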
def fix_cb(line,
cb_config, cb_annote,
sb_config, sb_annote,
DBG=0):
line = line.strip() # YES
# (reg,feature,tile,data) = parse_bs_line(line)
(addr, data) = parse_bs_line(line)
(reg,feature,tile) = parse_bs_addr(addr)
# Only interested in a, b connection boxes
# (Elsewhere, should have checked to make sure this is PE and not mem tile)
if feature != "02" and feature != "03": return False
# Translate!
# (cb_addr, cb_data, sbnew, sbstring) = fix_cba(line,DBG)
(cb_addr, cb_data, sb_addr, sb_data) = fix_cba(line,DBG)
cb_config[cb_addr] = cb_data
try:
sb_config[sb_addr].append(sb_data)
except:
sb_config[sb_addr] = []
sb_config[sb_addr].append(sb_data)
print "CB: %s" % cb_config
print "SB: %s" % sb_config
print ""
# This should be in decode library I guess
# and/or why not a single line
# (reg,feature,tile,data) = myparse(line, "(..)(..)(....) (........)", 4)
def parse_bs_line(line):
line = line.strip() # Why not
parse = re.search("(........) (........)", line)
if parse:
addr = parse.group(1)
data = parse.group(2)
return (addr,data)
else:
print "ERROR bad bitstream line:\n%s" % line
sys.stdout.flush(); traceback.print_stack(); sys.stderr.flush()
sys.exit(-1)
def parse_bs_addr(addr):
parse = re.search("(..)(..)(....)", addr)
if parse:
reg = parse.group(1)
feature = parse.group(2)
tile = parse.group(3)
return (reg,feature,tile)
else:
print "ERROR bad bitstream address:\n%s" % line
sys.stdout.flush(); traceback.print_stack(); sys.stderr.flush()
sys.exit(-1)
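# Hedged worked example of the two parsers above (address invented):
#   parse_bs_line("FF020007 00000008") -> ("FF020007", "00000008")
#   parse_bs_addr("FF020007") -> ("FF", "02", "0007")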
def fix_cba(line, DBG=0):
'''Connection box 'a' input'''
# (reg,feature,tile,data) = parse_bs_line(line)
(addr, data) = parse_bs_line(line)
(reg,feature,tile) = parse_bs_addr(addr)
################################################################
# Feature == "02" => op_a_in
# OLD (v0):
# xx02xxxx 0000 000[01234] # a <= in_s1t[01234]
# xx02xxxx 0000 000[56789] # a <= in_s3t[01234]
if feature == "02": (inside_lo,inside_hi) = (1,3)
# NEW (v1):
# xx02xxxx 0000 000[56789] # a <= out_s2t[01234]
if feature == "02": out_side = 2
# xx05xxxx 0010 0000 # out_s2t0 <= in_s1t0 [21:20]==1
# xx05xxxx 0040 0000 # out_s2t1 <= in_s1t1 [23:22]==1
# xx05xxxx 0100 0000 # out_s2t2 <= in_s1t2 [25:24]==1
# xx05xxxx 0400 0000 # out_s2t3 <= in_s1t3 [27:26]==1
# xx05xxxx 1000 0000 # out_s2t4 <= in_s1t4 [29:28]==1
# xx05xxxx 0020 0000 # out_s2t0 <= in_s3t0 [21:20]==2
# xx05xxxx 0080 0000 # out_s2t1 <= in_s3t1 [23:22]==2
# xx05xxxx 0200 0000 # out_s2t2 <= in_s3t2 [25:24]==2
# xx05xxxx 0800 0000 # out_s2t3 <= in_s3t3 [27:26]==2
# xx05xxxx 2000 0000 # out_s2t4 <= in_s3t4 [29:28]==2
################################################################
# Feature == "03" => op_b_in
# OLD:
# xx03xxxx 0000 000[01234] # b <= in_s0t[01234]
# xx03xxxx 0000 000[56789] # b <= in_s2t[01234]
if feature == "03": (inside_lo,inside_hi) = (0, 2)
# NEW:
# xx03xxxx 0000 000[56789] # b <= out_s1t[01234]
if feature == "03": out_side = 1
# xx05xxxx 0000 0000
#
# xx05xxxx 0000 0000 # out_s1t0 <= in_s0t0 [11:10]==0
# xx05xxxx 0000 0000 # out_s1t1 <= in_s0t1 [13:12]==0
# xx05xxxx 0000 0000 # out_s1t2 <= in_s0t2 [15:14]==0
# xx05xxxx 0000 0000 # out_s1t3 <= in_s0t3 [17:16]==0
# xx05xxxx 0000 0000 # out_s1t4 <= in_s0t4 [19:18]==0
# xx05xxxx 0000 0400 # out_s1t0 <= in_s2t0 [11:10]==1
# xx05xxxx 0000 1000 # out_s1t1 <= in_s2t1 [13:12]==1
# xx05xxxx 0000 4000 # out_s1t2 <= in_s2t2 [15:14]==1
# xx05xxxx 0001 0000 # out_s1t3 <= in_s2t3 [17:16]==1
# xx05xxxx 0004 0000 # out_s1t4 <= in_s2t4 [19:18]==1
sel = int(data, 16) # O.k. to use all 32 bits
track = sel%5
# v0: 'a' originally came from inwire on side 1 or 3 as indicated by sel bits
# v0: 'b' originally came from inwire on side 0 or 2 as indicated by sel bits
# if sel < 5: in_side = 1
# else: in_side = 3
if sel < 5: in_side = inside_lo
else: in_side = inside_hi
# v1 step 1: change cb so 'a' input comes from out_s2 (feature 02)
# and 'b' input comes from out_s1 (feature 03)
cb_addr = "%s%s%s" % (reg,feature,tile)
cb_data = "0000000%d" % (track+5)
# v1 step 2: tell sb to connect orig inwire to out_s2t<track> instead of 'a'
# or to out_s1t<track> instead of 'b'
(sel,shiftamt,sbstring) = sbconnect(in_side, out_side, track)
# sbnew = "%s%s%s %08x" % (reg,"05",tile, sel<<shiftamt)
sb_addr = "%s%s%s" % (reg,"05",tile)
sb_data = "%08x" % (sel<<shiftamt)
VERBOSE = True
if (VERBOSE):
# What we did. E.g.
#
# BEFORE: xx030007 00000008 # b <= in_s2t3
# AFTER: xx030007 00000008 # b <= out_s1t3
# xx050007 00010000 # out_s1t3 <= in_s2t3 ([17:16]=1)
if feature == '02': op='a'
else: op='b'
inwire = "%s <= in_s%dt%d" % (op, in_side, track)
outwire = "%s <= out_s%dt%d" % (op, out_side, track)
cbnew = "%s %s" % (cb_addr, cb_data)
sbnew = "%s %s" % (sb_addr, sb_data)
print "# BEFORE: %s # %s" % (line, inwire)
print "# AFTER: %s # %s" % (cbnew, outwire)
print "# %s # %s" % (sbnew, sbstring)
print ""
return(cb_addr, cb_data, sb_addr, sb_data)
def sbconnect(in_side, out_side, track):
'''
Build a switchbox that connects in_s<in_side>t<track> to out_s<out_side>t<track>
'''
# (input b)
# For now only needs to work for output sides 1 and 2
if out_side == 1:
shiftamt = 10 + 2*track # output to side 1 (out_s1)
# For now only needs to work for input sides 0 and 2
if in_side == 0: sel = 0
elif in_side == 2: sel = 1
else: print "ERROR in_side=%d when out_side=1" % in_side
# (input a)
# For now only needs to work for output sides 1 and 2
elif out_side == 2:
shiftamt = 20 + 2*track # output to side 2 (out_s2)
# For now only needs to work for input sides 1 and 3
if in_side == 1: sel = 1
elif in_side == 3: sel = 2
else: print "ERROR in_side=%d when out_side=2" % in_side
else: print "ERROR out_side=%d" % in_side
# For debug purposes, make a text string e.g.
# "out_s2t3 <= in_s2t3 ([17:16]=1)"
#
connection = "out_s%dt%d <= in_s%dt%d ([%d:%d]=%d)" \
% (out_side, track, in_side, track, shiftamt+1, shiftamt, sel)
return (sel, shiftamt, connection)
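# Hedged worked example (matches the BEFORE/AFTER comments in fix_cba):
#   sbconnect(in_side=2, out_side=1, track=3) gives shiftamt = 10 + 2*3 = 16
#   and sel = 1, so the switchbox word is 1 << 16 = 0x00010000,
#   i.e. "out_s1t3 <= in_s2t3 ([17:16]=1)".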
# NOTE MUST ADD 0005<TTTT> to a watchlist now!!!
# Keep a list of all switchboxes used;
# keep a list of all switchboxes modified;
# make sure the two lists have NO COMMON TILES.
def test_cbfix():
x = {} # dummy dict
fix_cb("xx020007 00000000", x, x, x, x, DBG=1)
fix_cb("xx020007 00000001", x, x, x, x, DBG=1)
fix_cb("xx020007 00000002", x, x, x, x, DBG=1)
fix_cb("xx020007 00000003", x, x, x, x, DBG=1)
fix_cb("xx020007 00000004", x, x, x, x, DBG=1)
fix_cb("xx020007 00000005", x, x, x, x, DBG=1)
fix_cb("xx020007 00000006", x, x, x, x, DBG=1)
fix_cb("xx020007 00000007", x, x, x, x, DBG=1)
fix_cb("xx020007 00000008", x, x, x, x, DBG=1)
fix_cb("xx020007 00000009", x, x, x, x, DBG=1)
print "##########################################################################"
fix_cb("xx030007 00000000", x, x, x, x, DBG=1)
fix_cb("xx030007 00000001", x, x, x, x, DBG=1)
fix_cb("xx030007 00000002", x, x, x, x, DBG=1)
fix_cb("xx030007 00000003", x, x, x, x, DBG=1)
fix_cb("xx030007 00000004", x, x, x, x, DBG=1)
fix_cb("xx030007 00000005", x, x, x, x, DBG=1)
fix_cb("xx030007 00000006", x, x, x, x, DBG=1)
fix_cb("xx030007 00000007", x, x, x, x, DBG=1)
fix_cb("xx030007 00000008", x, x, x, x, DBG=1)
fix_cb("xx030007 00000009", x, x, x, x, DBG=1)
main()
| 33.340176
| 86
| 0.53857
|
de579c3ca2ca0aa2feed1de6fa986e945e92028e
| 489
|
py
|
Python
|
easy/202.py
|
oneTaken/leetcode
|
f9357d839ac8fa6333b0d7eeb2028ba28a63764c
|
[
"Apache-2.0"
] | null | null | null |
easy/202.py
|
oneTaken/leetcode
|
f9357d839ac8fa6333b0d7eeb2028ba28a63764c
|
[
"Apache-2.0"
] | null | null | null |
easy/202.py
|
oneTaken/leetcode
|
f9357d839ac8fa6333b0d7eeb2028ba28a63764c
|
[
"Apache-2.0"
] | null | null | null |
class Solution:
    def isHappy(self, n):
        """
        :type n: int
        :rtype: bool
        """
        seen = set()
        # Sum of the squares of the decimal digits of x.
        digit_square_sum = lambda x: sum(int(d) * int(d) for d in str(x))
        # Iterate until we reach 1 (happy) or revisit a value, which
        # means the sequence is stuck in a cycle (not happy).
        while n != 1:
            if n in seen:
                return False
            seen.add(n)
            n = digit_square_sum(n)
        return True
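# A minimal alternative sketch (illustrative only, not part of the original
# submission): Floyd's tortoise-and-hare cycle detection solves the same
# problem in O(1) extra space, since the digit-square sequence either
# reaches 1 or enters a cycle.
def is_happy_floyd(n):
    def step(x):
        return sum(int(d) * int(d) for d in str(x))
    slow, fast = n, step(n)
    while fast != 1 and slow != fast:
        slow = step(slow)
        fast = step(step(fast))
    return fast == 1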
avg_line_length: 23.285714 | max_line_length: 53 | alphanum_fraction: 0.400818

hexsha: 9886b6d2b7773026172cda2d30f08709afc7d734 | size: 191401 | ext: py | lang: Python
repo: stungkit/pytorch | path: test/test_cuda.py | head: 0f05e398705bf15406bce79f7ee57d3935ad2abd | licenses: ["Intel"]
max_stars_count: null | max_issues_count: 1 (2022-01-10T18:39:28.000Z to 2022-01-10T19:15:57.000Z) | max_forks_count: 1 (2022-03-26T14:42:50.000Z to 2022-03-26T14:42:50.000Z)
# Owner(s): ["module: cuda"]
from itertools import repeat, chain, product
from typing import NamedTuple
import collections
import contextlib
import ctypes
import gc
import io
import os
import pickle
import queue
import sys
import tempfile
import threading
import unittest
import torch
import torch.cuda
import torch.cuda.comm as comm
from torch.nn.parallel import scatter_gather
from torch.utils.checkpoint import checkpoint_sequential
from torch._six import inf, nan
from torch.testing._internal.common_methods_invocations import tri_tests_args, tri_large_tests_args, \
_compare_trilu_indices, _compare_large_trilu_indices
from torch.testing._internal.common_utils import TestCase, freeze_rng_state, run_tests, \
NO_MULTIPROCESSING_SPAWN, skipIfRocm, load_tests, IS_REMOTE_GPU, IS_SANDCASTLE, IS_WINDOWS, \
slowTest, skipCUDANonDefaultStreamIf, skipCUDAMemoryLeakCheckIf, TEST_WITH_ROCM, TEST_NUMPY, \
get_cycles_per_ms
from torch.testing._internal.autocast_test_lists import AutocastTestLists
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# We cannot import TEST_CUDA and TEST_MULTIGPU from torch.testing._internal.common_cuda here,
# because if we do that, the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed
# multiple times as well during the execution of this test suite, and it will
# cause CUDA OOM error on Windows.
TEST_CUDA = torch.cuda.is_available()
TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
if not TEST_CUDA:
print('CUDA not available, skipping tests', file=sys.stderr)
TestCase = object # noqa: F811
TEST_LARGE_TENSOR = TEST_CUDA
TEST_MEDIUM_TENSOR = TEST_CUDA
TEST_CUDNN = TEST_CUDA
TEST_BF16 = False
if TEST_CUDA:
torch.ones(1).cuda() # initialize cuda context
TEST_CUDNN = TEST_CUDA and (TEST_WITH_ROCM or
torch.backends.cudnn.is_acceptable(torch.tensor(1., device=torch.device('cuda:0'))))
TEST_LARGE_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 12e9
TEST_MEDIUM_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 6e9
TEST_BF16 = torch.cuda.is_bf16_supported()
def make_sparse_tensor(t, n, *sizes):
assert t.is_sparse
tensor = t()
i = tensor._indices()
i = i.new(len(sizes), n).copy_(
torch.cat([torch.LongTensor(1, n).random_(s) for s in sizes], 0))
v = tensor._values()
v = v.new(n).copy_(torch.randn(n))
return t(i, v, torch.Size(sizes)).coalesce()
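# A minimal usage sketch (illustrative only; the helper name is hypothetical
# and a CUDA device is assumed to be available, as the tests below assume):
# build a 3x4 sparse CUDA tensor with 5 random non-zeros and densify it.
def _demo_make_sparse_tensor():
    s = make_sparse_tensor(torch.cuda.sparse.FloatTensor, 5, 3, 4)
    return s.to_dense()  # dense (3, 4) tensor with at most 5 non-zeros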
_cycles_per_ms = None
class TestCuda(TestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
FIFTY_MIL_CYCLES = 50000000
def setUp(self):
super(TestCuda, self).setUp()
self.autocast_lists = AutocastTestLists(torch.device('cuda:0'))
def tearDown(self):
del self.autocast_lists
super(TestCuda, self).tearDown()
def _check_memory_stat_consistency(self):
snapshot = torch.cuda.memory_snapshot()
expected_each_device = collections.defaultdict(lambda: collections.defaultdict(int))
for segment in snapshot:
expected = expected_each_device[segment["device"]]
pool_str = segment["segment_type"] + "_pool"
expected["segment.all.current"] += 1
expected["segment." + pool_str + ".current"] += 1
expected["allocated_bytes.all.current"] += segment["allocated_size"]
expected["allocated_bytes." + pool_str + ".current"] += segment["allocated_size"]
expected["reserved_bytes.all.current"] += segment["total_size"]
expected["reserved_bytes." + pool_str + ".current"] += segment["total_size"]
expected["active_bytes.all.current"] += segment["active_size"]
expected["active_bytes." + pool_str + ".current"] += segment["active_size"]
is_split = len(segment["blocks"]) > 1
for block in segment["blocks"]:
if block["state"] == "active_allocated":
expected["allocation.all.current"] += 1
expected["allocation." + pool_str + ".current"] += 1
if block["state"].startswith("active_"):
expected["active.all.current"] += 1
expected["active." + pool_str + ".current"] += 1
if block["state"] == "inactive" and is_split:
expected["inactive_split.all.current"] += 1
expected["inactive_split." + pool_str + ".current"] += 1
expected["inactive_split_bytes.all.current"] += block["size"]
expected["inactive_split_bytes." + pool_str + ".current"] += block["size"]
for device, expected in expected_each_device.items():
stats = torch.cuda.memory_stats(device)
for k, v in expected.items():
self.assertEqual(v, stats[k])
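    # A minimal sketch (illustrative only; not used by the tests, the method
    # name is hypothetical): the stat keys validated above follow a
    # "<metric>.<pool>.current" naming scheme, where <pool> is "all",
    # "small_pool", or "large_pool".
    def _demo_print_memory_stats(self, device=None):
        stats = torch.cuda.memory_stats(device)
        for metric in ("allocation", "segment", "allocated_bytes", "reserved_bytes"):
            for pool in ("all", "small_pool", "large_pool"):
                key = "%s.%s.current" % (metric, pool)
                print(key, stats.get(key))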
@staticmethod
def _test_memory_stats_generator(self, device=None, N=35):
if device is None:
device = torch.cuda.current_device()
m0 = torch.cuda.memory_allocated(device)
last_m_arr = [torch.cuda.memory_allocated(device)]
max_m_arr = [torch.cuda.max_memory_allocated(device)]
last_r_arr = [torch.cuda.memory_reserved(device)]
max_r_arr = [torch.cuda.max_memory_reserved(device)]
def alloc(*size):
with torch.cuda.device(device):
# NOTE: do **not** use methods that can have additional
# memory overhead, e.g., inplace random sampling methods.
            # They can leave some memory occupied even after being
# deallocated, e.g., initialized RNG state, causing some
# memory checks below to fail.
return torch.cuda.FloatTensor(*size)
def assert_change(comp=1, empty_cache=False, reset_peak=False):
# comp > 0: increased
# comp = 0: equal
# comp < 0: decreased
new_m = torch.cuda.memory_allocated(device)
new_max_m = torch.cuda.max_memory_allocated(device)
if comp > 0:
self.assertGreater(new_m, last_m_arr[0])
elif comp < 0:
self.assertLess(new_m, last_m_arr[0])
else:
self.assertEqual(new_m, last_m_arr[0])
self.assertLessEqual(new_m, new_max_m)
self.assertGreaterEqual(new_max_m, max_m_arr[0])
last_m_arr[0] = new_m
max_m_arr[0] = new_max_m
new_r = torch.cuda.memory_reserved(device)
new_max_r = torch.cuda.max_memory_reserved(device)
            # emptying cache may happen (due to allocation or empty_cache), so
            # we can't assert new_r >= last_r
self.assertLessEqual(new_r, new_max_r)
self.assertGreaterEqual(new_max_r, max_r_arr[0])
last_r_arr[0] = new_r
max_r_arr[0] = new_max_r
if empty_cache:
torch.cuda.empty_cache()
new_r = torch.cuda.memory_reserved(device)
new_max_r = torch.cuda.max_memory_reserved(device)
self.assertLessEqual(new_r, last_r_arr[0])
self.assertLessEqual(new_r, new_max_r)
self.assertEqual(new_max_r, max_r_arr[0])
last_r_arr[0] = new_r
if reset_peak:
torch.cuda.reset_peak_memory_stats(device)
self.assertEqual(torch.cuda.memory_allocated(device), last_m_arr[0])
self.assertEqual(torch.cuda.max_memory_allocated(device), last_m_arr[0])
max_m_arr[0] = last_m_arr[0]
self.assertEqual(torch.cuda.memory_reserved(device), last_r_arr[0])
self.assertEqual(torch.cuda.max_memory_reserved(device), last_r_arr[0])
max_r_arr[0] = last_r_arr[0]
assert_change(0)
assert_change(0, reset_peak=True)
assert_change(0, empty_cache=True)
assert_change(0, reset_peak=True)
assert_change(0)
yield
tensors1 = [alloc(1), alloc(10, 20), alloc(200, 300, 2000)]
m1 = torch.cuda.memory_allocated(device)
assert_change(1)
yield
tensors2 = []
for i in range(1, int(N / 2) + 1):
# small ones
tensors2.append(alloc(i, i * 4))
assert_change(1)
yield
for i in range(5, int(N / 2) + 5):
# large ones
tensors2.append(alloc(i, i * 7, i * 9, i * 11))
assert_change(1, reset_peak=(i % 2 == 0))
yield
tensors2.append(alloc(0, 0, 0))
assert_change(0)
yield
permute = []
for i in torch.randperm(len(tensors2)):
permute.append(tensors2[i])
assert_change(0)
yield
del tensors2
assert_change(0)
yield
tensors2 = permute
assert_change(0)
yield
del permute
assert_change(0, reset_peak=True)
yield
for i in range(int(N / 2)):
x = tensors2[i].numel()
del tensors2[i]
assert_change(-x) # in case that tensors2[i] is empty
yield
for i in range(2, int(2 * N / 3) + 2):
tensors2.append(alloc(i, i * 3, i * 8))
assert_change(1)
yield
del tensors2
assert_change(-1, reset_peak=True)
assert_change(0)
self.assertEqual(torch.cuda.memory_allocated(device), m1)
yield True
del tensors1
assert_change(-1, reset_peak=True)
self.assertEqual(torch.cuda.memory_allocated(device), m0)
# test empty_cache and reset_peak
assert_change(0, empty_cache=True)
assert_change(0, reset_peak=True)
def test_cudart_register(self):
t = torch.ones(20)
self.assertFalse(t.is_pinned())
cudart = torch.cuda.cudart()
r = cudart.cudaHostRegister(t.data_ptr(), t.numel() * t.element_size(), 0)
self.assertEqual(r, 0)
self.assertTrue(t.is_pinned())
r = cudart.cudaHostUnregister(t.data_ptr())
self.assertEqual(r, 0)
self.assertFalse(t.is_pinned())
def test_memory_stats(self):
gc.collect()
torch.cuda.empty_cache()
for _ in self._test_memory_stats_generator(self):
self._check_memory_stat_consistency()
def test_memory_allocation(self):
gc.collect()
torch.cuda.empty_cache()
mem = None
size = 1
prev = 0
try:
prev = torch.cuda.memory_allocated()
mem = torch.cuda.caching_allocator_alloc(size)
self.assertGreater(torch.cuda.memory_allocated(), prev)
finally:
if mem is not None:
torch.cuda.caching_allocator_delete(mem)
self.assertEqual(torch.cuda.memory_allocated(), prev)
def test_check_error(self):
# Assert this call doesn't raise.
torch.cuda.check_error(0)
with self.assertRaisesRegex(torch.cuda.CudaError,
"out of memory|hipErrorOutOfMemory"):
torch.cuda.check_error(2)
def test_cuda_get_device_name(self):
# Testing the behaviour with None as an argument
current_device = torch.cuda.current_device()
current_device_name = torch.cuda.get_device_name(current_device)
device_name_None = torch.cuda.get_device_name(None)
self.assertEqual(current_device_name, device_name_None)
# Testing the behaviour for No argument
device_name_no_argument = torch.cuda.get_device_name()
self.assertEqual(current_device_name, device_name_no_argument)
def test_cuda_get_device_capability(self):
# Testing the behaviour with None as an argument
current_device = torch.cuda.current_device()
current_device_capability = torch.cuda.get_device_capability(current_device)
device_capability_None = torch.cuda.get_device_capability(None)
self.assertEqual(current_device_capability, device_capability_None)
# Testing the behaviour for No argument
device_capability_no_argument = torch.cuda.get_device_capability()
self.assertEqual(current_device_capability, device_capability_no_argument)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_stats_multigpu(self):
        # advance a generator with an end flag
def advance(gen, end):
if not end:
try:
next(gen)
except StopIteration:
end = True
return end
# interlace
torch.cuda.empty_cache()
gen0 = self._test_memory_stats_generator(self, device='cuda:0', N=35)
gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
end0 = end1 = False
while not (end0 and end1):
end0 = advance(gen0, end0)
end1 = advance(gen1, end1)
# semi-random order
torch.cuda.empty_cache()
gen0 = self._test_memory_stats_generator(self, device=0, N=35)
gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
end0 = end1 = False
while not (end0 and end1):
end0 = advance(gen0, end0)
if not end0:
gen1_max_times = torch.LongTensor(1).random_(0, 3)[0]
else:
gen1_max_times = inf
t = 0
while t < gen1_max_times and not end1:
end1 = advance(gen1, end1)
t += 1
def test_out_of_memory(self):
tensor = torch.zeros(1024, device='cuda')
with self.assertRaisesRegex(RuntimeError, "Tried to allocate 800000000.00 GiB"):
torch.empty(1024 * 1024 * 1024 * 800000000, dtype=torch.int8, device='cuda')
with self.assertRaisesRegex(RuntimeError, "Tried to allocate more than 1EB memory"):
torch.empty(1024 * 1024 * 1024 * 8000000000, dtype=torch.int8, device='cuda')
# ensure out of memory error doesn't disturb subsequent kernel
tensor.fill_(1)
self.assertTrue((tensor == 1).all())
def test_set_per_process_memory_fraction(self):
# test invalid fraction value.
with self.assertRaisesRegex(TypeError, "Invalid type"):
torch.cuda.set_per_process_memory_fraction(int(1))
with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
torch.cuda.set_per_process_memory_fraction(-0.1)
with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
torch.cuda.set_per_process_memory_fraction(2.0)
tensor = torch.zeros(1024, device='cuda')
torch.cuda.empty_cache()
total_memory = torch.cuda.get_device_properties(0).total_memory
torch.cuda.set_per_process_memory_fraction(0.5, 0)
# test 0.499 allocation is ok.
application = int(total_memory * 0.499) - torch.cuda.max_memory_reserved()
tmp_tensor = torch.empty(application, dtype=torch.int8, device='cuda')
del tmp_tensor
torch.cuda.empty_cache()
application = int(total_memory * 0.5)
        # allocating more than half of the device memory will OOM.
with self.assertRaisesRegex(RuntimeError, "out of memory"):
torch.empty(application, dtype=torch.int8, device='cuda')
# ensure out of memory error doesn't disturb subsequent kernel
tensor.fill_(1)
self.assertTrue((tensor == 1).all())
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_autogpu(self):
x = torch.randn(5, 5).cuda()
y = torch.randn(5, 5).cuda()
self.assertEqual(x.get_device(), 0)
self.assertEqual(x.get_device(), 0)
with torch.cuda.device(1):
z = torch.randn(5, 5).cuda()
self.assertEqual(z.get_device(), 1)
q = x.add(y)
self.assertEqual(q.get_device(), 0)
w = torch.randn(5, 5).cuda()
self.assertEqual(w.get_device(), 1)
self.assertEqual(y.cuda().get_device(), 1)
z = z.cuda()
self.assertEqual(z.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_new(self):
x = torch.randn(3, 3).cuda()
self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
with torch.cuda.device(1):
self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_device(self):
x = torch.randn(5, 5).cuda()
with torch.cuda.device(1):
y = x.cuda()
self.assertEqual(y.get_device(), 1)
self.assertIs(y.cuda(), y)
z = y.cuda(0)
self.assertEqual(z.get_device(), 0)
self.assertIs(z.cuda(0), z)
x = torch.randn(5, 5)
with torch.cuda.device(1):
y = x.cuda()
self.assertEqual(y.get_device(), 1)
self.assertIs(y.cuda(), y)
z = y.cuda(0)
self.assertEqual(z.get_device(), 0)
self.assertIs(z.cuda(0), z)
def _test_copy_sync_current_stream(self, x, y):
x_plus_one = x + 1
s0 = torch.cuda.Stream(device=x.device)
s1 = torch.cuda.Stream(device=y.device)
s2 = torch.cuda.Stream(device=x.device)
s3 = torch.cuda.Stream(device=y.device)
# same dst stream different src streams
with torch.cuda.stream(s0):
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
with torch.cuda.stream(s1):
y.copy_(x_plus_one)
with torch.cuda.stream(s2), torch.cuda.stream(s1):
y.copy_(x)
s1.synchronize()
# The copy() is synchronized on the current streams of both src and dst.
# In the above test, the _sleep() op on s0 will not block the copy() on
# s2, but both copies are synchronized on s1 in the dst device. Hence,
# x is copied to y after x_plus_one is copied to y. If x and y are on
# the same device, both copy() ops are synchronized on s1.
self.assertEqual(y, x)
# same src stream different dst streams
with torch.cuda.stream(s1):
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
with torch.cuda.stream(s0):
y.copy_(x_plus_one)
with torch.cuda.stream(s3), torch.cuda.stream(s0):
y.copy_(x)
s0.synchronize()
# Similarly, both copy() ops are synchronized on s0.
self.assertEqual(y, x)
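    # A minimal sketch of the rule the comments above describe (illustrative
    # only; assumes x and y live on the same device, method name is
    # hypothetical): enqueue both copies on one destination stream, and the
    # later one deterministically wins.
    def _demo_dst_stream_ordering(self, x, y):
        dst = torch.cuda.Stream(device=y.device)
        with torch.cuda.stream(dst):
            y.copy_(x + 1)  # enqueued first on dst
            y.copy_(x)      # enqueued second on dst; this value survives
        dst.synchronize()
        self.assertEqual(y, x)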
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_streams(self):
d0 = torch.device('cuda:0')
x0 = torch.zeros(5, 5, device=d0)
d1 = torch.device('cuda:1')
x1 = torch.zeros(5, 5, device=d1)
self._test_copy_sync_current_stream(x0, x1)
x2 = torch.zeros(5, 5, device=d0)
self._test_copy_sync_current_stream(x0, x2)
def test_copy_non_blocking(self):
def _test_copy_non_blocking(a, b):
event = torch.cuda.Event()
a.copy_(b, non_blocking=True)
event.record()
event.synchronize()
self.assertEqual(a, b)
# 10MB copies
x = torch.ones(10000000, dtype=torch.uint8).cuda()
y = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
_test_copy_non_blocking(x, y)
x = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
y = torch.ones(10000000, dtype=torch.uint8).cuda()
_test_copy_non_blocking(x, y)
# Test the case where the pinned data_ptr is not equal to the storage data_ptr.
x_base = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
x = x_base[1:]
self.assertTrue(x.is_pinned())
self.assertTrue(x_base.is_pinned())
self.assertNotEqual(x_base.data_ptr(), x.data_ptr())
self.assertEqual(x_base.storage().data_ptr(), x.storage().data_ptr())
y = torch.ones(10000000 - 1, dtype=torch.uint8).cuda()
_test_copy_non_blocking(x, y)
def test_to_non_blocking(self):
stream = torch.cuda.current_stream()
def _test_to_non_blocking(a, non_blocking, dst):
torch.cuda.synchronize()
            # Pushes a 0.1 second spin to the stream so that, if the copy is
            # non-blocking, the stream will almost surely be active when we query().
torch.cuda._sleep(int(100 * get_cycles_per_ms()))
b = a.to(device=dst, non_blocking=non_blocking)
self.assertEqual(stream.query(), not non_blocking)
stream.synchronize()
self.assertEqual(a, b)
self.assertTrue(b.is_pinned() == (non_blocking and dst == "cpu"))
for dst, try_non_blocking in product(("cuda", "cpu"), (True, False)):
# Creates source on the opposite device from destination.
src = torch.randn(1000000,
device="cuda" if dst == "cpu" else "cpu",
pin_memory=True if dst == "cuda" else False)
_test_to_non_blocking(src, try_non_blocking, dst)
def test_to_cpu_blocking_by_default(self):
src = torch.randn(1000000, device="cuda")
torch.cuda.synchronize()
torch.cuda._sleep(int(100 * get_cycles_per_ms()))
dst = src.to(device="cpu")
self.assertEqual(torch.cuda.current_stream().query(), True)
self.assertEqual(src, dst)
self.assertFalse(dst.is_pinned())
def test_serialization_array_with_storage(self):
x = torch.randn(5, 5).cuda()
y = torch.IntTensor(2, 5).fill_(0).cuda()
q = [x, y, x, y.storage()]
with tempfile.NamedTemporaryFile() as f:
torch.save(q, f)
f.seek(0)
q_copy = torch.load(f)
self.assertEqual(q_copy, q, atol=0, rtol=0)
q_copy[0].fill_(5)
self.assertEqual(q_copy[0], q_copy[2], atol=0, rtol=0)
self.assertTrue(isinstance(q_copy[0], torch.cuda.FloatTensor))
self.assertTrue(isinstance(q_copy[1], torch.cuda.IntTensor))
self.assertTrue(isinstance(q_copy[2], torch.cuda.FloatTensor))
self.assertTrue(isinstance(q_copy[3], torch.storage._TypedStorage))
self.assertTrue(isinstance(q_copy[3]._storage, torch._UntypedStorage))
q_copy[1].fill_(10)
self.assertEqual(q_copy[3], torch.cuda.IntStorage(10).fill_(10))
def test_cublas_allow_tf32_get_set(self):
skip_tf32_cublas = 'TORCH_ALLOW_TF32_CUBLAS_OVERRIDE' in os.environ and\
int(os.environ['TORCH_ALLOW_TF32_CUBLAS_OVERRIDE'])
if skip_tf32_cublas:
self.assertTrue(torch.backends.cuda.matmul.allow_tf32)
return
orig = torch.backends.cuda.matmul.allow_tf32
self.assertEqual(torch._C._get_cublas_allow_tf32(), orig)
torch.backends.cuda.matmul.allow_tf32 = not orig
self.assertEqual(torch._C._get_cublas_allow_tf32(), not orig)
torch.backends.cuda.matmul.allow_tf32 = orig
def test_float32_matmul_precision_get_set(self):
self.assertEqual(torch.get_float32_matmul_precision(), 'highest')
skip_tf32_cublas = 'TORCH_ALLOW_TF32_CUBLAS_OVERRIDE' in os.environ and\
int(os.environ['TORCH_ALLOW_TF32_CUBLAS_OVERRIDE'])
if not skip_tf32_cublas:
self.assertFalse(torch.backends.cuda.matmul.allow_tf32)
for p in ('medium', 'high'):
torch.set_float32_matmul_precision(p)
self.assertEqual(torch.get_float32_matmul_precision(), p)
if not skip_tf32_cublas:
self.assertTrue(torch.backends.cuda.matmul.allow_tf32)
torch.set_float32_matmul_precision('highest')
self.assertEqual(torch.get_float32_matmul_precision(), 'highest')
if not skip_tf32_cublas:
self.assertFalse(torch.backends.cuda.matmul.allow_tf32)
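    # A minimal sketch (illustrative only; method name is hypothetical) of how
    # a training script would opt into TF32 via the knobs exercised above:
    # "high" lets float32 matmuls use TF32 on Ampere-class GPUs, while
    # "highest" keeps full FP32 precision.
    def _demo_opt_into_tf32(self):
        torch.set_float32_matmul_precision("high")  # matmuls may use TF32
        torch.backends.cudnn.allow_tf32 = True      # convolutions may use TF32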
def test_cublas_allow_fp16_reduced_precision_reduction_get_set(self):
orig = torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction
self.assertEqual(torch._C._get_cublas_allow_fp16_reduced_precision_reduction(), orig)
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = not orig
self.assertEqual(torch._C._get_cublas_allow_fp16_reduced_precision_reduction(), not orig)
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = orig
def test_cudnn_allow_tf32_get_set(self):
with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=False):
self.assertFalse(torch.backends.cudnn.allow_tf32)
with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=True):
self.assertTrue(torch.backends.cudnn.allow_tf32)
def test_type_conversions(self):
x = torch.randn(5, 5)
self.assertIsInstance(x.float(), torch.FloatTensor)
self.assertIsInstance(x.cuda().double(), torch.cuda.DoubleTensor)
self.assertIsInstance(x.cuda().float(), torch.cuda.FloatTensor)
self.assertIsInstance(x.cuda().float().cpu(), torch.FloatTensor)
self.assertIsInstance(x.cuda().float().cpu().int(), torch.IntTensor)
y = x.storage()
self.assertIsInstance(y.float(), torch.FloatStorage)
self.assertIsInstance(y.cuda().double(), torch.cuda.DoubleStorage)
self.assertIsInstance(y.cuda().float(), torch.cuda.FloatStorage)
self.assertIsInstance(y.cuda().float().cpu(), torch.FloatStorage)
self.assertIsInstance(y.cuda().float().cpu().int(), torch.IntStorage)
@unittest.skip("was disabled due to not enough memory, but actually it always fail")
def test_arithmetic_large_tensor(self):
x = torch.empty(2**30, device='cuda')
x.fill_(1)
self.assertEqual(x.sum(), 2**30)
x += 1
self.assertEqual(x.sum(), 2**31)
x.fill_(1)
x -= 0.5
self.assertEqual(x.sum(), 2**29)
x.fill_(1)
x *= 2
self.assertEqual(x.sum(), 2**31)
x.fill_(1)
x /= 2
self.assertEqual(x.sum(), 2**29)
def test_gather_bool(self):
t = torch.tensor([[False, True], [True, True]], device='cuda')
self.assertEqual(torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]], device='cuda')),
torch.tensor([[False, False], [True, True]], device='cuda'))
def test_torch_manual_seed_seeds_cuda_devices(self):
with freeze_rng_state():
x = torch.zeros(4, 4).float().cuda()
torch.manual_seed(2)
self.assertEqual(torch.cuda.initial_seed(), 2)
x.uniform_()
torch.manual_seed(2)
y = x.clone().uniform_()
self.assertEqual(x, y)
self.assertEqual(torch.cuda.initial_seed(), 2)
def test_manual_seed(self):
with freeze_rng_state():
x = torch.zeros(4, 4).float().cuda()
torch.cuda.manual_seed(2)
self.assertEqual(torch.cuda.initial_seed(), 2)
x.uniform_()
a = torch.bernoulli(torch.full_like(x, 0.5))
torch.cuda.manual_seed(2)
y = x.clone().uniform_()
b = torch.bernoulli(torch.full_like(x, 0.5))
self.assertEqual(x, y)
self.assertEqual(a, b)
self.assertEqual(torch.cuda.initial_seed(), 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_cat_autogpu(self):
x = torch.randn(4, 4).cuda(1)
y = torch.randn(4, 4).cuda(1)
z = torch.cat([x, y], 0)
self.assertEqual(z.get_device(), x.get_device())
@unittest.skipIf(torch.cuda.device_count() >= 10, "Loading a cuda:9 tensor")
def test_load_nonexistent_device(self):
# Setup: create a serialized file object with a 'cuda:9' restore location
tensor = torch.randn(2, device='cuda')
buf = io.BytesIO()
torch.save(tensor, buf)
# NB: this might not work in the future if serialization changes
buf = io.BytesIO(buf.getvalue().replace(b'cuda:0', b'cuda:9'))
msg = r'Attempting to deserialize object on CUDA device 9'
with self.assertRaisesRegex(RuntimeError, msg):
_ = torch.load(buf)
def test_specify_improper_device_name(self):
import os
fname = "tempfile.pt"
try:
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
torch.save([torch.nn.Parameter(torch.randn(10, 10))], fname,
_use_new_zipfile_serialization=True)
torch.load(fname, 'cuda0')
finally:
if os.path.exists(fname):
os.remove(fname)
def test_get_device_index(self):
from torch.cuda._utils import _get_device_index
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
_get_device_index('cuda0', optional=True)
with self.assertRaisesRegex(ValueError, "Expected a cuda device"):
cpu_device = torch.device('cpu')
_get_device_index(cpu_device, optional=True)
def test_serialization_array_with_empty(self):
x = [torch.randn(4, 4).cuda(), torch.cuda.FloatTensor()]
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f)
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), original.get_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap(self):
x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
def gpu_remap(storage, location):
if location == 'cuda:1':
return storage.cuda(0)
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f, map_location=gpu_remap)
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap_dict(self):
x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f, map_location={'cuda:1': 'cuda:0'})
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_storage_clone(self):
x = torch.randn(4, 4, device='cuda:1').storage()
y = x.clone()
self.assertEqual(x.get_device(), y.get_device())
for t in ['byte', 'char', 'short', 'int', 'long', 'half', 'double']:
self.assertEqual(getattr(x, t)().get_device(), x.get_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_cuda_set_device(self):
x = torch.randn(5, 5)
with torch.cuda.device(1):
self.assertEqual(x.cuda().get_device(), 1)
torch.cuda.set_device(0)
self.assertEqual(x.cuda().get_device(), 0)
with torch.cuda.device(1):
self.assertEqual(x.cuda().get_device(), 1)
self.assertEqual(x.cuda().get_device(), 0)
torch.cuda.set_device(1)
self.assertEqual(x.cuda().get_device(), 0)
def test_cuda_synchronize(self):
torch.cuda.synchronize()
torch.cuda.synchronize('cuda')
torch.cuda.synchronize('cuda:0')
torch.cuda.synchronize(0)
torch.cuda.synchronize(torch.device('cuda:0'))
if TEST_MULTIGPU:
torch.cuda.synchronize('cuda:1')
torch.cuda.synchronize(1)
torch.cuda.synchronize(torch.device('cuda:1'))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize(torch.device("cpu"))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize("cpu")
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_current_stream(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream(device=1)
s2 = torch.cuda.current_stream(device=0)
self.assertEqual(d0, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(s0, s2)
with torch.cuda.device(d1):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream(1)
s2 = torch.cuda.current_stream(d0)
self.assertEqual(d1, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(s0, s1)
with self.assertRaisesRegex(ValueError,
"Expected a cuda device, but got: cpu"):
torch.cuda.current_stream(torch.device('cpu'))
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipCUDANonDefaultStreamIf(True)
def test_default_stream(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.default_stream()
with torch.cuda.device(d1):
s1 = torch.cuda.default_stream()
s2 = torch.cuda.default_stream(device=0)
s3 = torch.cuda.default_stream(d1)
self.assertEqual(d0, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(d1, s3.device)
self.assertEqual(s0, s2)
self.assertEqual(s1, s3)
with torch.cuda.device(d0):
self.assertEqual(torch.cuda.current_stream(), s0)
with torch.cuda.device(d1):
self.assertEqual(torch.cuda.current_stream(), s1)
with self.assertRaisesRegex(ValueError,
"Expected a cuda device, but got: cpu"):
torch.cuda.default_stream(torch.device('cpu'))
@skipCUDANonDefaultStreamIf(True)
def test_streams(self):
default_stream = torch.cuda.current_stream()
user_stream = torch.cuda.Stream()
self.assertEqual(torch.cuda.current_stream(), default_stream)
self.assertNotEqual(default_stream, user_stream)
self.assertEqual(default_stream.cuda_stream, 0)
self.assertNotEqual(user_stream.cuda_stream, 0)
with torch.cuda.stream(user_stream):
self.assertEqual(torch.cuda.current_stream(), user_stream)
self.assertTrue(user_stream.query())
tensor1 = torch.ByteTensor(5).pin_memory()
tensor2 = tensor1.cuda(non_blocking=True) + 1
default_stream.synchronize()
self.assertTrue(default_stream.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_device(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
e0 = torch.cuda.Event()
self.assertEqual(None, e0.device)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.Stream()
e1 = s1.record_event()
self.assertEqual(s0.device, torch.device('cuda:0'))
self.assertEqual(e0.device, torch.device('cuda:0'))
self.assertEqual(s1.device, torch.device('cuda:1'))
self.assertEqual(e1.device, torch.device('cuda:1'))
def test_stream_event_repr(self):
s = torch.cuda.current_stream()
self.assertTrue("torch.cuda.Stream" in s.__repr__())
e = torch.cuda.Event()
self.assertTrue("torch.cuda.Event" in e.__repr__())
s.record_event(e)
self.assertTrue("torch.cuda.Event" in e.__repr__())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_context(self):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.Stream(device=1)
s2 = torch.cuda.Stream(device=0)
with torch.cuda.device(s1.device):
prev_stream_on_cuda1 = torch.cuda.current_stream()
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
with torch.cuda.stream(s1):
self.assertEqual(torch.cuda.current_stream(), s1)
self.assertEqual(1, torch.cuda.current_device())
with torch.cuda.stream(s2):
self.assertEqual(torch.cuda.current_stream(), s2)
self.assertEqual(0, torch.cuda.current_device())
with torch.cuda.stream(s0):
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
self.assertEqual(torch.cuda.current_stream(), s2)
self.assertEqual(0, torch.cuda.current_device())
self.assertEqual(torch.cuda.current_stream(), s1)
self.assertEqual(1, torch.cuda.current_device())
with torch.cuda.device(s1.device):
self.assertEqual(prev_stream_on_cuda1, torch.cuda.current_stream())
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu(self):
default_stream = torch.cuda.current_stream()
self.assertEqual(default_stream.device, torch.device('cuda:0'))
stream = torch.cuda.Stream(device=1)
self.assertEqual(stream.device, torch.device('cuda:1'))
with torch.cuda.device(1):
self.assertEqual(
torch.cuda.current_stream().device, torch.device('cuda:1'))
self.assertNotEqual(torch.cuda.current_stream(), default_stream)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_query(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
torch.cuda.synchronize(d0)
torch.cuda.synchronize(d1)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
self.assertTrue(s0.query())
self.assertFalse(s1.query())
with torch.cuda.device(d0):
self.assertTrue(s0.query())
self.assertFalse(s1.query())
with torch.cuda.device(d1):
self.assertTrue(s0.query())
self.assertFalse(s1.query())
# deliberately using a different device
with torch.cuda.device(d0):
s1.synchronize()
self.assertTrue(s0.query())
self.assertTrue(s1.query())
with torch.cuda.device(d0):
self.assertTrue(s0.query())
self.assertTrue(s1.query())
with torch.cuda.device(d1):
self.assertTrue(s0.query())
self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_eq(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream()
with torch.cuda.device(d1):
s2 = torch.cuda.current_stream()
s3 = torch.cuda.current_stream()
self.assertTrue(s0 == s0)
self.assertTrue(s0 == s1)
self.assertTrue(s2 == s2)
self.assertTrue(s2 == s3)
self.assertFalse(s0 == s2)
self.assertFalse(s1 == s3)
self.assertEqual(s0.device, s1.device)
self.assertEqual(s0.cuda_stream, s1.cuda_stream)
self.assertEqual(s2.device, s3.device)
self.assertEqual(s2.cuda_stream, s3.cuda_stream)
self.assertNotEqual(s0.device, s3.device)
self.assertEqual(hash(s0), hash(s1))
self.assertEqual(hash(s2), hash(s3))
self.assertNotEqual(hash(s0), hash(s3))
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_streams_priority(self):
low, high = torch.cuda.Stream.priority_range()
s0 = torch.cuda.Stream(device=0, priority=low)
self.assertEqual(low, s0.priority)
self.assertEqual(torch.device('cuda:0'), s0.device)
s1 = torch.cuda.Stream(device=1, priority=high)
self.assertEqual(high, s1.priority)
self.assertEqual(torch.device('cuda:1'), s1.device)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_tensor_device(self):
self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 0)
self.assertEqual(torch.cuda.FloatTensor(1, device=1).get_device(), 1)
with torch.cuda.device(1):
self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 1)
self.assertEqual(torch.cuda.FloatTensor(1, device=0).get_device(), 0)
self.assertEqual(torch.cuda.FloatTensor(1, device=None).get_device(), 1)
def test_events(self):
stream = torch.cuda.current_stream()
event = torch.cuda.Event(enable_timing=True)
self.assertTrue(event.query())
start_event = torch.cuda.Event(enable_timing=True)
stream.record_event(start_event)
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
stream.record_event(event)
self.assertFalse(event.query())
event.synchronize()
self.assertTrue(event.query())
self.assertGreater(start_event.elapsed_time(event), 0)
@staticmethod
def _stream_synchronize(self, spin_time_cycles):
s = torch.cuda.current_stream()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
e_tik.record(s)
torch.cuda._sleep(spin_time_cycles)
e_tok.record(s)
s.synchronize()
self.assertTrue(s.query())
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# exception if otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _event_synchronize(self, spin_time_cycles):
s = torch.cuda.current_stream()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
e_tik.record(s)
torch.cuda._sleep(spin_time_cycles)
s.record_event(e_tok)
e_tok.synchronize()
self.assertTrue(s.query())
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# exception if otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _event_wait(self, spin_time_cycles):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.Stream()
e_tik = torch.cuda.Event(blocking=True, enable_timing=True)
e_tok = torch.cuda.Event(blocking=True, enable_timing=True)
e_tik.record(s0)
torch.cuda._sleep(spin_time_cycles - 10)
e_sync = torch.cuda.Event(blocking=True)
e_sync.record()
e_sync.wait(s1)
with torch.cuda.stream(s1):
torch.cuda._sleep(10)
s1.synchronize()
e_tok.record()
e_tok.synchronize()
self.assertTrue(s0.query())
self.assertTrue(s1.query())
self.assertTrue(e_sync.query())
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# exception if otherwise.
return e_tik.elapsed_time(e_tok)
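    # A minimal sketch (illustrative only; method name is hypothetical) of the
    # canonical event-timing idiom the three helpers above exercise: record,
    # enqueue work, record, sync, then read the elapsed milliseconds.
    def _demo_time_gpu_work(self, work):
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        start.record()
        work()  # any GPU work enqueued on the current stream
        end.record()
        end.synchronize()  # block until 'end' has actually been reached
        return start.elapsed_time(end)  # milliseconds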
@staticmethod
def _test_stream_event_nogil(self, sync_func, p2c, c2p):
with torch.cuda.device('cuda:1'):
c2p.put(0)
p2c.get()
c2p.put(sync_func(self, TestCuda.FIFTY_MIL_CYCLES))
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_nogil(self):
for sync_func in [TestCuda._stream_synchronize,
TestCuda._event_synchronize,
TestCuda._event_wait]:
p2c = queue.Queue()
c2p = queue.Queue()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
t = threading.Thread(
target=TestCuda._test_stream_event_nogil,
args=(self, sync_func, p2c, c2p))
t.daemon = True
t.start()
c2p.get()
with torch.cuda.device('cuda:0'):
e_tik.record()
p2c.put(0)
parent_time = sync_func(self, TestCuda.FIFTY_MIL_CYCLES)
child_time = c2p.get()
e_tok.record()
e_tok.synchronize()
total_time = e_tik.elapsed_time(e_tok)
# Without GIL, synchronizations in parent and child threads can
# overlap. The total execution time should be a little bit longer
# than spinning fifty million cycles and much shorter than twice of
# that. However, testing absolute execution time is not reliable as
# it may vary on different hardware in different environments.
            # Therefore, this test uses relative comparisons, checking that the
            # sum of the parent and child threads' execution times exceeds the
            # real execution time by at least 40%.
self.assertGreater(parent_time + child_time, total_time * 1.4)
# This test is flaky for ROCm, see issue #62602
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_wait(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
torch.cuda.synchronize(d0)
torch.cuda.synchronize(d1)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
e0 = torch.cuda.Event()
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
self.assertFalse(s0.query())
self.assertTrue(s1.query())
s1.wait_event(e0)
s1.synchronize()
self.assertTrue(e0.query())
self.assertTrue(s0.query())
self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_multi_gpu_query(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e0 = s0.record_event()
s0.synchronize()
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
e1 = s1.record_event()
self.assertTrue(e0.query())
self.assertFalse(e1.query())
with torch.cuda.device(d0):
self.assertTrue(e0.query())
self.assertFalse(e1.query())
with torch.cuda.device(d1):
self.assertTrue(e0.query())
self.assertFalse(e1.query())
# deliberately using a different device
with torch.cuda.device(d0):
e1.synchronize()
self.assertTrue(e0.query())
self.assertTrue(e1.query())
with torch.cuda.device(d0):
self.assertTrue(e0.query())
self.assertTrue(e1.query())
with torch.cuda.device(d1):
self.assertTrue(e0.query())
self.assertTrue(e1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipIfRocm
def test_events_multi_gpu_elapsed_time(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e0 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(10)
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
e1 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
s1.record_event(e1)
e0.synchronize()
e1.synchronize()
with torch.cuda.device(d0):
with self.assertRaises(RuntimeError):
self.assertGreater(e0.elapsed_time(e1), 0)
with torch.cuda.device(d1):
with self.assertRaises(RuntimeError):
self.assertGreater(e0.elapsed_time(e1), 0)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e2 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
s0.record_event(e2)
s0.synchronize()
self.assertGreater(e0.elapsed_time(e2), 0)
# deliberately calling from a different device
with torch.cuda.device(d1):
self.assertGreater(e0.elapsed_time(e2), 0)
def test_record_stream(self):
cycles_per_ms = get_cycles_per_ms()
t = torch.FloatTensor([1, 2, 3, 4]).pin_memory()
result = torch.cuda.FloatTensor(t.size())
stream = torch.cuda.Stream()
ptr = [None]
# Performs the CPU->GPU copy in a background stream
def perform_copy():
with torch.cuda.stream(stream):
tmp = t.cuda(non_blocking=True)
ptr[0] = tmp.data_ptr()
torch.cuda.current_stream().wait_stream(stream)
tmp.record_stream(torch.cuda.current_stream())
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
result.copy_(tmp)
perform_copy()
with torch.cuda.stream(stream):
tmp2 = torch.cuda.FloatTensor(t.size())
tmp2.zero_()
            self.assertNotEqual(tmp2.data_ptr(), ptr[0], msg='allocation re-used too soon')
self.assertEqual(result.tolist(), [1, 2, 3, 4])
# Check that the block will be re-used after the main stream finishes
torch.cuda.current_stream().synchronize()
with torch.cuda.stream(stream):
tmp3 = torch.cuda.FloatTensor(t.size())
self.assertEqual(tmp3.data_ptr(), ptr[0], msg='allocation not re-used')
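    # A minimal sketch (illustrative only; t is assumed to be a pinned CPU
    # tensor, method name is hypothetical) of the record_stream() contract
    # verified above: after handing a tensor produced on a side stream to the
    # current stream, record_stream() keeps the caching allocator from
    # recycling its block until the current stream's pending work finishes.
    def _demo_record_stream_pattern(self, t, side_stream):
        with torch.cuda.stream(side_stream):
            tmp = t.cuda(non_blocking=True)  # allocated on side_stream
        torch.cuda.current_stream().wait_stream(side_stream)
        tmp.record_stream(torch.cuda.current_stream())  # extend tmp's lifetime
        return tmp * 2  # now safe to use on the current stream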
def test_record_stream_on_shifted_view(self):
# See issue #27366
# This test detects unexpected block reallocation. For reliable test,
# the stream to allocate tensors is isolated. The allocator will not
# reuse free blocks which were allocated from another stream.
stream_alloc = torch.cuda.Stream()
with torch.cuda.stream(stream_alloc):
base = torch.cuda.FloatTensor([10, 10])
# Record another stream on a shifted view tensor.
view = base[5:]
assert view.storage_offset() > 0
stream_record = torch.cuda.Stream()
with torch.cuda.stream(stream_record):
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
view.record_stream(stream_record)
# Delete those tensors to make the block free soon.
data_ptr = base.data_ptr()
del base, view
# A new tensor should not be allocated to the block above.
stream_alloc.synchronize()
with torch.cuda.stream(stream_alloc):
try_realloc = torch.cuda.FloatTensor([10, 10])
self.assertNotEqual(try_realloc.data_ptr(), data_ptr)
@contextlib.contextmanager
def _get_external_stream(self, device):
cudart = torch.cuda.cudart()
stream = ctypes.c_ulonglong(0)
stream_p = ctypes.POINTER(ctypes.c_void_p)(stream)
stream_p_int = ctypes.cast(stream_p, ctypes.c_void_p).value
with device:
try:
out = cudart.cudaStreamCreate(stream_p_int)
self.assertEqual(out, 0)
self.assertNotEqual(stream.value, 0)
yield stream.value
finally:
out = cudart.cudaStreamDestroy(stream.value)
self.assertEqual(out, 0)
@skipIfRocm
def test_external_streams(self):
device = torch.cuda.device(0)
with self._get_external_stream(device) as stream_v:
ext_stream = torch.cuda.ExternalStream(stream_v)
self.assertEqual(stream_v, ext_stream.cuda_stream)
self.assertEqual(ext_stream.device.index, device.idx)
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_external_streams_multi_device(self):
device = torch.cuda.device(1)
with self._get_external_stream(device) as stream_v:
ext_stream = torch.cuda.ExternalStream(
stream_v, device=device)
self.assertEqual(stream_v, ext_stream.cuda_stream)
self.assertEqual(ext_stream.device.index, device.idx)
def test_noncontiguous_pinned_memory(self):
# See issue #3266
x = torch.arange(0, 10).view((2, 5))
self.assertEqual(x.t(), x.t().pin_memory())
def test_caching_pinned_memory(self):
cycles_per_ms = get_cycles_per_ms()
# check that allocations are re-used after deletion
t = torch.FloatTensor([1]).pin_memory()
ptr = t.data_ptr()
del t
t = torch.FloatTensor([1]).pin_memory()
self.assertEqual(t.data_ptr(), ptr, msg='allocation not reused')
# check that the allocation is not re-used if it's in-use by a copy
gpu_tensor = torch.cuda.FloatTensor([0])
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
gpu_tensor.copy_(t, non_blocking=True)
del t
t = torch.FloatTensor([1]).pin_memory()
self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')
self.assertEqual(list(gpu_tensor), [1])
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_caching_pinned_memory_multi_gpu(self):
# checks that the events preventing pinned memory from being re-used
# too early are recorded on the correct GPU
cycles_per_ms = get_cycles_per_ms()
t = torch.FloatTensor([1]).pin_memory()
ptr = t.data_ptr()
gpu_tensor0 = torch.cuda.FloatTensor([0], device=0)
gpu_tensor1 = torch.cuda.FloatTensor([0], device=1)
with torch.cuda.device(1):
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
gpu_tensor1.copy_(t, non_blocking=True)
del t
t = torch.FloatTensor([2]).pin_memory()
self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')
with torch.cuda.device(0):
gpu_tensor0.copy_(t, non_blocking=True)
self.assertEqual(gpu_tensor1[0], 1)
self.assertEqual(gpu_tensor0[0], 2)
def test_caching_allocator_record_stream_oom(self):
"""allocations delayed by a record_stream call should still be freed on
an out-of-memory in cuda_malloc_retry. see issue #19219"""
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
y = torch.zeros(40 * 1024 * 1024, device='cuda')
for _ in range(100):
x = torch.empty(40 * 1024 * 1024, device='cuda')
with torch.cuda.stream(stream):
y += x
# delays re-use of `x` until after all operations in `stream`
x.record_stream(stream)
del x
# we've made a mess by allocating up to the device capacity. free any
# cached blocks in case it affects future tests.
torch.cuda.empty_cache()
# Tests for historic illegal memory access, see #17040.
def test_reduction_gpu_memory_accessing(self):
x = torch.ones(512, 8, dtype=torch.float32, device='cuda')
torch.sum(x, 0)
def test_sum_fp16(self):
x = torch.zeros(10, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(), 0)
x = torch.ones(65504, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(), 65504)
self.assertEqual(x.sum(dtype=torch.float32), 65504)
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(dtype=torch.float32), 65536)
a = torch.zeros(1203611).bernoulli_(0.0005)
x = a.to(device='cuda', dtype=torch.float16)
self.assertEqual(x.sum().item(), a.sum().item())
a = torch.zeros(100, 121, 80).bernoulli_(0.0005)
x = a.to(device='cuda', dtype=torch.float16)
self.assertEqual(x.sum((0, 2)).float().cpu(), a.sum((0, 2)))
def test_mean_fp16(self):
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.mean(), 1)
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.mean(dtype=torch.float32), 1)
def test_prod_large(self):
# tests global reduction (should_global_reduce = true) in case of non-zero identity element
x = torch.ones(240000, device='cuda', dtype=torch.float32)
self.assertEqual(x.prod(), 1)
# test for complex types. Note 240k is divisible by 4
for dtype in [torch.cfloat, torch.cdouble]:
x = torch.ones(240000, device='cuda', dtype=dtype) * (0 + 1j)
self.assertEqual(x.prod(), 1)
def test_multinomial_ext(self):
# Test two corner cases from older PyTorch (Issue #4858)
freqs = torch.cuda.FloatTensor([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.03178183361887932, 0.027680952101945877, 0.033176131546497345,
0.046052902936935425, 0.07742464542388916, 0.11543981730937958,
0.14148041605949402, 0.15784293413162231, 0.13180233538150787,
0.08271478116512299, 0.049702685326337814, 0.027557924389839172,
0.018125897273421288, 0.011851548217236996, 0.010252203792333603,
0.007422595750540495, 0.005372154992073774, 0.0045109698548913,
0.0036087757907807827, 0.0035267581697553396, 0.0018864056328311563,
0.0024605290964245796, 0.0022964938543736935, 0.0018453967059031129,
0.0010662291897460818, 0.0009842115687206388, 0.00045109697384759784,
0.0007791675161570311, 0.00020504408166743815, 0.00020504408166743815,
0.00020504408166743815, 0.00012302644609007984, 0.0,
0.00012302644609007984, 4.100881778867915e-05, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0])
torch.cuda.manual_seed(11042)
sample = torch.multinomial(freqs, 1000, True)
self.assertNotEqual(freqs[sample].min(), 0)
p = torch.zeros(3421, 2, device="cuda", dtype=torch.float)
p[:, 1] = 1
torch.cuda.manual_seed(5214)
r = torch.multinomial(p, 1)
self.assertNotEqual(r.min().item(), 0)
# test corner case from Issue #13867
torch.cuda.manual_seed(33)
probs = torch.randn(1000000, device='cuda').clamp(min=0) * 3e-5
samples = probs.multinomial(1000000, replacement=True)
self.assertGreater(probs[samples].min().item(), 0)
def _spawn_test_multinomial_invalid_probs_cuda(self, probs):
import subprocess
try:
p = subprocess.Popen([sys.executable, '-c', f"""\
import sys
import torch
from torch._six import inf, nan
try:
with torch.random.fork_rng(devices=[0]):
torch.multinomial(torch.tensor({probs}).to('cuda'), 2, replacement=True)
torch.cuda.synchronize()
sys.exit(-1) # Should not be reached
except RuntimeError as e:
sys.exit(-2)
"""], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate(timeout=10)
p.wait(timeout=10)
except subprocess.TimeoutExpired as e:
p.kill()
out, err = p.communicate()
expected_messages = [
'device-side assert triggered', # CUDA
'Assertion', # CUDA
'HSA_STATUS_ERROR_EXCEPTION', # ROCm
'Device-side assertion' # ROCm
]
self.assertTrue(any([msg in out or msg in err for msg in expected_messages]))
@slowTest
@unittest.skipIf(TEST_WITH_ROCM, "ROCm doesn't support device side asserts")
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
def test_multinomial_invalid_probs_cuda(self):
self._spawn_test_multinomial_invalid_probs_cuda([1., -1., 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., inf, 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., -inf, 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., 1., nan])
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_huge_index(self):
src = torch.empty(15000000, 45, device='cuda', dtype=torch.long).random_(0, 2**22)
idx = torch.randperm(src.shape[0], device='cuda')
res = src[idx]
res_cpu = src.cpu()[idx.cpu()]
self.assertEqual(res.cpu(), res_cpu)
def test_min_max_inits(self):
# Testing if THC_reduceAll received the correct index initialization.
# This affects the result of THC_reduceAll operations at extreme values
x = torch.cuda.ByteTensor([0])
y = torch.cuda.ByteTensor([255])
expected = torch.cuda.LongTensor([0])[0]
_, v = x.max(dim=0)
self.assertEqual(v, expected)
_, v = y.min(dim=0)
self.assertEqual(v, expected)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_get_set_rng_state_all(self):
states = torch.cuda.get_rng_state_all()
before0 = torch.cuda.FloatTensor(100, device=0).normal_()
before1 = torch.cuda.FloatTensor(100, device=1).normal_()
torch.cuda.set_rng_state_all(states)
after0 = torch.cuda.FloatTensor(100, device=0).normal_()
after1 = torch.cuda.FloatTensor(100, device=1).normal_()
self.assertEqual(before0, after0, atol=0, rtol=0)
self.assertEqual(before1, after1, atol=0, rtol=0)
def test_nvtx(self):
# Just making sure we can see the symbols
torch.cuda.nvtx.range_push("foo")
torch.cuda.nvtx.mark("bar")
torch.cuda.nvtx.range_pop()
range_handle = torch.cuda.nvtx.range_start("range_start")
torch.cuda.nvtx.range_end(range_handle)
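    # A minimal sketch (illustrative only; method name is hypothetical):
    # wrapping a callable in an NVTX range so the region shows up as a named
    # span in Nsight/nvprof timelines.
    def _demo_nvtx_region(self, fn):
        torch.cuda.nvtx.range_push("demo_region")
        try:
            return fn()
        finally:
            torch.cuda.nvtx.range_pop()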
def test_bincount_ext(self):
# ensure CUDA code coverage
input_size = (5000,)
w = torch.randn(input_size, dtype=torch.double, device='cuda')
w_cpu = w.cpu()
# test shared memory impl
t = torch.randint(50, input_size, dtype=torch.int8, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
# test multi block memory impl
# see `THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM` in SummaryOps.cu
t = torch.randint(500, input_size, dtype=torch.int64, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
# test global memory impl
# see `THRESH_NUMBER_BINS_FOR_GLOBAL_MEM` in SummaryOps.cu
t = torch.randint(2000, input_size, dtype=torch.int64, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
t = torch.zeros([10], dtype=torch.int32, device='cuda')
# 35488 * 65536 as int32 would cause overflow to negative value
# giving negative bin offset
t[0] = 35488
counted = t.bincount(minlength=65536)
self.assertEqual(torch.sum(counted), 10)
def test_tiny_half_norm_(self):
a = torch.arange(25).cuda().float()
a /= 100000000
b = a.half()
self.assertGreater(b.norm().item(), 0)
def test_norm_type_conversion(self):
a = torch.ones(65536).cuda().half()
self.assertEqual(a.norm(p=0, dtype=torch.float32), 65536)
# Verifies that mem_get_info works, including when called for a different device
def test_mem_get_info(self):
def _test(idx):
before_free_bytes, before_available_bytes = torch.cuda.mem_get_info(idx)
# increasing to 8MB to force acquiring a new block and overcome blocksize differences across platforms
t = torch.randn(1024 * 1024 * 8, device='cuda:' + str(idx))
after_free_bytes, after_available_bytes = torch.cuda.mem_get_info(idx)
self.assertTrue(after_free_bytes < before_free_bytes)
self.assertEqual(before_available_bytes, after_available_bytes)
_test(0)
if TEST_MULTIGPU:
_test(1)
# Test that wrap_with_cuda_memory_check successfully detects leak
# skip for ROCM. Look into #62533.
@skipIfRocm
def test_cuda_memory_leak_detection(self):
l = []
@self.wrap_with_cuda_memory_check
def no_leak():
pass
@self.wrap_with_cuda_memory_check
def leak_gpu0():
# increasing to 8MB to force acquiring a new block and overcome blocksize differences across platforms
l.append(torch.randn(1024 * 1024 * 8, device=torch.device("cuda:0")))
no_leak()
with self.assertRaisesRegex(RuntimeError, r"CUDA driver API confirmed .+ on device 0.+"):
leak_gpu0()
if TEST_MULTIGPU:
@self.wrap_with_cuda_memory_check
def leak_gpu1():
# increasing to 8MB to force acquiring a new block and overcome blocksize differences across platforms
l.append(torch.randn(1024 * 1024 * 8, device=torch.device("cuda:1")))
with self.assertRaisesRegex(RuntimeError, r"CUDA driver API confirmed .+ on device 1.+"):
leak_gpu1()
def test_cuda_memory_leak_detection_propagates_errors(self):
with self.assertRaisesRegex(RuntimeError, r"The size of tensor a \(3\) must match"):
with self.assertLeaksNoCudaTensors():
x = torch.randn(3, 1, device='cuda')
y = torch.randn(2, 1, device='cuda')
z = x + y
def test_trilu_indices(self):
for test_args in tri_tests_args:
_compare_trilu_indices(self, *test_args, device='cuda')
# test default options
x = torch.ones(
3, 3, dtype=torch.long, device='cuda', layout=torch.strided)
self.assertEqual(
x.tril(0).nonzero().transpose(0, 1),
torch.tril_indices(3, 3, device='cuda'))
self.assertEqual(
x.triu(0).nonzero().transpose(0, 1),
torch.triu_indices(3, 3, device='cuda'))
def test_large_trilu_indices(self):
for test_args in tri_large_tests_args:
_compare_large_trilu_indices(self, *test_args, device='cuda')
@unittest.skipIf(not TEST_MEDIUM_TENSOR, "not enough memory")
def test_cuda_kernel_loop_overflow(self):
# Issue #24309: In extreme cases, the loop variable could overflow and continue
# the kernel loop with a negative index, causing a RuntimeError (invalid write):
x = torch.randn(1, 1, 1, 2**30 + 1, dtype=torch.float16, device="cuda")
expected = x[0, 0, 0, 2**30]
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
torch.cuda.synchronize()
self.assertEqual(y[0, 0, 0, 2**30], expected)
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_cuda_kernel_loop_overflow_large(self):
# Make sure input.numel() > INT_MAX is handled:
x = torch.randn(1, 1, 1, 2**31, dtype=torch.float16, device="cuda")
with self.assertRaisesRegex(RuntimeError, "integer out of range"):
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
# Issue #24309: In extreme cases, the loop variable could overflow and continue
# the kernel loop with a negative index, causing a RuntimeError (invalid write):
x = torch.randn(1, 1, 1, 2**31 - 1, dtype=torch.float16, device="cuda")
expected = x[0, 0, 0, 2**31 - 2]
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
torch.cuda.synchronize()
self.assertEqual(y[0, 0, 0, 2**31 - 2], expected)
# this might create a reference cycle on self...
def _make_multiply_in_stream(self):
class MultiplyInStream(torch.autograd.Function):
@staticmethod
def forward(ctx, x, val):
ctx.val = val
ctx.stream = torch.cuda.current_stream()
return x * val
@staticmethod
def backward(ctx, grad):
self.assertEqual(torch.cuda.current_stream(), ctx.stream)
# delays the operation in the background stream
torch.cuda._sleep(1000 * 5000)
return grad * ctx.val, None
return MultiplyInStream
@skipCUDANonDefaultStreamIf(True)
def test_streaming_backwards_sync(self):
default_stream = torch.cuda.current_stream()
stream = torch.cuda.Stream()
MultiplyInStream = self._make_multiply_in_stream()
# Tests using grads outside the backward() stream context
# See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
x = torch.randn(5, 5, device='cuda', requires_grad=True)
with torch.cuda.stream(stream):
stream.wait_stream(default_stream)
output = MultiplyInStream.apply(x, 2)
output.sum().backward()
# sync needed
default_stream.wait_stream(stream)
self.assertEqual(x.grad, torch.ones_like(x) * 2)
self.assertEqual(torch.cuda.current_stream(), default_stream)
# Tests that using grads in the same stream context as backward()
# is safe regardless what streams bwd ops ran on
bwd_ambient_stream = torch.cuda.Stream()
x = torch.randn(5, 5, device='cuda', requires_grad=True)
with torch.cuda.stream(stream):
stream.wait_stream(default_stream)
output = MultiplyInStream.apply(x, 3)
with torch.cuda.stream(bwd_ambient_stream):
bwd_ambient_stream.wait_stream(stream)
output.sum().backward()
# x was first used on "stream" so its AccumulateGrad leaf should run on "stream".
# The end of backward() should have synced "bwd_ambient_stream" with "stream"
# so it should be safe to use x.grad here without any syncs.
self.assertEqual(x.grad, torch.ones_like(x) * 3)
self.assertEqual(torch.cuda.current_stream(), bwd_ambient_stream)
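# For reference, a minimal sketch of the canonical side-stream pattern the
# tests above exercise (assuming `op` is any CUDA op and `x` its input):
#
#     s = torch.cuda.Stream()
#     s.wait_stream(torch.cuda.current_stream())  # producer waits for prior work
#     with torch.cuda.stream(s):
#         out = op(x)                             # runs on the side stream
#     torch.cuda.current_stream().wait_stream(s)  # consumer syncs before using out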
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
def test_streaming_backwards_multiple_streams(self):
MultiplyInStream = self._make_multiply_in_stream()
class StreamModel(torch.nn.Module):
def __init__(self):
super(StreamModel, self).__init__()
self.event = torch.cuda.Event()
self.stream0 = torch.cuda.Stream()
self.stream1 = torch.cuda.Stream()
def forward(self, x, x_first_use_on_ambient):
if x_first_use_on_ambient:
x0 = x.clone()
self.stream0.wait_stream(torch.cuda.current_stream())
self.stream1.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.stream0):
if not x_first_use_on_ambient:
x0 = x.clone()
y0 = MultiplyInStream.apply(x0, 2)
self.event.record(stream=torch.cuda.current_stream())
with torch.cuda.stream(self.stream1):
y1 = MultiplyInStream.apply(x, 3)
self.stream1.wait_event(self.event)
return y0 + y1
stream = torch.cuda.Stream()
for x_first_use_on_ambient in (True, False):
# the out_of_place=False, iters=1 case stresses whether proper syncs are inserted
# when grads are initially None and stolen by backward ops.
for out_of_place, iters in ((True, 1),
(False, 1),
(False, 5)):
with torch.cuda.stream(stream):
x = torch.randn(5, 5, device='cuda', requires_grad=True)
model = StreamModel().cuda()
x.register_hook(lambda grad: self.assertEqual(torch.cuda.current_stream(),
stream if x_first_use_on_ambient else model.stream0))
for p in model.parameters():
self.assertTrue(p.grad is None)
for i in range(iters):
loss = model(x, x_first_use_on_ambient).sum()
if out_of_place:
x_grad = torch.autograd.grad((loss,), (x,))[0]
else:
loss.backward()
# See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
torch.cuda.current_stream().wait_stream(stream)
if out_of_place:
self.assertEqual(x_grad, torch.ones_like(x) * 5 * iters)
else:
self.assertEqual(x.grad, torch.ones_like(x) * 5 * iters)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_streaming_backwards_device_transfer(self):
# This function must run with non-default current streams on all devices, otherwise it's meaningless.
# The intention is to test that to()'s backward (CopyBackward) interacts properly with the
# synchronization logic in torch/csrc/autograd/input_buffer.cpp.
dev0 = torch.device("cuda:0")
dev1 = torch.device("cuda:1")
# Unfortunately I need to make the tensors largeish.
# Bigger tensors = longer D2D transfers = more likely to expose races.
size = 2**26
a = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)
b = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)
# Here to_backward_recipient = a*b is used only once, so MulBackward's InputBuffer slot only expects 1 input.
# This tests the situation where we don't call InputBuffer::accumulate for MulBackward's InputBuffer.
to_backward_recipient = a * b
s = to_backward_recipient.to(device="cuda:0").sum()
torch.cuda.synchronize(device=dev0)
torch.cuda.synchronize(device=dev1)
s.backward()
self.assertTrue(a.grad.sum().item() == size)
self.assertTrue(b.grad.sum().item() == size)
# Here to_backward_recipient = a*b is used twice, so MulBackward's InputBuffer slot expects 2 inputs.
# This tests the situation where we do call InputBuffer::accumulate for MulBackward's InputBuffer.
a.grad = None
b.grad = None
to_backward_recipient = a * b
# Multiply by 2 here so to's backward creates gradient values that are different from the case above,
# to mitigate weirdness if the caching allocator happens to reuse memory regions that were populated
# with 1s by the case above
s0 = to_backward_recipient.to(device="cuda:0").sum() * 2.
s1 = to_backward_recipient.to(device="cuda:0").sum() * 2.
torch.cuda.synchronize(device=dev0)
torch.cuda.synchronize(device=dev1)
s0.backward(retain_graph=True)
s1.backward()
self.assertTrue(a.grad.sum().item() == 4 * size)
self.assertTrue(b.grad.sum().item() == 4 * size)
def test_streaming_backwards_sync_graph_root(self):
# This function tests if bwd ops running on a side stream properly sync with the GraphRoot.
# The potential bug it targets is a race condition. The test uses multiple trials and
# torch.cuda._sleep such that if the race condition exists, the test will almost certainly fail,
# but there's a chance it may spuriously pass. Passing does not guarantee the backend is bug-free,
# but failure does guarantee there is a bug.
fwd_bwd_op_stream = torch.cuda.Stream()
bwd_ambient_stream = torch.cuda.Stream()
# We need these streams to be different otherwise the test is meaningless.
self.assertTrue(fwd_bwd_op_stream != bwd_ambient_stream)
size = int(1e3)
a = torch.full((size,), 2.0, device="cuda", requires_grad=True)
b = torch.full((size,), 3.0, device="cuda", requires_grad=True)
# I don't think we need any manual record_streams below.
# a and b remain in scope for the entire test.
# c and grad remain in scope for each iteration, and there's a full sync between iterations.
for trial in range(5):
torch.cuda.synchronize()
a.grad = b.grad = None
with torch.cuda.stream(fwd_bwd_op_stream):
c = a * b
with torch.cuda.stream(bwd_ambient_stream):
torch.cuda.synchronize()
# Long-running dummy kernel on bwd_ambient_stream delays filling of grad
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
# Fills grad on bwd_ambient_stream
grad = torch.full((size,), float(trial + 1), device="cuda")
# Bwd ops still run on fwd_bwd_op_stream, so the following will likely fail if
# bwd ops don't sync with bwd_ambient_stream before consuming grad.
torch.autograd.backward(tensors=c, grad_tensors=grad)
# See https://github.com/pytorch/pytorch/issues/47028
# The assertEqual calls below run on bwd_ambient_stream, so this test may also fail
# if backward() fails to sync with bwd_ambient_stream at the end.
# Synchronizing here works around the issue until a proper fix can be made.
torch.cuda.synchronize()
with torch.no_grad():
self.assertEqual(a.grad, grad * b)
self.assertEqual(b.grad, grad * a)
def test_streaming_backwards_callback(self):
# Tests if autograd callbacks sync properly with respect to leaf streams and
# the user-facing stream surrounding backward(). If it fails, first suspect is
# sync logic where "final_callbacks_" are called in torch/csrc/autograd/engine.cpp
MultiplyInStream = self._make_multiply_in_stream()
size = int(1e3)
a = torch.full((size,), 1, device="cuda", dtype=torch.float, requires_grad=True)
b = torch.full((size,), 1, device="cuda", dtype=torch.float, requires_grad=True)
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
s2 = torch.cuda.Stream()
stash = []
# sets up a nontrivial structure of leaf streams
s0.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s0):
c = MultiplyInStream.apply(a, 2)
s1.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s1):
d = MultiplyInStream.apply(b, 3)
s1.wait_stream(s0)
e = c * d
def clone_leaf_grads():
stash.append(a.grad.clone())
stash.append(b.grad.clone())
# Use a hook on e to install the callback
e.register_hook(lambda grad: torch.autograd.Variable._execution_engine.queue_callback(clone_leaf_grads))
s2.wait_stream(s1)
with torch.cuda.stream(s2):
e.sum().backward()
# The autograd engine should sync s2 with all leaf streams then run the callback clone_leaf_grads on s2.
# If those things happened properly, checking the values of the cloned grads on s2 should be safe:
self.assertEqual(stash[0], torch.full_like(a, 6))
self.assertEqual(stash[1], torch.full_like(a, 6))
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
@unittest.skipIf(IS_SANDCASTLE or IS_REMOTE_GPU, "Does not work on Sandcastle")
def test_cuda_init_race(self):
# See https://github.com/pytorch/pytorch/issues/16559
import subprocess
subprocess.check_call([sys.executable, '-c', """\
import torch
import threading
def worker(rank):
torch.tensor([1.]).cuda(rank)
t1 = threading.Thread(target=worker, args=(0,))
t2 = threading.Thread(target=worker, args=(1,))
t1.start()
t2.start()
"""])
@unittest.skipIf(TEST_WITH_ROCM, "ROCm doesn't support device side asserts")
def test_fixed_cuda_assert_async(self):
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
torch._assert_async(torch.tensor([], device="cuda"))
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with more than one value is ambiguous"):
torch._assert_async(torch.tensor([0, 0], device="cuda"))
torch._assert_async(torch.tensor(1, device="cuda"))
torch._assert_async(torch.tensor(0.1, device="cuda"))
torch._assert_async(torch.tensor(-0.1, device="cuda"))
torch._assert_async(torch.tensor(True, device="cuda"))
torch._assert_async(torch.tensor(0 + 0.1j, device="cuda"))
fail_stmts = [
"torch._assert_async(torch.tensor(0, device='cuda'))",
"torch._assert_async(torch.tensor(0.0, device='cuda'))",
"torch._assert_async(torch.tensor(False, device='cuda'))",
"torch._assert_async(torch.tensor(0 + 0j, device='cuda'))",
]
import subprocess
for stmt in fail_stmts:
with self.subTest(stmt=stmt):
r = subprocess.call([sys.executable, '-c', f"""\
import torch
{stmt}
torch.cuda.synchronize()
"""])
self.assertTrue(r != 0)
def test_grad_scaling_unscale(self, dtype=torch.float):
inv_scale = torch.full((1,), 0.25, dtype=torch.float, device="cuda:0")
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
size = 10
g = torch.full((size, size), 4.0, dtype=dtype, device="cuda:0")
ginf = g.clone()
ginf[2, 2] = float('inf')
gnan = g.clone()
gnan[2, 2] = float('nan')
# Tries selected combinations of
#  - contiguous grads
#  - g.clone().t(), which is not contiguous but still non-overlapping and dense
#  - variants of g.clone()[:, :5], which are not non-overlapping and dense
# Non-overlapping-and-dense grads route into a multi-tensor apply kernel,
# while the others use a fallback per-tensor kernel, so we should try both.
cases = (
([g.clone(), g.clone()], False),
([g.clone(), g.clone().t()], False),
([g.clone(), g.clone()[:, :5]], False),
([g.clone()[:, :5], g.clone()[:, :5]], False),
([g.clone(), ginf.clone()], True),
([g.clone(), gnan.clone()], True),
([g.clone(), ginf.clone()[:, :5]], True),
([g.clone(), gnan.clone()[:, :5]], True),
([ginf.clone(), g.clone()[:, :5]], True),
([ginf.clone()[:, :5], g.clone()[:, :5]], True),
)
for grads, has_inf in cases:
found_inf.zero_()
torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
if has_inf:
self.assertEqual(found_inf, 1.0)
else:
self.assertEqual(found_inf, 0.0)
for grad in grads:
self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
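# Sanity check on the expected values: each clean grad starts at 4.0 and
# unscaling multiplies by inv_scale = 0.25, so it should come back as
# exactly 1.0, which is what the ones_like comparison verifies.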
# When passing lists with mismatched dtypes to a raw
# _amp_foreach_non_finite_check_and_unscale_ call,
# it's expected to fall back to the single-tensor TensorIterator kernel.
grads = [g.clone(), g.to(dtype=torch.float16)]
torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
for grad in grads:
self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
# Passing lists with mismatched devices to a raw
# _amp_foreach_non_finite_check_and_unscale_ call should raise errors.
if TEST_MULTIGPU:
with self.assertRaisesRegex(RuntimeError, r"Expected all tensors to be on the same device"):
torch._amp_foreach_non_finite_check_and_unscale_([g.clone(), g.to(device="cuda:1")],
found_inf,
inv_scale)
# Creates a list of grads with mismatched dtypes and devices, to ensure
# scaler._unscale_grads_ organizes grads by dtype and device before calling
# _amp_foreach_non_finite_check_and_unscale_ on each set.
# If inject_inf >= 0, writes an inf into one grad for _unscale_grads_ to find.
def perfect_storm_grads(inject_inf):
grads = [g.clone(), g.clone()[:, :5], g.to(dtype=torch.float16), g.to(dtype=torch.float16)]
if TEST_MULTIGPU:
grads += [g.to(device="cuda:1"),
g.to(device="cuda:1")[:, :5],
g.to(device="cuda:1", dtype=torch.float16),
g.to(device="cuda:1", dtype=torch.float16)]
if inject_inf >= 0:
grads[inject_inf][2, 2] = float('inf')
return grads
scaler = torch.cuda.amp.GradScaler()
dummy_params = [torch.empty_like(g) for g in perfect_storm_grads(-1)]
dummy_opt = torch.optim.SGD(dummy_params, lr=1.)
# Ensures the inf/nan checking can find an inf injected onto any grad in the perfect storm.
for inject_inf in range(-1, len(dummy_params)):
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
grads = perfect_storm_grads(inject_inf)
for i, p in enumerate(dummy_params):
p.grad = grads[i]
found_inf_per_device = scaler._unscale_grads_(dummy_opt, inv_scale, found_inf, True)
if inject_inf < 0:
# No inf was injected, ensures unscaling worked normally.
self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 0)
for grad in grads:
self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
else:
# inf was injected, ensures inf was found.
self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 1)
def test_grad_scaling_update_scale(self, device="cuda", dtype=torch.float):
growth = 2.0
backoff = 0.25
growth_interval = 2
scale = torch.full((1,), 4.0, dtype=dtype, device=device)
growth_tracker = torch.full((1,), 0.0, dtype=torch.int32, device=device)
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
# Simulates 2 consecutive unskipped iterations
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 1)
self.assertEqual(scale, 4.0)
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 0)
self.assertEqual(scale, 8.0)
# Simulates a skipped iteration
found_inf.fill_(1.0)
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 0)
self.assertEqual(scale, 2.0)
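# Worked trace of the update rule checked above (scale grows by `growth`
# after `growth_interval` consecutive clean steps, and backs off on inf/nan):
#   start:        scale=4, tracker=0
#   clean step:   tracker -> 1, scale stays 4
#   clean step:   tracker hits growth_interval=2, resets to 0, scale -> 4*2=8
#   skipped step: scale -> 8*0.25=2, tracker stays 0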
def test_grad_scaling_unscale_sparse(self, device="cuda", dtype=torch.float):
scaler = torch.cuda.amp.GradScaler()
inv_scale = torch.full((1,), 0.25, dtype=dtype, device=device)
found_inf = torch.empty((1,), dtype=dtype, device=device)
cur = found_inf.device
# As of d0c925f (4/16/20), docs are unclear about best API for sparse cuda tensor construction.
# https://pytorch.org/docs/master/tensors.html shows torch.sparse_coo_tensor(...), but it has no docstring.
# The same page shows several tensors with layout=torch.sparse_coo, but no constructors using that layout.
# Meanwhile, https://pytorch.org/docs/master/sparse.html shows torch.sparse.FloatTensor(...), which looks
# legacy and does not accept a device="cuda" kwarg. Going with torch.sparse_coo_tensor.
i = torch.tensor([[0, 1, 1],
[2, 0, 2]], device="cuda", dtype=torch.int64)
v = torch.tensor([16., 32., 64.], device="cuda", dtype=torch.float)
s = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
p = s.clone()
assert p.is_sparse
opt = torch.optim.SGD([p], lr=1.)
p.grad = s.clone()
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 0.0)
self.assertEqual(p.grad.to_dense(), (s / 4).to_dense())
v = torch.FloatTensor([16., 32., float('inf')])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 1.0)
v = torch.FloatTensor([16., 32., float('nan')])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 1.0)
p = s.clone().half()
assert p.is_sparse
opt = torch.optim.SGD([p], lr=1.)
p.grad = s.clone().half()
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]
self.assertEqual(found_inf, 0.0)
self.assertEqual(p.grad.to_dense(), (s.half() / 4).to_dense())
# Creates fp16 sparse tensor with duplicated indices (uncoalesced). The uncoalesced representation
# does not overflow in fp16, but the coalesced representation would, because 64000 + 64000 > fp16 max.
# _amp_non_finite_check_and_unscale_ should report an overflow here.
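# (fp16 max is 65504; coalescing would sum 64000 + 64000 = 128000 at index
# [0, 2], which overflows to inf in fp16.)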
i = torch.LongTensor([[0, 1, 0],
[2, 0, 2]])
v = torch.FloatTensor([64000., 32., 64000.])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=torch.float16)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]
self.assertEqual(found_inf, 1.0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_device_as_key(self):
# Ensure that different instances of "device" objects that point to the same device
# are treated as identical keys by dicts. GradScaler relies on this behavior, and may
# error otherwise in a way that's difficult to detect (a silent performance hit).
d = {}
t = torch.empty((1,), device="cuda:0")
dev0a = torch.device("cuda:0")
dev0b = torch.device("cuda:0")
dev1a = torch.device("cuda:1")
dev1b = torch.device("cuda:1")
self.assertTrue(hash(dev0a) == hash(dev0b))
self.assertTrue(hash(dev1a) == hash(dev1b))
d[dev0a] = "0a"
d[dev0b] = "0b"
self.assertTrue(len(d) == 1)
self.assertTrue(d[dev0a] == "0b")
d[t.device] = "t"
self.assertTrue(len(d) == 1)
self.assertTrue(d[dev0a] == "t")
d[dev1a] = "1a"
d[dev1b] = "1b"
self.assertTrue(len(d) == 2)
self.assertTrue(d[dev1a] == "1b")
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_scale(self):
scaler = torch.cuda.amp.GradScaler(init_scale=2.)
t0 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0")
t1 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:1")
# Create some nested iterables of tensors on different devices.
outputs = (t1.clone(), (t0.clone(), t1.clone()), [t0.clone(), (t1.clone(), t0.clone())])
outputs = scaler.scale(outputs)
self.assertTrue(outputs[0] == 8.0 and outputs[1][0] == 8.0 and outputs[1][1] == 8.0 and
outputs[2][0] == 8.0 and outputs[2][1][0] == 8.0 and outputs[2][1][1] == 8.0)
self.assertTrue(scaler._scale.device == t1.device)
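# The 8.0 values follow directly from init_scale=2.: scale() multiplies every
# tensor it finds in the (possibly nested) structure by the current scale,
# so each 4.0 becomes 4.0 * 2.0 = 8.0.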
def test_grad_scaling_state_dict(self):
for lazy_init_scale in True, False:
s0 = torch.cuda.amp.GradScaler(init_scale=3., growth_factor=4., backoff_factor=.5, growth_interval=2)
s1 = torch.cuda.amp.GradScaler(init_scale=6., growth_factor=7., backoff_factor=.8, growth_interval=1)
# sets a random value for load_state_dict to overwrite
s1._init_growth_tracker = 7
if lazy_init_scale:
# Dummy scale() call to ensure the scale tensor is lazily initialized.
s1.scale(torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0"))
self.assertTrue(isinstance(s1._scale, torch.cuda.FloatTensor))
s1.load_state_dict(s0.state_dict())
self.assertEqual(s1.get_scale(), 3.)
self.assertEqual(s1.get_growth_factor(), 4.)
self.assertEqual(s1.get_backoff_factor(), .5)
self.assertEqual(s1.get_growth_interval(), 2)
self.assertEqual(s1._init_growth_tracker, 0)
def _create_scaling_models_optimizers(self, device="cuda"):
# Create a module+optimizer that will use scaling, and a control module+optimizer
# that will not use scaling, against which the scaling-enabled module+optimizer can be compared.
mod_control = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
mod_scaling = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
s.data.copy_(c.data)
opt_control = torch.optim.SGD(mod_control.parameters(), lr=1.0)
opt_scaling = torch.optim.SGD(mod_scaling.parameters(), lr=1.0)
return mod_control, mod_scaling, opt_control, opt_scaling
def _create_scaling_case(self, device="cuda", dtype=torch.float):
data = [(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device))]
loss_fn = torch.nn.MSELoss().cuda()
skip_iter = 2
return self._create_scaling_models_optimizers(device=device) + (data, loss_fn, skip_iter)
# _run_scaling_case generalizes some single-optimizer test logic to avoid too much copy-pasting below.
def _run_scaling_case(self, run, unskipped, skipped, atol=1e-7):
# Ensure scaling can be disabled without changing user control flow.
for enabled in True, False:
mod_control, mod_scaling, opt_control, opt_scaling, data, loss_fn, skip_iter = self._create_scaling_case()
# For functionality, test with a modest initial scale, and an unrealistically-large growth factor
# so any potential errors with the growth factor handling will be magnified.
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
_ = run(data, mod_control, opt_control, scaler, loss_fn, skip_iter, False)
ret = run(data, mod_scaling, opt_scaling, scaler, loss_fn, skip_iter, True)
# Allows run() to optionally return a different scaler instance.
scaler = ret if ret else scaler
# If scaling was enabled, the scale factor should have been multiplied by the growth factor
# len(data) - skipped times and the backoff factor "skipped" times.
if enabled:
net_growth = scaler.get_growth_factor()**unskipped if unskipped > 0 else 1.0
net_backoff = scaler.get_backoff_factor()**skipped if skipped > 0 else 1.0
self.assertTrue(scaler.get_scale() == (128. * net_growth * net_backoff))
else:
self.assertTrue(scaler.get_scale() == 1.0)
for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
self.assertEqual(c, s, atol=atol, rtol=1e-05)
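# As a concrete instance of the formula above: most callers below use
# unskipped=3, skipped=1, so with init_scale=128., growth_factor=2.0, and the
# default backoff factor of 0.5, the expected final scale is
# 128 * 2**3 * 0.5 = 512.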
# Compares no scaling + no autocasting against scaling + autocasting.
def test_grad_scaling_autocast(self):
try_pickle = False
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
with torch.autocast('cuda', enabled=try_scaling_api):
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
if try_pickle:
scaler = pickle.loads(pickle.dumps(scaler))
else:
loss.backward()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
return scaler
# sets atol=1e-3 because we're comparing pure fp32 arithmetic vs a mixture of fp16 and fp32
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)
# this will be picked up by try_pickle within run():
try_pickle = True
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)
def test_grad_scaling_clipping(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
max_norm = 0.2 # A reasonable value that actually has an effect, based on printouts of grads
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm * scaler.get_scale())
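# Clipping the still-scaled grads against max_norm * scale is equivalent to
# clipping the unscaled grads against max_norm, so this matches the
# unscaled control path below without needing a separate unscale_ call.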
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-5)
def test_grad_scaling_clipping_separate_unscale(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
max_norm = 0.2 # A reasonable value that actually has an effect, based on printouts of grads
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm, error_if_nonfinite=False)
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1)
@unittest.skipIf(IS_WINDOWS, 'FIXME: fix this test for Windows')
def test_grad_scaling_penalty(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
grad_params = torch.autograd.grad(scaler.scale(loss),
model.parameters(), create_graph=True)
inv_scale = 1. / scaler.get_scale()
grad_params = [p * inv_scale for p in grad_params]
else:
grad_params = torch.autograd.grad(loss, model.parameters(), create_graph=True)
grad_norm = 0
for grad in grad_params:
grad_norm += grad.pow(2).sum()
grad_norm = grad_norm.sqrt()
loss = loss + grad_norm
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1)
def test_grad_scaling_accumulation(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
iters_to_accumulate = 2
for i, (input, target) in enumerate(data):
output = model(input)
loss = loss_fn(output, target)
loss = loss / iters_to_accumulate
if try_scaling_api:
scaler.scale(loss).backward()
else:
loss.backward()
if (i + 1) % iters_to_accumulate == 0:
if try_scaling_api:
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
else:
optimizer.step()
optimizer.zero_grad()
self._run_scaling_case(run, unskipped=2, skipped=0)
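# With the 4 batches produced by _create_scaling_case and
# iters_to_accumulate=2, optimizer.step() runs exactly twice and no step is
# skipped, hence unskipped=2, skipped=0 above.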
def test_grad_scaling_multiple(self):
# Tests gradient scaling with 2 models and 2 optimizers that both receive gradients from 2 losses.
# Some of the logic here cannot reuse the generic helper functions created for the 1-optimizer cases.
for enabled in True, False:
mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
self._create_scaling_case()
mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
self._create_scaling_models_optimizers()
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer0.zero_grad()
optimizer1.zero_grad()
output0 = model0(input)
output1 = model1(input)
loss0 = loss_fn(0.3 * output0 + 0.7 * output1, target)
loss1 = loss_fn(0.6 * output0 - 0.4 * output1, target)
if try_scaling_api:
scaler.scale(loss0).backward(retain_graph=True)
scaler.scale(loss1).backward()
if i == skip_iter and scaler.is_enabled():
model1[1].weight.grad.data.fill_(float('inf'))
# As an additional stress test, separately unscale for one of the optimizers.
scaler.unscale_(optimizer0)
scaler.step(optimizer0)
scaler.step(optimizer1)
scaler.update()
else:
loss0.backward(retain_graph=True)
loss1.backward()
optimizer0.step()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer1.step()
run(mod_control0, mod_control1, opt_control0, opt_control1, False)
run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)
# The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
self.assertTrue(scaler.get_scale() == (128. * scaler.get_growth_factor()**3 *
scaler.get_backoff_factor()**1) if enabled else 1.0)
for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
self.assertEqual(c, s, rtol=1e-5, atol=1e-7)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_multigpu(self):
# Same as above, but runs some of the models on device 1.
# GradScaler should transparently handle losses and gradients on multiple devices.
# This test could be combined with the test above, but I think it makes sense to treat
# multi-GPU operations separately.
dev0 = torch.device("cuda:0")
dev1 = torch.device("cuda:1")
for enabled in True, False:
mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
self._create_scaling_case()
mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
self._create_scaling_models_optimizers(device=dev1)
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer0.zero_grad()
optimizer1.zero_grad()
output0 = model0(input)
output1 = model1(input.to(dev1))
loss0 = loss_fn(0.3 * output0 + 0.7 * output1.to(dev0), target)
loss1 = loss_fn(0.6 * output0.to(dev1) - 0.4 * output1, target.to(dev1))
if try_scaling_api:
scaler.scale(loss0).backward(retain_graph=True)
scaler.scale(loss1).backward()
if i == skip_iter and scaler.is_enabled():
model1[1].weight.grad.data.fill_(float('inf'))
# As an additional stress test, separately unscale for one of the optimizers.
scaler.unscale_(optimizer0)
scaler.step(optimizer0)
scaler.step(optimizer1)
# Make sure the found_infs were collected properly across optimizers and devices.
if scaler.is_enabled():
self.assertTrue(len(scaler._found_inf_per_device(optimizer0)) == 1)
self.assertTrue(len(scaler._found_inf_per_device(optimizer1)) == 1)
self.assertTrue(scaler._found_inf_per_device(optimizer0)[dev0].item() == 0.)
self.assertTrue(scaler._found_inf_per_device(optimizer1)[dev1].item() ==
float(i == skip_iter))
scaler.update()
else:
loss0.backward(retain_graph=True)
loss1.backward()
optimizer0.step()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer1.step()
run(mod_control0, mod_control1, opt_control0, opt_control1, False)
run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)
# The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
self.assertTrue(scaler.get_scale() == (128. * scaler.get_growth_factor()**3 *
scaler.get_backoff_factor()**1) if enabled else 1.0)
# Copy mod_control1 and mod_scaling1 back to device 0 for comparison
mod_control1.to(dev0)
mod_scaling1.to(dev0)
for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
self.assertEqual(c, s, rtol=1e-5, atol=1e-7)
def test_cublas_multiple_threads_same_device(self):
# Note: these parameters should be tuned very carefully.
# Too small a number makes it hard for the race condition to happen,
# while too large a number sometimes causes hangs.
size = 1024
num_threads = 2
trials = 3
test_iters = 100
weight = torch.ones((size, size), device='cuda')
results = {}
barrier = threading.Barrier(num_threads)
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for i in range(test_iters):
# If all threads are sharing the same cublas handle,
# the following sequence may occur:
# thread 0 calls cublasSetStream()
# thread 1 calls cublasSetStream()
# thread 0 launches its raw gemm, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but actually now races with its gemm.
results[t] = torch.mm(results[t], weight)
results[t].div_(float(size))
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((size, size), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
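# Each iteration computes ones @ ones (every entry equals `size`) and then
# divides by `size`, so absent a race results[t] should remain all ones and
# its sum should be exactly size * size.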
for t in range(num_threads):
self.assertEqual(results[t].sum().item(), size * size)
# Test is flaky on Windows (https://github.com/pytorch/pytorch/issues/57401)
@unittest.skipIf(IS_WINDOWS, 'Test is flaky on Windows (see issue 57401)')
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
@skipIfRocm
def test_cudnn_multiple_threads_same_device(self):
# This function is intended to test the lazy creation and reuse of per-thread
# cudnn handles on each device in aten/src/ATen/cudnn/Handles.cpp.
# Failure here likely indicates something wrong with that logic.
weight = torch.ones((1, 1, 2, 2), device='cuda')
results = {}
num_threads = 2
trials = 3
test_iters = 1000
barrier = threading.Barrier(num_threads)
with torch.backends.cudnn.flags(enabled=True):
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for _ in range(test_iters):
# If all threads are sharing the same cudnn handle,
# the following sequence may occur:
# thread 0 calls setCuDNNStreamToCurrent()
# thread 1 calls setCuDNNStreamToCurrent()
# thread 0 launches its raw convolution, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but now races with its convolution.
results[t] = torch.nn.functional.conv2d(results[t], weight, padding=0)
results[t].div_(4.0)
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((1, 1, 2048, 2048), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
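# Why (2048 - test_iters)**2: each padding=0 conv with a 2x2 kernel shrinks
# both spatial dims by 1, and with all-ones input and kernel every output
# entry is 4, which div_(4.0) maps back to 1. After test_iters iterations the
# result is an all-ones (2048 - test_iters) x (2048 - test_iters) map.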
for t in range(num_threads):
self.assertEqual(results[t].sum().item(),
(2048 - test_iters) * (2048 - test_iters))
def test_cusparse_multiple_threads_same_device(self):
size = 1024
num_threads = 2
trials = 3
test_iters = 500
def ones_sparse(size):
a = torch.arange(size, device='cuda')
indices = torch.cartesian_prod(a, a).t()
values = torch.ones(size * size, device='cuda')
return torch.sparse_coo_tensor(indices, values)
weight = ones_sparse(size)
results = {}
barrier = threading.Barrier(num_threads)
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for i in range(test_iters):
# If all threads are sharing the same cublas handle,
# the following sequence may occur:
# thread 0 calls cublasSetStream()
# thread 1 calls cublasSetStream()
# thread 0 launches its raw gemm, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but actually now races with its gemm.
results[t] = weight.mm(results[t])
results[t].div_(float(size))
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((size, size), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(), size * size)
def _run_autocast_outofplace(self, op, args, run_as_type, out_type=None, module=torch, add_kwargs=None):
# helper to cast args
def cast(val, to_type):
if isinstance(val, torch.Tensor):
return val.to(to_type) if val.is_floating_point() else val
elif isinstance(val, collections.abc.Iterable):
return type(val)(cast(v, to_type) for v in val)
else:
return val
if add_kwargs is None:
add_kwargs = {}
fast_dtype = torch.bfloat16 if run_as_type == torch.bfloat16 else torch.float16
self.assertFalse(torch.is_autocast_enabled())
with torch.autocast('cuda', dtype=fast_dtype):
self.assertTrue(torch.is_autocast_enabled())
out_type = out_type if out_type is not None else run_as_type
output = output_method = None
# Try module.* variant, if requested:
if module is not None and hasattr(module, op):
output = getattr(module, op)(*args, **add_kwargs)
if isinstance(output, torch.Tensor):
self.assertTrue(out_type == output.dtype,
"autocast for torch.{} produced {}, should produce {}"
.format(op, output.dtype, out_type))
# Try Tensor.* variant:
if hasattr(torch.Tensor, op):
output_method = getattr(args[0], op)(*args[1:], **add_kwargs)
if isinstance(output_method, torch.Tensor):
self.assertTrue(out_type == output_method.dtype,
"autocast for torch.{} produced {}, should produce {}"
.format(op, output_method.dtype, out_type))
self.assertTrue((output is not None) or (output_method is not None),
"{} not found as an attribute on either Tensor or the requested module {}".format(
op, module))
# Accounts for ops that return Tensors, iterables, and other non-Tensors.
# For example, lstm_cell returns a tuple and equal returns bool.
def compare(first, second):
if isinstance(first, torch.Tensor):
return torch.equal(first, second)
elif isinstance(first, collections.abc.Iterable):
return all(compare(f, s) for f, s in zip(first, second))
else:
return first == second
# If both torch.* and Tensor.* variants were found, check outputs are identical
if (output is not None) and (output_method is not None):
self.assertTrue(type(output) == type(output_method))
comparison = compare(output, output_method)
self.assertTrue(comparison, "torch.{0} result did not match Tensor.{0} result".format(op))
# Compare numerics to Python-side "autocasting" that (we expect) does the same thing
# as the C++-side autocasting, and should be bitwise accurate.
output_to_compare = output if output is not None else output_method
with torch.autocast('cuda', enabled=False):
self.assertFalse(torch.is_autocast_enabled())
if module is not None and hasattr(module, op):
control = getattr(module, op)(*cast(args, run_as_type), **add_kwargs)
else:
control = getattr(args[0].to(run_as_type), op)(*cast(args[1:], run_as_type), **add_kwargs)
self.assertTrue(type(output_to_compare) == type(control))
comparison = compare(output_to_compare, control)
self.assertTrue(comparison, "torch.{} result did not match control".format(op))
self.assertTrue(torch.is_autocast_enabled())
self.assertFalse(torch.is_autocast_enabled())
def args_maybe_kwargs(self, op_with_args):
if len(op_with_args) == 2:
return op_with_args[0], op_with_args[1], {}
else:
return op_with_args[0], op_with_args[1], op_with_args[2]
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op_with_args in self.autocast_lists.torch_fp16:
skip_test = False
op, args = op_with_args[0], op_with_args[1]
if len(op_with_args) == 3:
skip_test = op_with_args[2] # TEST_WITH_ROCM
if not skip_test:
self._run_autocast_outofplace(op, args, torch.float16)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_bf16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op_with_args in self.autocast_lists.torch_fp16:
skip_test = False
op, args = op_with_args[0], op_with_args[1]
if len(op_with_args) == 3:
skip_test = op_with_args[2] # TEST_WITH_ROCM
should_error_from_not_implemented = 'cudnn' in op or 'prelu' in op or 'thnn' in op \
or 'fused' in op or 'gru' in op or op == '_thnn_fused_lstm_cell' or op == 'lstm_cell'
if not skip_test:
if should_error_from_not_implemented:
with self.assertRaises(RuntimeError, msg=str(op) + ' should not be supported for bfloat16!'):
self._run_autocast_outofplace(op, args, torch.bfloat16)
else:
if torch.cuda.is_bf16_supported():
self._run_autocast_outofplace(op, args, torch.bfloat16)
else:
with self.assertRaisesRegex(RuntimeError, 'Device does not support bfloat16'):
self._run_autocast_outofplace(op, args, torch.bfloat16)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_fp32(self):
for op_with_args in self.autocast_lists.torch_fp32:
op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_need_autocast_promote(self):
for op, args in self.autocast_lists.torch_need_autocast_promote:
self._run_autocast_outofplace(op, args, torch.float32)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_expect_builtin_promote(self):
for op, args, out_type in self.autocast_lists.torch_expect_builtin_promote:
self._run_autocast_outofplace(op, args, torch.float32, out_type=out_type)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.nn_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_bf16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.nn_fp16:
if torch.cuda.is_bf16_supported():
self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)
else:
with self.assertRaisesRegex(RuntimeError, 'Device does not support bfloat16'):
self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_fp32(self):
for op, args in self.autocast_lists.nn_fp32:
self._run_autocast_outofplace(op, args, torch.float32, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_linalg_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.linalg_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=torch._C._linalg)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.methods_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=None)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_fp32(self):
for op, args in self.autocast_lists.methods_fp32:
self._run_autocast_outofplace(op, args, torch.float32, module=None)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_expect_builtin_promote(self):
for op, args, out_type in self.autocast_lists.methods_expect_builtin_promote:
self._run_autocast_outofplace(op, args, torch.float32, module=None, out_type=out_type)
def test_autocast_banned(self):
with torch.autocast('cuda'):
for op, args, module in self.autocast_lists.banned:
with self.assertRaises(RuntimeError):
getattr(module, op)(*args)
def test_autocast_ignored_types(self):
with torch.autocast('cuda'):
for ignore_type in (torch.double, torch.int32):
a_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
b_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
c_16 = torch.ones((8, 8), dtype=torch.float16, device="cuda:0")
# Tests if CastPolicy::fp16 ops ignore double and int
# Currently, no ops belonging to this policy support integer inputs.
if ignore_type is torch.double:
with self.assertRaises(RuntimeError):
torch.mm(a_ignore, c_16)
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.mm(a_ignore, b_ignore).dtype
self.assertTrue(torch.mm(a_ignore, b_ignore).dtype is type_no_autocast)
# Tests if CastPolicy::fp32 ops ignore double and int
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.pow(a_ignore, 2.0).dtype
self.assertTrue(torch.pow(a_ignore, 2.0).dtype is type_no_autocast)
# Tests if CastPolicy::fp32_set_opt_dtype ops ignore double and int
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.sum(a_ignore).dtype
self.assertTrue(torch.sum(a_ignore).dtype is type_no_autocast)
# Tests if CastPolicy::fp32_append_dtype ops ignore double and int
# Currently, no ops belonging to this policy support integer inputs.
if ignore_type is torch.double:
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.norm(a_ignore).dtype
self.assertTrue(torch.norm(a_ignore).dtype is type_no_autocast)
def test_autocast_custom_enabled(self):
class MyMM(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd
def forward(ctx, a, b):
self.assertTrue(a.dtype is torch.float32)
self.assertTrue(b.dtype is torch.float32)
self.assertTrue(torch.is_autocast_enabled())
ctx.save_for_backward(a, b)
return a.mm(b)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad):
self.assertTrue(torch.is_autocast_enabled())
a, b = ctx.saved_tensors
return grad.mm(b.t()), a.t().mm(grad)
mymm = MyMM.apply
x = torch.randn((8, 8), device="cuda", dtype=torch.float32, requires_grad=True)
y = torch.randn((8, 8), device="cuda", dtype=torch.float32, requires_grad=True)
with torch.cuda.amp.autocast():
output = mymm(x, y)
self.assertTrue(output.dtype is torch.float16)
loss = output.sum()
loss.backward()
def test_autocast_custom_cast_inputs(self):
class MyMM(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)
def forward(ctx, a, container, expect_type):
b = container[1][0]
self.assertTrue(a.dtype is expect_type)
self.assertTrue(b.dtype is expect_type)
self.assertFalse(torch.is_autocast_enabled())
ctx.save_for_backward(a, b)
return a.mm(b)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad):
self.assertFalse(torch.is_autocast_enabled())
a, b = ctx.saved_tensors
return grad.mm(b.t()), None, None
mymm = MyMM.apply
x = torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
# Puts one input tensor in a nested container. y's contained Tensor won't receive a gradient,
# because torch.autograd.Function can't hand gradients back to non-Tensor forward arguments.
# Sets requires_grad=False explicitly so we don't lie about expecting a gradient.
y = (0, {0: torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=False)})
with torch.autocast('cuda'):
output = mymm(x, y, torch.float32)
self.assertTrue(output.dtype is torch.float32)
loss = output.sum()
loss.backward()
# Tests if custom_fwd becomes a no-op when mymm runs outside an autocast-enabled region.
output = mymm(x, y, torch.float16)
self.assertTrue(output.dtype is torch.float16)
loss = output.sum()
loss.backward()
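# In short: custom_fwd(cast_inputs=torch.float32) casts incoming
# floating-point CUDA tensors (including those nested in containers) to fp32
# and runs forward with autocast disabled when invoked inside an autocast
# region; outside autocast it is a no-op, which the two calls above verify.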
def test_autocast_cat_jit(self):
# Reported at https://github.com/pytorch/pytorch/issues/38958
class Model(torch.nn.Module):
def forward(self):
a = torch.randn(1)
b = torch.randn(1)
c = torch.cat((a, b), 0)
d = torch.stack([c, c], 0)
return d
# The JIT here doesn't really matter, we just need to call
# cat via the boxed API
model = Model()
model_jit_script = torch.jit.script(model)
with torch.autocast('cuda', enabled=True):
model()
model_jit_script()
# cudnn RNNs require special backend handling (weights are cast to FP16 and reflattened)
# so they get a dedicated test.
# Despite the large number of RNN cases it tries, the test takes < 15 seconds on a Titan V (similar to V100).
@skipIfRocm
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_rnn(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
# seq, batch, features, hidden size
clses = ("RNN", "GRU", "LSTM")
T, B, F, H = 3, 4, 5, 6
dtypes = (torch.float16, torch.float32)
input_layouts = ("seq_first", "batch_first", "packed")
for (cls, num_layers, bias, input_layout, bidirectional, try_nonpreflattened_weights,
input_dtype, hidden_dtype, weight_dtype) in \
product(clses, (1, 2), (True, False), input_layouts, (True, False), (True, False),
dtypes, dtypes, dtypes):
if input_layout == "seq_first":
batch_first = False
x = torch.randn((T, B, F), device="cuda", dtype=input_dtype)
elif input_layout == "batch_first":
batch_first = True
x = torch.randn((B, T, F), device="cuda", dtype=input_dtype)
elif input_layout == "packed":
batch_first = False
x = torch.nn.utils.rnn.pack_padded_sequence(torch.randn((T, B, F),
device="cuda", dtype=input_dtype),
lengths=(3, 2, 1, 3),
enforce_sorted=False)
rnn = getattr(torch.nn, cls)(F, H, num_layers=num_layers, bidirectional=bidirectional,
bias=bias, batch_first=batch_first).cuda().to(dtype=weight_dtype)
if try_nonpreflattened_weights:
for p in rnn.parameters():
with torch.no_grad():
p.set_(p.clone())
h = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
device="cuda", dtype=hidden_dtype)
if cls == "LSTM":
c = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
device="cuda", dtype=hidden_dtype)
h = (h, c)
with torch.autocast('cuda'):
out, h_out = rnn(x, h)
out = out.data if input_layout == "packed" else out
self.assertEqual(out.dtype, torch.float16)
# Autocast wrapper requires at::_cudnn_rnn is autograd-exposed. This check can't guarantee
# at::_cudnn_rnn is autograd-exposed, but if it fires, it indicates some funny business has
# occurred and we should double check that at::_cudnn_rnn remains autograd-exposed.
self.assertEqual(out.grad_fn.name(), "CudnnRnnBackward0")
out.sum().backward()
grads = [p.grad.clone() for p in rnn.parameters()]
rnn.zero_grad()
if cls == "LSTM":
out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), (h[0].half(), h[1].half()))
else:
out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), h.half())
out_control = out_control.data if input_layout == "packed" else out_control
out_control.sum().backward()
grads_control = [p.grad.clone() for p in rnn.parameters()]
# Compares with default tolerances, even for FP16 execution. Barring nondeterminism,
# autocast and control results should be bitwise identical.
self.assertEqual(out, out_control)
if cls == "LSTM":
self.assertTrue(h_out[0].dtype is torch.float16 and h_out[1].dtype is torch.float16)
self.assertEqual(h_out[0], h_out_control[0])
self.assertEqual(h_out[1], h_out_control[1])
else:
self.assertEqual(h_out.dtype, torch.float16)
self.assertEqual(h_out, h_out_control)
for grad, grad_control in zip(grads, grads_control):
self.assertEqual(grad.half(), grad_control)
def test_autocast_cache_leak(self):
# Reported at https://github.com/pytorch/pytorch/issues/48049
# Checks whether autocast re-caches the same parameters
# when executed in a `torch.no_grad()` block.
linear = torch.nn.Linear(10, 10).to('cuda')
data = torch.randn(1, 10, device='cuda')
with torch.autocast('cuda'):
with torch.no_grad():
out = linear(data)
first_iter_mem = torch.cuda.memory_allocated()
for _ in range(3):
out = linear(data)
self.assertTrue(first_iter_mem == torch.cuda.memory_allocated())
def test_autocast_checkpointing(self):
model = torch.nn.Sequential(torch.nn.Linear(8, 8),
torch.nn.Linear(8, 8),
torch.nn.Linear(8, 8)).cuda()
input = torch.rand((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
with torch.autocast('cuda'):
output = checkpoint_sequential(model, 2, input)
self.assertTrue(output.requires_grad)
self.assertTrue(output.dtype is torch.float16)
output.sum().backward()
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_max_large_axis(self):
x = torch.zeros(2**32, device='cuda', dtype=torch.int8)
x[-1] = 1
val, idx = x.max(0)
self.assertEqual(val, 1)
self.assertEqual(idx, x.shape[0] - 1)
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_to_numpy(self):
self.assertRaises(TypeError, lambda: torch.empty(1, device="cuda").numpy())
def test_graph_is_current_stream_capturing(self):
self.assertFalse(torch.cuda.is_current_stream_capturing())
if (TEST_CUDA and (not TEST_WITH_ROCM) and int(torch.version.cuda.split(".")[0]) >= 11):
s = torch.cuda.Stream()
with torch.cuda.stream(s):
g = torch.cuda.CUDAGraph()
self.assertFalse(torch.cuda.is_current_stream_capturing())
g.capture_begin()
self.assertTrue(torch.cuda.is_current_stream_capturing())
g.capture_end()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_capture_simple(self):
s = torch.cuda.Stream()
with torch.cuda.stream(s):
a = torch.full((1000,), 1, device="cuda")
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
g.capture_begin()
b = a
for _ in range(10):
b = b + 1
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
g.replay()
self.assertTrue(b.sum().item() == 11000.)
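# The same capture can be written with the torch.cuda.graph context manager
# (a sketch; the context manager handles the side stream and the
# capture_begin/capture_end bookkeeping, as test_graph_capture_oom below
# relies on):
#
#     g = torch.cuda.CUDAGraph()
#     with torch.cuda.graph(g):
#         b = a + 1
#     g.replay()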
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_capture_oom(self):
with self.assertRaisesRegex(RuntimeError, "out of memory"):
with torch.cuda.graph(torch.cuda.CUDAGraph()):
torch.zeros(2 ** 40, device="cuda")
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_rng_functional(self):
ops_with_kwargs = ((torch.nn.functional.dropout, {"p": 0.1}),
(torch.nn.functional.rrelu, {"training": True}),)
size = 10000
def run(op, kwargs):
a = torch.randn((size,), device="cuda", dtype=torch.float)
# Control
torch.cuda.manual_seed(5)
eager_out = a
for _ in range(6):
eager_out = op(eager_out, **kwargs)
graph_in = a.clone()
stream = torch.cuda.Stream()
stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(stream):
torch.cuda.manual_seed(5)
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
g.capture_begin()
graph_out = graph_in
for _ in range(2):
graph_out = op(graph_out, **kwargs)
g.capture_end()
torch.cuda.current_stream().wait_stream(stream)
# Runs a graphed->eager->graphed sequence of RNG ops.
# replay() plays 2 invocations of the op, so the sequence has 6
# invocations total, matching Control.
# replay() reads from graph_in and writes to graph_out.
g.replay()
out = op(graph_out, **kwargs)
out = op(out, **kwargs)
graph_in.copy_(out)
g.replay()
# If replay() updated RNG state correctly, graph_out
# should now hold data equal to eager_out.
try:
self.assertEqual(eager_out, graph_out)
except Exception as e:
raise RuntimeError("Failed on ", op) from e
# We hold references to all tensors used across streams until this sync,
# so there is no need to call record_stream on those tensors.
torch.cuda.synchronize()
for op, kwargs in ops_with_kwargs:
run(op, kwargs)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_rng_distributions(self):
size = 10000
input = torch.rand((size,), device="cuda", dtype=torch.float)
alloc = torch.empty((size,), device="cuda", dtype=torch.float)
# Torch ops to test with sample args (tuple) and kwargs (dict)
torch_with_args = (("bernoulli", (input.clone(),), {}),
# multinomial uses some uncapturable CUDA calls.
# TODO: reenable multinomial tests if/when the implementation is capturable.
# ("multinomial", (input.clone(), size, True), {}),
# ("multinomial", (input.clone(), size // 2, False), {}),
# TODO: reenable normal test, where std is a device
# tensor, when graph test failures are fixed
# ("normal", (input.clone() + 1, input.clone()), {}),
("normal", (input.clone() + 1, 1.0), {}),
("poisson", (input.clone(),), {}),
("rand", (size,), {"device": "cuda", "dtype": torch.float}),
("randint", (0, 3, (size,)), {"device": "cuda", "dtype": torch.float}),
("randn", (size,), {"device": "cuda", "dtype": torch.float}),)
# Tensor methods to test with sample args (tuple)
tensor_with_args = (("bernoulli_", (input.clone(),)),
("cauchy_", ()),
("exponential_", ()),
("geometric_", (0.3,)),
("log_normal_", ()),
("normal_", ()),
("random_", ()),
("uniform_", ()),)
def run(module, op, args, kwargs):
torch.cuda.manual_seed(5)
# Each path runs a dummy op to increment the state a bit before creating controls.
if (module == "torch"):
dummy = getattr(torch, op)(*args, **kwargs)
control1 = getattr(torch, op)(*args, **kwargs)
control2 = getattr(torch, op)(*args, **kwargs)
else:
dummy = alloc.clone()
control1 = alloc.clone()
control2 = alloc.clone()
getattr(dummy, op)(*args)
getattr(control1, op)(*args)
getattr(control2, op)(*args)
stream = torch.cuda.Stream()
stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(stream):
torch.cuda.manual_seed(5)
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
if (module == "torch"):
g.capture_begin()
t1 = getattr(torch, op)(*args, **kwargs)
t2 = getattr(torch, op)(*args, **kwargs)
g.capture_end()
else:
t1 = alloc.clone()
t2 = alloc.clone()
g.capture_begin()
getattr(t1, op)(*args)
getattr(t2, op)(*args)
g.capture_end()
torch.cuda.current_stream().wait_stream(stream)
try:
self.assertNotEqual(control1, t1)
self.assertNotEqual(control2, t2)
except Exception as e:
raise RuntimeError("Failed on " + module + "." + op) from e
# Runs a dummy op prelude, as for controls, to make sure replay()
# picks up the dummy op's state increment.
if module == "torch":
dummy = getattr(torch, op)(*args, **kwargs)
else:
dummy = alloc.clone()
getattr(dummy, op)(*args)
# Runs RNG ops that fill t1 and t2.
g.replay()
try:
self.assertEqual(control1, t1)
self.assertEqual(control2, t2)
except Exception as e:
raise RuntimeError("Failed on " + module + "." + op) from e
# We hold references to all tensors used across streams up til this sync,
# so no need to call record_stream on those tensors.
torch.cuda.synchronize()
for op_with_args in torch_with_args:
run("torch", *op_with_args)
for meth_with_args in tensor_with_args:
# Adds an empty dict for kwargs, which none of the Tensor methods use
run("Tensor", *(meth_with_args + ({},)))
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_two_successive(self):
torch.cuda.empty_cache()
size = 1000
kSmallBuffer = 2097152
def func_with_temps(t, val):
x = t.clone() + val
y = t.clone() + val
return x + y
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
g0 = torch.cuda.CUDAGraph()
g1 = torch.cuda.CUDAGraph()
a = torch.ones((size,), device="cuda")
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
for _ in range(5):
b = func_with_temps(b, 1)
g0.capture_end()
g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*g1_args)
for _ in range(5):
b = func_with_temps(b, 1)
g1.capture_end()
torch.cuda.current_stream().wait_stream(s)
# mixes unrelated eager ops with replays
c = a.clone()
for _ in range(2):
c = func_with_temps(c, 3)
g0.replay()
for _ in range(2):
c = func_with_temps(c, 3)
g1.replay()
for _ in range(2):
c = func_with_temps(c, 3)
self.assertEqual(b.sum().item(), size * 3070)
self.assertEqual(c.sum().item(), size * 442)
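            # (func_with_temps(t, v) computes 2*t + 2*v; from ones, five iterations
            # with val=1 take b to 94 after g0 and to 3070 after g1, while c's six
            # eager iterations with val=3 go 1 -> 8 -> 22 -> 50 -> 106 -> 218 -> 442.)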
if share_mem != "Don't share":
self.assertEqual(reserved_no_sharing - torch.cuda.memory_stats()["reserved_bytes.all.current"],
kSmallBuffer)
else:
reserved_no_sharing = torch.cuda.memory_stats()["reserved_bytes.all.current"]
del a, b, c, g0, g1
# Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skip("Temporarily disabled due to a graphs bug in libcuda.so, " +
"see https://github.com/pytorch/pytorch/pull/57556")
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_concurrent_replay(self):
torch.cuda.empty_cache()
size = 1000000 # largeish to help expose race conditions
def func_with_temps(t, val):
x = t.clone() + val
y = t.clone() + val
return x + y
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
g0 = torch.cuda.CUDAGraph()
g1 = torch.cuda.CUDAGraph()
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
a = torch.ones((size,), device="cuda")
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
for _ in range(5):
b = func_with_temps(b, 1)
g0.capture_end()
g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*g1_args)
c = a.clone()
for _ in range(5):
c = func_with_temps(c, 2)
g1.capture_end()
# To reproduce data corruption, I need g0 and g1's kernels to run concurrently.
# But replay() (especially cudaGraphLaunch) can incur significant CPU overhead.
# The following pattern helps align device-side execution of g0 and g1's kernels.
torch.cuda.synchronize()
with torch.cuda.stream(s0):
torch.cuda._sleep(1000000)
s1.wait_stream(s0)
g0.replay()
with torch.cuda.stream(s1):
g1.replay()
torch.cuda.current_stream().wait_stream(s0)
torch.cuda.current_stream().wait_stream(s1)
if share_mem != "Don't share":
# Confirms concurrent replays using the same mempool corrupted each other.
self.assertNotEqual(b.sum().item(), size * 94)
self.assertNotEqual(c.sum().item(), size * 156)
else:
# Confirms concurrent replays using different mempools did not corrupt each other.
self.assertEqual(b.sum().item(), size * 94)
self.assertEqual(c.sum().item(), size * 156)
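            # (From ones, five iterations of 2*t + 2 give b = 94 and five
            # iterations of 2*t + 4 give c = 156.)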
del a, b, c, g0, g1
# Tensors used across streams (a, b, c) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_three_successive(self):
torch.cuda.empty_cache()
size = 1000
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
a = torch.ones((size,), device="cuda")
g0 = torch.cuda.CUDAGraph()
g1 = torch.cuda.CUDAGraph()
g2 = torch.cuda.CUDAGraph()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
c = b + 1
d = b + 2
g0.capture_end()
args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*args)
e = c + 3
del c
g1.capture_end()
g2.capture_begin(*args)
f = d + 4
g2.capture_end()
torch.cuda.current_stream().wait_stream(s)
# Tests that replaying in capture order is valid
g0.replay()
g1.replay()
g2.replay()
self.assertEqual(e.sum().item(), size * 5)
self.assertEqual(f.sum().item(), size * 7)
# Tests that replaying as g0, g2, g1 is only valid if they don't share a pool
g0.replay()
g2.replay()
g1.replay()
# If share_mem is True, g2's capture should have reused c's memory for f. We replayed g2 then g1,
# so we expect g1's captured "e = c + 3" mistakenly filled e with "f's vals + 3".
self.assertEqual(e.sum().item(), size * (7 + 3) if share_mem != "Don't share" else size * 5)
self.assertEqual(f.sum().item(), size * 7)
del a, b, d, e, f, g0, g1, g2
# Tensors used across streams (a, e, f) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_memory_stats_and_use_result_after_destroy_graph(self):
kSmallSize = 1048576
kSmallBuffer = 2097152
kLargeBuffer = 20971520
kMinLargeAlloc = 10485760
kRoundLarge = 2097152
elem = 4
# this was annoying to write but stresses the expectations pretty rigorously
cases = ((512 // elem, 1, kSmallBuffer, kSmallBuffer, "small_pool"),
(kSmallSize // elem, 2, 2 * kSmallBuffer, kSmallBuffer, "small_pool"),
((kSmallSize + 512) // elem, 1, kLargeBuffer, kLargeBuffer, "large_pool"),
((kMinLargeAlloc - 512) // elem, 2, 2 * kLargeBuffer, kLargeBuffer, "large_pool"),
((kMinLargeAlloc + 512) // elem, 3,
3 * (kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge)),
kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge),
"large_pool"),)
stats_to_check = ("segment.",
"reserved_bytes.",
"active.",
"active_bytes.")
gc.collect()
torch.cuda.empty_cache()
s = torch.cuda.Stream()
for (numel,
delta_cudaMallocs,
delta_cudaMalloc_bytes,
delta_cudaMalloc_bytes_post_del_g,
pool_string) in cases:
if pool_string == "small_pool":
delta_active_blocks = 2 # one from "b" plus a sneaky one from CUDAGraph's one-element rng offset holder
delta_active_bytes = numel * elem + 512 # + 512 for CUDAGraph's rng offset holder
else:
delta_active_blocks = 1 # We only check the large pool, which isn't affected by rng offset holder
delta_active_bytes = numel * elem
g = torch.cuda.CUDAGraph()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
# Allocation stat estimates assume input is created on the same stream as capture_begin()
# (in other words, the same stream silo as the rng offset holder, which is not allocated from the
# capture's private pool).
a = torch.ones((numel,), device="cuda")
precapture_stats = torch.cuda.memory_stats()
g.capture_begin()
b = a.clone()
for _ in range(5):
b = b.clone() + 1
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
gc.collect()
postcapture_stats = torch.cuda.memory_stats()
expecteds = (delta_cudaMallocs,
delta_cudaMalloc_bytes,
delta_active_blocks,
delta_active_bytes)
# Double checks replay and stats before and after a call to empty_cache
for i in range(2):
for stat, expected in zip(stats_to_check, expecteds):
stat = stat + pool_string + ".current"
current = postcapture_stats[stat] - precapture_stats[stat]
self.assertEqual(current, expected, "Pre to post capture delta of " +
stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))
g.replay()
self.assertEqual(b.sum().item(), 6 * numel)
if i == 0:
torch.cuda.empty_cache()
del g
gc.collect()
torch.cuda.empty_cache()
postdel_stats = torch.cuda.memory_stats()
# Uses graph result b after graph has been deleted
self.assertEqual(b.sum().item(), 6 * numel)
# b should be the only live reference remaining from the graph's private pool
expecteds = (1, delta_cudaMalloc_bytes_post_del_g, 1, numel * elem)
for stat, expected in zip(stats_to_check, expecteds):
stat = stat + pool_string + ".current"
current = postdel_stats[stat] - precapture_stats[stat]
self.assertEqual(current, expected, "Pre capture to post graph delete delta of " +
stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))
# del a, b before the next case is essential, otherwise overwriting a and b in the next case
# can throw off its allocation/deallocation counts.
del a, b
# Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_record_stream(self):
# Makes sure graph capture defers attempting to reclaim allocations used across streams. See
# "Q. Why skip process_events if a capture might be underway?" in c10/cuda/CUDACachingAllocator.cpp
torch.cuda.empty_cache()
potential_problem = torch.zeros((3,), device="cuda")
a = torch.zeros((3,), device="cuda")
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
s2 = torch.cuda.Stream()
g = torch.cuda.CUDAGraph()
torch.cuda.synchronize()
with torch.cuda.stream(s0):
potential_problem.record_stream(s0)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
potential_problem.fill_(1.)
del potential_problem
with torch.cuda.stream(s1):
g.capture_begin()
# potential_problem's allocation should still be outstanding. if DeviceCachingAllocator::malloc
# mistakenly calls process_events, it will trigger cudaEventQueries on potential_problem's end-of-life
# event, which will cause the capture to error.
b = a.clone()
# Let's also see what happens if we record_stream on a tensor during capture.
s2.wait_stream(s1)
with torch.cuda.stream(s2):
b.fill_(1.)
b.record_stream(s2) # dummy record_stream
del b
s1.wait_stream(s2)
g.capture_end()
torch.cuda.synchronize()
        # A dummy allocation triggers process_events, which should successfully process b's end-of-life event.
c = torch.zeros((3,), device="cuda")
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
# If this test is the first in the process to try cudnn rnns with dropout, it'll initialize
# DropoutState's long-lived internal buffer. Calling code perceives this (correct) behavior
# as a memory leak unless we skip the leak check.
@skipCUDAMemoryLeakCheckIf(True)
def test_graph_cudnn_dropout(self):
# Tests the interaction of cuda graph capture with DropoutState's syncs in ATen/native/cudnn/RNN.cpp.
# In particular, if user runs a sequence of captured and noncaptured cudnn rnns, DropoutState should
# avoid syncing noncapturing streams with captured events or vice versa.
torch.cuda.empty_cache()
model = torch.nn.LSTM(512, 512, 2, dropout=0.5).cuda()
x = torch.ones(100, 192, 512, device="cuda")
y = model(x)
g = torch.cuda.CUDAGraph()
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g.capture_begin()
y = model(x)
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
y = model(x)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_grad_scaling(self):
torch.cuda.empty_cache()
scaler = torch.cuda.amp.GradScaler(init_scale=4.)
g = torch.cuda.CUDAGraph()
s = torch.cuda.Stream()
weight = torch.ones((100,), device="cuda", requires_grad=True)
opt = torch.optim.SGD([weight], lr=0.1)
static_input = torch.ones_like(weight)
static_grad = torch.ones_like(weight)
# warmup
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
loss = (weight.half() * static_input).sum()
scaler.scale(loss).backward()
torch.cuda.current_stream().wait_stream(s)
opt.zero_grad(set_to_none=True)
# capture
with torch.cuda.graph(g):
loss = (weight.half() * static_input).sum()
scaler.scale(loss).backward()
input_vals = [5, 20000, 5, 40000]
# If the scale gets updated properly, these are the scale, growth tracker,
# and grad values we expect.
expected_scales = [4, 2, 2, 1]
expected_growth_trackers = [1, 0, 1, 0]
expected_grad_vals = [5 * 4, float("inf"), 5 * 2, float("inf")]
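        # (20000 * 4 and 40000 * 2 both exceed the fp16 maximum of 65504, so those
        # iterations produce inf grads, the step is skipped, and the scale is
        # halved; the in-range iterations yield grad = input * scale.)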
for data, scale, growth_tracker, grad_val in zip(input_vals,
expected_scales,
expected_growth_trackers,
expected_grad_vals):
static_input.fill_(data)
g.replay()
self.assertEqual(weight.grad, torch.full_like(weight.grad, grad_val))
scaler.step(opt)
scaler.update()
self.assertEqual(scaler._scale, scale)
self.assertEqual(scaler._growth_tracker, growth_tracker)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_make_graphed_callables(self):
torch.manual_seed(5)
torch.cuda.manual_seed(5)
N, D_in, H, D_out = 640, 4096, 2048, 1024
models = []
for _ in range(2):
model_section1 = torch.nn.Sequential(torch.nn.Linear(D_in, H),
torch.nn.Dropout(p=0.1)).cuda()
model_section2 = torch.nn.Sequential(torch.nn.Linear(H, D_out),
torch.nn.Dropout(p=0.2)).cuda()
models.append(torch.nn.Sequential(model_section1, model_section2))
model_graphed = models[0]
model_control = models[1]
model_graphed.load_state_dict(model_control.state_dict())
opt_graphed = torch.optim.SGD(model_graphed.parameters(), lr=0.1)
opt_control = torch.optim.SGD(model_control.parameters(), lr=0.1)
x = torch.randn(N, D_in, device='cuda')
h = torch.randn(N, H, device='cuda', requires_grad=True)
y_pred = torch.randn(N, D_out, device='cuda', requires_grad=True)
y = torch.randn(N, D_out, device='cuda')
loss_fn_control = torch.nn.functional.mse_loss
relu_control = torch.nn.functional.relu
# This is a good stress test. It graphs four callables: two Modules and two python functions.
model_graphed[0], model_graphed[1], relu_graphed, loss_fn_graphed = \
torch.cuda.make_graphed_callables((model_graphed[0], model_graphed[1], relu_control, loss_fn_control),
((x,), (h,), (y_pred,), (y_pred, y)))
real_inputs = [torch.rand_like(x) for _ in range(10)]
real_targets = [torch.rand_like(y) for _ in range(10)]
for m, opt, relu, loss_fn in zip((model_graphed, model_control),
(opt_graphed, opt_control),
(relu_graphed, relu_control),
(loss_fn_graphed, loss_fn_control)):
            # Resets RNG states before iterations for graphed and ungraphed models,
# so dropout math should be bitwise identical for both.
torch.manual_seed(5)
torch.cuda.manual_seed(5)
for data, target in zip(real_inputs, real_targets):
opt.zero_grad(set_to_none=True)
y_pred = m(data)
y_pred = relu(y_pred)
loss = loss_fn(y_pred, target)
loss.backward()
opt.step()
for p, pc in zip(model_graphed.parameters(), model_control.parameters()):
self.assertEqual(p, pc)
# We graphed the models in training mode. Eval should still run ungraphed.
model_graphed.eval()
model_control.eval()
self.assertEqual(model_graphed(real_inputs[0]), model_control(real_inputs[0]))
def test_batch_norm_gather_stats(self):
input = torch.randn(1, 3, 3, 3, device='cuda')
mean, invstd = torch.batch_norm_gather_stats(
input, mean=torch.ones(2, 3, device='cuda'), invstd=torch.ones(2, 3, device='cuda'),
            running_mean=None, running_var=None, momentum=.1, eps=1e-5, count=2
)
self.assertEqual(mean, torch.ones(3, device='cuda'))
self.assertEqual(invstd, torch.ones(3, device='cuda'))
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_cuda_device_memory_allocated(self):
from torch.cuda import memory_allocated
device_count = torch.cuda.device_count()
current_alloc = [memory_allocated(idx) for idx in range(device_count)]
x = torch.ones(10, device="cuda:0")
self.assertTrue(memory_allocated(0) > current_alloc[0])
self.assertTrue(all(memory_allocated(torch.cuda.device(idx)) == current_alloc[idx] for idx in range(1, device_count)))
def test_matmul_memory_use(self):
def get_max_used():
torch.cuda.synchronize()
val = torch.cuda.max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
return val
a = torch.rand(1, 32, 32, device="cuda")
b = torch.rand(24, 32, 1, device="cuda")
get_max_used()
torch.matmul(a, b)
matmul_mem = get_max_used()
a = a.expand(24, 32, 32)
torch.matmul(a, b)
matmul_expand_mem = get_max_used()
torch.bmm(a, b)
bmm_mem = get_max_used()
self.assertEqual(matmul_expand_mem, matmul_mem)
self.assertEqual(bmm_mem, matmul_mem)
@unittest.skipIf(not TEST_WITH_ROCM, "ROCm-only test")
def test_rocm_backward_pass_guard(self):
# The test exercises a ROCm-specific feature.
class MyFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, tensor, constant):
self.assertFalse(torch._C._rocm_is_backward_pass())
ctx.constant = constant
return tensor * constant
@staticmethod
def backward(ctx, grad_output):
self.assertTrue(torch._C._rocm_is_backward_pass())
return grad_output * ctx.constant, None
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = torch.nn.Parameter(torch.randn(()))
def forward(self, x):
return MyFunction.apply(x, self.a)
model = MyModule()
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-6)
x = torch.randn(5, 5)
result = model(x)
loss = criterion(result, x)
optimizer.zero_grad()
loss.backward()
optimizer.step()
class TestCudaComm(TestCase):
def _test_broadcast(self, input):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
# test regular
results = comm.broadcast(input, (0, 1))
for i, t in enumerate(results):
self.assertEqual(t.get_device(), i)
self.assertEqual(t, input)
if input.is_cuda and input.get_device() == i: # test not copying on same device
self.assertEqual(t.data_ptr(), input.data_ptr())
# test out=
for inplace in [True, False]:
if inplace:
outputs = [torch.empty_like(input, device=0), torch.empty_like(input, device=1)]
else:
outputs = [input.cuda(0), torch.empty_like(input, device=1)]
results = comm.broadcast(input, out=outputs)
for r, o in zip(results, outputs):
self.assertIs(r, o)
for i, t in enumerate(results):
self.assertEqual(t.get_device(), i)
self.assertEqual(t, input)
# test error msg
with self.assertRaisesRegex(RuntimeError, r"Exactly one of 'devices' and 'out'"):
comm.broadcast(input, (0, 1), out=outputs)
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to be CUDA tensors, but output tensor at index 1"):
comm.broadcast(input, out=[input.cuda(0), input.cpu()])
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to have same shape as the source .+ at index 1"):
comm.broadcast(input, out=[input.cuda(0), input.cuda(1).unsqueeze(0)])
def test_broadcast_cpu(self):
self._test_broadcast(torch.randn(5, 5))
def test_broadcast_gpu(self):
self._test_broadcast(torch.randn(5, 5).cuda())
def _test_broadcast_coalesced(self, tensors, buffer_size):
b_tensors = [comm.broadcast(t, (0, 1)) for t in tensors]
for (_, bt), t in zip(b_tensors, tensors):
self.assertEqual(bt.get_device(), 1)
self.assertEqual(bt, t)
self.assertIsInstance(bt, type(t))
bc_tensors = comm.broadcast_coalesced(tensors, (0, 1), buffer_size=buffer_size)
bc_tensors_t = list(zip(*bc_tensors))
self.assertEqual(b_tensors, bc_tensors_t)
for (_, bt), (_, bct) in zip(b_tensors, bc_tensors_t):
self.assertEqual(bt.get_device(), bct.get_device())
self.assertIsInstance(bct, type(bt))
# check that tensors on device[0] are returned as-is
for out_tensors in (b_tensors, bc_tensors_t):
for inp_t, (out_t, _) in zip(tensors, out_tensors):
self.assertIs(inp_t, out_t)
# check that the tensors not on device[0] have different version counters
# NOTE [ Version Counter in comm.*_coalesced ]
versions = [t._version for _, t in bc_tensors_t]
for old_version, (_, t) in zip(versions, bc_tensors_t):
self.assertEqual(t._version, old_version)
t.zero_()
self.assertEqual(t._version, old_version + 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
# Note: fails sometimes on the CI, passes on dual gfx906
def test_broadcast_coalesced(self):
numel = 5
num_bytes = numel * 8
tensors = [
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_broadcast_coalesced_dense_only(self):
numel = 5
num_bytes = numel * 8
tensors = [
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_broadcast_coalesced_empty_tensors(self):
tensors = [
torch.tensor([]).byte().cuda(),
torch.randn(5).cuda(),
torch.randn(5).double().cuda()
]
self._test_broadcast_coalesced(tensors, 256)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5)
x_cuda = x.cuda(0)
y_cuda = y.cuda(1)
result = comm.reduce_add((x_cuda, y_cuda))
self.assertEqual(result.get_device(), 0)
self.assertEqual(result.cpu(), x + y)
def _test_reduce_add_coalesced(self, tensors, buffer_size):
dup_tensors = [tensors, [t.cuda(1) for t in tensors]]
r_tensors = [comm.reduce_add(t) for t in zip(*dup_tensors)]
for r, t in zip(r_tensors, tensors):
self.assertEqualTypeString(r, t)
self.assertEqual(r.coalesce() if r.is_sparse else r, t * 2)
rc_tensors = comm.reduce_add_coalesced(dup_tensors, buffer_size=buffer_size)
self.assertEqual(r_tensors, rc_tensors)
for r, rc in zip(r_tensors, rc_tensors):
self.assertEqualTypeString(rc, r)
# Since we have both cuda:0 and cuda:1 inputs, the outputs must be new.
# We can check that they have different version counters.
# NOTE [ Version Counter in comm.*_coalesced ]
versions = [t._version for t in rc_tensors]
for old_version, t in zip(versions, rc_tensors):
self.assertEqual(t._version, old_version)
t.zero_()
self.assertEqual(t._version, old_version + 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add_coalesced(self):
numel = 5
num_bytes = numel * 8
tensors = [
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add_coalesced_dense_only(self):
numel = 5
num_bytes = numel * 8
tensors = [
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
def _test_scatter(self, input, chunk_sizes=None, dim=0):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
if chunk_sizes is None:
ref_chunk_sizes = tuple(repeat(input.size(dim) // 2, 2))
else:
ref_chunk_sizes = chunk_sizes
# test regular
result = comm.scatter(input, (0, 1), chunk_sizes, dim)
self.assertEqual(len(result), 2)
chunk_start = 0
for i, r in enumerate(result):
chunk_end = chunk_start + ref_chunk_sizes[i]
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(chunk_start, chunk_end)
self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
chunk_start = chunk_end
if r.device == input.device:
self.assertEqual(r.data_ptr(), input.data_ptr()) # for target @ same device, a view should be returned
# test out
out = [torch.empty_like(t) for t in result]
result = comm.scatter(input, dim=dim, out=out)
self.assertEqual(len(result), 2)
chunk_start = 0
for i, r in enumerate(result):
self.assertIs(r, out[i])
chunk_end = chunk_start + ref_chunk_sizes[i]
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(chunk_start, chunk_end)
self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
chunk_start = chunk_end
# test error msg
if chunk_sizes is not None:
with self.assertRaisesRegex(RuntimeError, r"Expected devices and chunk_sizes to be of same length"):
comm.scatter(input, [0 for _ in range(len(chunk_sizes) + 1)], dim=dim, chunk_sizes=chunk_sizes)
with self.assertRaisesRegex(RuntimeError, r"'devices' must not be specified"):
comm.scatter(input, (0, 1), dim=dim, out=out)
with self.assertRaisesRegex(RuntimeError, r"Expected at least one device to scatter to"):
comm.scatter(input, (), dim=dim)
with self.assertRaisesRegex(RuntimeError, r"Expected at least one output tensor to scatter to"):
comm.scatter(input, dim=dim, out=[])
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to be CUDA tensors, but output tensor at index 0"):
comm.scatter(input, dim=dim, out=([out[0].cpu()] + out[1:]))
with self.assertRaisesRegex(RuntimeError, r"Output tensor at index 0 has incorrect shape"):
comm.scatter(input, dim=dim, out=([out[0].unsqueeze(0)] + out[1:]))
with self.assertRaisesRegex(RuntimeError, r"Total size for output tensors along scatter dim \d+ does not match"):
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(1, None)
comm.scatter(input, dim=dim, out=([out[0][tuple(index)]] + out[1:]))
def test_scatter_cpu(self):
self._test_scatter(torch.randn(4, 4), dim=0)
def test_scatter_cpu_dim(self):
self._test_scatter(torch.randn(4, 4), dim=1)
def test_scatter_cpu_neg_dim(self):
self._test_scatter(torch.randn(4, 4), dim=-2)
def test_scatter_cpu_sizes(self):
self._test_scatter(torch.randn(6, 4), chunk_sizes=(2, 4))
def test_scatter_gpu(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=0)
def test_scatter_gpu_dim(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=1)
def test_scatter_gpu_neg_dim(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=-2)
def test_scatter_gpu_sizes(self):
self._test_scatter(torch.randn(6, 4).cuda(), chunk_sizes=(2, 4))
def _test_gather(self, dim):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
x = torch.randn(2, 5, device=0)
y = torch.randn(2, 5, device=1)
expected_size = list(x.size())
expected_size[dim] += y.size(dim)
expected_size = torch.Size(expected_size)
destinations = [None, torch.device('cuda:0'), torch.device('cpu')]
if torch.cuda.device_count() > 2:
destinations.append(torch.device('cuda:2'))
with torch.cuda.device(1):
for destination in destinations:
if destination is None:
expected_device = torch.device('cuda', torch.cuda.current_device())
else:
expected_device = destination
for use_out in [True, False]:
if use_out:
out = torch.empty(expected_size, device=expected_device)
result = comm.gather((x, y), dim, out=out)
self.assertIs(out, result)
else:
result = comm.gather((x, y), dim, destination=destination)
self.assertEqual(result.device, expected_device)
self.assertEqual(result.size(), expected_size)
index = [slice(None, None), slice(None, None)]
index[dim] = slice(0, x.size(dim))
self.assertEqual(result[tuple(index)], x)
index[dim] = slice(x.size(dim), x.size(dim) + y.size(dim))
self.assertEqual(result[tuple(index)], y)
# test error msg
with self.assertRaisesRegex(RuntimeError, r"'destination' must not be specified"):
comm.gather((x, y), dim, destination='cpu', out=torch.empty(expected_size, device='cpu'))
with self.assertRaisesRegex(RuntimeError, r"Expected at least one tensor to gather from"):
comm.gather(())
with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to be CUDA tensors, "):
comm.gather((x.cpu(), y))
with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to have the same number of dimensions"):
comm.gather((x, y.unsqueeze(0)))
with self.assertRaisesRegex(RuntimeError, r"Input tensor at index 1 has invalid shape"):
if dim in [0, -2]:
comm.gather((x, y[:, 1:]), dim=dim)
elif dim in [1, -1]:
comm.gather((x, y[1:, :]), dim=dim)
def test_gather(self):
self._test_gather(0)
def test_gather_dim(self):
self._test_gather(1)
def test_gather_neg_dim(self):
self._test_gather(-1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_format_scatter_gather(self):
nhwc = torch.randn((10, 3, 32, 32), device='cpu').contiguous(memory_format=torch.channels_last)
results = torch.cuda.comm.scatter(nhwc, (0, 1), None, 0)
for result in results:
self.assertFalse(result.is_contiguous())
self.assertTrue(result.is_contiguous(memory_format=torch.channels_last))
gathered = torch.cuda.comm.gather(results)
self.assertTrue(gathered.is_contiguous(memory_format=torch.channels_last))
def test_matmul_device_mismatch(self):
cpu = torch.rand((10, 10))
cuda = cpu.cuda()
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
cpu @ cuda
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
cuda @ cpu
for s, m1, m2 in product((cpu, cuda), repeat=3):
if s.device == m1.device == m2.device:
torch.addmm(s, m1, m2)
else:
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
torch.addmm(s, m1, m2)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_scatter_namedtuple(self):
# tests ability to scatter namedtuples and retrieve a list where each
# element is of the expected namedtuple type.
fields = ("a", "b")
TestNamedTupleInput_0 = collections.namedtuple("NamedTuple", fields)
num_gpus = torch.cuda.device_count()
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=0)
a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
inp = TestNamedTupleInput_0(a, b)
target_gpus = [torch.device(i) for i in range(num_gpus)]
scatter_out = scatter_gather.scatter(inp, target_gpus)
for i, x in enumerate(scatter_out):
self.assertTrue(isinstance(x, type(inp)))
self.assertEqual(x._fields, fields)
expected_a = a_tensors_for_gpu[i]
expected_b = b_tensors_for_gpu[i]
self.assertEqual(expected_a, x.a)
self.assertEqual(expected_b, x.b)
class TestNamedTupleInput_1(NamedTuple):
a: torch.tensor
b: torch.tensor
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=0)
a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
inp = TestNamedTupleInput_1(a, b)
scatter_out = scatter_gather.scatter(inp, target_gpus)
for i, x in enumerate(scatter_out):
self.assertTrue(isinstance(x, type(inp)))
self.assertEqual(x._fields, fields)
expected_a = a_tensors_for_gpu[i]
expected_b = b_tensors_for_gpu[i]
self.assertEqual(expected_a, x.a)
self.assertEqual(expected_b, x.b)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_gather_namedtuple(self):
# tests ability to gather a list of namedtuples and return a namedtuple where each
# element is of the expected tensor type.
fields = ['a', 'b']
TestNamedTupleInput_0 = collections.namedtuple('NamedTuple', fields)
num_gpus = torch.cuda.device_count()
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=1)
out1 = TestNamedTupleInput_0(a, b)
a = torch.rand(num_gpus * 2, device=1)
b = torch.rand(num_gpus * 2, device=0)
out2 = TestNamedTupleInput_0(a, b)
outputs = [out1, out2]
out = scatter_gather.gather(outputs, 'cpu') # test on CPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1]))) # x must be a tensor
cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
self.assertTrue(torch.equal(x, cat))
out = scatter_gather.gather(outputs, 0) # test on GPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
self.assertTrue(torch.equal(x, cat))
class TestNamedTupleInput_1(NamedTuple):
a: torch.tensor
b: torch.tensor
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=1)
out1 = TestNamedTupleInput_1(a, b)
a = torch.rand(num_gpus * 2, device=1)
b = torch.rand(num_gpus * 2, device=0)
out2 = TestNamedTupleInput_1(a, b)
outputs = [out1, out2]
out = scatter_gather.gather(outputs, 0) # test on GPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
self.assertTrue(torch.equal(x, cat))
out = scatter_gather.gather(outputs, 'cpu') # test on CPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
self.assertTrue(torch.equal(x, cat))
if __name__ == '__main__':
run_tests()
| 44.132119
| 126
| 0.594851
|
4132e126f06ee3e98fca3c32a13b83ac619d79c4
| 3,759
|
py
|
Python
|
rbf.py
|
antoinebrl/practice-ML
|
b2c87987871db39255f87732e4ec4dee7179c9b7
|
[
"MIT"
] | null | null | null |
rbf.py
|
antoinebrl/practice-ML
|
b2c87987871db39255f87732e4ec4dee7179c9b7
|
[
"MIT"
] | null | null | null |
rbf.py
|
antoinebrl/practice-ML
|
b2c87987871db39255f87732e4ec4dee7179c9b7
|
[
"MIT"
] | null | null | null |
# Author : Antoine Broyelle
# Licence : MIT
# inspired by : KTH - DD2432 : Artificial Neural Networks and Other Learning Systems
# https://www.kth.se/student/kurser/kurs/DD2432?l=en
import numpy as np
from kmeans import Kmeans
from pcn import PCN
from utils.distances import euclidianDist
class RBF:
'''Radial Basis Function Network. Can be used for classification or function approximation'''
def __init__(self, inputs, targets, n=1, sigma=0, distance=euclidianDist,
weights=None, usage='class', normalization=False):
'''
:param inputs: set of data points as row vectors
:param targets: set of targets as row vectors
:param n: (int) number of weights.
:param sigma: (float) spread of receptive fields
:param distance: (function) compute metric between points
:param weights: set of weights. If None, weights are generated with K-means algorithm.
Otherwise provided weights are used no matter the value of n.
:param usage: (string) Should be equal to 'class' for classification and 'fctapprox' for
function approximation. Otherwise raise an error.
:param normalization: (bool) If true, perform a normalization of the hidden layer.
'''
        if usage != 'class' and usage != 'fctapprox':
raise Exception('[RBF][__init__] the usage is unrecognized. Should be equal to '
'"class" for classification and "fctapprox" for function approximation')
self.targets = targets
self.inputs = inputs
self.dist = distance
self.n = n
self.weights = weights
self.usage = usage
self.normalization = normalization
if sigma == 0:
self.sigma = (inputs.max(axis=0)-inputs.min(axis=0)).max() / np.sqrt(2*n)
else:
self.sigma = sigma
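        # With sigma unspecified, the spread defaults to the widest input range
        # divided by sqrt(2 * n), a common heuristic for sizing n receptive
        # fields so that they overlap rather than collapsing onto single points.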
def fieldActivation(self, inputs, weights, sigma, dist):
hidden = dist(inputs, weights)
hidden = np.exp(- hidden / sigma)
return hidden
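    # fieldActivation returns exp(-d / sigma): 1 at a center, decaying with the
    # distance d. A textbook Gaussian RBF uses exp(-d^2 / (2 * sigma^2)); whether
    # d is already squared here depends on what `distance` returns.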
def train(self, nbIte=100):
if self.weights is None:
km = Kmeans(self.inputs, k=self.n, distance=self.dist)
km.train(nbIte=1000)
self.weights = km.centers
hidden = self.fieldActivation(self.inputs, self.weights, self.sigma, self.dist)
if self.normalization:
hidden = hidden / np.sum(hidden, axis=1)[:, np.newaxis]
        if self.usage == 'class':
self.pcn = PCN(inputs=hidden, targets=self.targets, delta=True)
return self.pcn.train(nbIte=nbIte)
        else:  # linear regression
self.weights2 = np.linalg.inv(np.dot(hidden.T, hidden))
self.weights2 = np.dot(self.weights2, np.dot(hidden.T, self.targets))
return np.dot(hidden, self.weights2)
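    # The regression branch solves ordinary least squares via the normal
    # equations, weights2 = (H^T H)^-1 H^T targets, with H the hidden activation
    # matrix. np.linalg.lstsq would likely be more robust when H^T H is
    # ill-conditioned, but the explicit inverse matches the closed-form formula.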
def predict(self, data):
h = self.fieldActivation(data, self.weights, self.sigma, self.dist)
        if self.usage == 'class':
return self.pcn.predict(h)
else:
return np.dot(h, self.weights2)
if __name__ == "__main__":
# Classification
inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
XORtargets = np.array([[0], [1], [1], [0]])
rbf = RBF(inputs=inputs, targets=XORtargets, n=4)
    print(rbf.train(nbIte=300))
# Function approximation
import matplotlib.pyplot as plt
x = np.linspace(start=0, stop=2*np.pi, num=63)
y = np.sin(x)
w = np.linspace(start=0, stop=2 * np.pi, num=8)
x = x[:, np.newaxis]
y = y[:, np.newaxis]
w = w[:, np.newaxis]
rbf = RBF(inputs=x, targets=y, usage='fctapprox', weights=w, normalization=True)
out = rbf.train()
plt.plot(x,y, 'r')
plt.plot(x,out, 'b')
plt.show()
| 37.217822
| 100
| 0.617717
|
1dcf333b31621bda63bfd85eff18831cf4f003de
| 12,471
|
py
|
Python
|
Assignment_01_Linear_Regression/linear_regression_pytorch.py
|
luwis93choi/ML2020_Class
|
eb6e0a5b69a68b5c45535cf9738ac697bd722557
|
[
"MIT"
] | null | null | null |
Assignment_01_Linear_Regression/linear_regression_pytorch.py
|
luwis93choi/ML2020_Class
|
eb6e0a5b69a68b5c45535cf9738ac697bd722557
|
[
"MIT"
] | null | null | null |
Assignment_01_Linear_Regression/linear_regression_pytorch.py
|
luwis93choi/ML2020_Class
|
eb6e0a5b69a68b5c45535cf9738ac697bd722557
|
[
"MIT"
] | null | null | null |
# Reference 01 (Anaconda Installation) : https://docs.anaconda.com/anaconda/install/linux/
# Reference 02 (Anaconda Package Control) : https://niceman.tistory.com/86
# Reference 03 (Anaconda Pytorch Setup) : https://pytorch.org/get-started/locally/
# Reference 04 (Pytorch Linear Regression) : https://greeksharifa.github.io/pytorch/2018/11/02/pytorch-usage-02-Linear-Regression-Model/
# Reference 05 (Google Machine Learning Crash Course - Linear Regression) : https://developers.google.com/machine-learning/crash-course/descending-into-ml/video-lecture
# Reference 06 : Mathematics for Artificial Intelligence (Akihiko Ishikawa) - Chapter 5: Linear Regression
# Reference 07 (Linear Regression From Scratch without any Library) : https://www.kaggle.com/siddhrath/linear-regression-from-scratch-without-any-library
import pandas as pd # pandas (Python Data Analysis Library) - used to read and analyze the dataset
import torch # torch (PyTorch) - machine learning and neural network library
from torch import nn # PyTorch's neural network module
import matplotlib.pyplot as plt # matplotlib - library for drawing and displaying graphs
print("--- ML2020 Assignment 01 : Linear Regression ---")
# Read the dataset stored as a csv file using pandas
data = pd.read_csv('./train_data.csv')
# Hyperparameters - variables that affect training and inference as a whole
learning_rate = 0.01 # how much the weight and bias change at each step toward the loss minimum / the step size of the search for the loss-function minimum
train_step = 3000 # total number of training iterations
# Linear regression using PyTorch
def linear_regression_Pytorch(data, _lr, train_step):
    x = torch.from_numpy(data['x'].values).unsqueeze(dim=1).float() # convert the x data read from csv from a numpy array into a tensor
    y = torch.from_numpy(data['y'].values).unsqueeze(dim=1).float() # convert the y data read from csv from a numpy array into a tensor
    model = nn.Linear(in_features=1, out_features=1, bias=True) # declare the linear regression model
    # - in_features : number of inputs to the model (one x value; the data fed into the model, not the weights)
    # - out_features : number of outputs of the model (one y value; the data the model produces, not the weights)
    # *** The product of training is the ML/NN model: it takes input data and is shaped to produce the target output ***
    criterion = nn.MSELoss() # use MSE (Mean Squared Error) as the loss function
    optimizer = torch.optim.Adam(params=model.parameters(), lr=_lr) # use the Adam optimizer to search for the loss minimum; the learning rate sets the step size of that search
    loss_list = [] # list of loss values, kept so the loss curve can be plotted later
    fig, axes = plt.subplots(1, 2) # plot the regression result as two side-by-side (1x2) graphs
    # repeat training over the whole dataset train_step times
for step in range(train_step):
        prediction = model(x) # compute the output for input x with the model trained so far
        loss = criterion(input=prediction, target=y) # measure, via the MSE loss, how far the current model's output is from the target y
        loss_list.append(loss.data.item()) # record the current loss
        optimizer.zero_grad() # reset grads to zero; PyTorch accumulates parameter gradients, so they must be cleared before recomputing
        loss.backward() # backpropagation: compute the gradients
        optimizer.step() # update the parameters (weight, bias) from the computed gradients, scaled by the learning rate
        # on the final training step, save the graph of the training result
        if step == train_step-1:
            plt.suptitle('Linear Regression using PyTorch') # main title
            # the left graph draws the line given by the learned weight and bias over the dataset scatter plot
            axes[0].set_title('loss={:.4}, w={:.4}, b={:.4}'.format(loss.data.item(), model.weight.item(), model.bias.item())) # show the current model's loss, weight, and bias in the left graph's title
            axes[0].set_xlim(0, 11) # x-axis range of the graph
            axes[0].set_ylim(0, 8) # y-axis range of the graph
            axes[0].scatter(x.data.numpy(), y.data.numpy()) # scatter plot of the dataset
            axes[0].plot(x.data.numpy(), prediction.data.numpy(), 'b--') # draw the fitted line (current weight, bias) over the scatter plot
            # the right graph draws the history of the loss during training
            axes[1].set_title('MSE Loss Function') # right graph title
            axes[1].plot(range(len(loss_list)), loss_list, 'b') # plot the loss history so far on the right graph
            plt.savefig('./linear_regression_result_with_PyTorch.png') # save the final result graph
plt.draw()
        # every 20 steps, record the loss on the graph and redraw the fitted line (weight, bias) over the dataset scatter plot
elif step % 20 == 0:
            print('MSE Loss : ' + str(loss.data.item())) # print the current loss to the CLI (Command Line Interface) console
            plt.suptitle('Linear Regression using PyTorch')
            # the left graph draws the line given by the learned weight and bias over the dataset scatter plot
            axes[0].set_title('loss={:.4}, w={:.4}, b={:.4}'.format(loss.data.item(), model.weight.item(), model.bias.item())) # show the current model's loss, weight, and bias in the left graph's title
            axes[0].set_xlim(0, 11) # x-axis range of the graph
            axes[0].set_ylim(0, 8) # y-axis range of the graph
            axes[0].scatter(x.data.numpy(), y.data.numpy()) # scatter plot of the dataset
            axes[0].plot(x.data.numpy(), prediction.data.numpy(), 'b--') # draw the fitted line (current weight, bias) over the scatter plot
            # the right graph draws the history of the loss during training
            axes[1].set_title('MSE Loss Function') # right graph title
            axes[1].plot(range(len(loss_list)), loss_list, 'b') # plot the loss history so far on the right graph
            plt.draw() # render the left/right graphs
            plt.pause(0.01) # small delay for drawing
            axes[0].clear() # clear the left graph before the next update
# Given a weight and bias, compute the output for input data x
def predict(_x, _w, _b):
    return _w * _x + _b # linear function: y = w * x + b
# Compute the MSE loss between the outputs for x (given a weight and bias) and the targets y
def MSE_loss_function(_x, _y, _w, _b):
    data_num = len(_x) # total number of data points
    total_error = 0.0 # accumulated squared error
    # accumulate the squared error over all of the x and y data
    for i in range(data_num):
        total_error += (_y[i] - (_w * _x[i] + _b))**2 # with the weight and bias learned so far,
        # take the error between the output for the i-th x (_w * _x[i] + _b) and the i-th y,
        # square it, and add it to the accumulated squared error
    return total_error / data_num # average the accumulated error to produce the MSE
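# i.e. MSE(w, b) = (1 / N) * sum_i (y_i - (w * x_i + b))^2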
# Take one gradient-descent step toward the minimum of the loss
def update_gradient_descent(_x, _y, _w, _b, learning_rate):
    # The MSE loss is a quadratic function of the weight and bias being learned.
    # The loss is minimal where its rate of change (derivative) is zero.
    # Since the MSE loss is controlled by two variables, we take partial derivatives with respect to the weight and the bias.
    #
    # Loss Gradient = (d Loss/d weight, d Loss/d bias)
    weight_derivative = 0 # partial derivative with respect to the weight
    bias_derivative = 0 # partial derivative with respect to the bias
    data_num = len(_x) # total number of data points
    # gradient descent sums the loss gradient over the whole training set to decide the step toward the minimum
    for i in range(data_num):
        weight_derivative += -2 * _x[i] * (_y[i] - (_x[i] * _w + _b)) # plug the current weight, bias, x, and y into the MSE loss differentiated w.r.t. the weight, and accumulate
        bias_derivative += -2 * (_y[i] - (_x[i] * _w + _b)) # plug the current weight, bias, x, and y into the MSE loss differentiated w.r.t. the bias, and accumulate
    _w -= (weight_derivative / data_num) * learning_rate # the updated weight subtracts the averaged accumulated derivative, scaled by the learning rate
    # (negative gradient descent, stepping toward the minimum)
    # the learning rate controls how far the weight moves in one step
    _b -= (bias_derivative / data_num) * learning_rate # the updated bias subtracts the averaged accumulated derivative, scaled by the learning rate
    # (negative gradient descent, stepping toward the minimum)
    # the learning rate controls how far the bias moves in one step
    return _w, _b # each training step is one gradient-descent step toward the minimum; return the updated weight and bias
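# One worked step, for concreteness: with x = [1, 2], y = [2, 4], w = b = 0 and
# learning_rate = 0.1, the accumulated derivatives come out to
#   dL/dw = -2*1*(2-0) + -2*2*(4-0) = -20,   dL/db = -2*(2-0) + -2*(4-0) = -12
# so w moves to 0 - (-20/2)*0.1 = 1.0 and b to 0 - (-12/2)*0.1 = 0.6.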
# Linear regression without PyTorch, built from the MSE loss formula and the gradient-descent partial derivatives
def linear_regression_NoPytorch(data, init_w, init_b, learning_rate, train_step):
    x = data['x'].values # read the input data under the 'x' label
    y = data['y'].values # read the target data under the 'y' label
    loss_list = [] # list of loss values, kept so the loss curve can be plotted later
    fig, axes = plt.subplots(1, 2) # plot the regression result as two side-by-side (1x2) graphs
    # repeat training over the whole dataset train_step times
for step in range(train_step):
        # at each step, compute the loss from the current model's weight and bias,
        # and update them via negative gradient descent in the direction that lowers the loss
        init_w, init_b = update_gradient_descent(x, y, init_w, init_b, learning_rate)
        loss = MSE_loss_function(x, y, init_w, init_b) # measure, via the MSE loss, how far the current model's output is from the target y
        loss_list.append(loss) # record the current loss
        # on the final training step, save the graph of the training result
if step == train_step-1:
            plt.suptitle('Linear Regression without PyTorch') # main title
            # the left graph draws the line given by the learned weight and bias over the dataset scatter plot
            axes[0].set_title('loss={:.4}, w={:.4}, b={:.4}'.format(loss, init_w, init_b)) # show the current model's loss, weight, and bias in the left graph's title
            axes[0].set_xlim(0, 11) # x-axis range of the graph
            axes[0].set_ylim(0, 8) # y-axis range of the graph
            axes[0].scatter(x, y) # scatter plot of the dataset
            axes[0].plot(x, predict(x, init_w, init_b), 'b--') # draw the fitted line (current weight, bias) over the scatter plot
            # the right graph draws the history of the loss during training
            axes[1].set_title('MSE Loss Function') # right graph title
            axes[1].plot(range(len(loss_list)), loss_list, 'b') # plot the loss history so far on the right graph
            plt.savefig('./linear_regression_result_without_PyTorch.png') # save the final result graph
plt.show()
        # every 20 steps, record the loss on the graph and redraw the fitted line (weight, bias) over the dataset scatter plot
        elif step % 20 == 0:
            print('MSE Loss : ' + str(loss)) # print the current loss to the CLI (Command Line Interface) console
            plt.suptitle('Linear Regression without PyTorch')
            # the left graph draws the line given by the learned weight and bias over the dataset scatter plot
            axes[0].set_title('loss={:.4}, w={:.4}, b={:.4}'.format(loss, init_w, init_b)) # show the current model's loss, weight, and bias in the left graph's title
            axes[0].set_xlim(0, 11) # x-axis range of the graph
            axes[0].set_ylim(0, 8) # y-axis range of the graph
            axes[0].scatter(x, y) # scatter plot of the dataset
            axes[0].plot(x, predict(x, init_w, init_b), 'b--') # draw the fitted line (current weight, bias) over the scatter plot
            # the right graph draws the history of the loss during training
            axes[1].set_title('MSE Loss Function') # right graph title
            axes[1].plot(range(len(loss_list)), loss_list, 'b') # plot the loss history so far on the right graph
            plt.draw() # render the left/right graphs
            plt.pause(0.01) # small delay for drawing
            axes[0].clear() # clear the left graph before the next update
    return init_w, init_b # return the final model's weight and bias
linear_regression_Pytorch(data, learning_rate, train_step) # run linear regression using PyTorch
linear_regression_NoPytorch(data, 0, 0, learning_rate, train_step) # run linear regression without PyTorch, using the MSE loss formula and the gradient-descent partial derivatives
| 59.385714
| 174
| 0.571085
|
1e4dd9269240652d35e1804e0494bc2365b608ec
| 12,756
|
py
|
Python
|
tests/logictest/logictest.py
|
ZeaLoVe/databend
|
4ebc55c5ccff15ed510e93d8c63965f6aa1bd76c
|
[
"Apache-2.0"
] | null | null | null |
tests/logictest/logictest.py
|
ZeaLoVe/databend
|
4ebc55c5ccff15ed510e93d8c63965f6aa1bd76c
|
[
"Apache-2.0"
] | null | null | null |
tests/logictest/logictest.py
|
ZeaLoVe/databend
|
4ebc55c5ccff15ed510e93d8c63965f6aa1bd76c
|
[
"Apache-2.0"
] | null | null | null |
import abc
import collections
import glob
import os
import re
import six
from hamcrest import assert_that, is_, none, is_not
from log import log
# a "statement" is one directive in a sqllogictest suite file
state_regex = r"^\s*statement\s+(?P<statement>((?P<ok>OK)|((?P<error>)ERROR\s*(?P<expectError>.*))|(?P<query>QUERY\s*((" \
r"ERROR\s+(?P<queryError>.*))|(?P<queryOptions>.*)))))$"
result_regex = r"^----\s*(?P<label>.*)?$"
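# For illustration, lines like the following match state_regex:
#   statement ok
#   statement error table .* does not exist
#   statement query I label(mysql)
# and a result block starts with a line matching result_regex, i.e. "----",
# optionally followed by a label on the same line.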
# return the statement type of a line
# `None` means the line is not a statement
def get_statement_type(line):
return re.match(state_regex, line, re.MULTILINE | re.IGNORECASE)
def get_result_label(line):
return re.match(result_regex, line, re.MULTILINE | re.IGNORECASE)
# return True if the line contains only whitespace, False otherwise
def is_empty_line(line):
if line.split():
return False
return True
# iterate over the lines of a file, yielding (line_index, line) pairs
def get_lines(suite_path):
with open(suite_path, encoding="UTF-8") as reader:
for line_idx, line in enumerate(reader.readlines()):
yield line_idx, line.rstrip('\n ') # keep tab /t
# return a single statement
def get_single_statement(lines):
statement_lines = []
for line_idx, line in lines:
if is_empty_line(line):
statement = "\n".join(statement_lines)
return statement
statement_lines.append(line)
return "\n".join(statement_lines)
def get_result(lines):
result_lines = []
result_label = None
val = 0
for line_idx, line in lines:
val = line_idx
if line.startswith('\t'): # tab as empty row in results
result_lines.append(line)
continue
if is_empty_line(line) and result_label is None:
continue
if is_empty_line(line) and result_label is not None:
return result_label, line_idx, "\n".join(result_lines)
if result_label is not None:
result_lines.append(line)
if result_label is None:
result_label = get_result_label(line)
if result_label is not None:
return result_label, val, "\n".join(result_lines)
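# get_result returns (label_match, last_line_index, result_text) once a "----"
# marker and the rows following it have been consumed; it implicitly returns
# None if the input ends before any "----" marker appears.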
def parse_token_args(tokens, arg):
i = 0
while i < len(tokens):
if tokens[i].startswith(
"{}(".format(arg)) and tokens[i].endswith(")") is False:
tokens[i] = tokens[i] + "," + tokens[i + 1]
del tokens[i + 1]
i -= 1
i += 1
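# For illustration: options "label(mysql, http), retry" split on "," gives
# ["label(mysql", "http)", "retry"]; parse_token_args(tokens, "label") rejoins
# the broken "label(...)" token, yielding ["label(mysql,http)", "retry"].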
class LogicError(Exception):
def __init__(self, message, expected):
self.message = message
self.expected = expected
def __str__(self):
return "Expected regex{}, Actual: {}".format(self.expected,
self.message)
class Statement:
def __init__(self, matched):
assert matched is not None
self.matched = matched
self.label = None
self.retry = False
self.query_type = None
self.expect_error = None
if matched.group("ok") is not None:
self.type = "ok"
elif matched.group("error") is not None:
self.type = "error"
self.expect_error = matched.group("expectError")
elif matched.group("query"):
self.type = "query"
if matched.group("queryError"):
self.query_error = matched.group("queryError")
else:
qo = matched.group("queryOptions")
s = qo.split(" ", 1)
if len(s) < 1:
raise Exception("Invalid query options: {}".format(qo))
if len(s) == 1:
if is_empty_line(s[0]):
raise Exception(
"Invalid query options, query type should not be empty: {}"
.format(qo))
self.query_type = s[0]
return
query_type, options = qo.split(" ", 1)
tokens = options.split(",")
tokens = [t.strip() for t in tokens]
parse_token_args(tokens, "label")
self.query_type = query_type
for token in tokens:
                if token.startswith("label(") and token.endswith(")"):
                    trimmed = token[len("label("):-1]
                    self.label = trimmed.split(",")
if token == "retry":
self.retry = True
else:
raise Exception("Unknown statement type {}".format(matched.group()))
def __str__(self):
s = "Statement: {}, type: {}".format(self.type, self.query_type)
if self.type == "query":
if self.query_type is not None:
s += ", query_type: {}".format(self.query_type)
if self.label is not None:
s += ", label: {}".format(self.label)
s += ", retry: {}".format(self.retry)
return s
class ParsedStatement(
collections.namedtuple(
'ParsedStatement',
["at_line", "s_type", "suite_name", "text", "results"])):
def get_fields(self):
return self._fields
def __str__(self):
result = ["", "Parsed Statement"]
for field in self.get_fields():
value = str(getattr(self, field))
if field != 'text':
result.append(' ' * 4 + '%s: %s,' % (field, value))
else:
result.append(' ' * 4 + '%s:' % field)
result.extend([' ' * 8 + row for row in value.split('\n')])
return "\n".join(result)
# return all statements in a file
def get_statements(suite_path, suite_name):
lines = get_lines(suite_path)
for line_idx, line in lines:
if is_empty_line(line):
# empty line or junk lines
continue
statement_type = get_statement_type(line)
if statement_type is None:
continue
s = Statement(statement_type)
text = get_single_statement(lines)
results = []
if s.type == "query" and s.query_type is not None:
# TODO need a better way to get all results
if s.label is None:
results.append(get_result(lines))
else:
for i in s.label:
results.append(get_result(lines))
yield ParsedStatement(line_idx, s, suite_name, text, results)
def format_value(vals, val_num):
row = len(vals) // val_num
width = len(str(vals[0])) + 2
for i in range(len(vals)):
width = max(width, len(vals[i]) + 2)
table = ""
for i in range(row):
ans = []
for j in range(val_num):
ans.append('{: >{w}}'.format(str(vals[i * val_num + j]), w=width))
table += "".join(ans)
table += "\n"
return table
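# For example, format_value(["1", "2", "3", "4"], 2) lays the values out as a
# right-aligned 2x2 table: "  1  2\n  3  4\n".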
def safe_execute(method, *info):
try:
return method()
except Exception as e:
collected = "\n".join([str(e)] + [str(el) for el in info])
raise RuntimeError("Failed to execute. Collected info: %s" % collected)
# factory class to abstract runtime interface
@six.add_metaclass(abc.ABCMeta)
class SuiteRunner(object):
def __init__(self, kind, pattern):
self.label = None
self.retry_time = 3
self.driver = None
self.path = "./suites/"
self.statement_files = []
self.kind = kind
self.show_query_on_execution = True
self.on_error_return = False
self.pattern = pattern
    # collect all files under the path
    # format: a list of (absolute file path, path relative to self.path) pairs
def fetch_files(self):
skip_files = os.getenv("SKIP_TEST_FILES")
skip_tests = skip_files.split(",") if skip_files is not None else []
log.debug("Skip test file list {}".format(skip_tests))
for filename in glob.iglob('{}/**'.format(self.path), recursive=True):
if os.path.isfile(filename):
if os.path.basename(filename) in skip_tests:
log.info("Skip test file {}".format(filename))
continue
if re.match(self.pattern, filename):
self.statement_files.append(
(filename, os.path.relpath(filename, self.path)))
self.statement_files.sort()
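    # Illustrative (hypothetical file names): individual suites can be
    # excluded by base name through the environment before fetch_files runs:
    #
    #     os.environ["SKIP_TEST_FILES"] = "broken.test,flaky.test"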
def execute(self):
        # batch execute uses a single session
        if callable(getattr(self, "batch_execute", None)):
# case batch
for (file_path, suite_name) in self.statement_files:
log.info("Batch execute, suite name:{} in file {}".format(suite_name, file_path))
statement_list = list()
for state in get_statements(file_path, suite_name):
statement_list.append(state)
self.batch_execute(statement_list)
else:
# case one by one
for (file_path, suite_name) in self.statement_files:
log.info("One by one execute, suite name:{} in file {}".format(suite_name, file_path))
for state in get_statements(file_path, suite_name):
self.execute_statement(state)
def execute_statement(self, statement):
if self.show_query_on_execution:
log.info("executing statement, type {}\n{}\n".format(
statement.s_type.type, statement.text))
if statement.s_type.type == "query":
self.assert_execute_query(statement)
elif statement.s_type.type == "error":
self.assert_execute_error(statement)
elif statement.s_type.type == "ok":
self.assert_execute_ok(statement)
else:
raise Exception("Unknown statement type")
    # expect the query to just return ok
def assert_execute_ok(self, statement):
actual = safe_execute(lambda: self.execute_ok(statement.text),
statement)
assert_that(
actual,
is_(none()),
str(statement),
)
def assert_query_equal(self, f, resultset, statement):
# use join after split instead of strip
compare_f = "".join(f.split())
compare_result = "".join(resultset[2].split())
assert compare_f == compare_result, "Expected:\n{}\n Actual:\n{}\n Statement:{}\n Start " \
"Line: {}, Result Label: {}".format(resultset[2].rstrip(),
f.rstrip(),
str(statement), resultset[1],
resultset[0].group("label"))
def assert_execute_query(self, statement):
actual = safe_execute(lambda: self.execute_query(statement), statement)
try:
f = format_value(actual, len(statement.s_type.query_type))
except Exception:
log.warning("{} statement type is query but return nothing".format(
statement))
raise
assert statement.results is not None and len(
statement.results) > 0, "No result found {}".format(statement)
hasResult = False
for resultset in statement.results:
if resultset[0].group("label") is not None and resultset[0].group(
"label") == self.kind:
self.assert_query_equal(f, resultset, statement)
hasResult = True
if not hasResult:
for resultset in statement.results:
if resultset[0].group("label") is None or len(
resultset[0].group("label")) == 0:
self.assert_query_equal(f, resultset, statement)
hasResult = True
assert hasResult, "No result found {}".format(statement)
    # expect the query to just return an error
def assert_execute_error(self, statement):
actual = safe_execute(lambda: self.execute_error(statement.text),
statement)
if actual is None:
raise Exception("Expected error but got none")
match = re.search(statement.s_type.expect_error, actual.msg)
assert_that(
match, is_not(none()),
"statement {}, expect error regex {}, found {}".format(
str(statement), statement.s_type.expect_error, actual))
def run_sql_suite(self):
log.info("run_sql_suite for {} on base {}".format(
self.kind, os.path.abspath(self.path)))
self.fetch_files()
self.execute()
def set_label(self, label):
self.label = label
def set_driver(self, driver):
self.driver = driver
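# A minimal concrete runner (a sketch under the assumption that subclasses
# supply execute_ok/execute_error/execute_query; the names below are
# invented for illustration):
#
#     class EchoRunner(SuiteRunner):
#         def execute_ok(self, statement_text):
#             return None  # None means success for assert_execute_ok
#         def execute_error(self, statement_text):
#             return None
#         def execute_query(self, statement):
#             return ["1", "2"]  # flat value list consumed by format_value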
| 36.238636
| 122
| 0.556209
|
1a1498464540acb0973a2d201ac0501786cab89d
| 4,839
|
py
|
Python
|
cytoskeleton_analyser/fitting/__init__.py
|
vsukhor/cytoskeleton-analyser
|
681a1f6ba1381a5fb293f2310fce5e97d400cfcb
|
[
"BSD-3-Clause"
] | null | null | null |
cytoskeleton_analyser/fitting/__init__.py
|
vsukhor/cytoskeleton-analyser
|
681a1f6ba1381a5fb293f2310fce5e97d400cfcb
|
[
"BSD-3-Clause"
] | null | null | null |
cytoskeleton_analyser/fitting/__init__.py
|
vsukhor/cytoskeleton-analyser
|
681a1f6ba1381a5fb293f2310fce5e97d400cfcb
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2021 Valerii Sukhorukov. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
""" Model classes and auxiliary functions for data fitting.
"""
import sys
from typing import Callable, Optional, Sequence, Union
from types import ModuleType
import numpy as np
from .base import set_logger
from .rayleigh import Rayleigh
from .weibull import Weibull
from .exponential import Exponential
from .von_mises import VonMisesDouble
from .von_mises import VonMisesTriple
from .gamma import Gamma
from .normal import Normal
from .lognorm import Lognorm
from .const import MockConst
__all__ = [
'set_logger',
'Rayleigh',
'Weibull',
'Exponential',
'VonMisesDouble',
'VonMisesTriple',
'Gamma',
'Normal',
'Lognorm',
'MockConst',
]
def is_const(model) -> bool:
"""Check if ``model`` is a mock wrapping a constant.
:return: True if the ``model`` is a mock model representing a
constant number.
"""
return isinstance(model, MockConst)
def is_mock(model) -> bool:
    """Check if ``model`` is a plain constant number.
    :return: True if ``model`` is a bare numpy floating-point value
        rather than a fitted model instance.
    """
    return isinstance(model, (np.float64, np.float32))
def fit(
ft: Sequence,
x: np.ndarray,
y: np.ndarray,
) -> list:
"""Fit alternative models to the data.
For each element of alternative fitters ``ft`` create instance of
fitting the class and fit specific function to the data.
:param ft: Collection of alternative fitters to apply to the data.
:param x: Data x values.
:param y: Data y values.
:return: Parametrised models resulting from the fitting.
"""
res = []
for f in ft:
if hasattr(f[0], '__self__'):
res.append(f[0].__self__(x, f[1]).fit(f[0], y))
else:
res.append(f[0].outer(x, f[1], f[2]).fit(f[0], y))
return res
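# Illustrative call (hypothetical fitter tuples; shapes follow the two
# branches above): each entry is either (bound_method, params) for classes
# exposing a bound fit target, or (callable, params, time_units) for those
# reached through .outer().
#
#     models = fit([(some_model.f, p0), (OtherModel.f, p0, 'sec')], x, y)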
def restore(
f: Union[Callable, type],
        p: np.ndarray,
tu: str,
x: np.ndarray,
y: np.ndarray,
fun: Optional[Callable] = None,
):
"""Generic function for restoring fitted models.
"""
if hasattr(f, '__self__'):
return f.__self__(x, p).restore(f, p, x, y)
elif hasattr(f, 'outer'):
return f.outer(x, p, tu).restore(f, y)
else:
return f(x, p).restore(fun, y)
def class_from_classname(
module: ModuleType,
classname: str,
) -> type:
"""Class object by name and module.
"""
return getattr(sys.modules[module.__name__],
classname.split('.')[0])
def subtype_from_classname(
c: type,
classname: str,
) -> type:
"""Child class (if exists) od a class.
Uses compound name ``classname`` containng child class name to
return child class object of a given class.
"""
if hasattr(c, 'subtype'):
return c.subtype(classname.split('.')[1])
else:
return c
def method_from_classname(
c: type,
classname: str,
) -> Optional[Callable]:
"""Method od a class (if exists)..
Uses compound name ``classname`` containng method name to return
method of a given class.
"""
a = classname.split('.')[1]
return getattr(c, a) if hasattr(c, a) else None
| 29.150602
| 79
| 0.659434
|
3e76c3c1638989385ac47a0eafeb7ca3ac7d2038
| 165
|
py
|
Python
|
game/screen_upbar.py
|
SHI3DO/Andante
|
beb6fdf96ef86a10de9f802cef2d97dd81b3e688
|
[
"MIT"
] | 2
|
2021-12-26T06:26:06.000Z
|
2022-02-24T23:54:58.000Z
|
game/screen_upbar.py
|
SHI3DO/Andante
|
beb6fdf96ef86a10de9f802cef2d97dd81b3e688
|
[
"MIT"
] | null | null | null |
game/screen_upbar.py
|
SHI3DO/Andante
|
beb6fdf96ef86a10de9f802cef2d97dd81b3e688
|
[
"MIT"
] | null | null | null |
def draw(screen, pg, screenx, screeny):
    # Translucent black bar across the top twelfth of the screen.
    upbar = pg.Surface((screenx, screeny // 12))
    upbar.set_alpha(100)
    upbar.fill((0, 0, 0))
    screen.blit(upbar, (0, 0))
| 33
| 45
| 0.636364
|
4fe894dcd36a55d56f87ab955dc416e20091575c
| 4,927
|
py
|
Python
|
tests/test_tile_stitcher.py
|
thiloSchlemmer/TileStitcher
|
c72d7944ebe55992a538cce22be8787a70d62530
|
[
"MIT"
] | 6
|
2018-06-14T13:15:19.000Z
|
2020-09-11T00:55:20.000Z
|
tests/test_tile_stitcher.py
|
thiloSchlemmer/TileStitcher
|
c72d7944ebe55992a538cce22be8787a70d62530
|
[
"MIT"
] | null | null | null |
tests/test_tile_stitcher.py
|
thiloSchlemmer/TileStitcher
|
c72d7944ebe55992a538cce22be8787a70d62530
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import platform
import unittest
from quantized_mesh_tile import TerrainTile
import tile_stitcher
def get_neighbours(z, x, y):
return {'west': (z, x - 1, y),
'north': (z, x, y + 1),
'south': (z, x, y - 1),
'east': (z, x + 1, y)}
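# Illustrative (doctest-style): tile indexes of the four neighbours.
#
#     >>> get_neighbours(14, 17388, 12517)['north']
#     (14, 17388, 12518)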
def get_tmp_path():
current_system = platform.system()
    if current_system == 'Windows':
return 'c:/Temp/'
else:
return '/tmp/'
def get_tile(z, x, y):
terrain_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'data/%s_%s_%s.terrain' % (z, x, y))
return tile_stitcher.load_tile(terrain_path, x, y, z)
class TestTileStitcher(unittest.TestCase):
def test_constructor(self):
# arrange
center_x = 17388
center_y = 12517
center_z = 14
neighbour_x = 17388
neighbour_y = 12518
neighbour_z = 14
# act
center_tile = get_tile(center_z, center_x, center_y)
neighbour_tile = get_tile(neighbour_z, neighbour_x, neighbour_y)
tile_stitcher.TileStitcher(center_tile)
# assert
self.assertIsInstance(center_tile, TerrainTile)
self.assertIsInstance(neighbour_tile, TerrainTile)
def test_getEdgeConnection(self):
# arrange
center_x = 17388
center_y = 12517
center_z = 14
neighbour_x = 17388
neighbour_y = 12518
neighbour_z = 14
# act
center_tile = get_tile(center_z, center_x, center_y)
neighbour_tile = get_tile(neighbour_z, neighbour_x, neighbour_y)
stitcher = tile_stitcher.TileStitcher(center_tile)
edge_connection = stitcher._get_edge_connection(neighbour_tile)
# assert
        self.assertEqual(edge_connection, 'n')
self.assertIsNotNone(edge_connection)
def test_stitch_together_with_south(self):
# arrange
center_x = 4347
center_y = 3128
center_z = 12
neighbour_x = 4347
neighbour_y = 3127
neighbour_z = 12
center_tile = get_tile(center_z, center_x, center_y)
neighbour_tile = get_tile(neighbour_z, neighbour_x, neighbour_y)
# act
stitcher = tile_stitcher.TileStitcher(center_tile)
stitcher.add_neighbour(neighbour_tile)
stitcher.stitch_together()
stitcher.save_to(get_tmp_path())
# assert
center_tile = tile_stitcher.load_tile(
os.path.join(get_tmp_path(), '12_4347_3128.terrain'),
center_x,
center_y,
center_z)
neighbour_tile = tile_stitcher.load_tile(
os.path.join(get_tmp_path(), '12_4347_3127.terrain'),
neighbour_x,
neighbour_y,
neighbour_z)
center_vertices_count = len(center_tile.get_edge_vertices(edge='s'))
neighbour_vertices_count = len(neighbour_tile.get_edge_vertices(edge='n'))
self.assertTrue(center_vertices_count == neighbour_vertices_count)
def test_stitch_with_west_east(self):
# arrange
center_x = 4347
center_y = 3128
center_z = 12
neighbour_x = 4348
neighbour_y = 3128
neighbour_z = 12
center_tile = get_tile(center_z, center_x, center_y)
neighbour_tile = get_tile(neighbour_z, neighbour_x, neighbour_y)
# act
stitcher = tile_stitcher.TileStitcher(center_tile)
stitcher.add_neighbour(neighbour_tile)
stitcher.stitch_together()
# assert
center_vertices_count = len(center_tile.get_edge_vertices(edge='e'))
neighbour_vertices_count = len(neighbour_tile.get_edge_vertices(edge='w'))
self.assertTrue(center_vertices_count == neighbour_vertices_count)
def test_stitch_with_east_and_south(self):
# arrange
center_x = 4346
center_y = 3127
center_z = 12
east_x = 4347
east_y = 3127
east_z = 12
south_x = 4346
south_y = 3126
south_z = 12
center_tile = get_tile(center_z, center_x, center_y)
east_tile = get_tile(east_z, east_x, east_y)
south_tile = get_tile(south_z, south_x, south_y)
# act
stitcher = tile_stitcher.TileStitcher(center_tile)
stitcher.add_neighbour(east_tile)
stitcher.add_neighbour(south_tile)
stitcher.stitch_together()
# assert
center_to_east_vertices_count = len(center_tile.get_edge_vertices(edge='e'))
center_to_south_vertices_count = len(center_tile.get_edge_vertices(edge='s'))
east_vertices_count = len(east_tile.get_edge_vertices(edge='w'))
south_vertices_count = len(south_tile.get_edge_vertices(edge='n'))
self.assertTrue(center_to_east_vertices_count == east_vertices_count)
self.assertTrue(center_to_south_vertices_count == south_vertices_count)
| 30.226994
| 85
| 0.641567
|
d29a4ef48a1671181157b55fd83662e1f8c5616f
| 12,146
|
py
|
Python
|
config/settings/base.py
|
qingfulishang/models_test
|
b3c93bd644acbcbb1a0273c4a04907dc9430c941
|
[
"BSD-3-Clause"
] | null | null | null |
config/settings/base.py
|
qingfulishang/models_test
|
b3c93bd644acbcbb1a0273c4a04907dc9430c941
|
[
"BSD-3-Clause"
] | null | null | null |
config/settings/base.py
|
qingfulishang/models_test
|
b3c93bd644acbcbb1a0273c4a04907dc9430c941
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Base settings to build other settings files upon.
"""
import environ
ROOT_DIR = (
environ.Path(__file__) - 3
) # (models_test/config/settings/base.py - 3 = models_test/)
APPS_DIR = ROOT_DIR.path("models_test")
env = environ.Env()
READ_DOT_ENV_FILE = env.bool("DJANGO_READ_DOT_ENV_FILE", default=False)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR.path(".env")))
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = "UTC"
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = "en-us"
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths
LOCALE_PATHS = [ROOT_DIR.path("locale")]
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {"default": env.db("DATABASE_URL")}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# "django.contrib.humanize", # Handy template tags
"django.contrib.admin",
]
THIRD_PARTY_APPS = [
"crispy_forms",
"allauth",
"allauth.account",
"allauth.socialaccount",
"rest_framework",
"django_celery_beat",
]
LOCAL_APPS = [
"models_test.users.apps.UsersConfig",
# Your stuff: custom apps go here
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {"sites": "models_test.contrib.sites.migrations"}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = "users.User"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = "users:redirect"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = "account_login"
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
"django.contrib.auth.hashers.Argon2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR("staticfiles"))
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [str(APPS_DIR.path("static"))]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR("media"))
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
"BACKEND": "django.template.backends.django.DjangoTemplates",
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
"DIRS": [str(APPS_DIR.path("templates"))],
"OPTIONS": {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
],
},
}
]
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap4"
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (str(APPS_DIR.path("fixtures")),)
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = "DENY"
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend"
)
# https://docs.djangoproject.com/en/2.2/ref/settings/#email-timeout
EMAIL_TIMEOUT = 5
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = "admin/"
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [("""ningzeyang@qq.com""", "ningzeyang@qq.com")]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
}
# Celery
# ------------------------------------------------------------------------------
if USE_TZ:
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-timezone
CELERY_TIMEZONE = TIME_ZONE
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_url
CELERY_BROKER_URL = env("CELERY_BROKER_URL")
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_backend
CELERY_RESULT_BACKEND = CELERY_BROKER_URL
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-accept_content
CELERY_ACCEPT_CONTENT = ["json"]
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-task_serializer
CELERY_TASK_SERIALIZER = "json"
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_serializer
CELERY_RESULT_SERIALIZER = "json"
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-time-limit
# TODO: set to whatever value is adequate in your circumstances
CELERY_TASK_TIME_LIMIT = 5 * 60
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-soft-time-limit
# TODO: set to whatever value is adequate in your circumstances
CELERY_TASK_SOFT_TIME_LIMIT = 60
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#beat-scheduler
CELERY_BEAT_SCHEDULER = "django_celery_beat.schedulers:DatabaseScheduler"
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool("DJANGO_ACCOUNT_ALLOW_REGISTRATION", True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = "username"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = "models_test.users.adapters.AccountAdapter"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = "models_test.users.adapters.SocialAccountAdapter"
# Your stuff...
# ------------------------------------------------------------------------------
| 42.320557
| 100
| 0.645974
|
c0480fa061a8094583f5115dd0186bf42bafcdf6
| 3,911
|
py
|
Python
|
templates/stm32f030-cct6/osconfig.py
|
flyghost/OneOS-V2.1.0
|
6fedab0558c07fe679d63ba1eb8ee9992c044d86
|
[
"Apache-2.0"
] | null | null | null |
templates/stm32f030-cct6/osconfig.py
|
flyghost/OneOS-V2.1.0
|
6fedab0558c07fe679d63ba1eb8ee9992c044d86
|
[
"Apache-2.0"
] | null | null | null |
templates/stm32f030-cct6/osconfig.py
|
flyghost/OneOS-V2.1.0
|
6fedab0558c07fe679d63ba1eb8ee9992c044d86
|
[
"Apache-2.0"
] | null | null | null |
import os
# toolchains options
ARCH = 'arm'
CPU = 'cortex-m0'
CROSS_TOOL = 'gcc'
# bsp lib config
BSP_LIBRARY_TYPE = None
if os.getenv('OS_CC'):
CROSS_TOOL = os.getenv('OS_CC')
if os.getenv('OS_ROOT'):
OS_ROOT = os.getenv('OS_ROOT')
# cross_tool provides the cross compiler
# COMPILER_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
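# Illustrative (hypothetical paths): the toolchain is normally selected via
# the environment before the build system imports this file, e.g.
#   OS_CC=gcc  OS_EXEC_PATH=/opt/gcc-arm-none-eabi/bin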
if CROSS_TOOL == 'gcc':
COMPILER = 'gcc'
COMPILER_PATH = ''
elif CROSS_TOOL == 'keil':
COMPILER = 'armcc'
    # Notice: the installation path of armcc cannot contain Chinese characters
COMPILER_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
COMPILER = 'iar'
COMPILER_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'
BUILD = 'debug'
if COMPILER == 'gcc':
# toolchains
if COMPILER_PATH == '':
COMPILER_PATH = os.getenv('OS_EXEC_PATH')
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
CXX = PREFIX + 'g++'
LINK = PREFIX + 'gcc'
RESULT_SUFFIX = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m0 -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=oneos.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
POST_ACTION = OBJCPY + ' -R .reserved_ram -O binary $TARGET oneos.bin\n' + SIZE + ' $TARGET \n'
elif COMPILER == 'armcc':
# toolchains
CC = 'armcc'
CXX = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
RESULT_SUFFIX = 'axf'
DEVICE = ' --cpu Cortex-M0 '
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --split_sections --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --scatter "board\linker_scripts\link.sct" --info sizes --info totals --info unused --info veneers --list oneos.map --strict'
CFLAGS += ' -I "' + COMPILER_PATH + '/ARM/ARMCC/include"'
LFLAGS += ' --libpath="' + COMPILER_PATH + '/ARM/ARMCC/lib"'
#CFLAGS += ' -D__MICROLIB '
#AFLAGS += ' --pd "__MICROLIB SETA 1" '
#LFLAGS += ' --library_type=microlib '
COMPILER_PATH += '/ARM/ARMCC/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
POST_ACTION = 'fromelf --bin $TARGET --output oneos.bin \nfromelf -z $TARGET'
elif COMPILER == 'iar':
# toolchains
CC = 'iccarm'
CXX = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
RESULT_SUFFIX = 'out'
DEVICE = '-Dewarm'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
    # The stm32f030 is a Cortex-M0 part without an FPU, so the M4/FPU flags
    # that appeared here (apparently copied from an M4 template) are dropped.
    CFLAGS += ' --cpu=Cortex-M0'
    CFLAGS += ' -e'
CFLAGS += ' --dlib_config "' + COMPILER_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
    AFLAGS += ' --cpu Cortex-M0'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config "board/linker_scripts/link.icf"'
LFLAGS += ' --entry __iar_program_start'
CXXFLAGS = CFLAGS
COMPILER_PATH = COMPILER_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --bin $TARGET oneos.bin'
| 27.159722
| 148
| 0.573766
|
ce83660d3da39af07faad3db38b211ba181a5bf2
| 1,828
|
py
|
Python
|
justenough/transforms/dataloaders.py
|
DamLabResources/justenough
|
3ee5887430598a9e1c042d64d2b9827ee879961a
|
[
"Apache-2.0"
] | null | null | null |
justenough/transforms/dataloaders.py
|
DamLabResources/justenough
|
3ee5887430598a9e1c042d64d2b9827ee879961a
|
[
"Apache-2.0"
] | null | null | null |
justenough/transforms/dataloaders.py
|
DamLabResources/justenough
|
3ee5887430598a9e1c042d64d2b9827ee879961a
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/12_transforms.dataloaders.ipynb (unless otherwise specified).
__all__ = ['HFBertDataLoaders']
# Cell
from fastai.text.all import *
from .sequence import *
# Cell
# export
class HFBertDataLoaders(DataLoaders):
@staticmethod
def from_df(frame, tokenizer, model, sequence_col = 'sequence', label_col = None, vocab=None,
max_length = 128, device = 'cuda', bs = 32, precompute = True,
splitter = None, num_workers = 0):
if splitter is None:
splitter = RandomSplitter()
seq_tfms = [ColReader(sequence_col),
SpaceTransform(),
HFTokenizerWrapper(tokenizer,
max_length=max_length,
tokens_only=False,
device = device),
HFPoolingTransform(model, bs=bs)]
if label_col is None:
label_tfms = seq_tfms
else:
label_tfms = [ColReader(label_col), Categorize(vocab=vocab)]
if precompute:
seq_pipe = Pipeline(seq_tfms)
seq_tls = seq_pipe(frame)
if label_col is None:
label_tls = seq_tls
else:
label_tls = TfmdLists(frame, label_tfms)
tls = TfmdLists(zip(seq_tls, label_tls), [])
train, test = splitter(tls)
            return DataLoaders.from_dsets(tls[train], tls[test], num_workers=num_workers, bs=bs).to(device)
else:
train, test = splitter(frame)
feat_tls = Datasets(frame, [seq_tfms, label_tfms],
splits = (train, test))
            dls = feat_tls.dataloaders(num_workers=num_workers, bs=bs).to(device)
return dls
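# Illustrative usage (a sketch; the checkpoint name and frame columns are
# assumptions, not from the original file):
#
#     from transformers import AutoModel, AutoTokenizer
#     tok = AutoTokenizer.from_pretrained("bert-base-uncased")
#     mdl = AutoModel.from_pretrained("bert-base-uncased")
#     dls = HFBertDataLoaders.from_df(frame, tok, mdl,
#                                     sequence_col="sequence",
#                                     label_col="label", bs=32)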
| 28.123077
| 115
| 0.54814
|
dfea265646db1d93e04912f398cf29d1c32f5913
| 41
|
py
|
Python
|
config.py
|
soybean217/opencv_python_cam_windows_demo
|
aab0a4cebc34760c437b32fb43c5e794fa181081
|
[
"MIT"
] | null | null | null |
config.py
|
soybean217/opencv_python_cam_windows_demo
|
aab0a4cebc34760c437b32fb43c5e794fa181081
|
[
"MIT"
] | null | null | null |
config.py
|
soybean217/opencv_python_cam_windows_demo
|
aab0a4cebc34760c437b32fb43c5e794fa181081
|
[
"MIT"
] | null | null | null |
GLOBAL_SETTINGS = {
"port": 11111,
}
| 10.25
| 19
| 0.585366
|
98b2b35d50f1390c40846d67d07850d93b383186
| 1,909
|
py
|
Python
|
eds/openmtc-gevent/server/openmtc-cse/src/openmtc_cse/methoddomain/filtercriteria/__init__.py
|
piyush82/elastest-device-emulator-service
|
b4d6b393d6042c54a7b3dfb5f58cad5efd00f0e7
|
[
"Apache-2.0"
] | null | null | null |
eds/openmtc-gevent/server/openmtc-cse/src/openmtc_cse/methoddomain/filtercriteria/__init__.py
|
piyush82/elastest-device-emulator-service
|
b4d6b393d6042c54a7b3dfb5f58cad5efd00f0e7
|
[
"Apache-2.0"
] | null | null | null |
eds/openmtc-gevent/server/openmtc-cse/src/openmtc_cse/methoddomain/filtercriteria/__init__.py
|
piyush82/elastest-device-emulator-service
|
b4d6b393d6042c54a7b3dfb5f58cad5efd00f0e7
|
[
"Apache-2.0"
] | null | null | null |
from futile.logging import get_logger
from openmtc.model import ModelTypeError
from openmtc_cse.methoddomain.filtercriteria import filters
from openmtc_onem2m.exc import CSEBadRequest
from openmtc_onem2m.model import FilterCriteria
_logger = get_logger(__name__)
def check_match(resource, filter_criteria):
_logger.debug("checking if filter criteria '%s' are matched by "
"resource '%s'", filter_criteria, resource)
for criteria, value in filter_criteria.get_values(True).iteritems():
if not value:
continue
_logger.debug("checking if resource matches: %s=%s", criteria, value)
try:
filter_function = getattr(filters, criteria)
except AttributeError:
_logger.error("'%s' is not a valid filter criterion", criteria)
return False
else:
if not filter_function(resource, value):
_logger.debug("resource '%s' does not match criterion '%s=%s'",
resource, criteria, value)
return False
_logger.debug("resource '%s' matches filter criteria '%s'",
resource, filter_criteria)
return True
def parse_filter_criteria(filter_criteria):
if filter_criteria is None:
filter_criteria = {}
_logger.debug("parsing '%s'", filter_criteria)
int_criteria = ('stateTagSmaller', 'stateTagBigger', 'resourceType',
'sizeAbove', 'sizeBelow', 'filterUsage', 'limit')
parsed_criteria = {}
for k, v in filter_criteria.iteritems():
if k in int_criteria:
if isinstance(v, list):
parsed_criteria[k] = map(int, v)
else:
parsed_criteria[k] = int(v)
else:
parsed_criteria[k] = v
try:
return FilterCriteria(**parsed_criteria)
except ModelTypeError:
raise CSEBadRequest()
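# Illustrative input (hypothetical values): string counters are coerced to
# int for the integer criteria listed above before building FilterCriteria.
#
#     parse_filter_criteria({'resourceType': '3', 'limit': '10'})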
| 36.018868
| 79
| 0.631221
|
9514da467b512ec91d9dfb34a8a63b900fd85bf6
| 471
|
py
|
Python
|
source/MatematicaFinanciera/valorPresente/valor_presente.py
|
AbelMurgas/Python-Aplicado
|
0f01f54d64aae8b7689921741241be76383f3662
|
[
"MIT"
] | null | null | null |
source/MatematicaFinanciera/valorPresente/valor_presente.py
|
AbelMurgas/Python-Aplicado
|
0f01f54d64aae8b7689921741241be76383f3662
|
[
"MIT"
] | null | null | null |
source/MatematicaFinanciera/valorPresente/valor_presente.py
|
AbelMurgas/Python-Aplicado
|
0f01f54d64aae8b7689921741241be76383f3662
|
[
"MIT"
] | null | null | null |
class Valor_Presente:
    @staticmethod
    def obtener_valor_presente(tiempo_anual: float, periodo: int, interes_porcentual: float, valor_futuro: float) -> float:
        # Present value of a single future amount: PV = FV / (1 + i)**n
        i = (interes_porcentual / 100) / periodo
        n = tiempo_anual * periodo
        resultado = valor_futuro / (1 + i) ** n
        return round(resultado, 2)
    @staticmethod
    def obtener_valor_presente_mediante_descuento(monto: float, descuento_simple: float) -> float:
        # Present value given a simple discount already taken off the amount.
        respuesta = monto - descuento_simple
        return round(respuesta, 2)
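# Illustrative usage (figures are assumptions): 6% nominal annual interest
# compounded monthly (periodo=12) over 2 years on a future value of 1000:
# i = 0.06 / 12 = 0.005, n = 24, PV = 1000 / 1.005**24, about 887.19.
#
#     >>> Valor_Presente.obtener_valor_presente(2, 12, 6.0, 1000.0)
#     887.19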
| 39.25
| 114
| 0.713376
|
cbcb6bc1f37ce680cb8c2ea040f5197c5a328dd0
| 7,008
|
py
|
Python
|
EndPoint/app/RDF/validator.py
|
osoc-es/data-quality-madrid
|
500f540b5f57d5292f53eba4bac6f67947710ba7
|
[
"Apache-2.0"
] | null | null | null |
EndPoint/app/RDF/validator.py
|
osoc-es/data-quality-madrid
|
500f540b5f57d5292f53eba4bac6f67947710ba7
|
[
"Apache-2.0"
] | 19
|
2021-07-05T09:31:17.000Z
|
2021-07-16T10:27:36.000Z
|
EndPoint/app/RDF/validator.py
|
osoc-es/data-quality-madrid
|
500f540b5f57d5292f53eba4bac6f67947710ba7
|
[
"Apache-2.0"
] | null | null | null |
import csv
from datetime import datetime
import re
import time
def initializeProcess(csvFile: str):
start = time.time()
# Initialize the process
f = open(csvFile, "r",encoding="ISO8859-1")
lines = f.readlines()
delimiter = findDelimiter(lines[0:10])
rowCount = 0
dictErrs = {
"columnas":
{
"repeticion" :[]
},
"CamposTexto":
{
"ceroizquierda": []
},
"CampoNumerico":
{
"Region" :[]
},
"Fechas":
{
"formatoFecha" :[]
},
"CampoTelefono":
{
"codigopais":[]
},
"CoordGeoGraficas":{
"noSeparadas":[]
},
"errorProcessing":""
}
f.close()
for line in lines:
row = line.split(delimiter)
if(rowCount == 0):
dictErrs["repetition"] = hasRepetitiveCols(row,rowCount)
rowCount += 1
continue
col = 0
for val in row:
            # Apply the filters below
            # Each hit is recorded as ((rowCount, col), <error value>)
try:
dictErrs["CamposTexto"]["ceroizquierda"] += [((rowCount,col), val)] if checkNumber(val) else []
dictErrs["CampoNumerico"]["Region"] += [((rowCount,col), val)] if checkCorrectNumber(val) else []
dictErrs["Fechas"]["formatoFecha"] += [((rowCount,col), val)] if checkFechas(val) else []
dictErrs["CampoTelefono"]["codigopais"] += [((rowCount,col), val)] if checkTelephone(val) else []
dictErrs["CoordGeoGraficas"]["noSeparadas"] += [((rowCount,col), val)] if checkCoordinates(val) else []
except Exception as e:
print(e)
continue
col += 1
if(rowCount>50):
break
rowCount += 1
end = time.time()
print(f"initializeProcess >> TIME ELAPSE IN CSV : {end-start}")
return dictErrs
def hasRepetitiveCols(row,rowCount):
    '''Check if the row has repetitive columns
    Args:
        row: the row to be checked
    Returns:
        a list of ((row, col), name) entries, one per repeated column
    '''
repetitives = []
cols = set()
colNum = 0
for col in row:
if col in cols:
repetitives += [((rowCount,colNum),col)]
cols.add(col)
colNum += 1
return repetitives
def findDelimiter(lines):
    '''Find the delimiter
    Args:
        lines: sample lines taken from the csv file
    Returns:
        the most frequent delimiter among the sampled lines
    '''
def most_frequent(List):
return max(set(List), key = List.count)
delimitadores = []
for i in lines:
dialect = csv.Sniffer().sniff(i)
delimitadores += [dialect.delimiter]
return most_frequent(delimitadores)
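# Illustrative (hypothetical sample): with semicolon-separated rows such as
# ["a;b;c", "1;2;3"], the sniffer is expected to report ';' as the most
# frequent delimiter.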
def checkNumber(value):
    # A value like '009' can still be converted to the number 9
try:
float(value)
except Exception:
return False
    # Plain numbers may be passed in; in that case the value is already formatted
if(type(value) == float or type(value) == int or value == "0"):
return False
    # It is a string and it is numeric
    return bool(re.match(r"[0][0-9]*", value))
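# Illustrative (doctest-style): leading-zero strings are flagged, while a
# bare zero or a real number is not.
#
#     >>> checkNumber("007"), checkNumber("0"), checkNumber(7)
#     (True, False, False)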
def checkFechas(value):
if not (type(value) == str):
return False
    # It may be a date, since it contains '-' or '/'
if possibleDateEncounter(value):
        # Could it be a time, given the ':'?
if ":" in value:
try:
datetime.strptime(value, "%Y-%m-%d:%H:%M:%S")
except ValueError:
return True
else:
try:
datetime.strptime(value, "%Y-%m-%d")
except ValueError:
return True
return False
def possibleDateEncounter(value):
if value.count('-') == 2 or value.count('/') == 2:
res = value.split("-") if "-" in value else value.split("/")
if ":" in res[2] :
res = [res[0],res[1]]+res[2].split(":")
for i in res:
try:
int(i)
if(len(i)>4):
return False
except Exception:
return False
return True
def checkTelephone(value):
if type(value) == int:
value = str(value)
    if len(value) == 9:
        return True
    return False
def checkCorrectNumber(value):
    # If we have an integer, we must check that a negative value is not
    # written between parentheses.
    # The value may be negative, so we look for instances of "(".
if(((value.count(")")) > 0) and ((value.count("(")) > 0)):
if(int((value.replace("(","")).replace(")","")) < 0):
return True
else:
        # Check that it does not use '.' as a thousands separator.
        if value.count(".") > 0:
            res = value.split(".")
            # Both sides of the dot must be integers for a plain decimal number.
            for i in res:
                try:
                    int(i)
                except Exception:
                    return True
        # Check whether it is a number with a regional separator (commas in Spain).
        if value.count(",") > 0:
            try:
                # The original tested the literal "123,123" here; it is the
                # value itself that must parse once the comma becomes a dot.
                float(value.replace(",", ".", 1))
            except ValueError:
                return True
def checkCoordinates(value):
'''
Given a value checks if it is a coordinate
'''
    regex_string = [
        r"(?:(-?[1-8]?\d(?:\.\d{1,18})?|90(?:\.0{1,18})?),(-?(?:1[0-7]|[1-9])?\d(?:\.\d{1,18})?|180(?:\.0{1,18})?))(?:|(?:(-?[1-8]?\d(?:\.\d{1,18})?|90(?:\.0{1,18})?),(-?(?:1[0-7]|[1-9])?\d(?:\.\d{1,18})?|180(?:\.0{1,18})?)))*$",
        r"^[-+]?([1-8]?\d(\.\d+)?|90(\.0+)?),\s*[-+]?(180(\.0+)?|((1[0-7]\d)|([1-9]?\d))(\.\d+)?)$",
        r"^([-+]?)([\d]{1,2})(((\.)(\d+)(,)))(\s*)(([-+]?)([\d]{1,3})((\.)(\d+))?)$",
        r"^([-+]?)([\d]{1,3})(((\.)(\d+)(,)))(\s*)(([-+]?)([\d]{1,2})(((\.)(\d+)(,)))(\s*)([-+]?)([\d]{1,3})((\.)(\d+))?)$",
        r"^(\()([-+]?)([\d]{1,2})(((\.)(\d+)(,)))(\s*)(([-+]?)([\d]{1,3})((\.)(\d+))?(\)))$",
        r"(-?(90[ :°d]*00[ :\'\'m]*00(\.0+)?|[0-8][0-9][ :°d]*[0-5][0-9][ :\'\'m]*[0-5][0-9](\.\d+)?)[ :\?\"s]*(N|n|S|s)?)[ ,]*(-?(180[ :°d]*00[ :\'\'m]*00(\.0+)?|(1[0-7][0-9]|0[0-9][0-9])[ :°d]*[0-5][0-9][ :\'\'m]*[0-5][0-9](\.\d+)?)[ :\?\"s]*(E|e|W|w)?)",
        r"""([0-8]?\d(°|\s)[0-5]?\d('|\s)[0-5]?\d(\.\d{1,6})?"?|90(°|\s)0?0('|\s)0?0"?)\s{0,}[NnSs]\s{1,}([0-1]?[0-7]?\d(°|\s)[0-5]?\d('|\s)[0-5]?\d(\.\d{1,6})?"?|180(°|\s)0?0('|\s)0?0"?)\s{0,}[EeOoWw]""",
        r"^(?:(-?[1-8]?\d(?:\.\d{1,18})?|90(?:\.0{1,18})?),(-?(?:1[0-7]|[1-9])?\d(?:\.\d{1,18})?|180(?:\.0{1,18})?))(?:|(?:(-?[1-8]?\d(?:\.\d{1,18})?|90(?:\.0{1,18})?),(-?(?:1[0-7]|[1-9])?\d(?:\.\d{1,18})?|180(?:\.0{1,18})?)))*$"]
for i in regex_string:
if re.match(i, value):
return True
return False
| 33.056604
| 256
| 0.446062
|
fd70654cb9f8bcfaa789861fe7c0591c09e3395c
| 109,919
|
py
|
Python
|
BaseTools/Source/Python/Ecc/c.py
|
James992927108/uEFI_Edk2_Practice
|
2cac7618dfee10bfa5104a2e167c85425fde0100
|
[
"BSD-2-Clause"
] | 6
|
2020-01-10T05:16:15.000Z
|
2022-01-06T17:41:58.000Z
|
BaseTools/Source/Python/Ecc/c.py
|
James992927108/uEFI_Edk2_Practice
|
2cac7618dfee10bfa5104a2e167c85425fde0100
|
[
"BSD-2-Clause"
] | null | null | null |
BaseTools/Source/Python/Ecc/c.py
|
James992927108/uEFI_Edk2_Practice
|
2cac7618dfee10bfa5104a2e167c85425fde0100
|
[
"BSD-2-Clause"
] | 3
|
2018-04-21T07:59:33.000Z
|
2018-04-23T02:06:01.000Z
|
## @file
# This file is used to be the c coding style checking of ECC tool
#
# Copyright (c) 2009 - 2017, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
import sys
import Common.LongFilePathOs as os
import re
import string
import CodeFragmentCollector
import FileProfile
from CommonDataClass import DataClass
import Database
from Common import EdkLogger
from EccToolError import *
import EccGlobalData
import MetaDataParser
IncludeFileListDict = {}
AllIncludeFileListDict = {}
IncludePathListDict = {}
ComplexTypeDict = {}
SUDict = {}
IgnoredKeywordList = ['EFI_ERROR']
def GetIgnoredDirListPattern():
skipList = list(EccGlobalData.gConfig.SkipDirList) + ['.svn']
DirString = string.join(skipList, '|')
p = re.compile(r'.*[\\/](?:%s)[\\/]?.*' % DirString)
return p
def GetFuncDeclPattern():
p = re.compile(r'(?:EFIAPI|EFI_BOOT_SERVICE|EFI_RUNTIME_SERVICE)?\s*[_\w]+\s*\(.*\)$', re.DOTALL)
return p
def GetArrayPattern():
p = re.compile(r'[_\w]*\s*[\[.*\]]+')
return p
def GetTypedefFuncPointerPattern():
    p = re.compile(r'[_\w\s]*\([\w\s]*\*+\s*[_\w]+\s*\)\s*\(.*\)', re.DOTALL)
return p
def GetDB():
return EccGlobalData.gDb
def GetConfig():
return EccGlobalData.gConfig
def PrintErrorMsg(ErrorType, Msg, TableName, ItemId):
Msg = Msg.replace('\n', '').replace('\r', '')
MsgPartList = Msg.split()
Msg = ''
for Part in MsgPartList:
Msg += Part
Msg += ' '
GetDB().TblReport.Insert(ErrorType, OtherMsg=Msg, BelongsToTable=TableName, BelongsToItem=ItemId)
def GetIdType(Str):
Type = DataClass.MODEL_UNKNOWN
Str = Str.replace('#', '# ')
List = Str.split()
if List[1] == 'include':
Type = DataClass.MODEL_IDENTIFIER_INCLUDE
elif List[1] == 'define':
Type = DataClass.MODEL_IDENTIFIER_MACRO_DEFINE
elif List[1] == 'ifdef':
Type = DataClass.MODEL_IDENTIFIER_MACRO_IFDEF
elif List[1] == 'ifndef':
Type = DataClass.MODEL_IDENTIFIER_MACRO_IFNDEF
elif List[1] == 'endif':
Type = DataClass.MODEL_IDENTIFIER_MACRO_ENDIF
elif List[1] == 'pragma':
Type = DataClass.MODEL_IDENTIFIER_MACRO_PROGMA
else:
Type = DataClass.MODEL_UNKNOWN
return Type
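# Illustrative (doctest-style): classifying a preprocessor line.
#
#     >>> GetIdType('#include <Uefi.h>') == DataClass.MODEL_IDENTIFIER_INCLUDE
#     True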
def SuOccurInTypedef (Su, TdList):
for Td in TdList:
if Su.StartPos[0] == Td.StartPos[0] and Su.EndPos[0] == Td.EndPos[0]:
return True
return False
def GetIdentifierList():
IdList = []
for comment in FileProfile.CommentList:
IdComment = DataClass.IdentifierClass(-1, '', '', '', comment.Content, DataClass.MODEL_IDENTIFIER_COMMENT, -1, -1, comment.StartPos[0], comment.StartPos[1], comment.EndPos[0], comment.EndPos[1])
IdList.append(IdComment)
for pp in FileProfile.PPDirectiveList:
Type = GetIdType(pp.Content)
IdPP = DataClass.IdentifierClass(-1, '', '', '', pp.Content, Type, -1, -1, pp.StartPos[0], pp.StartPos[1], pp.EndPos[0], pp.EndPos[1])
IdList.append(IdPP)
for pe in FileProfile.PredicateExpressionList:
IdPE = DataClass.IdentifierClass(-1, '', '', '', pe.Content, DataClass.MODEL_IDENTIFIER_PREDICATE_EXPRESSION, -1, -1, pe.StartPos[0], pe.StartPos[1], pe.EndPos[0], pe.EndPos[1])
IdList.append(IdPE)
FuncDeclPattern = GetFuncDeclPattern()
ArrayPattern = GetArrayPattern()
for var in FileProfile.VariableDeclarationList:
DeclText = var.Declarator.lstrip()
FuncPointerPattern = GetTypedefFuncPointerPattern()
if FuncPointerPattern.match(DeclText):
continue
VarNameStartLine = var.NameStartPos[0]
VarNameStartColumn = var.NameStartPos[1]
FirstChar = DeclText[0]
while not FirstChar.isalpha() and FirstChar != '_':
if FirstChar == '*':
var.Modifier += '*'
VarNameStartColumn += 1
DeclText = DeclText.lstrip('*')
elif FirstChar == '\r':
DeclText = DeclText.lstrip('\r\n').lstrip('\r')
VarNameStartLine += 1
VarNameStartColumn = 0
elif FirstChar == '\n':
DeclText = DeclText.lstrip('\n')
VarNameStartLine += 1
VarNameStartColumn = 0
elif FirstChar == ' ':
DeclText = DeclText.lstrip(' ')
VarNameStartColumn += 1
elif FirstChar == '\t':
DeclText = DeclText.lstrip('\t')
VarNameStartColumn += 8
else:
DeclText = DeclText[1:]
VarNameStartColumn += 1
FirstChar = DeclText[0]
var.Declarator = DeclText
if FuncDeclPattern.match(var.Declarator):
DeclSplitList = var.Declarator.split('(')
FuncName = DeclSplitList[0].strip()
FuncNamePartList = FuncName.split()
if len(FuncNamePartList) > 1:
FuncName = FuncNamePartList[-1].strip()
NameStart = DeclSplitList[0].rfind(FuncName)
var.Declarator = var.Declarator[NameStart:]
if NameStart > 0:
var.Modifier += ' ' + DeclSplitList[0][0:NameStart]
Index = 0
PreChar = ''
while Index < NameStart:
FirstChar = DeclSplitList[0][Index]
if DeclSplitList[0][Index:].startswith('EFIAPI'):
Index += 6
VarNameStartColumn += 6
PreChar = ''
continue
elif FirstChar == '\r':
Index += 1
VarNameStartLine += 1
VarNameStartColumn = 0
elif FirstChar == '\n':
Index += 1
if PreChar != '\r':
VarNameStartLine += 1
VarNameStartColumn = 0
elif FirstChar == ' ':
Index += 1
VarNameStartColumn += 1
elif FirstChar == '\t':
Index += 1
VarNameStartColumn += 8
else:
Index += 1
VarNameStartColumn += 1
PreChar = FirstChar
IdVar = DataClass.IdentifierClass(-1, var.Modifier, '', var.Declarator, FuncName, DataClass.MODEL_IDENTIFIER_FUNCTION_DECLARATION, -1, -1, var.StartPos[0], var.StartPos[1], VarNameStartLine, VarNameStartColumn)
IdList.append(IdVar)
continue
if var.Declarator.find('{') == -1:
for decl in var.Declarator.split(','):
DeclList = decl.split('=')
Name = DeclList[0].strip()
if ArrayPattern.match(Name):
LSBPos = var.Declarator.find('[')
var.Modifier += ' ' + Name[LSBPos:]
Name = Name[0:LSBPos]
                IdVar = DataClass.IdentifierClass(-1, var.Modifier, '', Name, (len(DeclList) > 1 and [DeclList[1]] or [''])[0], DataClass.MODEL_IDENTIFIER_VARIABLE, -1, -1, var.StartPos[0], var.StartPos[1], VarNameStartLine, VarNameStartColumn)
IdList.append(IdVar)
else:
DeclList = var.Declarator.split('=')
Name = DeclList[0].strip()
if ArrayPattern.match(Name):
LSBPos = var.Declarator.find('[')
var.Modifier += ' ' + Name[LSBPos:]
Name = Name[0:LSBPos]
            IdVar = DataClass.IdentifierClass(-1, var.Modifier, '', Name, (len(DeclList) > 1 and [DeclList[1]] or [''])[0], DataClass.MODEL_IDENTIFIER_VARIABLE, -1, -1, var.StartPos[0], var.StartPos[1], VarNameStartLine, VarNameStartColumn)
IdList.append(IdVar)
for enum in FileProfile.EnumerationDefinitionList:
LBPos = enum.Content.find('{')
RBPos = enum.Content.find('}')
Name = enum.Content[4:LBPos].strip()
Value = enum.Content[LBPos + 1:RBPos]
IdEnum = DataClass.IdentifierClass(-1, '', '', Name, Value, DataClass.MODEL_IDENTIFIER_ENUMERATE, -1, -1, enum.StartPos[0], enum.StartPos[1], enum.EndPos[0], enum.EndPos[1])
IdList.append(IdEnum)
for su in FileProfile.StructUnionDefinitionList:
if SuOccurInTypedef(su, FileProfile.TypedefDefinitionList):
continue
Type = DataClass.MODEL_IDENTIFIER_STRUCTURE
SkipLen = 6
if su.Content.startswith('union'):
Type = DataClass.MODEL_IDENTIFIER_UNION
SkipLen = 5
LBPos = su.Content.find('{')
RBPos = su.Content.find('}')
if LBPos == -1 or RBPos == -1:
Name = su.Content[SkipLen:].strip()
Value = ''
else:
Name = su.Content[SkipLen:LBPos].strip()
Value = su.Content[LBPos:RBPos + 1]
IdPE = DataClass.IdentifierClass(-1, '', '', Name, Value, Type, -1, -1, su.StartPos[0], su.StartPos[1], su.EndPos[0], su.EndPos[1])
IdList.append(IdPE)
TdFuncPointerPattern = GetTypedefFuncPointerPattern()
for td in FileProfile.TypedefDefinitionList:
Modifier = ''
Name = td.ToType
Value = td.FromType
if TdFuncPointerPattern.match(td.ToType):
Modifier = td.FromType
LBPos = td.ToType.find('(')
TmpStr = td.ToType[LBPos + 1:].strip()
StarPos = TmpStr.find('*')
if StarPos != -1:
Modifier += ' ' + TmpStr[0:StarPos]
while TmpStr[StarPos] == '*':
# Modifier += ' ' + '*'
StarPos += 1
TmpStr = TmpStr[StarPos:].strip()
RBPos = TmpStr.find(')')
Name = TmpStr[0:RBPos]
Value = 'FP' + TmpStr[RBPos + 1:]
else:
while Name.startswith('*'):
Value += ' ' + '*'
Name = Name.lstrip('*').strip()
if Name.find('[') != -1:
LBPos = Name.find('[')
RBPos = Name.rfind(']')
Value += Name[LBPos : RBPos + 1]
Name = Name[0 : LBPos]
IdTd = DataClass.IdentifierClass(-1, Modifier, '', Name, Value, DataClass.MODEL_IDENTIFIER_TYPEDEF, -1, -1, td.StartPos[0], td.StartPos[1], td.EndPos[0], td.EndPos[1])
IdList.append(IdTd)
for funcCall in FileProfile.FunctionCallingList:
IdFC = DataClass.IdentifierClass(-1, '', '', funcCall.FuncName, funcCall.ParamList, DataClass.MODEL_IDENTIFIER_FUNCTION_CALLING, -1, -1, funcCall.StartPos[0], funcCall.StartPos[1], funcCall.EndPos[0], funcCall.EndPos[1])
IdList.append(IdFC)
return IdList
def StripNonAlnumChars(Str):
StrippedStr = ''
for Char in Str:
if Char.isalnum() or Char == '_':
StrippedStr += Char
return StrippedStr
def GetParamList(FuncDeclarator, FuncNameLine=0, FuncNameOffset=0):
FuncDeclarator = StripComments(FuncDeclarator)
ParamIdList = []
#DeclSplitList = FuncDeclarator.split('(')
LBPos = FuncDeclarator.find('(')
#if len(DeclSplitList) < 2:
if LBPos == -1:
return ParamIdList
#FuncName = DeclSplitList[0]
FuncName = FuncDeclarator[0:LBPos]
#ParamStr = DeclSplitList[1].rstrip(')')
ParamStr = FuncDeclarator[LBPos + 1:].rstrip(')')
LineSkipped = 0
OffsetSkipped = 0
TailChar = FuncName[-1]
while not TailChar.isalpha() and TailChar != '_':
if TailChar == '\n':
FuncName = FuncName.rstrip('\r\n').rstrip('\n')
LineSkipped += 1
OffsetSkipped = 0
elif TailChar == '\r':
FuncName = FuncName.rstrip('\r')
LineSkipped += 1
OffsetSkipped = 0
elif TailChar == ' ':
FuncName = FuncName.rstrip(' ')
OffsetSkipped += 1
elif TailChar == '\t':
FuncName = FuncName.rstrip('\t')
OffsetSkipped += 8
else:
FuncName = FuncName[:-1]
TailChar = FuncName[-1]
OffsetSkipped += 1 #skip '('
for p in ParamStr.split(','):
ListP = p.split()
if len(ListP) == 0:
continue
ParamName = ListP[-1]
DeclText = ParamName.strip()
RightSpacePos = p.rfind(ParamName)
ParamModifier = p[0:RightSpacePos]
if ParamName == 'OPTIONAL':
if ParamModifier == '':
ParamModifier += ' ' + 'OPTIONAL'
DeclText = ''
else:
ParamName = ListP[-2]
DeclText = ParamName.strip()
RightSpacePos = p.rfind(ParamName)
ParamModifier = p[0:RightSpacePos]
ParamModifier += 'OPTIONAL'
while DeclText.startswith('*'):
ParamModifier += ' ' + '*'
DeclText = DeclText.lstrip('*').strip()
ParamName = DeclText
        # ignore array length if present.
LBIndex = ParamName.find('[')
if LBIndex != -1:
ParamName = ParamName[0:LBIndex]
Start = RightSpacePos
Index = 0
PreChar = ''
while Index < Start:
FirstChar = p[Index]
if FirstChar == '\r':
Index += 1
LineSkipped += 1
OffsetSkipped = 0
elif FirstChar == '\n':
Index += 1
if PreChar != '\r':
LineSkipped += 1
OffsetSkipped = 0
elif FirstChar == ' ':
Index += 1
OffsetSkipped += 1
elif FirstChar == '\t':
Index += 1
OffsetSkipped += 8
else:
Index += 1
OffsetSkipped += 1
PreChar = FirstChar
ParamBeginLine = FuncNameLine + LineSkipped
ParamBeginOffset = FuncNameOffset + OffsetSkipped
Index = Start + len(ParamName)
PreChar = ''
while Index < len(p):
FirstChar = p[Index]
if FirstChar == '\r':
Index += 1
LineSkipped += 1
OffsetSkipped = 0
elif FirstChar == '\n':
Index += 1
if PreChar != '\r':
LineSkipped += 1
OffsetSkipped = 0
elif FirstChar == ' ':
Index += 1
OffsetSkipped += 1
elif FirstChar == '\t':
Index += 1
OffsetSkipped += 8
else:
Index += 1
OffsetSkipped += 1
PreChar = FirstChar
ParamEndLine = FuncNameLine + LineSkipped
ParamEndOffset = FuncNameOffset + OffsetSkipped
if ParamName != '...':
ParamName = StripNonAlnumChars(ParamName)
IdParam = DataClass.IdentifierClass(-1, ParamModifier, '', ParamName, '', DataClass.MODEL_IDENTIFIER_PARAMETER, -1, -1, ParamBeginLine, ParamBeginOffset, ParamEndLine, ParamEndOffset)
ParamIdList.append(IdParam)
OffsetSkipped += 1 #skip ','
return ParamIdList
def GetFunctionList():
FuncObjList = []
for FuncDef in FileProfile.FunctionDefinitionList:
ParamIdList = []
DeclText = FuncDef.Declarator.lstrip()
FuncNameStartLine = FuncDef.NamePos[0]
FuncNameStartColumn = FuncDef.NamePos[1]
FirstChar = DeclText[0]
while not FirstChar.isalpha() and FirstChar != '_':
if FirstChar == '*':
FuncDef.Modifier += '*'
FuncNameStartColumn += 1
DeclText = DeclText.lstrip('*')
elif FirstChar == '\r':
DeclText = DeclText.lstrip('\r\n').lstrip('\r')
FuncNameStartLine += 1
FuncNameStartColumn = 0
elif FirstChar == '\n':
DeclText = DeclText.lstrip('\n')
FuncNameStartLine += 1
FuncNameStartColumn = 0
elif FirstChar == ' ':
DeclText = DeclText.lstrip(' ')
FuncNameStartColumn += 1
elif FirstChar == '\t':
DeclText = DeclText.lstrip('\t')
FuncNameStartColumn += 8
else:
DeclText = DeclText[1:]
FuncNameStartColumn += 1
FirstChar = DeclText[0]
FuncDef.Declarator = DeclText
DeclSplitList = FuncDef.Declarator.split('(')
if len(DeclSplitList) < 2:
continue
FuncName = DeclSplitList[0]
FuncNamePartList = FuncName.split()
if len(FuncNamePartList) > 1:
FuncName = FuncNamePartList[-1]
NameStart = DeclSplitList[0].rfind(FuncName)
if NameStart > 0:
FuncDef.Modifier += ' ' + DeclSplitList[0][0:NameStart]
Index = 0
PreChar = ''
while Index < NameStart:
FirstChar = DeclSplitList[0][Index]
if DeclSplitList[0][Index:].startswith('EFIAPI'):
Index += 6
FuncNameStartColumn += 6
PreChar = ''
continue
elif FirstChar == '\r':
Index += 1
FuncNameStartLine += 1
FuncNameStartColumn = 0
elif FirstChar == '\n':
Index += 1
if PreChar != '\r':
FuncNameStartLine += 1
FuncNameStartColumn = 0
elif FirstChar == ' ':
Index += 1
FuncNameStartColumn += 1
elif FirstChar == '\t':
Index += 1
FuncNameStartColumn += 8
else:
Index += 1
FuncNameStartColumn += 1
PreChar = FirstChar
FuncObj = DataClass.FunctionClass(-1, FuncDef.Declarator, FuncDef.Modifier, FuncName.strip(), '', FuncDef.StartPos[0], FuncDef.StartPos[1], FuncDef.EndPos[0], FuncDef.EndPos[1], FuncDef.LeftBracePos[0], FuncDef.LeftBracePos[1], -1, ParamIdList, [], FuncNameStartLine, FuncNameStartColumn)
FuncObjList.append(FuncObj)
return FuncObjList
def GetFileModificationTimeFromDB(FullFileName):
TimeValue = 0.0
Db = GetDB()
SqlStatement = """ select TimeStamp
from File
where FullPath = \'%s\'
""" % (FullFileName)
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
TimeValue = Result[0]
return TimeValue
def CollectSourceCodeDataIntoDB(RootDir):
FileObjList = []
tuple = os.walk(RootDir)
IgnoredPattern = GetIgnoredDirListPattern()
ParseErrorFileList = []
for dirpath, dirnames, filenames in tuple:
if IgnoredPattern.match(dirpath.upper()):
continue
for Dir in dirnames:
Dirname = os.path.join(dirpath, Dir)
if os.path.islink(Dirname):
Dirname = os.path.realpath(Dirname)
if os.path.isdir(Dirname):
# symlinks to directories are treated as directories
dirnames.remove(Dir)
dirnames.append(Dirname)
for f in filenames:
if f.lower() in EccGlobalData.gConfig.SkipFileList:
continue
collector = None
FullName = os.path.normpath(os.path.join(dirpath, f))
model = DataClass.MODEL_FILE_OTHERS
if os.path.splitext(f)[1] in ('.h', '.c'):
EdkLogger.info("Parsing " + FullName)
model = f.endswith('c') and DataClass.MODEL_FILE_C or DataClass.MODEL_FILE_H
collector = CodeFragmentCollector.CodeFragmentCollector(FullName)
try:
collector.ParseFile()
except UnicodeError:
ParseErrorFileList.append(FullName)
collector.CleanFileProfileBuffer()
collector.ParseFileWithClearedPPDirective()
# collector.PrintFragments()
BaseName = os.path.basename(f)
DirName = os.path.dirname(FullName)
Ext = os.path.splitext(f)[1].lstrip('.')
ModifiedTime = os.path.getmtime(FullName)
FileObj = DataClass.FileClass(-1, BaseName, Ext, DirName, FullName, model, ModifiedTime, GetFunctionList(), GetIdentifierList(), [])
FileObjList.append(FileObj)
if collector:
collector.CleanFileProfileBuffer()
if len(ParseErrorFileList) > 0:
EdkLogger.info("Found unrecoverable error during parsing:\n\t%s\n" % "\n\t".join(ParseErrorFileList))
Db = GetDB()
for file in FileObjList:
if file.ExtName.upper() not in ['INF', 'DEC', 'DSC', 'FDF']:
Db.InsertOneFile(file)
Db.UpdateIdentifierBelongsToFunction()
def GetTableID(FullFileName, ErrorMsgList=None):
if ErrorMsgList == None:
ErrorMsgList = []
Db = GetDB()
SqlStatement = """ select ID
from File
where FullPath like '%s'
""" % FullFileName
ResultSet = Db.TblFile.Exec(SqlStatement)
FileID = -1
for Result in ResultSet:
if FileID != -1:
ErrorMsgList.append('Duplicate file ID found in DB for file %s' % FullFileName)
            return -2
FileID = Result[0]
if FileID == -1:
ErrorMsgList.append('NO file ID found in DB for file %s' % FullFileName)
        return -1
return FileID
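# GetIncludeFileList() returns the raw '#include ...' rows recorded for a
# header file, using IncludeFileListDict as a per-file cache.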
def GetIncludeFileList(FullFileName):
    if os.path.splitext(FullFileName)[1].upper() not in ('.H',):  # one-element tuple, not a string
return []
IFList = IncludeFileListDict.get(FullFileName)
if IFList != None:
return IFList
FileID = GetTableID(FullFileName)
if FileID < 0:
return []
Db = GetDB()
FileTable = 'Identifier' + str(FileID)
SqlStatement = """ select Value
from %s
where Model = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_INCLUDE)
ResultSet = Db.TblFile.Exec(SqlStatement)
IncludeFileListDict[FullFileName] = ResultSet
return ResultSet
def GetFullPathOfIncludeFile(Str, IncludePathList):
for IncludePath in IncludePathList:
FullPath = os.path.join(IncludePath, Str)
FullPath = os.path.normpath(FullPath)
if os.path.exists(FullPath):
return FullPath
return None
def GetAllIncludeFiles(FullFileName):
if AllIncludeFileListDict.get(FullFileName) != None:
return AllIncludeFileListDict.get(FullFileName)
FileDirName = os.path.dirname(FullFileName)
IncludePathList = IncludePathListDict.get(FileDirName)
if IncludePathList == None:
IncludePathList = MetaDataParser.GetIncludeListOfFile(EccGlobalData.gWorkspace, FullFileName, GetDB())
if FileDirName not in IncludePathList:
IncludePathList.insert(0, FileDirName)
IncludePathListDict[FileDirName] = IncludePathList
IncludeFileQueue = []
for IncludeFile in GetIncludeFileList(FullFileName):
FileName = IncludeFile[0].lstrip('#').strip()
FileName = FileName.lstrip('include').strip()
FileName = FileName.strip('\"')
FileName = FileName.lstrip('<').rstrip('>').strip()
FullPath = GetFullPathOfIncludeFile(FileName, IncludePathList)
if FullPath != None:
IncludeFileQueue.append(FullPath)
i = 0
while i < len(IncludeFileQueue):
for IncludeFile in GetIncludeFileList(IncludeFileQueue[i]):
FileName = IncludeFile[0].lstrip('#').strip()
FileName = FileName.lstrip('include').strip()
FileName = FileName.strip('\"')
FileName = FileName.lstrip('<').rstrip('>').strip()
FullPath = GetFullPathOfIncludeFile(FileName, IncludePathList)
if FullPath != None and FullPath not in IncludeFileQueue:
IncludeFileQueue.insert(i + 1, FullPath)
i += 1
AllIncludeFileListDict[FullFileName] = IncludeFileQueue
return IncludeFileQueue
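# GetPredicateListFromPredicateExpStr() splits a predicate expression string
# on the logical operators '&&' and '||', e.g.
#   GetPredicateListFromPredicateExpStr('Status == EFI_SUCCESS && Ptr != NULL')
#   returns ['Status == EFI_SUCCESS', 'Ptr != NULL'].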
def GetPredicateListFromPredicateExpStr(PES):
PredicateList = []
i = 0
PredicateBegin = 0
#PredicateEnd = 0
LogicOpPos = -1
p = GetFuncDeclPattern()
while i < len(PES) - 1:
if (PES[i].isalnum() or PES[i] == '_' or PES[i] == '*') and LogicOpPos > PredicateBegin:
PredicateBegin = i
if (PES[i] == '&' and PES[i + 1] == '&') or (PES[i] == '|' and PES[i + 1] == '|'):
LogicOpPos = i
Exp = PES[PredicateBegin:i].strip()
# Exp may contain '.' or '->'
TmpExp = Exp.replace('.', '').replace('->', '')
if p.match(TmpExp):
PredicateList.append(Exp)
else:
PredicateList.append(Exp.rstrip(';').rstrip(')').strip())
i += 1
if PredicateBegin > LogicOpPos:
while PredicateBegin < len(PES):
if PES[PredicateBegin].isalnum() or PES[PredicateBegin] == '_' or PES[PredicateBegin] == '*':
break
PredicateBegin += 1
Exp = PES[PredicateBegin:len(PES)].strip()
# Exp may contain '.' or '->'
TmpExp = Exp.replace('.', '').replace('->', '')
if p.match(TmpExp):
PredicateList.append(Exp)
else:
PredicateList.append(Exp.rstrip(';').rstrip(')').strip())
return PredicateList
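# GetCNameList() extracts the chain of C identifiers from an lvalue string,
# collecting any leading '*' de-reference operators into StarList, e.g.
#   GetCNameList('Ptr->Field.Sub') returns ['Ptr', 'Field', 'Sub'].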
def GetCNameList(Lvalue, StarList=None):
    # A fresh list per call avoids the shared-mutable-default-argument pitfall;
    # StarList still collects any leading '*' operators for the caller.
    if StarList is None:
        StarList = []
Lvalue += ' '
i = 0
SearchBegin = 0
VarStart = -1
VarEnd = -1
VarList = []
while SearchBegin < len(Lvalue):
while i < len(Lvalue):
if Lvalue[i].isalnum() or Lvalue[i] == '_':
if VarStart == -1:
VarStart = i
VarEnd = i
i += 1
elif VarEnd != -1:
VarList.append(Lvalue[VarStart:VarEnd + 1])
i += 1
break
else:
if VarStart == -1 and Lvalue[i] == '*':
StarList.append('*')
i += 1
if VarEnd == -1:
break
DotIndex = Lvalue[VarEnd:].find('.')
ArrowIndex = Lvalue[VarEnd:].find('->')
if DotIndex == -1 and ArrowIndex == -1:
break
elif DotIndex == -1 and ArrowIndex != -1:
SearchBegin = VarEnd + ArrowIndex
elif ArrowIndex == -1 and DotIndex != -1:
SearchBegin = VarEnd + DotIndex
else:
            SearchBegin = VarEnd + min(DotIndex, ArrowIndex)
i = SearchBegin
VarStart = -1
VarEnd = -1
return VarList
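# SplitPredicateByOp() splits a predicate string on the comparison operator
# Op, returning [Name] when the operator is absent and [Name, Value] when it
# is found. When IsFuncCalling is True, the search for Op starts only after
# the call's matched parentheses, so operators inside the argument list are
# ignored.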
def SplitPredicateByOp(Str, Op, IsFuncCalling=False):
Name = Str.strip()
Value = None
if IsFuncCalling:
Index = 0
LBFound = False
UnmatchedLBCount = 0
while Index < len(Str):
while not LBFound and Str[Index] != '_' and not Str[Index].isalnum():
Index += 1
while not LBFound and (Str[Index].isalnum() or Str[Index] == '_'):
Index += 1
            # There may be a type-cast at the beginning; skip it.
RemainingStr = Str[Index:].lstrip()
if RemainingStr.startswith(')') and not LBFound:
Index += 1
continue
if RemainingStr.startswith('(') and not LBFound:
LBFound = True
if Str[Index] == '(':
UnmatchedLBCount += 1
Index += 1
continue
if Str[Index] == ')':
UnmatchedLBCount -= 1
Index += 1
if UnmatchedLBCount == 0:
break
continue
Index += 1
if UnmatchedLBCount > 0:
return [Name]
IndexInRemainingStr = Str[Index:].find(Op)
if IndexInRemainingStr == -1:
return [Name]
Name = Str[0:Index + IndexInRemainingStr].strip()
Value = Str[Index + IndexInRemainingStr + len(Op):].strip().strip(')')
return [Name, Value]
TmpStr = Str.rstrip(';').rstrip(')')
while True:
Index = TmpStr.rfind(Op)
if Index == -1:
return [Name]
if Str[Index - 1].isalnum() or Str[Index - 1].isspace() or Str[Index - 1] == ')' or Str[Index - 1] == ']':
Name = Str[0:Index].strip()
Value = Str[Index + len(Op):].strip()
return [Name, Value]
TmpStr = Str[0:Index - 1]
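# SplitPredicateStr() tries each comparison operator in turn and returns
# [[Name, Value], Op], or [[Str, None], None] when no operator is present,
# e.g. SplitPredicateStr('Status == EFI_SUCCESS') returns
# [['Status', 'EFI_SUCCESS'], '=='].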
def SplitPredicateStr(Str):
Str = Str.lstrip('(')
IsFuncCalling = False
p = GetFuncDeclPattern()
TmpStr = Str.replace('.', '').replace('->', '')
if p.match(TmpStr):
IsFuncCalling = True
PredPartList = SplitPredicateByOp(Str, '==', IsFuncCalling)
if len(PredPartList) > 1:
return [PredPartList, '==']
PredPartList = SplitPredicateByOp(Str, '!=', IsFuncCalling)
if len(PredPartList) > 1:
return [PredPartList, '!=']
PredPartList = SplitPredicateByOp(Str, '>=', IsFuncCalling)
if len(PredPartList) > 1:
return [PredPartList, '>=']
PredPartList = SplitPredicateByOp(Str, '<=', IsFuncCalling)
if len(PredPartList) > 1:
return [PredPartList, '<=']
PredPartList = SplitPredicateByOp(Str, '>', IsFuncCalling)
if len(PredPartList) > 1:
return [PredPartList, '>']
PredPartList = SplitPredicateByOp(Str, '<', IsFuncCalling)
if len(PredPartList) > 1:
return [PredPartList, '<']
return [[Str, None], None]
def GetFuncContainsPE(ExpLine, ResultSet):
for Result in ResultSet:
if Result[0] < ExpLine and Result[1] > ExpLine:
return Result
return None
def PatternInModifier(Modifier, SubStr):
PartList = Modifier.split()
for Part in PartList:
if Part == SubStr:
return True
return False
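# GetDataTypeFromModifier() strips the configured modifier keywords and any
# array suffixes from a modifier string and returns what remains, or 'VOID'
# if nothing is left. Assuming 'IN' and 'OUT' appear in
# EccGlobalData.gConfig.ModifierList, 'IN OUT UINTN' would reduce to 'UINTN'.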
def GetDataTypeFromModifier(ModifierStr):
MList = ModifierStr.split()
ReturnType = ''
for M in MList:
if M in EccGlobalData.gConfig.ModifierList:
continue
        # Remove the array suffix.
if M.startswith('[') or M.endswith(']'):
continue
ReturnType += M + ' '
ReturnType = ReturnType.strip()
if len(ReturnType) == 0:
ReturnType = 'VOID'
return ReturnType
def DiffModifier(Str1, Str2):
PartList1 = Str1.split()
PartList2 = Str2.split()
    return PartList1 != PartList2
def GetTypedefDict(FullFileName):
Dict = ComplexTypeDict.get(FullFileName)
if Dict != None:
return Dict
FileID = GetTableID(FullFileName)
FileTable = 'Identifier' + str(FileID)
Db = GetDB()
SqlStatement = """ select Modifier, Name, Value, ID
from %s
where Model = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_TYPEDEF)
ResultSet = Db.TblFile.Exec(SqlStatement)
Dict = {}
for Result in ResultSet:
if len(Result[0]) == 0:
Dict[Result[1]] = Result[2]
IncludeFileList = GetAllIncludeFiles(FullFileName)
for F in IncludeFileList:
FileID = GetTableID(F)
if FileID < 0:
continue
FileTable = 'Identifier' + str(FileID)
SqlStatement = """ select Modifier, Name, Value, ID
from %s
where Model = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_TYPEDEF)
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
if not Result[2].startswith('FP ('):
Dict[Result[1]] = Result[2]
else:
if len(Result[0]) == 0:
Dict[Result[1]] = 'VOID'
else:
Dict[Result[1]] = GetDataTypeFromModifier(Result[0])
ComplexTypeDict[FullFileName] = Dict
return Dict
def GetSUDict(FullFileName):
Dict = SUDict.get(FullFileName)
if Dict != None:
return Dict
FileID = GetTableID(FullFileName)
FileTable = 'Identifier' + str(FileID)
Db = GetDB()
SqlStatement = """ select Name, Value, ID
from %s
where Model = %d or Model = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_STRUCTURE, DataClass.MODEL_IDENTIFIER_UNION)
ResultSet = Db.TblFile.Exec(SqlStatement)
Dict = {}
for Result in ResultSet:
if len(Result[1]) > 0:
Dict[Result[0]] = Result[1]
IncludeFileList = GetAllIncludeFiles(FullFileName)
for F in IncludeFileList:
FileID = GetTableID(F)
if FileID < 0:
continue
FileTable = 'Identifier' + str(FileID)
SqlStatement = """ select Name, Value, ID
from %s
where Model = %d or Model = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_STRUCTURE, DataClass.MODEL_IDENTIFIER_UNION)
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
if len(Result[1]) > 0:
Dict[Result[0]] = Result[1]
SUDict[FullFileName] = Dict
return Dict
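# StripComments() blanks out '/* ... */' and '// ...' comments with spaces
# so column positions of the remaining code are preserved, e.g.
#   StripComments('a = 1; /* init */') returns 'a = 1;'.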
def StripComments(Str):
Str += ' '
ListFromStr = list(Str)
InComment = False
DoubleSlashComment = False
Index = 0
while Index < len(ListFromStr):
        # A newline terminates a // comment.
if ListFromStr[Index] == '\n':
if InComment and DoubleSlashComment:
InComment = False
DoubleSlashComment = False
Index += 1
# check for */ comment end
elif InComment and not DoubleSlashComment and ListFromStr[Index] == '*' and ListFromStr[Index + 1] == '/':
ListFromStr[Index] = ' '
Index += 1
ListFromStr[Index] = ' '
Index += 1
InComment = False
# set comments to spaces
elif InComment:
ListFromStr[Index] = ' '
Index += 1
# check for // comment
elif ListFromStr[Index] == '/' and ListFromStr[Index + 1] == '/' and ListFromStr[Index + 2] != '\n':
InComment = True
DoubleSlashComment = True
# check for /* comment start
elif ListFromStr[Index] == '/' and ListFromStr[Index + 1] == '*':
ListFromStr[Index] = ' '
Index += 1
ListFromStr[Index] = ' '
Index += 1
InComment = True
else:
Index += 1
# restore from List to String
Str = "".join(ListFromStr)
Str = Str.rstrip(' ')
return Str
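# GetFinalTypeValue() resolves Type through the typedef and struct/union
# dictionaries until a '{...}' body is found, then returns the declared type
# of FieldName inside that body, or None if it cannot be resolved.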
def GetFinalTypeValue(Type, FieldName, TypedefDict, SUDict):
Value = TypedefDict.get(Type)
if Value == None:
Value = SUDict.get(Type)
if Value == None:
return None
LBPos = Value.find('{')
while LBPos == -1:
FTList = Value.split()
for FT in FTList:
if FT not in ('struct', 'union'):
Value = TypedefDict.get(FT)
if Value == None:
Value = SUDict.get(FT)
break
if Value == None:
return None
LBPos = Value.find('{')
# RBPos = Value.find('}')
Fields = Value[LBPos + 1:]
Fields = StripComments(Fields)
FieldsList = Fields.split(';')
for Field in FieldsList:
Field = Field.strip()
Index = Field.rfind(FieldName)
if Index < 1:
continue
if not Field[Index - 1].isalnum():
if Index + len(FieldName) == len(Field):
Type = GetDataTypeFromModifier(Field[0:Index])
return Type.strip()
else:
                # Handle the case where the struct field is an array with [] suffixes.
if not Field[Index + len(FieldName)].isalnum():
Type = GetDataTypeFromModifier(Field[0:Index])
return Type.strip()
return None
def GetRealType(Type, TypedefDict, TargetType=None):
if TargetType != None and Type == TargetType:
return Type
while TypedefDict.get(Type):
Type = TypedefDict.get(Type)
if TargetType != None and Type == TargetType:
return Type
return Type
def GetTypeInfo(RefList, Modifier, FullFileName, TargetType=None):
TypedefDict = GetTypedefDict(FullFileName)
SUDict = GetSUDict(FullFileName)
Type = GetDataTypeFromModifier(Modifier).replace('*', '').strip()
Type = Type.split()[-1]
Index = 0
while Index < len(RefList):
FieldName = RefList[Index]
FromType = GetFinalTypeValue(Type, FieldName, TypedefDict, SUDict)
if FromType == None:
return None
# we want to determine the exact type.
if TargetType != None:
Type = FromType.split()[0]
# we only want to check if it is a pointer
else:
Type = FromType
if Type.find('*') != -1 and Index == len(RefList) - 1:
return Type
Type = FromType.split()[0]
Index += 1
Type = GetRealType(Type, TypedefDict, TargetType)
return Type
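# GetVarInfo() resolves the type of a (possibly multi-level) variable
# reference, searching in order: function declarations (for calls), local
# variables of the enclosing function, function parameters, globals of the
# current file, and finally globals of its include files.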
def GetVarInfo(PredVarList, FuncRecord, FullFileName, IsFuncCall=False, TargetType=None, StarList=None):
PredVar = PredVarList[0]
FileID = GetTableID(FullFileName)
Db = GetDB()
FileTable = 'Identifier' + str(FileID)
# search variable in include files
# it is a function call, search function declarations and definitions
if IsFuncCall:
SqlStatement = """ select Modifier, ID
from %s
where Model = %d and Value = \'%s\'
""" % (FileTable, DataClass.MODEL_IDENTIFIER_FUNCTION_DECLARATION, PredVar)
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
Type = GetDataTypeFromModifier(Result[0]).split()[-1]
TypedefDict = GetTypedefDict(FullFileName)
Type = GetRealType(Type, TypedefDict, TargetType)
return Type
IncludeFileList = GetAllIncludeFiles(FullFileName)
for F in IncludeFileList:
FileID = GetTableID(F)
if FileID < 0:
continue
FileTable = 'Identifier' + str(FileID)
SqlStatement = """ select Modifier, ID
from %s
where Model = %d and Value = \'%s\'
""" % (FileTable, DataClass.MODEL_IDENTIFIER_FUNCTION_DECLARATION, PredVar)
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
Type = GetDataTypeFromModifier(Result[0]).split()[-1]
TypedefDict = GetTypedefDict(FullFileName)
Type = GetRealType(Type, TypedefDict, TargetType)
return Type
FileID = GetTableID(FullFileName)
SqlStatement = """ select Modifier, ID
from Function
where BelongsToFile = %d and Name = \'%s\'
""" % (FileID, PredVar)
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
Type = GetDataTypeFromModifier(Result[0]).split()[-1]
TypedefDict = GetTypedefDict(FullFileName)
Type = GetRealType(Type, TypedefDict, TargetType)
return Type
for F in IncludeFileList:
FileID = GetTableID(F)
if FileID < 0:
continue
FileTable = 'Identifier' + str(FileID)
SqlStatement = """ select Modifier, ID
from Function
where BelongsToFile = %d and Name = \'%s\'
""" % (FileID, PredVar)
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
Type = GetDataTypeFromModifier(Result[0]).split()[-1]
TypedefDict = GetTypedefDict(FullFileName)
Type = GetRealType(Type, TypedefDict, TargetType)
return Type
return None
# really variable, search local variable first
SqlStatement = """ select Modifier, ID
from %s
where Model = %d and Name = \'%s\' and StartLine >= %d and StartLine <= %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_VARIABLE, PredVar, FuncRecord[0], FuncRecord[1])
ResultSet = Db.TblFile.Exec(SqlStatement)
VarFound = False
for Result in ResultSet:
if len(PredVarList) > 1:
Type = GetTypeInfo(PredVarList[1:], Result[0], FullFileName, TargetType)
return Type
else:
# Type = GetDataTypeFromModifier(Result[0]).split()[-1]
TypeList = GetDataTypeFromModifier(Result[0]).split()
Type = TypeList[-1]
if len(TypeList) > 1 and StarList != None:
for Star in StarList:
Type = Type.strip()
Type = Type.rstrip(Star)
                # Get the real type after de-referencing pointers.
if len(Type.strip()) == 0:
Type = TypeList[-2]
TypedefDict = GetTypedefDict(FullFileName)
Type = GetRealType(Type, TypedefDict, TargetType)
return Type
# search function parameters second
ParamList = GetParamList(FuncRecord[2])
for Param in ParamList:
if Param.Name.strip() == PredVar:
if len(PredVarList) > 1:
Type = GetTypeInfo(PredVarList[1:], Param.Modifier, FullFileName, TargetType)
return Type
else:
TypeList = GetDataTypeFromModifier(Param.Modifier).split()
Type = TypeList[-1]
if Type == '*' and len(TypeList) >= 2:
Type = TypeList[-2]
if len(TypeList) > 1 and StarList != None:
for Star in StarList:
Type = Type.strip()
Type = Type.rstrip(Star)
                    # Get the real type after de-referencing pointers.
if len(Type.strip()) == 0:
Type = TypeList[-2]
TypedefDict = GetTypedefDict(FullFileName)
Type = GetRealType(Type, TypedefDict, TargetType)
return Type
# search global variable next
SqlStatement = """ select Modifier, ID
from %s
where Model = %d and Name = \'%s\' and BelongsToFunction = -1
""" % (FileTable, DataClass.MODEL_IDENTIFIER_VARIABLE, PredVar)
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
if len(PredVarList) > 1:
Type = GetTypeInfo(PredVarList[1:], Result[0], FullFileName, TargetType)
return Type
else:
TypeList = GetDataTypeFromModifier(Result[0]).split()
Type = TypeList[-1]
if len(TypeList) > 1 and StarList != None:
for Star in StarList:
Type = Type.strip()
Type = Type.rstrip(Star)
                # Get the real type after de-referencing pointers.
if len(Type.strip()) == 0:
Type = TypeList[-2]
TypedefDict = GetTypedefDict(FullFileName)
Type = GetRealType(Type, TypedefDict, TargetType)
return Type
IncludeFileList = GetAllIncludeFiles(FullFileName)
for F in IncludeFileList:
FileID = GetTableID(F)
if FileID < 0:
continue
FileTable = 'Identifier' + str(FileID)
SqlStatement = """ select Modifier, ID
from %s
where Model = %d and BelongsToFunction = -1 and Name = \'%s\'
""" % (FileTable, DataClass.MODEL_IDENTIFIER_VARIABLE, PredVar)
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
if len(PredVarList) > 1:
Type = GetTypeInfo(PredVarList[1:], Result[0], FullFileName, TargetType)
return Type
else:
TypeList = GetDataTypeFromModifier(Result[0]).split()
Type = TypeList[-1]
if len(TypeList) > 1 and StarList != None:
for Star in StarList:
Type = Type.strip()
Type = Type.rstrip(Star)
                    # Get the real type after de-referencing pointers.
if len(Type.strip()) == 0:
Type = TypeList[-2]
TypedefDict = GetTypedefDict(FullFileName)
Type = GetRealType(Type, TypedefDict, TargetType)
return Type
def GetTypeFromArray(Type, Var):
Count = Var.count('[')
while Count > 0:
Type = Type.strip()
Type = Type.rstrip('*')
Count = Count - 1
return Type
def CheckFuncLayoutReturnType(FullFileName):
ErrorMsgList = []
FileID = GetTableID(FullFileName, ErrorMsgList)
if FileID < 0:
return ErrorMsgList
Db = GetDB()
FileTable = 'Identifier' + str(FileID)
SqlStatement = """ select Modifier, ID, StartLine, StartColumn, EndLine, Value
from %s
where Model = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_FUNCTION_DECLARATION)
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
ReturnType = GetDataTypeFromModifier(Result[0])
TypeStart = ReturnType.split()[0]
FuncName = Result[5]
if EccGlobalData.gException.IsException(ERROR_C_FUNCTION_LAYOUT_CHECK_RETURN_TYPE, FuncName):
continue
Result0 = Result[0]
if Result0.upper().startswith('STATIC'):
Result0 = Result0[6:].strip()
Index = Result0.find(TypeStart)
if Index != 0 or Result[3] != 0:
PrintErrorMsg(ERROR_C_FUNCTION_LAYOUT_CHECK_RETURN_TYPE, '[%s] Return Type should appear at the start of line' % FuncName, FileTable, Result[1])
if Result[2] == Result[4]:
PrintErrorMsg(ERROR_C_FUNCTION_LAYOUT_CHECK_RETURN_TYPE, '[%s] Return Type should appear on its own line' % FuncName, FileTable, Result[1])
SqlStatement = """ select Modifier, ID, StartLine, StartColumn, FunNameStartLine, Name
from Function
where BelongsToFile = %d
""" % (FileID)
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
ReturnType = GetDataTypeFromModifier(Result[0])
TypeStart = ReturnType.split()[0]
FuncName = Result[5]
if EccGlobalData.gException.IsException(ERROR_C_FUNCTION_LAYOUT_CHECK_RETURN_TYPE, FuncName):
continue
Result0 = Result[0]
if Result0.upper().startswith('STATIC'):
Result0 = Result0[6:].strip()
Index = Result0.find(ReturnType)
if Index != 0 or Result[3] != 0:
PrintErrorMsg(ERROR_C_FUNCTION_LAYOUT_CHECK_RETURN_TYPE, '[%s] Return Type should appear at the start of line' % FuncName, 'Function', Result[1])
def CheckFuncLayoutModifier(FullFileName):
ErrorMsgList = []
FileID = GetTableID(FullFileName, ErrorMsgList)
if FileID < 0:
return ErrorMsgList
Db = GetDB()
FileTable = 'Identifier' + str(FileID)
SqlStatement = """ select Modifier, ID
from %s
where Model = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_FUNCTION_DECLARATION)
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
ReturnType = GetDataTypeFromModifier(Result[0])
TypeStart = ReturnType.split()[0]
Result0 = Result[0]
if Result0.upper().startswith('STATIC'):
Result0 = Result0[6:].strip()
Index = Result0.find(TypeStart)
if Index != 0:
PrintErrorMsg(ERROR_C_FUNCTION_LAYOUT_CHECK_OPTIONAL_FUNCTIONAL_MODIFIER, '', FileTable, Result[1])
SqlStatement = """ select Modifier, ID
from Function
where BelongsToFile = %d
""" % (FileID)
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
ReturnType = GetDataTypeFromModifier(Result[0])
TypeStart = ReturnType.split()[0]
Result0 = Result[0]
if Result0.upper().startswith('STATIC'):
Result0 = Result0[6:].strip()
Index = Result0.find(TypeStart)
if Index != 0:
PrintErrorMsg(ERROR_C_FUNCTION_LAYOUT_CHECK_OPTIONAL_FUNCTIONAL_MODIFIER, '', 'Function', Result[1])
def CheckFuncLayoutName(FullFileName):
ErrorMsgList = []
# Parameter variable format pattern.
Pattern = re.compile(r'^[A-Z]+\S*[a-z]\S*$')
ParamIgnoreList = ('VOID', '...')
FileID = GetTableID(FullFileName, ErrorMsgList)
if FileID < 0:
return ErrorMsgList
Db = GetDB()
FileTable = 'Identifier' + str(FileID)
SqlStatement = """ select Name, ID, EndColumn, Value
from %s
where Model = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_FUNCTION_DECLARATION)
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
FuncName = Result[3]
if EccGlobalData.gException.IsException(ERROR_C_FUNCTION_LAYOUT_CHECK_FUNCTION_NAME, FuncName):
continue
if Result[2] != 0:
PrintErrorMsg(ERROR_C_FUNCTION_LAYOUT_CHECK_FUNCTION_NAME, 'Function name [%s] should appear at the start of a line' % FuncName, FileTable, Result[1])
ParamList = GetParamList(Result[0])
if len(ParamList) == 0:
continue
StartLine = 0
for Param in ParamList:
if Param.StartLine <= StartLine:
PrintErrorMsg(ERROR_C_FUNCTION_LAYOUT_CHECK_FUNCTION_NAME, 'Parameter %s should be in its own line.' % Param.Name, FileTable, Result[1])
if Param.StartLine - StartLine > 1:
PrintErrorMsg(ERROR_C_FUNCTION_LAYOUT_CHECK_FUNCTION_NAME, 'Empty line appears before Parameter %s.' % Param.Name, FileTable, Result[1])
if not Pattern.match(Param.Name) and not Param.Name in ParamIgnoreList and not EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_VARIABLE_NAME, Param.Name):
                PrintErrorMsg(ERROR_NAMING_CONVENTION_CHECK_VARIABLE_NAME, 'Parameter [%s] does NOT follow naming convention.' % Param.Name, FileTable, Result[1])
StartLine = Param.StartLine
if not Result[0].endswith('\n )') and not Result[0].endswith('\r )'):
PrintErrorMsg(ERROR_C_FUNCTION_LAYOUT_CHECK_FUNCTION_NAME, '\')\' should be on a new line and indented two spaces', FileTable, Result[1])
SqlStatement = """ select Modifier, ID, FunNameStartColumn, Name
from Function
where BelongsToFile = %d
""" % (FileID)
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
FuncName = Result[3]
if EccGlobalData.gException.IsException(ERROR_C_FUNCTION_LAYOUT_CHECK_FUNCTION_NAME, FuncName):
continue
if Result[2] != 0:
PrintErrorMsg(ERROR_C_FUNCTION_LAYOUT_CHECK_FUNCTION_NAME, 'Function name [%s] should appear at the start of a line' % FuncName, 'Function', Result[1])
ParamList = GetParamList(Result[0])
if len(ParamList) == 0:
continue
StartLine = 0
for Param in ParamList:
if Param.StartLine <= StartLine:
PrintErrorMsg(ERROR_C_FUNCTION_LAYOUT_CHECK_FUNCTION_NAME, 'Parameter %s should be in its own line.' % Param.Name, 'Function', Result[1])
if Param.StartLine - StartLine > 1:
PrintErrorMsg(ERROR_C_FUNCTION_LAYOUT_CHECK_FUNCTION_NAME, 'Empty line appears before Parameter %s.' % Param.Name, 'Function', Result[1])
if not Pattern.match(Param.Name) and not Param.Name in ParamIgnoreList and not EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_VARIABLE_NAME, Param.Name):
                PrintErrorMsg(ERROR_NAMING_CONVENTION_CHECK_VARIABLE_NAME, 'Parameter [%s] does NOT follow naming convention.' % Param.Name, 'Function', Result[1])
StartLine = Param.StartLine
if not Result[0].endswith('\n )') and not Result[0].endswith('\r )'):
PrintErrorMsg(ERROR_C_FUNCTION_LAYOUT_CHECK_FUNCTION_NAME, '\')\' should be on a new line and indented two spaces', 'Function', Result[1])
def CheckFuncLayoutPrototype(FullFileName):
ErrorMsgList = []
FileID = GetTableID(FullFileName, ErrorMsgList)
if FileID < 0:
return ErrorMsgList
FileTable = 'Identifier' + str(FileID)
Db = GetDB()
SqlStatement = """ select Modifier, Header, Name, ID
from Function
where BelongsToFile = %d
""" % (FileID)
ResultSet = Db.TblFile.Exec(SqlStatement)
if len(ResultSet) == 0:
return ErrorMsgList
FuncDefList = []
for Result in ResultSet:
FuncDefList.append(Result)
SqlStatement = """ select Modifier, Name, ID
from %s
where Model = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_FUNCTION_DECLARATION)
ResultSet = Db.TblFile.Exec(SqlStatement)
FuncDeclList = []
for Result in ResultSet:
FuncDeclList.append(Result)
UndeclFuncList = []
for FuncDef in FuncDefList:
FuncName = FuncDef[2].strip()
FuncModifier = FuncDef[0]
FuncDefHeader = FuncDef[1]
for FuncDecl in FuncDeclList:
LBPos = FuncDecl[1].find('(')
DeclName = FuncDecl[1][0:LBPos].strip()
DeclModifier = FuncDecl[0]
if DeclName == FuncName:
if DiffModifier(FuncModifier, DeclModifier) and not EccGlobalData.gException.IsException(ERROR_C_FUNCTION_LAYOUT_CHECK_FUNCTION_PROTO_TYPE, FuncName):
PrintErrorMsg(ERROR_C_FUNCTION_LAYOUT_CHECK_FUNCTION_PROTO_TYPE, 'Function [%s] modifier different with prototype.' % FuncName, 'Function', FuncDef[3])
ParamListOfDef = GetParamList(FuncDefHeader)
ParamListOfDecl = GetParamList(FuncDecl[1])
if len(ParamListOfDef) != len(ParamListOfDecl) and not EccGlobalData.gException.IsException(ERROR_C_FUNCTION_LAYOUT_CHECK_FUNCTION_PROTO_TYPE_2, FuncName):
PrintErrorMsg(ERROR_C_FUNCTION_LAYOUT_CHECK_FUNCTION_PROTO_TYPE_2, 'Parameter number different in function [%s].' % FuncName, 'Function', FuncDef[3])
break
Index = 0
while Index < len(ParamListOfDef):
if DiffModifier(ParamListOfDef[Index].Modifier, ParamListOfDecl[Index].Modifier) and not EccGlobalData.gException.IsException(ERROR_C_FUNCTION_LAYOUT_CHECK_FUNCTION_PROTO_TYPE_3, FuncName):
PrintErrorMsg(ERROR_C_FUNCTION_LAYOUT_CHECK_FUNCTION_PROTO_TYPE_3, 'Parameter %s has different modifier with prototype in function [%s].' % (ParamListOfDef[Index].Name, FuncName), 'Function', FuncDef[3])
Index += 1
break
else:
UndeclFuncList.append(FuncDef)
IncludeFileList = GetAllIncludeFiles(FullFileName)
FuncDeclList = []
for F in IncludeFileList:
FileID = GetTableID(F, ErrorMsgList)
if FileID < 0:
continue
FileTable = 'Identifier' + str(FileID)
SqlStatement = """ select Modifier, Name, ID
from %s
where Model = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_FUNCTION_DECLARATION)
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
FuncDeclList.append(Result)
for FuncDef in UndeclFuncList:
FuncName = FuncDef[2].strip()
FuncModifier = FuncDef[0]
FuncDefHeader = FuncDef[1]
for FuncDecl in FuncDeclList:
LBPos = FuncDecl[1].find('(')
DeclName = FuncDecl[1][0:LBPos].strip()
DeclModifier = FuncDecl[0]
if DeclName == FuncName:
if DiffModifier(FuncModifier, DeclModifier) and not EccGlobalData.gException.IsException(ERROR_C_FUNCTION_LAYOUT_CHECK_FUNCTION_PROTO_TYPE, FuncName):
PrintErrorMsg(ERROR_C_FUNCTION_LAYOUT_CHECK_FUNCTION_PROTO_TYPE, 'Function [%s] modifier different with prototype.' % FuncName, 'Function', FuncDef[3])
ParamListOfDef = GetParamList(FuncDefHeader)
ParamListOfDecl = GetParamList(FuncDecl[1])
if len(ParamListOfDef) != len(ParamListOfDecl) and not EccGlobalData.gException.IsException(ERROR_C_FUNCTION_LAYOUT_CHECK_FUNCTION_PROTO_TYPE_2, FuncName):
PrintErrorMsg(ERROR_C_FUNCTION_LAYOUT_CHECK_FUNCTION_PROTO_TYPE_2, 'Parameter number different in function [%s].' % FuncName, 'Function', FuncDef[3])
break
Index = 0
while Index < len(ParamListOfDef):
if DiffModifier(ParamListOfDef[Index].Modifier, ParamListOfDecl[Index].Modifier) and not EccGlobalData.gException.IsException(ERROR_C_FUNCTION_LAYOUT_CHECK_FUNCTION_PROTO_TYPE_3, FuncName):
PrintErrorMsg(ERROR_C_FUNCTION_LAYOUT_CHECK_FUNCTION_PROTO_TYPE_3, 'Parameter %s has different modifier with prototype in function [%s].' % (ParamListOfDef[Index].Name, FuncName), 'Function', FuncDef[3])
Index += 1
break
def CheckFuncLayoutBody(FullFileName):
ErrorMsgList = []
FileID = GetTableID(FullFileName, ErrorMsgList)
if FileID < 0:
return ErrorMsgList
FileTable = 'Identifier' + str(FileID)
Db = GetDB()
SqlStatement = """ select BodyStartColumn, EndColumn, ID
from Function
where BelongsToFile = %d
""" % (FileID)
ResultSet = Db.TblFile.Exec(SqlStatement)
if len(ResultSet) == 0:
return ErrorMsgList
for Result in ResultSet:
if Result[0] != 0:
PrintErrorMsg(ERROR_C_FUNCTION_LAYOUT_CHECK_FUNCTION_BODY, 'open brace should be at the very beginning of a line.', 'Function', Result[2])
if Result[1] != 0:
PrintErrorMsg(ERROR_C_FUNCTION_LAYOUT_CHECK_FUNCTION_BODY, 'close brace should be at the very beginning of a line.', 'Function', Result[2])
def CheckFuncLayoutLocalVariable(FullFileName):
ErrorMsgList = []
FileID = GetTableID(FullFileName, ErrorMsgList)
if FileID < 0:
return ErrorMsgList
Db = GetDB()
FileTable = 'Identifier' + str(FileID)
SqlStatement = """ select ID
from Function
where BelongsToFile = %d
""" % (FileID)
ResultSet = Db.TblFile.Exec(SqlStatement)
if len(ResultSet) == 0:
return ErrorMsgList
FL = []
for Result in ResultSet:
FL.append(Result)
for F in FL:
SqlStatement = """ select Name, Value, ID, Modifier
from %s
where Model = %d and BelongsToFunction = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_VARIABLE, F[0])
ResultSet = Db.TblFile.Exec(SqlStatement)
if len(ResultSet) == 0:
continue
for Result in ResultSet:
if len(Result[1]) > 0 and 'CONST' not in Result[3]:
PrintErrorMsg(ERROR_C_FUNCTION_LAYOUT_CHECK_NO_INIT_OF_VARIABLE, 'Variable Name: %s' % Result[0], FileTable, Result[2])
def CheckMemberVariableFormat(Name, Value, FileTable, TdId, ModelId):
ErrMsgList = []
# Member variable format pattern.
Pattern = re.compile(r'^[A-Z]+\S*[a-z]\S*$')
LBPos = Value.find('{')
RBPos = Value.rfind('}')
if LBPos == -1 or RBPos == -1:
return ErrMsgList
Fields = Value[LBPos + 1 : RBPos]
Fields = StripComments(Fields).strip()
NestPos = Fields.find ('struct')
if NestPos != -1 and (NestPos + len('struct') < len(Fields)) and ModelId != DataClass.MODEL_IDENTIFIER_UNION:
if not Fields[NestPos + len('struct') + 1].isalnum():
if not EccGlobalData.gException.IsException(ERROR_DECLARATION_DATA_TYPE_CHECK_NESTED_STRUCTURE, Name):
PrintErrorMsg(ERROR_DECLARATION_DATA_TYPE_CHECK_NESTED_STRUCTURE, 'Nested struct in [%s].' % (Name), FileTable, TdId)
return ErrMsgList
NestPos = Fields.find ('union')
if NestPos != -1 and (NestPos + len('union') < len(Fields)):
if not Fields[NestPos + len('union') + 1].isalnum():
if not EccGlobalData.gException.IsException(ERROR_DECLARATION_DATA_TYPE_CHECK_NESTED_STRUCTURE, Name):
PrintErrorMsg(ERROR_DECLARATION_DATA_TYPE_CHECK_NESTED_STRUCTURE, 'Nested union in [%s].' % (Name), FileTable, TdId)
return ErrMsgList
NestPos = Fields.find ('enum')
if NestPos != -1 and (NestPos + len('enum') < len(Fields)):
if not Fields[NestPos + len('enum') + 1].isalnum():
if not EccGlobalData.gException.IsException(ERROR_DECLARATION_DATA_TYPE_CHECK_NESTED_STRUCTURE, Name):
PrintErrorMsg(ERROR_DECLARATION_DATA_TYPE_CHECK_NESTED_STRUCTURE, 'Nested enum in [%s].' % (Name), FileTable, TdId)
return ErrMsgList
if ModelId == DataClass.MODEL_IDENTIFIER_ENUMERATE:
FieldsList = Fields.split(',')
        # Handle enum members pre-assigned a value by a function call, e.g. Name = Func(a, b, ...).
QuoteCount = 0
Index = 0
RemoveCurrentElement = False
while Index < len(FieldsList):
Field = FieldsList[Index]
if Field.find('(') != -1:
QuoteCount += 1
RemoveCurrentElement = True
Index += 1
continue
if Field.find(')') != -1 and QuoteCount > 0:
QuoteCount -= 1
if RemoveCurrentElement:
FieldsList.remove(Field)
if QuoteCount == 0:
RemoveCurrentElement = False
continue
if QuoteCount == 0:
RemoveCurrentElement = False
Index += 1
else:
FieldsList = Fields.split(';')
for Field in FieldsList:
Field = Field.strip()
if Field == '':
continue
        # Handle the case where the struct field is an array with [] suffixes.
if Field[-1] == ']':
LBPos = Field.find('[')
Field = Field[0:LBPos]
        # Strip a bit-field suffix of the form ": Number".
if Field.find(':') != -1:
ColonPos = Field.find(':')
Field = Field[0:ColonPos]
Field = Field.strip()
if Field == '':
continue
if Field.startswith("#"):
continue
        # An enum member may be assigned a value directly; keep only the name.
Field = Field.split('=')[0].strip()
TokenList = Field.split()
# Remove pointers before variable
Token = TokenList[-1]
if Token in ['OPTIONAL']:
Token = TokenList[-2]
if not Pattern.match(Token.lstrip('*')):
ErrMsgList.append(Token.lstrip('*'))
return ErrMsgList
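# CheckDeclTypedefFormat() is the shared worker for the struct/enum/union
# typedef checks below: it validates member variable naming and reports any
# struct/union/enum declaration that is never typedef-ed, looking first in
# the current file and then in its include files.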
def CheckDeclTypedefFormat(FullFileName, ModelId):
ErrorMsgList = []
FileID = GetTableID(FullFileName, ErrorMsgList)
if FileID < 0:
return ErrorMsgList
Db = GetDB()
FileTable = 'Identifier' + str(FileID)
SqlStatement = """ select Name, StartLine, EndLine, ID, Value
from %s
where Model = %d
""" % (FileTable, ModelId)
ResultSet = Db.TblFile.Exec(SqlStatement)
ResultList = []
for Result in ResultSet:
ResultList.append(Result)
ErrorType = ERROR_DECLARATION_DATA_TYPE_CHECK_ALL
if ModelId == DataClass.MODEL_IDENTIFIER_STRUCTURE:
ErrorType = ERROR_DECLARATION_DATA_TYPE_CHECK_STRUCTURE_DECLARATION
elif ModelId == DataClass.MODEL_IDENTIFIER_ENUMERATE:
ErrorType = ERROR_DECLARATION_DATA_TYPE_CHECK_ENUMERATED_TYPE
elif ModelId == DataClass.MODEL_IDENTIFIER_UNION:
ErrorType = ERROR_DECLARATION_DATA_TYPE_CHECK_UNION_TYPE
SqlStatement = """ select Modifier, Name, Value, StartLine, EndLine, ID
from %s
where Model = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_TYPEDEF)
TdSet = Db.TblFile.Exec(SqlStatement)
TdList = []
for Td in TdSet:
TdList.append(Td)
    # Check the member variable name format for typedefs from ONLY this file.
for Td in TdList:
Name = Td[1].strip()
Value = Td[2].strip()
if Value.startswith('enum'):
ValueModelId = DataClass.MODEL_IDENTIFIER_ENUMERATE
elif Value.startswith('struct'):
ValueModelId = DataClass.MODEL_IDENTIFIER_STRUCTURE
elif Value.startswith('union'):
ValueModelId = DataClass.MODEL_IDENTIFIER_UNION
else:
continue
if ValueModelId != ModelId:
continue
# Check member variable format.
ErrMsgList = CheckMemberVariableFormat(Name, Value, FileTable, Td[5], ModelId)
for ErrMsg in ErrMsgList:
if EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_VARIABLE_NAME, Name + '.' + ErrMsg):
continue
            PrintErrorMsg(ERROR_NAMING_CONVENTION_CHECK_VARIABLE_NAME, 'Member variable [%s] does NOT follow naming convention.' % (Name + '.' + ErrMsg), FileTable, Td[5])
# First check in current file to see whether struct/union/enum is typedef-ed.
UntypedefedList = []
for Result in ResultList:
# Check member variable format.
Name = Result[0].strip()
Value = Result[4].strip()
if Value.startswith('enum'):
ValueModelId = DataClass.MODEL_IDENTIFIER_ENUMERATE
elif Value.startswith('struct'):
ValueModelId = DataClass.MODEL_IDENTIFIER_STRUCTURE
elif Value.startswith('union'):
ValueModelId = DataClass.MODEL_IDENTIFIER_UNION
else:
continue
if ValueModelId != ModelId:
continue
ErrMsgList = CheckMemberVariableFormat(Name, Value, FileTable, Result[3], ModelId)
for ErrMsg in ErrMsgList:
if EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_VARIABLE_NAME, Result[0] + '.' + ErrMsg):
continue
            PrintErrorMsg(ERROR_NAMING_CONVENTION_CHECK_VARIABLE_NAME, 'Member variable [%s] does NOT follow naming convention.' % (Result[0] + '.' + ErrMsg), FileTable, Result[3])
# Check whether it is typedefed.
Found = False
for Td in TdList:
# skip function pointer
if len(Td[0]) > 0:
continue
if Result[1] >= Td[3] and Td[4] >= Result[2]:
Found = True
if not Td[1].isupper():
PrintErrorMsg(ErrorType, 'Typedef should be UPPER case', FileTable, Td[5])
if Result[0] in Td[2].split():
Found = True
if not Td[1].isupper():
PrintErrorMsg(ErrorType, 'Typedef should be UPPER case', FileTable, Td[5])
if Found:
break
if not Found:
UntypedefedList.append(Result)
continue
if len(UntypedefedList) == 0:
return
IncludeFileList = GetAllIncludeFiles(FullFileName)
TdList = []
for F in IncludeFileList:
FileID = GetTableID(F, ErrorMsgList)
if FileID < 0:
continue
IncludeFileTable = 'Identifier' + str(FileID)
SqlStatement = """ select Modifier, Name, Value, StartLine, EndLine, ID
from %s
where Model = %d
""" % (IncludeFileTable, DataClass.MODEL_IDENTIFIER_TYPEDEF)
ResultSet = Db.TblFile.Exec(SqlStatement)
TdList.extend(ResultSet)
for Result in UntypedefedList:
# Check whether it is typedefed.
Found = False
for Td in TdList:
if len(Td[0]) > 0:
continue
if Result[1] >= Td[3] and Td[4] >= Result[2]:
Found = True
if not Td[1].isupper():
PrintErrorMsg(ErrorType, 'Typedef should be UPPER case', FileTable, Td[5])
if Result[0] in Td[2].split():
Found = True
if not Td[1].isupper():
PrintErrorMsg(ErrorType, 'Typedef should be UPPER case', FileTable, Td[5])
if Found:
break
if not Found:
PrintErrorMsg(ErrorType, 'No Typedef for %s' % Result[0], FileTable, Result[3])
continue
def CheckDeclStructTypedef(FullFileName):
CheckDeclTypedefFormat(FullFileName, DataClass.MODEL_IDENTIFIER_STRUCTURE)
def CheckDeclEnumTypedef(FullFileName):
CheckDeclTypedefFormat(FullFileName, DataClass.MODEL_IDENTIFIER_ENUMERATE)
def CheckDeclUnionTypedef(FullFileName):
CheckDeclTypedefFormat(FullFileName, DataClass.MODEL_IDENTIFIER_UNION)
def CheckDeclArgModifier(FullFileName):
ErrorMsgList = []
FileID = GetTableID(FullFileName, ErrorMsgList)
if FileID < 0:
return ErrorMsgList
Db = GetDB()
FileTable = 'Identifier' + str(FileID)
SqlStatement = """ select Modifier, Name, ID
from %s
where Model = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_VARIABLE)
ResultSet = Db.TblFile.Exec(SqlStatement)
ModifierTuple = ('IN', 'OUT', 'OPTIONAL', 'UNALIGNED')
MAX_MODIFIER_LENGTH = 100
for Result in ResultSet:
for Modifier in ModifierTuple:
if PatternInModifier(Result[0], Modifier) and len(Result[0]) < MAX_MODIFIER_LENGTH:
PrintErrorMsg(ERROR_DECLARATION_DATA_TYPE_CHECK_IN_OUT_MODIFIER, 'Variable Modifier %s' % Result[0], FileTable, Result[2])
break
SqlStatement = """ select Modifier, Name, ID
from %s
where Model = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_FUNCTION_DECLARATION)
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
for Modifier in ModifierTuple:
if PatternInModifier(Result[0], Modifier):
PrintErrorMsg(ERROR_DECLARATION_DATA_TYPE_CHECK_IN_OUT_MODIFIER, 'Return Type Modifier %s' % Result[0], FileTable, Result[2])
break
SqlStatement = """ select Modifier, Header, ID
from Function
where BelongsToFile = %d
""" % (FileID)
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
for Modifier in ModifierTuple:
if PatternInModifier(Result[0], Modifier):
PrintErrorMsg(ERROR_DECLARATION_DATA_TYPE_CHECK_IN_OUT_MODIFIER, 'Return Type Modifier %s' % Result[0], FileTable, Result[2])
break
def CheckDeclNoUseCType(FullFileName):
ErrorMsgList = []
FileID = GetTableID(FullFileName, ErrorMsgList)
if FileID < 0:
return ErrorMsgList
Db = GetDB()
FileTable = 'Identifier' + str(FileID)
SqlStatement = """ select Modifier, Name, ID
from %s
where Model = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_VARIABLE)
ResultSet = Db.TblFile.Exec(SqlStatement)
CTypeTuple = ('int', 'unsigned', 'char', 'void', 'static', 'long')
for Result in ResultSet:
for Type in CTypeTuple:
if PatternInModifier(Result[0], Type):
PrintErrorMsg(ERROR_DECLARATION_DATA_TYPE_CHECK_NO_USE_C_TYPE, 'Variable type %s' % Type, FileTable, Result[2])
break
SqlStatement = """ select Modifier, Name, ID, Value
from %s
where Model = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_FUNCTION_DECLARATION)
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
ParamList = GetParamList(Result[1])
FuncName = Result[3]
if EccGlobalData.gException.IsException(ERROR_DECLARATION_DATA_TYPE_CHECK_NO_USE_C_TYPE, FuncName):
continue
for Type in CTypeTuple:
if PatternInModifier(Result[0], Type):
PrintErrorMsg(ERROR_DECLARATION_DATA_TYPE_CHECK_NO_USE_C_TYPE, '%s Return type %s' % (FuncName, Result[0]), FileTable, Result[2])
for Param in ParamList:
if PatternInModifier(Param.Modifier, Type):
PrintErrorMsg(ERROR_DECLARATION_DATA_TYPE_CHECK_NO_USE_C_TYPE, 'Parameter %s' % Param.Name, FileTable, Result[2])
SqlStatement = """ select Modifier, Header, ID, Name
from Function
where BelongsToFile = %d
""" % (FileID)
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
ParamList = GetParamList(Result[1])
FuncName = Result[3]
if EccGlobalData.gException.IsException(ERROR_DECLARATION_DATA_TYPE_CHECK_NO_USE_C_TYPE, FuncName):
continue
for Type in CTypeTuple:
if PatternInModifier(Result[0], Type):
PrintErrorMsg(ERROR_DECLARATION_DATA_TYPE_CHECK_NO_USE_C_TYPE, '[%s] Return type %s' % (FuncName, Result[0]), FileTable, Result[2])
for Param in ParamList:
if PatternInModifier(Param.Modifier, Type):
PrintErrorMsg(ERROR_DECLARATION_DATA_TYPE_CHECK_NO_USE_C_TYPE, 'Parameter %s' % Param.Name, FileTable, Result[2])
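# CheckPointerNullComparison() flags predicate expressions that use a
# pointer value directly as a boolean (e.g. 'if (Ptr)') instead of an
# explicit comparison against NULL.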
def CheckPointerNullComparison(FullFileName):
ErrorMsgList = []
FileID = GetTableID(FullFileName, ErrorMsgList)
if FileID < 0:
return ErrorMsgList
# cache the found function return type to accelerate later checking in this file.
FuncReturnTypeDict = {}
Db = GetDB()
FileTable = 'Identifier' + str(FileID)
SqlStatement = """ select Value, StartLine, ID
from %s
where Model = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_PREDICATE_EXPRESSION)
ResultSet = Db.TblFile.Exec(SqlStatement)
if len(ResultSet) == 0:
return
PSL = []
for Result in ResultSet:
PSL.append([Result[0], Result[1], Result[2]])
SqlStatement = """ select BodyStartLine, EndLine, Header, Modifier, ID
from Function
where BelongsToFile = %d
""" % (FileID)
ResultSet = Db.TblFile.Exec(SqlStatement)
FL = []
for Result in ResultSet:
FL.append([Result[0], Result[1], Result[2], Result[3], Result[4]])
p = GetFuncDeclPattern()
for Str in PSL:
FuncRecord = GetFuncContainsPE(Str[1], FL)
if FuncRecord == None:
continue
for Exp in GetPredicateListFromPredicateExpStr(Str[0]):
PredInfo = SplitPredicateStr(Exp)
if PredInfo[1] == None:
PredVarStr = PredInfo[0][0].strip()
IsFuncCall = False
SearchInCache = False
# PredVarStr may contain '.' or '->'
TmpStr = PredVarStr.replace('.', '').replace('->', '')
if p.match(TmpStr):
PredVarStr = PredVarStr[0:PredVarStr.find('(')]
SearchInCache = True
                    # Only a direct function call takes the IsFuncCall branch; a multi-level reference ending in a call is treated as a variable.
if TmpStr.startswith(PredVarStr):
IsFuncCall = True
if PredVarStr.strip() in IgnoredKeywordList:
continue
StarList = []
PredVarList = GetCNameList(PredVarStr, StarList)
                # No variable found; the constant may come first, e.g. (0 == VarName).
if len(PredVarList) == 0:
continue
if SearchInCache:
Type = FuncReturnTypeDict.get(PredVarStr)
if Type != None:
if Type.find('*') != -1 and Type != 'BOOLEAN*':
PrintErrorMsg(ERROR_PREDICATE_EXPRESSION_CHECK_COMPARISON_NULL_TYPE, 'Predicate Expression: %s' % Exp, FileTable, Str[2])
continue
if PredVarStr in FuncReturnTypeDict:
continue
Type = GetVarInfo(PredVarList, FuncRecord, FullFileName, IsFuncCall, None, StarList)
if SearchInCache:
FuncReturnTypeDict[PredVarStr] = Type
if Type == None:
continue
Type = GetTypeFromArray(Type, PredVarStr)
if Type.find('*') != -1 and Type != 'BOOLEAN*':
PrintErrorMsg(ERROR_PREDICATE_EXPRESSION_CHECK_COMPARISON_NULL_TYPE, 'Predicate Expression: %s' % Exp, FileTable, Str[2])
def CheckNonBooleanValueComparison(FullFileName):
ErrorMsgList = []
FileID = GetTableID(FullFileName, ErrorMsgList)
if FileID < 0:
return ErrorMsgList
# cache the found function return type to accelerate later checking in this file.
FuncReturnTypeDict = {}
Db = GetDB()
FileTable = 'Identifier' + str(FileID)
SqlStatement = """ select Value, StartLine, ID
from %s
where Model = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_PREDICATE_EXPRESSION)
ResultSet = Db.TblFile.Exec(SqlStatement)
if len(ResultSet) == 0:
return
PSL = []
for Result in ResultSet:
PSL.append([Result[0], Result[1], Result[2]])
SqlStatement = """ select BodyStartLine, EndLine, Header, Modifier, ID
from Function
where BelongsToFile = %d
""" % (FileID)
ResultSet = Db.TblFile.Exec(SqlStatement)
FL = []
for Result in ResultSet:
FL.append([Result[0], Result[1], Result[2], Result[3], Result[4]])
p = GetFuncDeclPattern()
for Str in PSL:
FuncRecord = GetFuncContainsPE(Str[1], FL)
if FuncRecord == None:
continue
for Exp in GetPredicateListFromPredicateExpStr(Str[0]):
PredInfo = SplitPredicateStr(Exp)
if PredInfo[1] == None:
PredVarStr = PredInfo[0][0].strip()
IsFuncCall = False
SearchInCache = False
# PredVarStr may contain '.' or '->'
TmpStr = PredVarStr.replace('.', '').replace('->', '')
if p.match(TmpStr):
PredVarStr = PredVarStr[0:PredVarStr.find('(')]
SearchInCache = True
                    # Only a direct function call takes the IsFuncCall branch; a multi-level reference ending in a call is treated as a variable.
if TmpStr.startswith(PredVarStr):
IsFuncCall = True
if PredVarStr.strip() in IgnoredKeywordList:
continue
StarList = []
PredVarList = GetCNameList(PredVarStr, StarList)
                # No variable found; the constant may come first, e.g. (0 == VarName).
if len(PredVarList) == 0:
continue
if SearchInCache:
Type = FuncReturnTypeDict.get(PredVarStr)
if Type != None:
if Type.find('BOOLEAN') == -1:
PrintErrorMsg(ERROR_PREDICATE_EXPRESSION_CHECK_NO_BOOLEAN_OPERATOR, 'Predicate Expression: %s' % Exp, FileTable, Str[2])
continue
if PredVarStr in FuncReturnTypeDict:
continue
Type = GetVarInfo(PredVarList, FuncRecord, FullFileName, IsFuncCall, 'BOOLEAN', StarList)
if SearchInCache:
FuncReturnTypeDict[PredVarStr] = Type
if Type == None:
continue
if Type.find('BOOLEAN') == -1:
PrintErrorMsg(ERROR_PREDICATE_EXPRESSION_CHECK_NO_BOOLEAN_OPERATOR, 'Predicate Expression: %s' % Exp, FileTable, Str[2])
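# CheckBooleanValueComparison() flags predicates that compare a BOOLEAN
# value against TRUE or FALSE with '==' or '!=' (e.g. 'if (Flag == TRUE)'),
# since a BOOLEAN should be used directly as the condition.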
def CheckBooleanValueComparison(FullFileName):
ErrorMsgList = []
FileID = GetTableID(FullFileName, ErrorMsgList)
if FileID < 0:
return ErrorMsgList
# cache the found function return type to accelerate later checking in this file.
FuncReturnTypeDict = {}
Db = GetDB()
FileTable = 'Identifier' + str(FileID)
SqlStatement = """ select Value, StartLine, ID
from %s
where Model = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_PREDICATE_EXPRESSION)
ResultSet = Db.TblFile.Exec(SqlStatement)
if len(ResultSet) == 0:
return
PSL = []
for Result in ResultSet:
PSL.append([Result[0], Result[1], Result[2]])
SqlStatement = """ select BodyStartLine, EndLine, Header, Modifier, ID
from Function
where BelongsToFile = %d
""" % (FileID)
ResultSet = Db.TblFile.Exec(SqlStatement)
FL = []
for Result in ResultSet:
FL.append([Result[0], Result[1], Result[2], Result[3], Result[4]])
p = GetFuncDeclPattern()
for Str in PSL:
FuncRecord = GetFuncContainsPE(Str[1], FL)
if FuncRecord == None:
continue
for Exp in GetPredicateListFromPredicateExpStr(Str[0]):
PredInfo = SplitPredicateStr(Exp)
if PredInfo[1] in ('==', '!=') and PredInfo[0][1] in ('TRUE', 'FALSE'):
PredVarStr = PredInfo[0][0].strip()
IsFuncCall = False
SearchInCache = False
# PredVarStr may contain '.' or '->'
TmpStr = PredVarStr.replace('.', '').replace('->', '')
if p.match(TmpStr):
PredVarStr = PredVarStr[0:PredVarStr.find('(')]
SearchInCache = True
                    # Only a direct function call takes the IsFuncCall branch; a multi-level reference ending in a call is treated as a variable.
if TmpStr.startswith(PredVarStr):
IsFuncCall = True
if PredVarStr.strip() in IgnoredKeywordList:
continue
StarList = []
PredVarList = GetCNameList(PredVarStr, StarList)
                # No variable found; the constant may come first, e.g. (0 == VarName).
if len(PredVarList) == 0:
continue
if SearchInCache:
Type = FuncReturnTypeDict.get(PredVarStr)
if Type != None:
if Type.find('BOOLEAN') != -1:
PrintErrorMsg(ERROR_PREDICATE_EXPRESSION_CHECK_BOOLEAN_VALUE, 'Predicate Expression: %s' % Exp, FileTable, Str[2])
continue
if PredVarStr in FuncReturnTypeDict:
continue
Type = GetVarInfo(PredVarList, FuncRecord, FullFileName, IsFuncCall, 'BOOLEAN', StarList)
if SearchInCache:
FuncReturnTypeDict[PredVarStr] = Type
if Type == None:
continue
if Type.find('BOOLEAN') != -1:
PrintErrorMsg(ERROR_PREDICATE_EXPRESSION_CHECK_BOOLEAN_VALUE, 'Predicate Expression: %s' % Exp, FileTable, Str[2])
def CheckHeaderFileData(FullFileName):
ErrorMsgList = []
FileID = GetTableID(FullFileName, ErrorMsgList)
if FileID < 0:
return ErrorMsgList
Db = GetDB()
FileTable = 'Identifier' + str(FileID)
SqlStatement = """ select ID, Modifier
from %s
where Model = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_VARIABLE)
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
if not Result[1].startswith('extern'):
PrintErrorMsg(ERROR_INCLUDE_FILE_CHECK_DATA, 'Variable definition appears in header file', FileTable, Result[0])
SqlStatement = """ select ID
from Function
where BelongsToFile = %d
""" % FileID
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
PrintErrorMsg(ERROR_INCLUDE_FILE_CHECK_DATA, 'Function definition appears in header file', 'Function', Result[0])
return ErrorMsgList
def CheckHeaderFileIfndef(FullFileName):
ErrorMsgList = []
FileID = GetTableID(FullFileName, ErrorMsgList)
if FileID < 0:
return ErrorMsgList
Db = GetDB()
FileTable = 'Identifier' + str(FileID)
SqlStatement = """ select Value, StartLine
from %s
where Model = %d order by StartLine
""" % (FileTable, DataClass.MODEL_IDENTIFIER_MACRO_IFNDEF)
ResultSet = Db.TblFile.Exec(SqlStatement)
if len(ResultSet) == 0:
PrintErrorMsg(ERROR_INCLUDE_FILE_CHECK_IFNDEF_STATEMENT_1, '', 'File', FileID)
return ErrorMsgList
for Result in ResultSet:
SqlStatement = """ select Value, EndLine
from %s
where EndLine < %d
""" % (FileTable, Result[1])
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
if not Result[0].startswith('/*') and not Result[0].startswith('//'):
PrintErrorMsg(ERROR_INCLUDE_FILE_CHECK_IFNDEF_STATEMENT_2, '', 'File', FileID)
break
SqlStatement = """ select Value
from %s
where StartLine > (select max(EndLine) from %s where Model = %d)
""" % (FileTable, FileTable, DataClass.MODEL_IDENTIFIER_MACRO_ENDIF)
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
if not Result[0].startswith('/*') and not Result[0].startswith('//'):
PrintErrorMsg(ERROR_INCLUDE_FILE_CHECK_IFNDEF_STATEMENT_3, '', 'File', FileID)
return ErrorMsgList
def CheckDoxygenCommand(FullFileName):
ErrorMsgList = []
FileID = GetTableID(FullFileName, ErrorMsgList)
if FileID < 0:
return ErrorMsgList
Db = GetDB()
FileTable = 'Identifier' + str(FileID)
SqlStatement = """ select Value, ID
from %s
where Model = %d or Model = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_COMMENT, DataClass.MODEL_IDENTIFIER_FUNCTION_HEADER)
ResultSet = Db.TblFile.Exec(SqlStatement)
DoxygenCommandList = ['bug', 'todo', 'example', 'file', 'attention', 'param', 'post', 'pre', 'retval',
'return', 'sa', 'since', 'test', 'note', 'par', 'endcode', 'code']
for Result in ResultSet:
CommentStr = Result[0]
CommentPartList = CommentStr.split()
for Part in CommentPartList:
if Part.upper() == 'BUGBUG':
PrintErrorMsg(ERROR_DOXYGEN_CHECK_COMMAND, 'Bug should be marked with doxygen tag @bug', FileTable, Result[1])
if Part.upper() == 'TODO':
PrintErrorMsg(ERROR_DOXYGEN_CHECK_COMMAND, 'ToDo should be marked with doxygen tag @todo', FileTable, Result[1])
if Part.startswith('@'):
if EccGlobalData.gException.IsException(ERROR_DOXYGEN_CHECK_COMMAND, Part):
continue
if not Part.replace('@', '').strip():
continue
if Part.lstrip('@') in ['{', '}']:
continue
if Part.lstrip('@').isalpha():
if Part.lstrip('@') not in DoxygenCommandList:
PrintErrorMsg(ERROR_DOXYGEN_CHECK_COMMAND, 'Unknown doxygen command %s' % Part, FileTable, Result[1])
else:
Index = Part.find('[')
                    if Index == -1:
                        PrintErrorMsg(ERROR_DOXYGEN_CHECK_COMMAND, 'Unknown doxygen command %s' % Part, FileTable, Result[1])
                        continue
RealCmd = Part[1:Index]
if RealCmd not in DoxygenCommandList:
PrintErrorMsg(ERROR_DOXYGEN_CHECK_COMMAND, 'Unknown doxygen command %s' % Part, FileTable, Result[1])
def CheckDoxygenTripleForwardSlash(FullFileName):
ErrorMsgList = []
FileID = GetTableID(FullFileName, ErrorMsgList)
if FileID < 0:
return ErrorMsgList
Db = GetDB()
SqlStatement = """ select ID, BodyStartLine, BodyStartColumn, EndLine, EndColumn
from Function
where BelongsToFile = %d
""" % (FileID)
ResultSet = Db.TblFile.Exec(SqlStatement)
if len(ResultSet) == 0:
return
FuncDefSet = []
for Result in ResultSet:
FuncDefSet.append(Result)
FileTable = 'Identifier' + str(FileID)
SqlStatement = """ select Value, ID, StartLine, StartColumn, EndLine, EndColumn
from %s
where Model = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_COMMENT)
ResultSet = Db.TblFile.Exec(SqlStatement)
CommentSet = []
try:
for Result in ResultSet:
CommentSet.append(Result)
except:
        print 'Unrecognized chars in comment of file %s' % FullFileName
for Result in CommentSet:
CommentStr = Result[0]
StartLine = Result[2]
StartColumn = Result[3]
EndLine = Result[4]
EndColumn = Result[5]
if not CommentStr.startswith('///<'):
continue
Found = False
for FuncDef in FuncDefSet:
if StartLine == FuncDef[1] and StartColumn > FuncDef[2] and EndLine == FuncDef[3] and EndColumn < FuncDef[4]:
Found = True
break
if StartLine > FuncDef[1] and EndLine < FuncDef[3]:
Found = True
break
if StartLine == FuncDef[1] and StartColumn > FuncDef[2] and EndLine < FuncDef[3]:
Found = True
break
if StartLine > FuncDef[1] and EndLine == FuncDef[3] and EndColumn < FuncDef[4]:
Found = True
break
if Found:
PrintErrorMsg(ERROR_DOXYGEN_CHECK_COMMENT_FORMAT, '', FileTable, Result[1])
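# CheckFileHeaderDoxygenComments() validates the '/** @file ... **/' header
# comment: its start/end markers, the Copyright line (with '<BR>' and the
# 'All rights reserved' announcement), the License line that must follow it,
# and the '@par Revision Reference:' list format.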
def CheckFileHeaderDoxygenComments(FullFileName):
ErrorMsgList = []
FileID = GetTableID(FullFileName, ErrorMsgList)
if FileID < 0:
return ErrorMsgList
Db = GetDB()
FileTable = 'Identifier' + str(FileID)
SqlStatement = """ select Value, ID
from %s
where Model = %d and (StartLine = 1 or StartLine = 7 or StartLine = 8) and StartColumn = 0
""" % (FileTable, DataClass.MODEL_IDENTIFIER_COMMENT)
ResultSet = Db.TblFile.Exec(SqlStatement)
if len(ResultSet) == 0:
PrintErrorMsg(ERROR_HEADER_CHECK_FILE, 'No File License header appear at the very beginning of file.', 'File', FileID)
return ErrorMsgList
NoHeaderCommentStartFlag = True
NoHeaderCommentEndFlag = True
NoHeaderCommentPeriodFlag = True
NoCopyrightFlag = True
NoLicenseFlag = True
NoRevReferFlag = True
NextLineIndex = 0
for Result in ResultSet:
FileStartFlag = False
CommentStrList = []
CommentStr = Result[0].strip()
CommentStrListTemp = CommentStr.split('\n')
if (len(CommentStrListTemp) <= 1):
            # Fall back to splitting on Mac-style '\r' line endings.
CommentStrListTemp = CommentStr.split('\r')
# Skip the content before the file header
for CommentLine in CommentStrListTemp:
if CommentLine.strip().startswith('/** @file'):
FileStartFlag = True
if FileStartFlag == True:
CommentStrList.append(CommentLine)
ID = Result[1]
Index = 0
if CommentStrList and CommentStrList[0].strip().startswith('/** @file'):
NoHeaderCommentStartFlag = False
else:
continue
if CommentStrList and CommentStrList[-1].strip().endswith('**/'):
NoHeaderCommentEndFlag = False
else:
continue
for CommentLine in CommentStrList:
Index = Index + 1
NextLineIndex = Index
if CommentLine.startswith('/** @file'):
continue
if CommentLine.startswith('**/'):
break
            # Check whether each C file header comment content line starts with two spaces.
if EccGlobalData.gConfig.HeaderCheckCFileCommentStartSpacesNum == '1' or EccGlobalData.gConfig.HeaderCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
                if not CommentLine.startswith('/** @file') and not CommentLine.startswith('**/') and CommentLine.strip() and not CommentLine.startswith('  '):
PrintErrorMsg(ERROR_HEADER_CHECK_FILE, 'File header comment content should start with two spaces at each line', FileTable, ID)
CommentLine = CommentLine.strip()
if CommentLine.startswith('Copyright'):
NoCopyrightFlag = False
if CommentLine.find('All rights reserved') == -1:
for Copyright in EccGlobalData.gConfig.Copyright:
if CommentLine.find(Copyright) > -1:
PrintErrorMsg(ERROR_HEADER_CHECK_FILE, '""All rights reserved"" announcement should be following the ""Copyright"" at the same line', FileTable, ID)
break
if CommentLine.endswith('<BR>') == -1:
PrintErrorMsg(ERROR_HEADER_CHECK_FILE, 'The ""<BR>"" at the end of the Copyright line is required', FileTable, ID)
if NextLineIndex < len(CommentStrList) and CommentStrList[NextLineIndex].strip().startswith('Copyright') == False and CommentStrList[NextLineIndex].strip():
NoLicenseFlag = False
if CommentLine.startswith('@par Revision Reference:'):
NoRevReferFlag = False
RefListFlag = False
for RefLine in CommentStrList[NextLineIndex:]:
if RefLine.strip() and (NextLineIndex + 1) < len(CommentStrList) and CommentStrList[NextLineIndex+1].strip() and not CommentStrList[NextLineIndex+1].strip().startswith('**/'):
RefListFlag = True
if not RefLine.strip() or RefLine.strip().startswith('**/'):
RefListFlag = False
break
# Check whether C File header Comment's each reference at list should begin with a bullet character.
if EccGlobalData.gConfig.HeaderCheckCFileCommentReferenceFormat == '1' or EccGlobalData.gConfig.HeaderCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
if RefListFlag == True:
if RefLine.strip() and not RefLine.strip().startswith('**/') and not RefLine.startswith(' -'):
PrintErrorMsg(ERROR_HEADER_CHECK_FILE, 'Each reference on a separate line should begin with a bullet character "-"', FileTable, ID)
if NoHeaderCommentStartFlag:
PrintErrorMsg(ERROR_DOXYGEN_CHECK_FILE_HEADER, 'File header comment should begin with "/** @file"', FileTable, ID)
return
if NoHeaderCommentEndFlag:
PrintErrorMsg(ERROR_HEADER_CHECK_FILE, 'File header comment should end with "**/"', FileTable, ID)
return
if NoCopyrightFlag:
PrintErrorMsg(ERROR_HEADER_CHECK_FILE, 'File header comment missing the "Copyright"', FileTable, ID)
# Check whether the C file header comment has the License immediately after the "Copyright" line.
if EccGlobalData.gConfig.HeaderCheckCFileCommentLicenseFormat == '1' or EccGlobalData.gConfig.HeaderCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
if NoLicenseFlag:
PrintErrorMsg(ERROR_HEADER_CHECK_FILE, 'File header comment should have the License immediately after the "Copyright" line', FileTable, ID)
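# Illustrative sketch (an assumption pieced together from the checks above,
# not an official template): a file header comment that satisfies every
# branch of CheckFileHeaderDoxygenComments looks roughly like
#
#   /** @file
#     Brief description of the file, ending with a period.
#
#     Copyright (c) 2010, Example Corp. All rights reserved.<BR>
#     This program and the accompanying materials are licensed ...
#
#     @par Revision Reference:
#      - Example Specification, Version 1.0
#   **/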
def CheckFuncHeaderDoxygenComments(FullFileName):
ErrorMsgList = []
FileID = GetTableID(FullFileName, ErrorMsgList)
if FileID < 0:
return ErrorMsgList
Db = GetDB()
FileTable = 'Identifier' + str(FileID)
SqlStatement = """ select Value, StartLine, EndLine, ID
from %s
where Model = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_COMMENT)
ResultSet = Db.TblFile.Exec(SqlStatement)
CommentSet = []
try:
for Result in ResultSet:
CommentSet.append(Result)
except:
print 'Unrecognized chars in comment of file %s' % FullFileName
# Func Decl check
SqlStatement = """ select Modifier, Name, StartLine, ID, Value
from %s
where Model = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_FUNCTION_DECLARATION)
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
FuncName = Result[4]
FunctionHeaderComment = CheckCommentImmediatelyPrecedeFunctionHeader(Result[1], Result[2], CommentSet)
if FunctionHeaderComment:
CheckFunctionHeaderConsistentWithDoxygenComment(Result[0], Result[1], Result[2], FunctionHeaderComment[0], FunctionHeaderComment[1], ErrorMsgList, FunctionHeaderComment[3], FileTable)
else:
if EccGlobalData.gException.IsException(ERROR_HEADER_CHECK_FUNCTION, FuncName):
continue
ErrorMsgList.append('Line %d : Function [%s] has NO comment immediately preceding it.' % (Result[2], Result[1]))
PrintErrorMsg(ERROR_HEADER_CHECK_FUNCTION, 'Function [%s] has NO comment immediately preceding it.' % (FuncName), FileTable, Result[3])
# Func Def check
SqlStatement = """ select Value, StartLine, EndLine, ID
from %s
where Model = %d
""" % (FileTable, DataClass.MODEL_IDENTIFIER_FUNCTION_HEADER)
ResultSet = Db.TblFile.Exec(SqlStatement)
CommentSet = []
try:
for Result in ResultSet:
CommentSet.append(Result)
except:
print 'Unrecognized chars in comment of file %s' % FullFileName
SqlStatement = """ select Modifier, Header, StartLine, ID, Name
from Function
where BelongsToFile = %d
""" % (FileID)
ResultSet = Db.TblFile.Exec(SqlStatement)
for Result in ResultSet:
FuncName = Result[4]
FunctionHeaderComment = CheckCommentImmediatelyPrecedeFunctionHeader(Result[1], Result[2], CommentSet)
if FunctionHeaderComment:
CheckFunctionHeaderConsistentWithDoxygenComment(Result[0], Result[1], Result[2], FunctionHeaderComment[0], FunctionHeaderComment[1], ErrorMsgList, FunctionHeaderComment[3], FileTable)
else:
if EccGlobalData.gException.IsException(ERROR_HEADER_CHECK_FUNCTION, FuncName):
continue
ErrorMsgList.append('Line %d : Function [%s] has NO comment immediately preceding it.' % (Result[2], Result[1]))
PrintErrorMsg(ERROR_HEADER_CHECK_FUNCTION, 'Function [%s] has NO comment immediately preceding it.' % (FuncName), 'Function', Result[3])
return ErrorMsgList
def CheckCommentImmediatelyPrecedeFunctionHeader(FuncName, FuncStartLine, CommentSet):
for Comment in CommentSet:
if Comment[2] == FuncStartLine - 1:
return Comment
return None
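# Worked example (hypothetical tuples): CommentSet rows are
# (Value, StartLine, EndLine, ID), so for a function whose header starts
# at line 42 only a comment ending on line 41 is returned:
#
#   CheckCommentImmediatelyPrecedeFunctionHeader(
#       'Foo', 42, [('/** ... **/', 30, 41, 7)])
#   -> ('/** ... **/', 30, 41, 7)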
def GetDoxygenStrFromComment(Str):
DoxygenStrList = []
ParamTagList = Str.split('@param')
if len(ParamTagList) > 1:
i = 1
while i < len(ParamTagList):
DoxygenStrList.append('@param' + ParamTagList[i])
i += 1
Str = ParamTagList[0]
RetvalTagList = ParamTagList[-1].split('@retval')
if len(RetvalTagList) > 1:
if len(ParamTagList) > 1:
DoxygenStrList[-1] = '@param' + RetvalTagList[0]
i = 1
while i < len(RetvalTagList):
DoxygenStrList.append('@retval' + RetvalTagList[i])
i += 1
ReturnTagList = RetvalTagList[-1].split('@return')
if len(ReturnTagList) > 1:
if len(RetvalTagList) > 1:
DoxygenStrList[-1] = '@retval' + ReturnTagList[0]
elif len(ParamTagList) > 1:
DoxygenStrList[-1] = '@param' + ReturnTagList[0]
i = 1
while i < len(ReturnTagList):
DoxygenStrList.append('@return' + ReturnTagList[i])
i += 1
if len(DoxygenStrList) > 0:
DoxygenStrList[-1] = DoxygenStrList[-1].rstrip('--*/')
return DoxygenStrList
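# Worked example (hypothetical input): the function slices a Doxygen
# comment body into one string per tag, preserving source order:
#
#   GetDoxygenStrFromComment('Desc.\n@param Foo  Input\n@retval 0  Done\n')
#   -> ['@param Foo  Input\n', '@retval 0  Done\n']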
def CheckGeneralDoxygenCommentLayout(Str, StartLine, ErrorMsgList, CommentId= -1, TableName=''):
# Checks: '/**' prefix, '**/' tail, and no '@retval' before '@param'
if not Str.startswith('/**'):
ErrorMsgList.append('Line %d : Comment does NOT have prefix /** ' % StartLine)
PrintErrorMsg(ERROR_DOXYGEN_CHECK_FUNCTION_HEADER, 'Comment does NOT have prefix /** ', TableName, CommentId)
if not Str.endswith('**/'):
ErrorMsgList.append('Line %d : Comment does NOT have tail **/ ' % StartLine)
PrintErrorMsg(ERROR_DOXYGEN_CHECK_FUNCTION_HEADER, 'Comment does NOT have tail **/ ', TableName, CommentId)
FirstRetvalIndex = Str.find('@retval')
LastParamIndex = Str.rfind('@param')
if (FirstRetvalIndex > 0) and (LastParamIndex > 0) and (FirstRetvalIndex < LastParamIndex):
ErrorMsgList.append('Line %d : @retval appears before @param ' % StartLine)
PrintErrorMsg(ERROR_DOXYGEN_CHECK_FUNCTION_HEADER, 'in Comment, @retval appears before @param ', TableName, CommentId)
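# Illustration (hypothetical comment): a string such as
#   '/** @retval 0 Done  @param Foo Input **/'
# passes the prefix and tail checks but is still reported, because its
# first '@retval' occurs before its last '@param'.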
def CheckFunctionHeaderConsistentWithDoxygenComment(FuncModifier, FuncHeader, FuncStartLine, CommentStr, CommentStartLine, ErrorMsgList, CommentId= -1, TableName=''):
ParamList = GetParamList(FuncHeader)
CheckGeneralDoxygenCommentLayout(CommentStr, CommentStartLine, ErrorMsgList, CommentId, TableName)
DescriptionStr = CommentStr
DoxygenStrList = GetDoxygenStrFromComment(DescriptionStr)
if DescriptionStr.find('.') == -1:
PrintErrorMsg(ERROR_DOXYGEN_CHECK_COMMENT_DESCRIPTION, 'Comment description should end with period \'.\'', TableName, CommentId)
DoxygenTagNumber = len(DoxygenStrList)
ParamNumber = len(ParamList)
for Param in ParamList:
if Param.Name.upper() == 'VOID' and ParamNumber == 1:
ParamNumber -= 1
Index = 0
if ParamNumber > 0 and DoxygenTagNumber > 0:
while Index < ParamNumber and Index < DoxygenTagNumber:
ParamModifier = ParamList[Index].Modifier
ParamName = ParamList[Index].Name.strip()
Tag = DoxygenStrList[Index].strip(' ')
if Tag[-1] not in ('\n', '\r'):
ErrorMsgList.append('Line %d : in Comment, <%s> does NOT end with new line ' % (CommentStartLine, Tag.replace('\n', '').replace('\r', '')))
PrintErrorMsg(ERROR_HEADER_CHECK_FUNCTION, 'in Comment, <%s> does NOT end with new line ' % (Tag.replace('\n', '').replace('\r', '')), TableName, CommentId)
TagPartList = Tag.split()
if len(TagPartList) < 2:
ErrorMsgList.append('Line %d : in Comment, <%s> does NOT contain doxygen contents ' % (CommentStartLine, Tag.replace('\n', '').replace('\r', '')))
PrintErrorMsg(ERROR_DOXYGEN_CHECK_FUNCTION_HEADER, 'in Comment, <%s> does NOT contain doxygen contents ' % (Tag.replace('\n', '').replace('\r', '')), TableName, CommentId)
Index += 1
continue
LBPos = Tag.find('[')
RBPos = Tag.find(']')
ParamToLBContent = Tag[len('@param'):LBPos].strip()
if LBPos > 0 and len(ParamToLBContent) == 0 and RBPos > LBPos:
InOutStr = ''
ModifierPartList = ParamModifier.split()
for Part in ModifierPartList:
if Part.strip() == 'IN':
InOutStr += 'in'
if Part.strip() == 'OUT':
if InOutStr != '':
InOutStr += ', out'
else:
InOutStr = 'out'
if InOutStr != '':
if Tag.find('[' + InOutStr + ']') == -1:
if InOutStr != 'in, out':
ErrorMsgList.append('Line %d : in Comment, <%s> does NOT have %s ' % (CommentStartLine, (TagPartList[0] + ' ' + TagPartList[1]).replace('\n', '').replace('\r', ''), '[' + InOutStr + ']'))
PrintErrorMsg(ERROR_DOXYGEN_CHECK_FUNCTION_HEADER, 'in Comment, <%s> does NOT have %s ' % ((TagPartList[0] + ' ' + TagPartList[1]).replace('\n', '').replace('\r', ''), '[' + InOutStr + ']'), TableName, CommentId)
else:
if Tag.find('[in,out]') == -1:
ErrorMsgList.append('Line %d : in Comment, <%s> does NOT have %s ' % (CommentStartLine, (TagPartList[0] + ' ' + TagPartList[1]).replace('\n', '').replace('\r', ''), '[' + InOutStr + ']'))
PrintErrorMsg(ERROR_DOXYGEN_CHECK_FUNCTION_HEADER, 'in Comment, <%s> does NOT have %s ' % ((TagPartList[0] + ' ' + TagPartList[1]).replace('\n', '').replace('\r', ''), '[' + InOutStr + ']'), TableName, CommentId)
if Tag.find(ParamName) == -1 and ParamName != 'VOID' and ParamName != 'void':
ErrorMsgList.append('Line %d : in Comment, <%s> is NOT consistent with parameter name %s ' % (CommentStartLine, (TagPartList[0] + ' ' + TagPartList[1]).replace('\n', '').replace('\r', ''), ParamName))
PrintErrorMsg(ERROR_DOXYGEN_CHECK_FUNCTION_HEADER, 'in Comment, <%s> is NOT consistent with parameter name %s ' % ((TagPartList[0] + ' ' + TagPartList[1]).replace('\n', '').replace('\r', ''), ParamName), TableName, CommentId)
Index += 1
if Index < ParamNumber:
ErrorMsgList.append('Line %d : Number of doxygen tags in comment is less than the number of function parameters' % CommentStartLine)
PrintErrorMsg(ERROR_DOXYGEN_CHECK_FUNCTION_HEADER, 'Number of doxygen tags in comment is less than the number of function parameters ', TableName, CommentId)
# VOID return type, NOT VOID*. VOID* should be matched with a doxygen tag.
if (FuncModifier.find('VOID') != -1 or FuncModifier.find('void') != -1) and FuncModifier.find('*') == -1:
# assume we allow a return description tag for void func. return. that's why 'DoxygenTagNumber - 1' is used instead of 'DoxygenTagNumber'
if Index < DoxygenTagNumber - 1 or (Index < DoxygenTagNumber and DoxygenStrList[Index].startswith('@retval')):
ErrorMsgList.append('Line %d : VOID return type needs NO doxygen tags in comment' % CommentStartLine)
PrintErrorMsg(ERROR_DOXYGEN_CHECK_FUNCTION_HEADER, 'VOID return type needs NO doxygen tags in comment ', TableName, CommentId)
else:
if Index < DoxygenTagNumber and not DoxygenStrList[Index].startswith('@retval') and not DoxygenStrList[Index].startswith('@return'):
ErrorMsgList.append('Line %d : Number of @param doxygen tags in comment does NOT match number of function parameters' % CommentStartLine)
PrintErrorMsg(ERROR_DOXYGEN_CHECK_FUNCTION_HEADER, 'Number of @param doxygen tags in comment does NOT match number of function parameters ', TableName, CommentId)
else:
if ParamNumber == 0 and DoxygenTagNumber != 0 and ((FuncModifier.find('VOID') != -1 or FuncModifier.find('void') != -1) and FuncModifier.find('*') == -1):
ErrorMsgList.append('Line %d : VOID return type needs NO doxygen tags in comment' % CommentStartLine)
PrintErrorMsg(ERROR_DOXYGEN_CHECK_FUNCTION_HEADER, 'VOID return type needs NO doxygen tags in comment ', TableName, CommentId)
if ParamNumber != 0 and DoxygenTagNumber == 0:
ErrorMsgList.append('Line %d : No doxygen tags in comment' % CommentStartLine)
PrintErrorMsg(ERROR_DOXYGEN_CHECK_FUNCTION_HEADER, 'No doxygen tags in comment ', TableName, CommentId)
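# Illustrative sketch (hypothetical C code, an assumption based on the
# checks above): a declaration/comment pair this check accepts.  Each
# @param tag names its parameter, carries the [in]/[out] direction derived
# from the IN/OUT modifiers, and ends with a newline; the non-VOID return
# type is documented with an @retval tag:
#
#   /**
#     Frobnicate a widget.
#
#     @param[in]  Widget       The widget to frobnicate.
#
#     @retval EFI_SUCCESS      The widget was frobnicated.
#   **/
#   EFI_STATUS Frobnicate (IN WIDGET *Widget);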
if __name__ == '__main__':
# EdkLogger.Initialize()
# EdkLogger.SetLevel(EdkLogger.QUIET)
# CollectSourceCodeDataIntoDB(sys.argv[1])
try:
test_file = sys.argv[1]
except IndexError:
print "Usage: %s filename" % sys.argv[0]
sys.exit(1)
MsgList = CheckFuncHeaderDoxygenComments(test_file)
for Msg in MsgList:
print Msg
print 'Done!'
| 41.683352
| 296
| 0.579536
|
33177ac4524143e78942c60455a88dcbc6825e55
| 56,029
|
py
|
Python
|
pandas/core/dtypes/common.py
|
JustinZhengBC/pandas
|
99df7da9ef5b3b210f3045ee19279808368def28
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/core/dtypes/common.py
|
JustinZhengBC/pandas
|
99df7da9ef5b3b210f3045ee19279808368def28
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/core/dtypes/common.py
|
JustinZhengBC/pandas
|
99df7da9ef5b3b210f3045ee19279808368def28
|
[
"BSD-3-Clause"
] | null | null | null |
""" common type operations """
import numpy as np
from pandas._libs import algos, lib
from pandas._libs.interval import Interval
from pandas._libs.tslibs import Period, Timestamp, conversion
from pandas.compat import PY3, PY36, binary_type, string_types, text_type
from pandas.core.dtypes.dtypes import (
CategoricalDtype, CategoricalDtypeType, DatetimeTZDtype, ExtensionDtype,
IntervalDtype, PandasExtensionDtype, PeriodDtype, _pandas_registry,
registry)
from pandas.core.dtypes.generic import (
ABCCategorical, ABCCategoricalIndex, ABCDateOffset, ABCDatetimeIndex,
ABCIndexClass, ABCPeriodArray, ABCPeriodIndex, ABCSeries, ABCSparseArray,
ABCSparseSeries)
from pandas.core.dtypes.inference import ( # noqa:F401
is_array_like, is_bool, is_complex, is_decimal, is_dict_like, is_file_like,
is_float, is_hashable, is_integer, is_interval, is_iterator, is_list_like,
is_named_tuple, is_nested_list_like, is_number, is_re, is_re_compilable,
is_scalar, is_sequence, is_string_like)
_POSSIBLY_CAST_DTYPES = {np.dtype(t).name
for t in ['O', 'int8', 'uint8', 'int16', 'uint16',
'int32', 'uint32', 'int64', 'uint64']}
_NS_DTYPE = conversion.NS_DTYPE
_TD_DTYPE = conversion.TD_DTYPE
_INT64_DTYPE = np.dtype(np.int64)
# oh the troubles to reduce import time
_is_scipy_sparse = None
ensure_float64 = algos.ensure_float64
ensure_float32 = algos.ensure_float32
_ensure_datetime64ns = conversion.ensure_datetime64ns
_ensure_timedelta64ns = conversion.ensure_timedelta64ns
def ensure_float(arr):
"""
Ensure that an array object has a float dtype if possible.
Parameters
----------
arr : array-like
The array whose data type we want to enforce as float.
Returns
-------
float_arr : The original array cast to the float dtype if
possible. Otherwise, the original array is returned.
"""
if issubclass(arr.dtype.type, (np.integer, np.bool_)):
arr = arr.astype(float)
return arr
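# Usage sketch (doctest-style, not part of the original module):
#
#   >>> ensure_float(np.array([1, 2], dtype=np.int64)).dtype
#   dtype('float64')
#   >>> ensure_float(np.array(['a'], dtype=object)).dtype  # left unchanged
#   dtype('O')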
ensure_uint64 = algos.ensure_uint64
ensure_int64 = algos.ensure_int64
ensure_int32 = algos.ensure_int32
ensure_int16 = algos.ensure_int16
ensure_int8 = algos.ensure_int8
ensure_platform_int = algos.ensure_platform_int
ensure_object = algos.ensure_object
def ensure_categorical(arr):
"""
Ensure that an array-like object is a Categorical (if not already).
Parameters
----------
arr : array-like
The array that we want to convert into a Categorical.
Returns
-------
cat_arr : The original array cast as a Categorical. If it already
is a Categorical, we return as is.
"""
if not is_categorical(arr):
from pandas import Categorical
arr = Categorical(arr)
return arr
def ensure_int64_or_float64(arr, copy=False):
"""
Ensure that an array of some integer dtype
has an int64 dtype if possible
If it's not possible, potentially because of overflow,
convert the array to float64 instead.
Parameters
----------
arr : array-like
The array whose data type we want to enforce.
copy : boolean
Whether to copy the original array or reuse
it in place, if possible.
Returns
-------
out_arr : The input array cast as int64 if
possible without overflow.
Otherwise the input array cast to float64.
"""
try:
return arr.astype('int64', copy=copy, casting='safe')
except TypeError:
return arr.astype('float64', copy=copy)
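# Usage sketch (doctest-style, not part of the original module):
#
#   >>> ensure_int64_or_float64(np.array([1, 2], dtype=np.int32)).dtype
#   dtype('int64')
#   >>> # uint64 cannot be cast to int64 with casting='safe', so we
#   >>> # fall back to float64
#   >>> ensure_int64_or_float64(np.array([1], dtype=np.uint64)).dtype
#   dtype('float64')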
def is_object_dtype(arr_or_dtype):
"""
Check whether an array-like or dtype is of the object dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype to check.
Returns
-------
boolean : Whether or not the array-like or dtype is of the object dtype.
Examples
--------
>>> is_object_dtype(object)
True
>>> is_object_dtype(int)
False
>>> is_object_dtype(np.array([], dtype=object))
True
>>> is_object_dtype(np.array([], dtype=int))
False
>>> is_object_dtype([1, 2, 3])
False
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.object_)
def is_sparse(arr):
"""
Check whether an array-like is a 1-D pandas sparse array.
Check that the one-dimensional array-like is a pandas sparse array.
Returns True if it is a pandas sparse array, not another type of
sparse array.
Parameters
----------
arr : array-like
Array-like to check.
Returns
-------
bool
Whether or not the array-like is a pandas sparse array.
See Also
--------
DataFrame.to_sparse : Convert DataFrame to a SparseDataFrame.
Series.to_sparse : Convert Series to SparseSeries.
Series.to_dense : Return dense representation of a Series.
Examples
--------
Returns `True` if the parameter is a 1-D pandas sparse array.
>>> is_sparse(pd.SparseArray([0, 0, 1, 0]))
True
>>> is_sparse(pd.SparseSeries([0, 0, 1, 0]))
True
Returns `False` if the parameter is not sparse.
>>> is_sparse(np.array([0, 0, 1, 0]))
False
>>> is_sparse(pd.Series([0, 1, 0, 0]))
False
Returns `False` if the parameter is not a pandas sparse array.
>>> from scipy.sparse import bsr_matrix
>>> is_sparse(bsr_matrix([0, 1, 0, 0]))
False
Returns `False` if the parameter has more than one dimension.
>>> df = pd.SparseDataFrame([389., 24., 80.5, np.nan],
... columns=['max_speed'],
... index=['falcon', 'parrot', 'lion', 'monkey'])
>>> is_sparse(df)
False
>>> is_sparse(df.max_speed)
True
"""
from pandas.core.arrays.sparse import SparseDtype
dtype = getattr(arr, 'dtype', arr)
return isinstance(dtype, SparseDtype)
def is_scipy_sparse(arr):
"""
Check whether an array-like is a scipy.sparse.spmatrix instance.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean : Whether or not the array-like is a
scipy.sparse.spmatrix instance.
Notes
-----
If scipy is not installed, this function will always return False.
Examples
--------
>>> from scipy.sparse import bsr_matrix
>>> is_scipy_sparse(bsr_matrix([1, 2, 3]))
True
>>> is_scipy_sparse(pd.SparseArray([1, 2, 3]))
False
>>> is_scipy_sparse(pd.SparseSeries([1, 2, 3]))
False
"""
global _is_scipy_sparse
if _is_scipy_sparse is None:
try:
from scipy.sparse import issparse as _is_scipy_sparse
except ImportError:
_is_scipy_sparse = lambda _: False
return _is_scipy_sparse(arr)
def is_categorical(arr):
"""
Check whether an array-like is a Categorical instance.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean : Whether or not the array-like is of a Categorical instance.
Examples
--------
>>> is_categorical([1, 2, 3])
False
Categoricals, Series Categoricals, and CategoricalIndex will return True.
>>> cat = pd.Categorical([1, 2, 3])
>>> is_categorical(cat)
True
>>> is_categorical(pd.Series(cat))
True
>>> is_categorical(pd.CategoricalIndex([1, 2, 3]))
True
"""
return isinstance(arr, ABCCategorical) or is_categorical_dtype(arr)
def is_datetimetz(arr):
"""
Check whether an array-like is a datetime array-like with a timezone
component in its dtype.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean : Whether or not the array-like is a datetime array-like with
a timezone component in its dtype.
Examples
--------
>>> is_datetimetz([1, 2, 3])
False
Although the following examples are both DatetimeIndex objects,
the first one returns False because it has no timezone component
unlike the second one, which returns True.
>>> is_datetimetz(pd.DatetimeIndex([1, 2, 3]))
False
>>> is_datetimetz(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
The object need not be a DatetimeIndex object. It just needs to have
a dtype which has a timezone component.
>>> dtype = DatetimeTZDtype("ns", tz="US/Eastern")
>>> s = pd.Series([], dtype=dtype)
>>> is_datetimetz(s)
True
"""
# TODO: do we need this function?
# It seems like a repeat of is_datetime64tz_dtype.
return ((isinstance(arr, ABCDatetimeIndex) and
getattr(arr, 'tz', None) is not None) or
is_datetime64tz_dtype(arr))
def is_offsetlike(arr_or_obj):
"""
Check if obj or all elements of list-like is DateOffset
Parameters
----------
arr_or_obj : object
Returns
-------
boolean : Whether the object is a DateOffset or a list-like of DateOffsets
Examples
--------
>>> is_offsetlike(pd.DateOffset(days=1))
True
>>> is_offsetlike('offset')
False
>>> is_offsetlike([pd.offsets.Minute(4), pd.offsets.MonthEnd()])
True
>>> is_offsetlike(np.array([pd.DateOffset(months=3), pd.Timestamp.now()]))
False
"""
if isinstance(arr_or_obj, ABCDateOffset):
return True
elif (is_list_like(arr_or_obj) and len(arr_or_obj) and
is_object_dtype(arr_or_obj)):
return all(isinstance(x, ABCDateOffset) for x in arr_or_obj)
return False
def is_period(arr):
"""
Check whether an array-like is a period index.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean : Whether or not the array-like is a period index.
Examples
--------
>>> is_period([1, 2, 3])
False
>>> is_period(pd.Index([1, 2, 3]))
False
>>> is_period(pd.PeriodIndex(["2017-01-01"], freq="D"))
True
"""
# TODO: do we need this function?
# It seems like a repeat of is_period_arraylike.
return isinstance(arr, ABCPeriodIndex) or is_period_arraylike(arr)
def is_datetime64_dtype(arr_or_dtype):
"""
Check whether an array-like or dtype is of the datetime64 dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype to check.
Returns
-------
boolean : Whether or not the array-like or dtype is of
the datetime64 dtype.
Examples
--------
>>> is_datetime64_dtype(object)
False
>>> is_datetime64_dtype(np.datetime64)
True
>>> is_datetime64_dtype(np.array([], dtype=int))
False
>>> is_datetime64_dtype(np.array([], dtype=np.datetime64))
True
>>> is_datetime64_dtype([1, 2, 3])
False
"""
if arr_or_dtype is None:
return False
try:
tipo = _get_dtype_type(arr_or_dtype)
except (TypeError, UnicodeEncodeError):
return False
return issubclass(tipo, np.datetime64)
def is_datetime64tz_dtype(arr_or_dtype):
"""
Check whether an array-like or dtype is of a DatetimeTZDtype dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype to check.
Returns
-------
boolean : Whether or not the array-like or dtype is of
a DatetimeTZDtype dtype.
Examples
--------
>>> is_datetime64tz_dtype(object)
False
>>> is_datetime64tz_dtype([1, 2, 3])
False
>>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3])) # tz-naive
False
>>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
>>> dtype = DatetimeTZDtype("ns", tz="US/Eastern")
>>> s = pd.Series([], dtype=dtype)
>>> is_datetime64tz_dtype(dtype)
True
>>> is_datetime64tz_dtype(s)
True
"""
if arr_or_dtype is None:
return False
return DatetimeTZDtype.is_dtype(arr_or_dtype)
def is_timedelta64_dtype(arr_or_dtype):
"""
Check whether an array-like or dtype is of the timedelta64 dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype to check.
Returns
-------
boolean : Whether or not the array-like or dtype is
of the timedelta64 dtype.
Examples
--------
>>> is_timedelta64_dtype(object)
False
>>> is_timedelta64_dtype(np.timedelta64)
True
>>> is_timedelta64_dtype([1, 2, 3])
False
>>> is_timedelta64_dtype(pd.Series([], dtype="timedelta64[ns]"))
True
>>> is_timedelta64_dtype('0 days')
False
"""
if arr_or_dtype is None:
return False
try:
tipo = _get_dtype_type(arr_or_dtype)
except (TypeError, ValueError, SyntaxError):
return False
return issubclass(tipo, np.timedelta64)
def is_period_dtype(arr_or_dtype):
"""
Check whether an array-like or dtype is of the Period dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype to check.
Returns
-------
boolean : Whether or not the array-like or dtype is of the Period dtype.
Examples
--------
>>> is_period_dtype(object)
False
>>> is_period_dtype(PeriodDtype(freq="D"))
True
>>> is_period_dtype([1, 2, 3])
False
>>> is_period_dtype(pd.Period("2017-01-01"))
False
>>> is_period_dtype(pd.PeriodIndex([], freq="A"))
True
"""
# TODO: Consider making Period an instance of PeriodDtype
if arr_or_dtype is None:
return False
return PeriodDtype.is_dtype(arr_or_dtype)
def is_interval_dtype(arr_or_dtype):
"""
Check whether an array-like or dtype is of the Interval dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype to check.
Returns
-------
boolean : Whether or not the array-like or dtype is
of the Interval dtype.
Examples
--------
>>> is_interval_dtype(object)
False
>>> is_interval_dtype(IntervalDtype())
True
>>> is_interval_dtype([1, 2, 3])
False
>>>
>>> interval = pd.Interval(1, 2, closed="right")
>>> is_interval_dtype(interval)
False
>>> is_interval_dtype(pd.IntervalIndex([interval]))
True
"""
# TODO: Consider making Interval an instance of IntervalDtype
if arr_or_dtype is None:
return False
return IntervalDtype.is_dtype(arr_or_dtype)
def is_categorical_dtype(arr_or_dtype):
"""
Check whether an array-like or dtype is of the Categorical dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype to check.
Returns
-------
boolean : Whether or not the array-like or dtype is
of the Categorical dtype.
Examples
--------
>>> is_categorical_dtype(object)
False
>>> is_categorical_dtype(CategoricalDtype())
True
>>> is_categorical_dtype([1, 2, 3])
False
>>> is_categorical_dtype(pd.Categorical([1, 2, 3]))
True
>>> is_categorical_dtype(pd.CategoricalIndex([1, 2, 3]))
True
"""
if arr_or_dtype is None:
return False
return CategoricalDtype.is_dtype(arr_or_dtype)
def is_string_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of the string dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of the string dtype.
Examples
--------
>>> is_string_dtype(str)
True
>>> is_string_dtype(object)
True
>>> is_string_dtype(int)
False
>>>
>>> is_string_dtype(np.array(['a', 'b']))
True
>>> is_string_dtype(pd.Series([1, 2]))
False
"""
# TODO: gh-15585: consider making the checks stricter.
if arr_or_dtype is None:
return False
try:
dtype = _get_dtype(arr_or_dtype)
return dtype.kind in ('O', 'S', 'U') and not is_period_dtype(dtype)
except TypeError:
return False
def is_period_arraylike(arr):
"""
Check whether an array-like is a period array-like or PeriodIndex.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean : Whether or not the array-like is a period
array-like or PeriodIndex instance.
Examples
--------
>>> is_period_arraylike([1, 2, 3])
False
>>> is_period_arraylike(pd.Index([1, 2, 3]))
False
>>> is_period_arraylike(pd.PeriodIndex(["2017-01-01"], freq="D"))
True
"""
if isinstance(arr, (ABCPeriodIndex, ABCPeriodArray)):
return True
elif isinstance(arr, (np.ndarray, ABCSeries)):
return is_period_dtype(arr.dtype)
return getattr(arr, 'inferred_type', None) == 'period'
def is_datetime_arraylike(arr):
"""
Check whether an array-like is a datetime array-like or DatetimeIndex.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean : Whether or not the array-like is a datetime
array-like or DatetimeIndex.
Examples
--------
>>> is_datetime_arraylike([1, 2, 3])
False
>>> is_datetime_arraylike(pd.Index([1, 2, 3]))
False
>>> is_datetime_arraylike(pd.DatetimeIndex([1, 2, 3]))
True
"""
if isinstance(arr, ABCDatetimeIndex):
return True
elif isinstance(arr, (np.ndarray, ABCSeries)):
return arr.dtype == object and lib.infer_dtype(arr) == 'datetime'
return getattr(arr, 'inferred_type', None) == 'datetime'
def is_datetimelike(arr):
"""
Check whether an array-like is a datetime-like array-like.
Acceptable datetime-like objects are (but not limited to) datetime
indices, periodic indices, and timedelta indices.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean : Whether or not the array-like is a datetime-like array-like.
Examples
--------
>>> is_datetimelike([1, 2, 3])
False
>>> is_datetimelike(pd.Index([1, 2, 3]))
False
>>> is_datetimelike(pd.DatetimeIndex([1, 2, 3]))
True
>>> is_datetimelike(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
>>> is_datetimelike(pd.PeriodIndex([], freq="A"))
True
>>> is_datetimelike(np.array([], dtype=np.datetime64))
True
>>> is_datetimelike(pd.Series([], dtype="timedelta64[ns]"))
True
>>>
>>> dtype = DatetimeTZDtype("ns", tz="US/Eastern")
>>> s = pd.Series([], dtype=dtype)
>>> is_datetimelike(s)
True
"""
return (is_datetime64_dtype(arr) or is_datetime64tz_dtype(arr) or
is_timedelta64_dtype(arr) or
isinstance(arr, ABCPeriodIndex) or
is_datetimetz(arr))
def is_dtype_equal(source, target):
"""
Check if two dtypes are equal.
Parameters
----------
source : The first dtype to compare
target : The second dtype to compare
Returns
-------
boolean : Whether or not the two dtypes are equal.
Examples
--------
>>> is_dtype_equal(int, float)
False
>>> is_dtype_equal("int", int)
True
>>> is_dtype_equal(object, "category")
False
>>> is_dtype_equal(CategoricalDtype(), "category")
True
>>> is_dtype_equal(DatetimeTZDtype(), "datetime64")
False
"""
try:
source = _get_dtype(source)
target = _get_dtype(target)
return source == target
except (TypeError, AttributeError):
# invalid comparison
# object == category will hit this
return False
def is_dtype_union_equal(source, target):
"""
Check whether two arrays have compatible dtypes to do a union.
numpy types are checked with ``is_dtype_equal``. Extension types are
checked separately.
Parameters
----------
source : The first dtype to compare
target : The second dtype to compare
Returns
-------
boolean : Whether or not the two dtypes are equal.
>>> is_dtype_equal("int", int)
True
>>> is_dtype_equal(CategoricalDtype(['a', 'b'],
... CategoricalDtype(['b', 'c']))
True
>>> is_dtype_equal(CategoricalDtype(['a', 'b'],
... CategoricalDtype(['b', 'c'], ordered=True))
False
"""
source = _get_dtype(source)
target = _get_dtype(target)
if is_categorical_dtype(source) and is_categorical_dtype(target):
# ordered False for both
return source.ordered is target.ordered
return is_dtype_equal(source, target)
def is_any_int_dtype(arr_or_dtype):
"""Check whether the provided array or dtype is of an integer dtype.
In this function, timedelta64 instances are also considered "any-integer"
type objects and will return True.
This function is internal and should not be exposed in the public API.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of an integer dtype.
Examples
--------
>>> is_any_int_dtype(str)
False
>>> is_any_int_dtype(int)
True
>>> is_any_int_dtype(float)
False
>>> is_any_int_dtype(np.uint64)
True
>>> is_any_int_dtype(np.datetime64)
False
>>> is_any_int_dtype(np.timedelta64)
True
>>> is_any_int_dtype(np.array(['a', 'b']))
False
>>> is_any_int_dtype(pd.Series([1, 2]))
True
>>> is_any_int_dtype(np.array([], dtype=np.timedelta64))
True
>>> is_any_int_dtype(pd.Index([1, 2.])) # float
False
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.integer)
def is_integer_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of an integer dtype.
Unlike in `is_any_int_dtype`, timedelta64 instances will return False.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of an integer dtype
and not an instance of timedelta64.
Examples
--------
>>> is_integer_dtype(str)
False
>>> is_integer_dtype(int)
True
>>> is_integer_dtype(float)
False
>>> is_integer_dtype(np.uint64)
True
>>> is_integer_dtype(np.datetime64)
False
>>> is_integer_dtype(np.timedelta64)
False
>>> is_integer_dtype(np.array(['a', 'b']))
False
>>> is_integer_dtype(pd.Series([1, 2]))
True
>>> is_integer_dtype(np.array([], dtype=np.timedelta64))
False
>>> is_integer_dtype(pd.Index([1, 2.])) # float
False
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return (issubclass(tipo, np.integer) and
not issubclass(tipo, (np.datetime64, np.timedelta64)))
def is_signed_integer_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of a signed integer dtype.
Unlike in `is_any_int_dtype`, timedelta64 instances will return False.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of a signed integer dtype
and not an instance of timedelta64.
Examples
--------
>>> is_signed_integer_dtype(str)
False
>>> is_signed_integer_dtype(int)
True
>>> is_signed_integer_dtype(float)
False
>>> is_signed_integer_dtype(np.uint64) # unsigned
False
>>> is_signed_integer_dtype(np.datetime64)
False
>>> is_signed_integer_dtype(np.timedelta64)
False
>>> is_signed_integer_dtype(np.array(['a', 'b']))
False
>>> is_signed_integer_dtype(pd.Series([1, 2]))
True
>>> is_signed_integer_dtype(np.array([], dtype=np.timedelta64))
False
>>> is_signed_integer_dtype(pd.Index([1, 2.])) # float
False
>>> is_signed_integer_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned
False
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return (issubclass(tipo, np.signedinteger) and
not issubclass(tipo, (np.datetime64, np.timedelta64)))
def is_unsigned_integer_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of an unsigned integer dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of an
unsigned integer dtype.
Examples
--------
>>> is_unsigned_integer_dtype(str)
False
>>> is_unsigned_integer_dtype(int) # signed
False
>>> is_unsigned_integer_dtype(float)
False
>>> is_unsigned_integer_dtype(np.uint64)
True
>>> is_unsigned_integer_dtype(np.array(['a', 'b']))
False
>>> is_unsigned_integer_dtype(pd.Series([1, 2])) # signed
False
>>> is_unsigned_integer_dtype(pd.Index([1, 2.])) # float
False
>>> is_unsigned_integer_dtype(np.array([1, 2], dtype=np.uint32))
True
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return (issubclass(tipo, np.unsignedinteger) and
not issubclass(tipo, (np.datetime64, np.timedelta64)))
def is_int64_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of the int64 dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of the int64 dtype.
Notes
-----
Depending on system architecture, the return value of
`is_int64_dtype(int)` will be True if the OS uses 64-bit integers and
False if the OS uses 32-bit integers.
Examples
--------
>>> is_int64_dtype(str)
False
>>> is_int64_dtype(np.int32)
False
>>> is_int64_dtype(np.int64)
True
>>> is_int64_dtype(float)
False
>>> is_int64_dtype(np.uint64) # unsigned
False
>>> is_int64_dtype(np.array(['a', 'b']))
False
>>> is_int64_dtype(np.array([1, 2], dtype=np.int64))
True
>>> is_int64_dtype(pd.Index([1, 2.])) # float
False
>>> is_int64_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned
False
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.int64)
def is_int_or_datetime_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of an
integer, timedelta64, or datetime64 dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of an
integer, timedelta64, or datetime64 dtype.
Examples
--------
>>> is_int_or_datetime_dtype(str)
False
>>> is_int_or_datetime_dtype(int)
True
>>> is_int_or_datetime_dtype(float)
False
>>> is_int_or_datetime_dtype(np.uint64)
True
>>> is_int_or_datetime_dtype(np.datetime64)
True
>>> is_int_or_datetime_dtype(np.timedelta64)
True
>>> is_int_or_datetime_dtype(np.array(['a', 'b']))
False
>>> is_int_or_datetime_dtype(pd.Series([1, 2]))
True
>>> is_int_or_datetime_dtype(np.array([], dtype=np.timedelta64))
True
>>> is_int_or_datetime_dtype(np.array([], dtype=np.datetime64))
True
>>> is_int_or_datetime_dtype(pd.Index([1, 2.])) # float
False
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return (issubclass(tipo, np.integer) or
issubclass(tipo, (np.datetime64, np.timedelta64)))
def is_datetime64_any_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of the datetime64 dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of the datetime64 dtype.
Examples
--------
>>> is_datetime64_any_dtype(str)
False
>>> is_datetime64_any_dtype(int)
False
>>> is_datetime64_any_dtype(np.datetime64) # can be tz-naive
True
>>> is_datetime64_any_dtype(DatetimeTZDtype("ns", "US/Eastern"))
True
>>> is_datetime64_any_dtype(np.array(['a', 'b']))
False
>>> is_datetime64_any_dtype(np.array([1, 2]))
False
>>> is_datetime64_any_dtype(np.array([], dtype=np.datetime64))
True
>>> is_datetime64_any_dtype(pd.DatetimeIndex([1, 2, 3],
... dtype=np.datetime64))
True
"""
if arr_or_dtype is None:
return False
return (is_datetime64_dtype(arr_or_dtype) or
is_datetime64tz_dtype(arr_or_dtype))
def is_datetime64_ns_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of the datetime64[ns] dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of the datetime64[ns] dtype.
Examples
--------
>>> is_datetime64_ns_dtype(str)
False
>>> is_datetime64_ns_dtype(int)
False
>>> is_datetime64_ns_dtype(np.datetime64) # no unit
False
>>> is_datetime64_ns_dtype(DatetimeTZDtype("ns", "US/Eastern"))
True
>>> is_datetime64_ns_dtype(np.array(['a', 'b']))
False
>>> is_datetime64_ns_dtype(np.array([1, 2]))
False
>>> is_datetime64_ns_dtype(np.array([], dtype=np.datetime64)) # no unit
False
>>> is_datetime64_ns_dtype(np.array([],
dtype="datetime64[ps]")) # wrong unit
False
>>> is_datetime64_ns_dtype(pd.DatetimeIndex([1, 2, 3],
... dtype=np.datetime64)) # has 'ns' unit
True
"""
if arr_or_dtype is None:
return False
try:
tipo = _get_dtype(arr_or_dtype)
except TypeError:
if is_datetime64tz_dtype(arr_or_dtype):
tipo = _get_dtype(arr_or_dtype.dtype)
else:
return False
return tipo == _NS_DTYPE or getattr(tipo, 'base', None) == _NS_DTYPE
def is_timedelta64_ns_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of the timedelta64[ns] dtype.
This is a very specific dtype, so generic ones like `np.timedelta64`
will return False if passed into this function.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of the
timedelta64[ns] dtype.
Examples
--------
>>> is_timedelta64_ns_dtype(np.dtype('m8[ns]'))
True
>>> is_timedelta64_ns_dtype(np.dtype('m8[ps]')) # Wrong frequency
False
>>> is_timedelta64_ns_dtype(np.array([1, 2], dtype='m8[ns]'))
True
>>> is_timedelta64_ns_dtype(np.array([1, 2], dtype=np.timedelta64))
False
"""
if arr_or_dtype is None:
return False
try:
tipo = _get_dtype(arr_or_dtype)
return tipo == _TD_DTYPE
except TypeError:
return False
def is_datetime_or_timedelta_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of
a timedelta64 or datetime64 dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of a
timedelta64, or datetime64 dtype.
Examples
--------
>>> is_datetime_or_timedelta_dtype(str)
False
>>> is_datetime_or_timedelta_dtype(int)
False
>>> is_datetime_or_timedelta_dtype(np.datetime64)
True
>>> is_datetime_or_timedelta_dtype(np.timedelta64)
True
>>> is_datetime_or_timedelta_dtype(np.array(['a', 'b']))
False
>>> is_datetime_or_timedelta_dtype(pd.Series([1, 2]))
False
>>> is_datetime_or_timedelta_dtype(np.array([], dtype=np.timedelta64))
True
>>> is_datetime_or_timedelta_dtype(np.array([], dtype=np.datetime64))
True
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, (np.datetime64, np.timedelta64))
def _is_unorderable_exception(e):
"""
Check if the exception raised is an unorderable exception.
The error message differs for 3 <= PY <= 3.5 and PY >= 3.6, so
we need to condition based on Python version.
Parameters
----------
e : Exception or sub-class
The exception object to check.
Returns
-------
boolean : Whether or not the exception raised is an unorderable exception.
"""
if PY36:
return "'>' not supported between instances of" in str(e)
elif PY3:
return 'unorderable' in str(e)
return False
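# Usage sketch (Python 3.6+; the exact message text is what this helper
# keys on, and it differs across Python versions):
#
#   >>> try:
#   ...     [1] > 'a'
#   ... except TypeError as e:
#   ...     _is_unorderable_exception(e)
#   True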
def is_numeric_v_string_like(a, b):
"""
Check if we are comparing a string-like object to a numeric ndarray.
NumPy doesn't like to compare such objects, especially numeric arrays
and scalar string-likes.
Parameters
----------
a : array-like, scalar
The first object to check.
b : array-like, scalar
The second object to check.
Returns
-------
boolean : Whether we are comparing a string-like
object to a numeric array.
Examples
--------
>>> is_numeric_v_string_like(1, 1)
False
>>> is_numeric_v_string_like("foo", "foo")
False
>>> is_numeric_v_string_like(1, "foo") # non-array numeric
False
>>> is_numeric_v_string_like(np.array([1]), "foo")
True
>>> is_numeric_v_string_like("foo", np.array([1])) # symmetric check
True
>>> is_numeric_v_string_like(np.array([1, 2]), np.array(["foo"]))
True
>>> is_numeric_v_string_like(np.array(["foo"]), np.array([1, 2]))
True
>>> is_numeric_v_string_like(np.array([1]), np.array([2]))
False
>>> is_numeric_v_string_like(np.array(["foo"]), np.array(["foo"]))
False
"""
is_a_array = isinstance(a, np.ndarray)
is_b_array = isinstance(b, np.ndarray)
is_a_numeric_array = is_a_array and is_numeric_dtype(a)
is_b_numeric_array = is_b_array and is_numeric_dtype(b)
is_a_string_array = is_a_array and is_string_like_dtype(a)
is_b_string_array = is_b_array and is_string_like_dtype(b)
is_a_scalar_string_like = not is_a_array and is_string_like(a)
is_b_scalar_string_like = not is_b_array and is_string_like(b)
return ((is_a_numeric_array and is_b_scalar_string_like) or
(is_b_numeric_array and is_a_scalar_string_like) or
(is_a_numeric_array and is_b_string_array) or
(is_b_numeric_array and is_a_string_array))
def is_datetimelike_v_numeric(a, b):
"""
Check if we are comparing a datetime-like object to a numeric object.
By "numeric," we mean an object that is either of an int or float dtype.
Parameters
----------
a : array-like, scalar
The first object to check.
b : array-like, scalar
The second object to check.
Returns
-------
boolean : Whether we are comparing a datetime-like
to a numeric object.
Examples
--------
>>> dt = np.datetime64(pd.datetime(2017, 1, 1))
>>>
>>> is_datetimelike_v_numeric(1, 1)
False
>>> is_datetimelike_v_numeric(dt, dt)
False
>>> is_datetimelike_v_numeric(1, dt)
True
>>> is_datetimelike_v_numeric(dt, 1) # symmetric check
True
>>> is_datetimelike_v_numeric(np.array([dt]), 1)
True
>>> is_datetimelike_v_numeric(np.array([1]), dt)
True
>>> is_datetimelike_v_numeric(np.array([dt]), np.array([1]))
True
>>> is_datetimelike_v_numeric(np.array([1]), np.array([2]))
False
>>> is_datetimelike_v_numeric(np.array([dt]), np.array([dt]))
False
"""
if not hasattr(a, 'dtype'):
a = np.asarray(a)
if not hasattr(b, 'dtype'):
b = np.asarray(b)
def is_numeric(x):
"""
Check if an object has a numeric dtype (i.e. integer or float).
"""
return is_integer_dtype(x) or is_float_dtype(x)
is_datetimelike = needs_i8_conversion
return ((is_datetimelike(a) and is_numeric(b)) or
(is_datetimelike(b) and is_numeric(a)))
def is_datetimelike_v_object(a, b):
"""
Check if we are comparing a datetime-like object to an object instance.
Parameters
----------
a : array-like, scalar
The first object to check.
b : array-like, scalar
The second object to check.
Returns
-------
boolean : Whether we are comparing a datetime-like
to an object instance.
Examples
--------
>>> obj = object()
>>> dt = np.datetime64(pd.datetime(2017, 1, 1))
>>>
>>> is_datetimelike_v_object(obj, obj)
False
>>> is_datetimelike_v_object(dt, dt)
False
>>> is_datetimelike_v_object(obj, dt)
True
>>> is_datetimelike_v_object(dt, obj) # symmetric check
True
>>> is_datetimelike_v_object(np.array([dt]), obj)
True
>>> is_datetimelike_v_object(np.array([obj]), dt)
True
>>> is_datetimelike_v_object(np.array([dt]), np.array([obj]))
True
>>> is_datetimelike_v_object(np.array([obj]), np.array([obj]))
False
>>> is_datetimelike_v_object(np.array([dt]), np.array([1]))
False
>>> is_datetimelike_v_object(np.array([dt]), np.array([dt]))
False
"""
if not hasattr(a, 'dtype'):
a = np.asarray(a)
if not hasattr(b, 'dtype'):
b = np.asarray(b)
is_datetimelike = needs_i8_conversion
return ((is_datetimelike(a) and is_object_dtype(b)) or
(is_datetimelike(b) and is_object_dtype(a)))
def needs_i8_conversion(arr_or_dtype):
"""
Check whether the array or dtype should be converted to int64.
An array-like or dtype "needs" such a conversion if the array-like
or dtype is of a datetime-like dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype should be converted to int64.
Examples
--------
>>> needs_i8_conversion(str)
False
>>> needs_i8_conversion(np.int64)
False
>>> needs_i8_conversion(np.datetime64)
True
>>> needs_i8_conversion(np.array(['a', 'b']))
False
>>> needs_i8_conversion(pd.Series([1, 2]))
False
>>> needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]"))
True
>>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
"""
if arr_or_dtype is None:
return False
return (is_datetime_or_timedelta_dtype(arr_or_dtype) or
is_datetime64tz_dtype(arr_or_dtype) or
is_period_dtype(arr_or_dtype))
def is_numeric_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of a numeric dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of a numeric dtype.
Examples
--------
>>> is_numeric_dtype(str)
False
>>> is_numeric_dtype(int)
True
>>> is_numeric_dtype(float)
True
>>> is_numeric_dtype(np.uint64)
True
>>> is_numeric_dtype(np.datetime64)
False
>>> is_numeric_dtype(np.timedelta64)
False
>>> is_numeric_dtype(np.array(['a', 'b']))
False
>>> is_numeric_dtype(pd.Series([1, 2]))
True
>>> is_numeric_dtype(pd.Index([1, 2.]))
True
>>> is_numeric_dtype(np.array([], dtype=np.timedelta64))
False
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return (issubclass(tipo, (np.number, np.bool_)) and
not issubclass(tipo, (np.datetime64, np.timedelta64)))
def is_string_like_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of a string-like dtype.
Unlike `is_string_dtype`, the object dtype is excluded because it
is a mixed dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of the string dtype.
Examples
--------
>>> is_string_like_dtype(str)
True
>>> is_string_like_dtype(object)
False
>>> is_string_like_dtype(np.array(['a', 'b']))
True
>>> is_string_like_dtype(pd.Series([1, 2]))
False
"""
if arr_or_dtype is None:
return False
try:
dtype = _get_dtype(arr_or_dtype)
return dtype.kind in ('S', 'U')
except TypeError:
return False
def is_float_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of a float dtype.
This function is internal and should not be exposed in the public API.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of a float dtype.
Examples
--------
>>> is_float_dtype(str)
False
>>> is_float_dtype(int)
False
>>> is_float_dtype(float)
True
>>> is_float_dtype(np.array(['a', 'b']))
False
>>> is_float_dtype(pd.Series([1, 2]))
False
>>> is_float_dtype(pd.Index([1, 2.]))
True
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.floating)
def is_floating_dtype(arr_or_dtype):
"""Check whether the provided array or dtype is an instance of
numpy's float dtype.
.. deprecated:: 0.20.0
Unlike `is_float_dtype`, this check is a lot stricter, as it requires
`isinstance` of `np.floating` and not `issubclass`.
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return isinstance(tipo, np.floating)
def is_bool_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of a boolean dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of a boolean dtype.
Notes
-----
An ExtensionArray is considered boolean when the ``_is_boolean``
attribute is set to True.
Examples
--------
>>> is_bool_dtype(str)
False
>>> is_bool_dtype(int)
False
>>> is_bool_dtype(bool)
True
>>> is_bool_dtype(np.bool)
True
>>> is_bool_dtype(np.array(['a', 'b']))
False
>>> is_bool_dtype(pd.Series([1, 2]))
False
>>> is_bool_dtype(np.array([True, False]))
True
>>> is_bool_dtype(pd.Categorical([True, False]))
True
>>> is_bool_dtype(pd.SparseArray([True, False]))
True
"""
if arr_or_dtype is None:
return False
try:
tipo = _get_dtype_type(arr_or_dtype)
except ValueError:
# this isn't even a dtype
return False
if isinstance(arr_or_dtype, (ABCCategorical, ABCCategoricalIndex)):
arr_or_dtype = arr_or_dtype.dtype
if isinstance(arr_or_dtype, CategoricalDtype):
arr_or_dtype = arr_or_dtype.categories
# now we use the special definition for Index
if isinstance(arr_or_dtype, ABCIndexClass):
# TODO(jreback)
# we don't have a boolean Index class
# so its object, we need to infer to
# guess this
return (arr_or_dtype.is_object and
arr_or_dtype.inferred_type == 'boolean')
elif is_extension_array_dtype(arr_or_dtype):
dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype)
return dtype._is_boolean
return issubclass(tipo, np.bool_)
def is_extension_type(arr):
"""
Check whether an array-like is of a pandas extension class instance.
Extension classes include categoricals, pandas sparse objects (i.e.
classes represented within the pandas library and not ones external
to it like scipy sparse matrices), and datetime-like arrays.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean : Whether or not the array-like is of a pandas
extension class instance.
Examples
--------
>>> is_extension_type([1, 2, 3])
False
>>> is_extension_type(np.array([1, 2, 3]))
False
>>>
>>> cat = pd.Categorical([1, 2, 3])
>>>
>>> is_extension_type(cat)
True
>>> is_extension_type(pd.Series(cat))
True
>>> is_extension_type(pd.SparseArray([1, 2, 3]))
True
>>> is_extension_type(pd.SparseSeries([1, 2, 3]))
True
>>>
>>> from scipy.sparse import bsr_matrix
>>> is_extension_type(bsr_matrix([1, 2, 3]))
False
>>> is_extension_type(pd.DatetimeIndex([1, 2, 3]))
False
>>> is_extension_type(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
>>>
>>> dtype = DatetimeTZDtype("ns", tz="US/Eastern")
>>> s = pd.Series([], dtype=dtype)
>>> is_extension_type(s)
True
"""
if is_categorical(arr):
return True
elif is_sparse(arr):
return True
elif is_datetimetz(arr):
return True
return False
def is_extension_array_dtype(arr_or_dtype):
"""Check if an object is a pandas extension array type.
Parameters
----------
arr_or_dtype : object
Returns
-------
bool
Notes
-----
This checks whether an object implements the pandas extension
array interface. In pandas, this includes:
* Categorical
* Sparse
* Interval
Third-party libraries may implement arrays or types satisfying
this interface as well.
"""
dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype)
return (isinstance(dtype, ExtensionDtype) or
registry.find(dtype) is not None)
def is_complex_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of a complex dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of a complex dtype.
Examples
--------
>>> is_complex_dtype(str)
False
>>> is_complex_dtype(int)
False
>>> is_complex_dtype(np.complex)
True
>>> is_complex_dtype(np.array(['a', 'b']))
False
>>> is_complex_dtype(pd.Series([1, 2]))
False
>>> is_complex_dtype(np.array([1 + 1j, 5]))
True
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.complexfloating)
def _coerce_to_dtype(dtype):
"""
Coerce a string or np.dtype to a pandas or numpy
dtype if possible.
If we cannot convert to a pandas dtype initially,
we convert to a numpy dtype.
Parameters
----------
dtype : The dtype that we want to coerce.
Returns
-------
pd_or_np_dtype : The coerced dtype.
"""
if is_categorical_dtype(dtype):
categories = getattr(dtype, 'categories', None)
ordered = getattr(dtype, 'ordered', False)
dtype = CategoricalDtype(categories=categories, ordered=ordered)
elif is_datetime64tz_dtype(dtype):
dtype = DatetimeTZDtype(dtype)
elif is_period_dtype(dtype):
dtype = PeriodDtype(dtype)
elif is_interval_dtype(dtype):
dtype = IntervalDtype(dtype)
else:
dtype = np.dtype(dtype)
return dtype
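# Usage sketch (doctest-style, not part of the original module):
#
#   >>> _coerce_to_dtype('category')
#   CategoricalDtype(categories=None, ordered=False)
#   >>> _coerce_to_dtype(np.int64)
#   dtype('int64')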
def _get_dtype(arr_or_dtype):
"""
Get the dtype instance associated with an array
or dtype object.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype object whose dtype we want to extract.
Returns
-------
obj_dtype : The extracted dtype instance from the
passed in array or dtype object.
Raises
------
TypeError : The passed in object is None.
"""
# TODO(extension)
# replace with pandas_dtype
if arr_or_dtype is None:
raise TypeError("Cannot deduce dtype from null object")
if isinstance(arr_or_dtype, np.dtype):
return arr_or_dtype
elif isinstance(arr_or_dtype, type):
return np.dtype(arr_or_dtype)
elif isinstance(arr_or_dtype, ExtensionDtype):
return arr_or_dtype
elif isinstance(arr_or_dtype, DatetimeTZDtype):
return arr_or_dtype
elif isinstance(arr_or_dtype, PeriodDtype):
return arr_or_dtype
elif isinstance(arr_or_dtype, IntervalDtype):
return arr_or_dtype
elif isinstance(arr_or_dtype, string_types):
if is_categorical_dtype(arr_or_dtype):
return CategoricalDtype.construct_from_string(arr_or_dtype)
elif is_datetime64tz_dtype(arr_or_dtype):
return DatetimeTZDtype.construct_from_string(arr_or_dtype)
elif is_period_dtype(arr_or_dtype):
return PeriodDtype.construct_from_string(arr_or_dtype)
elif is_interval_dtype(arr_or_dtype):
return IntervalDtype.construct_from_string(arr_or_dtype)
elif isinstance(arr_or_dtype, (ABCCategorical, ABCCategoricalIndex,
ABCSparseArray, ABCSparseSeries)):
return arr_or_dtype.dtype
if hasattr(arr_or_dtype, 'dtype'):
arr_or_dtype = arr_or_dtype.dtype
return np.dtype(arr_or_dtype)
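# Usage sketch (doctest-style; assumes pandas is importable as pd):
#
#   >>> _get_dtype(int) == np.dtype(int)
#   True
#   >>> _get_dtype(pd.Series([1, 2]))
#   dtype('int64')
#   >>> _get_dtype(None)
#   Traceback (most recent call last):
#       ...
#   TypeError: Cannot deduce dtype from null object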
def _get_dtype_type(arr_or_dtype):
"""
Get the type (NOT dtype) instance associated with
an array or dtype object.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype object whose type we want to extract.
Returns
-------
obj_type : The extracted type instance from the
passed in array or dtype object.
"""
# TODO(extension)
# replace with pandas_dtype
if isinstance(arr_or_dtype, np.dtype):
return arr_or_dtype.type
elif isinstance(arr_or_dtype, type):
return np.dtype(arr_or_dtype).type
elif isinstance(arr_or_dtype, CategoricalDtype):
return CategoricalDtypeType
elif isinstance(arr_or_dtype, DatetimeTZDtype):
return Timestamp
elif isinstance(arr_or_dtype, IntervalDtype):
return Interval
elif isinstance(arr_or_dtype, PeriodDtype):
return Period
elif isinstance(arr_or_dtype, string_types):
if is_categorical_dtype(arr_or_dtype):
return CategoricalDtypeType
elif is_datetime64tz_dtype(arr_or_dtype):
return Timestamp
elif is_period_dtype(arr_or_dtype):
return Period
elif is_interval_dtype(arr_or_dtype):
return Interval
return _get_dtype_type(np.dtype(arr_or_dtype))
else:
from pandas.core.arrays.sparse import SparseDtype
if isinstance(arr_or_dtype, (ABCSparseSeries,
ABCSparseArray,
SparseDtype)):
dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype)
return dtype.type
try:
return arr_or_dtype.dtype.type
except AttributeError:
return type(None)
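# Usage sketch (doctest-style, not part of the original module):
#
#   >>> _get_dtype_type(np.dtype('int64')) is np.int64
#   True
#   >>> _get_dtype_type('category') is CategoricalDtypeType
#   True
#   >>> _get_dtype_type(None) is type(None)
#   True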
def _get_dtype_from_object(dtype):
"""
Get a numpy dtype.type-style object for a dtype object.
This method also includes handling of the datetime64[ns] and
datetime64[ns, TZ] objects.
If no dtype can be found, we return ``object``.
Parameters
----------
dtype : dtype, type
The dtype object whose numpy dtype.type-style
object we want to extract.
Returns
-------
dtype_object : The extracted numpy dtype.type-style object.
"""
if isinstance(dtype, type) and issubclass(dtype, np.generic):
# Type object from a dtype
return dtype
elif is_categorical(dtype):
return CategoricalDtype().type
elif is_datetimetz(dtype):
return DatetimeTZDtype(dtype).type
elif isinstance(dtype, np.dtype): # dtype object
try:
_validate_date_like_dtype(dtype)
except TypeError:
# Should still pass if we don't have a date-like
pass
return dtype.type
elif isinstance(dtype, string_types):
if dtype in ['datetimetz', 'datetime64tz']:
return DatetimeTZDtype.type
elif dtype in ['period']:
raise NotImplementedError
if dtype == 'datetime' or dtype == 'timedelta':
dtype += '64'
try:
return _get_dtype_from_object(getattr(np, dtype))
except (AttributeError, TypeError):
# Handles cases like _get_dtype(int) i.e.,
# Python objects that are valid dtypes
# (unlike user-defined types, in general)
#
# TypeError handles the float16 type code of 'e'
# further handle internal types
pass
return _get_dtype_from_object(np.dtype(dtype))
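# Illustrative sketch (added for exposition, not part of the original module):
# _get_dtype_from_object funnels strings, scalar types and numpy dtypes down
# to one numpy scalar type. The cases below follow directly from the branches
# above.
def _demo_get_dtype_from_object():
    import numpy as np
    assert _get_dtype_from_object(np.float64) is np.float64     # already a scalar type
    assert _get_dtype_from_object('int64') is np.int64          # string -> np.int64
    assert _get_dtype_from_object('datetime') is np.datetime64  # 'datetime' -> 'datetime64'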
def _validate_date_like_dtype(dtype):
"""
Check whether the dtype is a date-like dtype. Raises an error if invalid.
Parameters
----------
dtype : dtype, type
The dtype to check.
Raises
------
TypeError : The dtype could not be cast to a date-like dtype.
ValueError : The dtype is an illegal date-like dtype (e.g. the
frequency provided is too specific)
"""
try:
typ = np.datetime_data(dtype)[0]
except ValueError as e:
raise TypeError('{error}'.format(error=e))
if typ != 'generic' and typ != 'ns':
msg = '{name!r} is too specific of a frequency, try passing {type!r}'
raise ValueError(msg.format(name=dtype.name, type=dtype.type.__name__))
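# A minimal sketch of the contract above (hypothetical demo helper, not in
# the original file): the generic 'datetime64' and nanosecond 'datetime64[ns]'
# units pass, while a coarser unit such as 'datetime64[D]' is rejected.
def _demo_validate_date_like_dtype():
    import numpy as np
    _validate_date_like_dtype(np.dtype('datetime64[ns]'))  # ok: unit is 'ns'
    try:
        _validate_date_like_dtype(np.dtype('datetime64[D]'))
    except ValueError as err:
        print(err)  # "'datetime64[D]' is too specific of a frequency, ..."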
_string_dtypes = frozenset(map(_get_dtype_from_object, (binary_type,
text_type)))
def pandas_dtype(dtype):
"""
Converts input into a pandas-only dtype object or a numpy dtype object.
Parameters
----------
dtype : object to be converted
Returns
-------
np.dtype or a pandas dtype
Raises
------
TypeError if not a dtype
"""
# short-circuit
if isinstance(dtype, np.ndarray):
return dtype.dtype
elif isinstance(dtype, np.dtype):
return dtype
# registered extension types
result = _pandas_registry.find(dtype) or registry.find(dtype)
if result is not None:
return result
# un-registered extension types
elif isinstance(dtype, (PandasExtensionDtype, ExtensionDtype)):
return dtype
# try a numpy dtype
# raise a consistent TypeError if failed
try:
npdtype = np.dtype(dtype)
except Exception:
# we don't want to force a repr of the non-string
if not isinstance(dtype, string_types):
raise TypeError("data type not understood")
raise TypeError("data type '{}' not understood".format(
dtype))
# Any invalid dtype (such as pd.Timestamp) should raise an error.
# np.dtype(invalid_type).kind = 0 for such objects. However, this will
# also catch some valid dtypes such as object, np.object_ and 'object'
# which we safeguard against by catching them earlier and returning
# np.dtype(valid_dtype) before this condition is evaluated.
if is_hashable(dtype) and dtype in [object, np.object_, 'object', 'O']:
# check hashability to avoid errors/DeprecationWarning when we get
# here and `dtype` is an array
return npdtype
elif npdtype.kind == 'O':
raise TypeError("dtype '{}' not understood".format(dtype))
return npdtype
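# Usage sketch (assumes the registries above are populated, as they are when
# this module is imported as part of pandas): pandas_dtype normalizes strings,
# numpy dtypes and extension dtypes to dtype objects and raises TypeError for
# anything else.
def _demo_pandas_dtype():
    import numpy as np
    assert pandas_dtype('int64') == np.dtype('int64')
    assert pandas_dtype(np.dtype('float32')) == np.dtype('float32')
    assert str(pandas_dtype('category')) == 'category'  # registered extension type
    try:
        pandas_dtype('not-a-dtype')
    except TypeError as err:
        print(err)  # data type 'not-a-dtype' not understood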
| 26.54145
| 79
| 0.628371
|
913d591184ed35c8f057a5e413146803e58cdf4f
| 560
|
py
|
Python
|
scripts/study_case/ID_4/test/transforms/test_random_shear.py
|
kzbnb/numerical_bugs
|
bc22e72bcc06df6ce7889a25e0aeed027bde910b
|
[
"Apache-2.0"
] | 8
|
2021-06-30T06:55:14.000Z
|
2022-03-18T01:57:14.000Z
|
scripts/study_case/ID_4/test/transforms/test_random_shear.py
|
kzbnb/numerical_bugs
|
bc22e72bcc06df6ce7889a25e0aeed027bde910b
|
[
"Apache-2.0"
] | 1
|
2021-06-30T03:08:15.000Z
|
2021-06-30T03:08:15.000Z
|
scripts/study_case/ID_4/test/transforms/test_random_shear.py
|
kzbnb/numerical_bugs
|
bc22e72bcc06df6ce7889a25e0aeed027bde910b
|
[
"Apache-2.0"
] | 2
|
2021-11-17T11:19:48.000Z
|
2021-11-18T03:05:58.000Z
|
import torch
from scripts.study_case.ID_4.torch_geometric.transforms import RandomShear
from scripts.study_case.ID_4.torch_geometric.data import Data
def test_random_shear():
assert RandomShear(0.1).__repr__() == 'RandomShear(0.1)'
pos = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]])
data = Data(pos=pos)
data = RandomShear(0)(data)
assert len(data) == 1
assert data.pos.tolist() == pos.tolist()
data = Data(pos=pos)
data = RandomShear(0.1)(data)
assert len(data) == 1
assert data.pos.tolist() != pos.tolist()
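# Hedged reference for what the assertions above rely on (an illustration,
# not the torch_geometric implementation): RandomShear(s) is assumed to
# multiply positions by a matrix with unit diagonal and off-diagonal entries
# drawn uniformly from [-s, s], so s == 0 must leave positions unchanged.
def _reference_shear(pos, shear):
    import torch
    dim = pos.size(1)
    m = torch.empty(dim, dim).uniform_(-shear, shear)
    m.fill_diagonal_(1.0)  # unit diagonal keeps shear == 0 an identity map
    return pos @ m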
| 28
| 74
| 0.655357
|
25aae02126e1c10a29c6d5e52e6c89d196c51688
| 10,054
|
py
|
Python
|
rllib/examples/env/matrix_sequential_social_dilemma.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 22
|
2018-05-08T05:52:34.000Z
|
2020-04-01T10:09:55.000Z
|
rllib/examples/env/matrix_sequential_social_dilemma.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 73
|
2021-09-25T07:11:39.000Z
|
2022-03-26T07:10:59.000Z
|
rllib/examples/env/matrix_sequential_social_dilemma.py
|
mgelbart/ray
|
4cec2286572e368a4bd64aae467751a384eff62d
|
[
"Apache-2.0"
] | 10
|
2018-04-27T10:50:59.000Z
|
2020-02-24T02:41:43.000Z
|
##########
# Contribution by the Center on Long-Term Risk:
# https://github.com/longtermrisk/marltoolbox
# Some parts are originally from:
# https://github.com/alshedivat/lola/tree/master/lola
##########
import logging
from abc import ABC
from collections.abc import Iterable  # collections.Iterable was removed in Python 3.10
from typing import Dict, Optional
import numpy as np
from gym.spaces import Discrete
from gym.utils import seeding
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.rllib.examples.env.utils.interfaces import InfoAccumulationInterface
from ray.rllib.examples.env.utils.mixins import (
TwoPlayersTwoActionsInfoMixin,
NPlayersNDiscreteActionsInfoMixin,
)
logger = logging.getLogger(__name__)
class MatrixSequentialSocialDilemma(InfoAccumulationInterface, MultiAgentEnv, ABC):
"""
A multi-agent abstract class for two player matrix games.
PAYOUT_MATRIX: Numpy array. Along dimension N, the action of the
Nth player changes. The last dimension is used to select the player
whose reward you want to read.
max_steps: number of steps in one episode
players_ids: list of the RLlib agent ids of each player
output_additional_info: ask the environment to aggregate information
about the last episode and output it as info at the end of the
episode.
"""
def __init__(self, config: Optional[Dict] = None):
if config is None:
config = {}
assert "reward_randomness" not in config.keys()
assert self.PAYOUT_MATRIX is not None
if "players_ids" in config:
assert (
isinstance(config["players_ids"], Iterable)
and len(config["players_ids"]) == self.NUM_AGENTS
)
self.players_ids = config.get("players_ids", ["player_row", "player_col"])
self.player_row_id, self.player_col_id = self.players_ids
self.max_steps = config.get("max_steps", 20)
self.output_additional_info = config.get("output_additional_info", True)
self.step_count_in_current_episode = None
# To store info about the fraction of each states
if self.output_additional_info:
self._init_info()
def seed(self, seed=None):
"""Seed the PRNG of this space."""
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self):
self.step_count_in_current_episode = 0
if self.output_additional_info:
self._reset_info()
return {
self.player_row_id: self.NUM_STATES - 1,
self.player_col_id: self.NUM_STATES - 1,
}
def step(self, actions: dict):
"""
:param actions: dict containing the action of each player, keyed by player id
:return: observations, rewards, done, info
"""
self.step_count_in_current_episode += 1
action_player_row = actions[self.player_row_id]
action_player_col = actions[self.player_col_id]
if self.output_additional_info:
self._accumulate_info(action_player_row, action_player_col)
observations = self._produce_observations_invariant_to_the_player_trained(
action_player_row, action_player_col
)
rewards = self._get_players_rewards(action_player_row, action_player_col)
epi_is_done = self.step_count_in_current_episode >= self.max_steps
if self.step_count_in_current_episode > self.max_steps:
logger.warning("self.step_count_in_current_episode > self.max_steps")
info = self._get_info_for_current_epi(epi_is_done)
return self._to_RLlib_API(observations, rewards, epi_is_done, info)
def _produce_observations_invariant_to_the_player_trained(
self, action_player_0: int, action_player_1: int
):
"""
We want to be able to use a policy trained as player 1
for evaluation as player 2 and vice versa.
"""
return [
action_player_0 * self.NUM_ACTIONS + action_player_1,
action_player_1 * self.NUM_ACTIONS + action_player_0,
]
def _get_players_rewards(self, action_player_0: int, action_player_1: int):
return [
self.PAYOUT_MATRIX[action_player_0][action_player_1][0],
self.PAYOUT_MATRIX[action_player_0][action_player_1][1],
]
def _to_RLlib_API(
self, observations: list, rewards: list, epi_is_done: bool, info: dict
):
observations = {
self.player_row_id: observations[0],
self.player_col_id: observations[1],
}
rewards = {self.player_row_id: rewards[0], self.player_col_id: rewards[1]}
if info is None:
info = {}
else:
info = {self.player_row_id: info, self.player_col_id: info}
done = {
self.player_row_id: epi_is_done,
self.player_col_id: epi_is_done,
"__all__": epi_is_done,
}
return observations, rewards, done, info
def _get_info_for_current_epi(self, epi_is_done):
if epi_is_done and self.output_additional_info:
info_for_current_epi = self._get_episode_info()
else:
info_for_current_epi = None
return info_for_current_epi
def __str__(self):
return self.NAME
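# Worked illustration (added for exposition): with NUM_ACTIONS == 2, the
# symmetric observation encoding above maps the joint action (a0, a1) to
# a0 * 2 + a1 for the row player and a1 * 2 + a0 for the column player, so a
# policy trained in one seat can be evaluated in the other seat unchanged.
def _demo_symmetric_observations(num_actions=2):
    for a0 in range(num_actions):
        for a1 in range(num_actions):
            obs_row = a0 * num_actions + a1
            obs_col = a1 * num_actions + a0
            print((a0, a1), '->', obs_row, obs_col)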
class IteratedMatchingPennies(
TwoPlayersTwoActionsInfoMixin, MatrixSequentialSocialDilemma
):
"""
A two-agent environment for the Matching Pennies game.
"""
NUM_AGENTS = 2
NUM_ACTIONS = 2
NUM_STATES = NUM_ACTIONS ** NUM_AGENTS + 1
ACTION_SPACE = Discrete(NUM_ACTIONS)
OBSERVATION_SPACE = Discrete(NUM_STATES)
PAYOUT_MATRIX = np.array([[[+1, -1], [-1, +1]], [[-1, +1], [+1, -1]]])
NAME = "IMP"
class IteratedPrisonersDilemma(
TwoPlayersTwoActionsInfoMixin, MatrixSequentialSocialDilemma
):
"""
A two-agent environment for the Prisoner's Dilemma game.
"""
NUM_AGENTS = 2
NUM_ACTIONS = 2
NUM_STATES = NUM_ACTIONS ** NUM_AGENTS + 1
ACTION_SPACE = Discrete(NUM_ACTIONS)
OBSERVATION_SPACE = Discrete(NUM_STATES)
PAYOUT_MATRIX = np.array([[[-1, -1], [-3, +0]], [[+0, -3], [-2, -2]]])
NAME = "IPD"
class IteratedAsymPrisonersDilemma(
TwoPlayersTwoActionsInfoMixin, MatrixSequentialSocialDilemma
):
"""
A two-agent environment for the Asymmetric Prisoner's Dilemma game.
"""
NUM_AGENTS = 2
NUM_ACTIONS = 2
NUM_STATES = NUM_ACTIONS ** NUM_AGENTS + 1
ACTION_SPACE = Discrete(NUM_ACTIONS)
OBSERVATION_SPACE = Discrete(NUM_STATES)
PAYOUT_MATRIX = np.array([[[+0, -1], [-3, +0]], [[+0, -3], [-2, -2]]])
NAME = "IPD"
class IteratedStagHunt(TwoPlayersTwoActionsInfoMixin, MatrixSequentialSocialDilemma):
"""
A two-agent environment for the Stag Hunt game.
"""
NUM_AGENTS = 2
NUM_ACTIONS = 2
NUM_STATES = NUM_ACTIONS ** NUM_AGENTS + 1
ACTION_SPACE = Discrete(NUM_ACTIONS)
OBSERVATION_SPACE = Discrete(NUM_STATES)
PAYOUT_MATRIX = np.array([[[3, 3], [0, 2]], [[2, 0], [1, 1]]])
NAME = "IteratedStagHunt"
class IteratedChicken(TwoPlayersTwoActionsInfoMixin, MatrixSequentialSocialDilemma):
"""
A two-agent environment for the Chicken game.
"""
NUM_AGENTS = 2
NUM_ACTIONS = 2
NUM_STATES = NUM_ACTIONS ** NUM_AGENTS + 1
ACTION_SPACE = Discrete(NUM_ACTIONS)
OBSERVATION_SPACE = Discrete(NUM_STATES)
PAYOUT_MATRIX = np.array([[[+0, +0], [-1.0, +1.0]], [[+1, -1], [-10, -10]]])
NAME = "IteratedChicken"
class IteratedAsymChicken(TwoPlayersTwoActionsInfoMixin, MatrixSequentialSocialDilemma):
"""
A two-agent environment for the Asymmetric Chicken game.
"""
NUM_AGENTS = 2
NUM_ACTIONS = 2
NUM_STATES = NUM_ACTIONS ** NUM_AGENTS + 1
ACTION_SPACE = Discrete(NUM_ACTIONS)
OBSERVATION_SPACE = Discrete(NUM_STATES)
PAYOUT_MATRIX = np.array([[[+2.0, +0], [-1.0, +1.0]], [[+2.5, -1], [-10, -10]]])
NAME = "AsymmetricIteratedChicken"
class IteratedBoS(TwoPlayersTwoActionsInfoMixin, MatrixSequentialSocialDilemma):
"""
A two-agent environment for the BoS game.
"""
NUM_AGENTS = 2
NUM_ACTIONS = 2
NUM_STATES = NUM_ACTIONS ** NUM_AGENTS + 1
ACTION_SPACE = Discrete(NUM_ACTIONS)
OBSERVATION_SPACE = Discrete(NUM_STATES)
PAYOUT_MATRIX = np.array(
[[[+3.0, +2.0], [+0.0, +0.0]], [[+0.0, +0.0], [+2.0, +3.0]]]
)
NAME = "IteratedBoS"
class IteratedAsymBoS(TwoPlayersTwoActionsInfoMixin, MatrixSequentialSocialDilemma):
"""
A two-agent environment for the Asymmetric BoS game.
"""
NUM_AGENTS = 2
NUM_ACTIONS = 2
NUM_STATES = NUM_ACTIONS ** NUM_AGENTS + 1
ACTION_SPACE = Discrete(NUM_ACTIONS)
OBSERVATION_SPACE = Discrete(NUM_STATES)
PAYOUT_MATRIX = np.array(
[[[+4.0, +1.0], [+0.0, +0.0]], [[+0.0, +0.0], [+2.0, +2.0]]]
)
NAME = "AsymmetricIteratedBoS"
def define_greed_fear_matrix_game(greed, fear):
class GreedFearGame(TwoPlayersTwoActionsInfoMixin, MatrixSequentialSocialDilemma):
NUM_AGENTS = 2
NUM_ACTIONS = 2
NUM_STATES = NUM_ACTIONS ** NUM_AGENTS + 1
ACTION_SPACE = Discrete(NUM_ACTIONS)
OBSERVATION_SPACE = Discrete(NUM_STATES)
R = 3
P = 1
T = R + greed
S = P - fear
PAYOUT_MATRIX = np.array([[[R, R], [S, T]], [[T, S], [P, P]]])
NAME = "IteratedGreedFear"
def __str__(self):
return f"{self.NAME} with greed={greed} and fear={fear}"
return GreedFearGame
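# Usage sketch with hypothetical values: greed raises the temptation payoff
# T = R + greed above R, and fear lowers the sucker payoff S = P - fear below
# P, so greed=1, fear=1 reproduces the Prisoner's Dilemma ordering
# T > R > P > S.
def _demo_greed_fear_game():
    GreedFearPD = define_greed_fear_matrix_game(greed=1, fear=1)
    env = GreedFearPD({"max_steps": 10})
    env.reset()
    # row player cooperates (0), column player defects (1)
    return env.step({env.player_row_id: 0, env.player_col_id: 1})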
class IteratedBoSAndPD(
NPlayersNDiscreteActionsInfoMixin, MatrixSequentialSocialDilemma
):
"""
A two-agent environment for the BOTS + PD game.
"""
NUM_AGENTS = 2
NUM_ACTIONS = 3
NUM_STATES = NUM_ACTIONS ** NUM_AGENTS + 1
ACTION_SPACE = Discrete(NUM_ACTIONS)
OBSERVATION_SPACE = Discrete(NUM_STATES)
PAYOUT_MATRIX = np.array(
[
[[3.5, +1], [+0, +0], [-3, +2]],
[[+0.0, +0], [+1, +3], [-3, +2]],
[[+2.0, -3], [+2, -3], [-1, -1]],
]
)
NAME = "IteratedBoSAndPD"
| 31.616352
| 88
| 0.652676
|
2ae4763cc17a1e280c7da2251df98f9372304681
| 4,491
|
py
|
Python
|
starry_process/ops/base_op.py
|
arfon/starry_process
|
72b2a540e7e4fdb2e6af61507efa1c9861d5c919
|
[
"MIT"
] | 13
|
2020-04-14T17:47:28.000Z
|
2022-03-16T15:19:48.000Z
|
starry_process/ops/base_op.py
|
arfon/starry_process
|
72b2a540e7e4fdb2e6af61507efa1c9861d5c919
|
[
"MIT"
] | 22
|
2020-09-23T20:33:22.000Z
|
2022-02-07T17:38:09.000Z
|
starry_process/ops/base_op.py
|
arfon/starry_process
|
72b2a540e7e4fdb2e6af61507efa1c9861d5c919
|
[
"MIT"
] | 8
|
2020-04-14T17:47:44.000Z
|
2022-02-06T16:39:47.000Z
|
# -*- coding: utf-8 -*-
from ..starry_process_version import __version__
from ..defaults import defaults
from ..compat import theano, tt, COp, Apply, floatX
import sys
import pkg_resources
# Allow C code caching even in dev mode?
try:
from .. import CACHE_DEV_C_CODE
except:
CACHE_DEV_C_CODE = False
__all__ = ["BaseOp", "IntegralOp"]
class BaseOp(COp):
__props__ = ("ydeg", "udeg", "compile_args")
func_file = None
func_name = None
def __init__(
self,
ydeg=defaults["ydeg"],
udeg=defaults["udeg"],
compile_args=[],
**kwargs
):
self.ydeg = ydeg
self.N = (self.ydeg + 1) ** 2
self.udeg = udeg
self.NLU = (self.ydeg + self.udeg + 1) ** 2
assert type(compile_args) is list, "arg `compile_args` must be a list"
for item in compile_args:
assert (
type(item) is tuple
), "items in `compile_args` must be tuples"
assert len(item) == 2, "tuples in `compile_args` must have 2 items"
self.compile_args = tuple(compile_args)
super().__init__(self.func_file, self.func_name)
def perform(self, *args):
raise NotImplementedError("Only C op is implemented")
def c_code_cache_version(self, *args, **kwargs):
if ("dev" in __version__) and not CACHE_DEV_C_CODE:
return ()
else:
v = []
for sv in __version__.split("."):
try:
v.append(int(sv))
except:
v.append(sv)
return tuple(v)
def c_headers(self, *args, **kwargs):
return [
"utils.h",
"special.h",
"latitude.h",
"wigner.h",
"eigh.h",
"flux.h",
"theano_helpers.h",
"vector",
]
def c_header_dirs(self, *args, **kwargs):
dirs = [
pkg_resources.resource_filename("starry_process", "ops/include")
]
dirs += [
pkg_resources.resource_filename(
"starry_process", "ops/vendor/eigen_3.3.5"
)
]
return dirs
def c_compile_args(self, *args, **kwargs):
args = ["-std=c++14", "-O2", "-DNDEBUG"]
if sys.platform == "darwin":
args += ["-stdlib=libc++", "-mmacosx-version-min=10.7"]
args += ["-DSP__LMAX={0}".format(self.ydeg)]
args += ["-DSP__UMAX={0}".format(self.udeg)]
for (key, value) in self.compile_args:
if key.startswith("SP_"):
args += ["-D{0}={1}".format(key, value)]
return args
class IntegralOp(BaseOp):
def make_node(self, alpha, beta):
in_args = [
tt.as_tensor_variable(arg).astype(floatX) for arg in [alpha, beta]
]
out_args = [
tt.TensorType(dtype=floatX, broadcastable=[False])(),
tt.TensorType(dtype=floatX, broadcastable=[False])(),
tt.TensorType(dtype=floatX, broadcastable=[False])(),
tt.TensorType(dtype=floatX, broadcastable=[False, False])(),
tt.TensorType(dtype=floatX, broadcastable=[False, False])(),
tt.TensorType(dtype=floatX, broadcastable=[False, False])(),
]
return Apply(self, in_args, out_args)
def infer_shape(self, *args):
return (
[self.N],
[self.N],
[self.N],
[self.N, self.N],
[self.N, self.N],
[self.N, self.N],
)
def grad(self, inputs, gradients):
alpha, beta = inputs
q, dqda, dqdb, Q, dQda, dQdb = self(*inputs)
bq = gradients[0]
bQ = gradients[3]
# Derivs of derivs not implemented
for i, g in enumerate(list(gradients[1:3]) + list(gradients[4:6])):
if not isinstance(g.type, theano.gradient.DisconnectedType):
raise ValueError(
"can't propagate gradients wrt parameter {0}".format(i + 1)
)
# Chain rule
ba = 0.0
bb = 0.0
for g, fa, fb in zip([bq, bQ], [dqda, dQda], [dqdb, dQdb]):
if not isinstance(g.type, theano.gradient.DisconnectedType):
ba += tt.sum(g * fa)
bb += tt.sum(g * fb)
return ba, bb
def R_op(self, inputs, eval_points):
if eval_points[0] is None:
return eval_points
return self.grad(inputs, eval_points)
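# Plain-numpy sketch of the contraction performed in IntegralOp.grad above
# (for exposition only): the op already returns dq/dalpha etc., so the
# backward pass is just a sum of upstream gradients times stored derivatives.
def _chain_rule_sketch(bq, bQ, dqda, dQda, dqdb, dQdb):
    import numpy as np
    ba = np.sum(bq * dqda) + np.sum(bQ * dQda)  # d(loss)/d(alpha)
    bb = np.sum(bq * dqdb) + np.sum(bQ * dQdb)  # d(loss)/d(beta)
    return ba, bb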
| 31.1875
| 79
| 0.531285
|
6f8ea7d7e0fe3f7ad67d9786d63a5216df23498b
| 69
|
py
|
Python
|
python/testData/editing/noBackslashOnEnterInMappingPattern.after.py
|
06needhamt/intellij-community
|
63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b
|
[
"Apache-2.0"
] | null | null | null |
python/testData/editing/noBackslashOnEnterInMappingPattern.after.py
|
06needhamt/intellij-community
|
63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b
|
[
"Apache-2.0"
] | null | null | null |
python/testData/editing/noBackslashOnEnterInMappingPattern.after.py
|
06needhamt/intellij-community
|
63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b
|
[
"Apache-2.0"
] | null | null | null |
match x:
case {'foo': 1,
<caret>'bar': 2}
pass
| 17.25
| 26
| 0.391304
|
2e212ba7238166c94ef356bff92e6d0a2a82f77f
| 56,426
|
py
|
Python
|
leo/commands/commanderOutlineCommands.py
|
gamtiq/leo-editor
|
3f51986fea4c16ed0713699b088bc8a8abd738c3
|
[
"MIT"
] | 1
|
2020-11-07T23:22:12.000Z
|
2020-11-07T23:22:12.000Z
|
leo/commands/commanderOutlineCommands.py
|
gamtiq/leo-editor
|
3f51986fea4c16ed0713699b088bc8a8abd738c3
|
[
"MIT"
] | null | null | null |
leo/commands/commanderOutlineCommands.py
|
gamtiq/leo-editor
|
3f51986fea4c16ed0713699b088bc8a8abd738c3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#@+leo-ver=5-thin
#@+node:ekr.20171124080430.1: * @file ../commands/commanderOutlineCommands.py
#@@first
"""Outline commands that used to be defined in leoCommands.py"""
import leo.core.leoGlobals as g
import xml.etree.ElementTree as ElementTree
import leo.core.leoNodes as leoNodes
import leo.core.leoFileCommands as leoFileCommands
#@+others
#@+node:ekr.20031218072017.1548: ** c_oc.Cut & Paste Outlines
#@+node:ekr.20031218072017.1550: *3* c_oc.copyOutline
@g.commander_command('copy-node')
def copyOutline(self, event=None):
"""Copy the selected outline to the clipboard."""
# Copying an outline has no undo consequences.
c = self
c.endEditing()
s = c.fileCommands.putLeoOutline()
g.app.paste_c = c
g.app.gui.replaceClipboardWith(s)
#@+node:ekr.20031218072017.1549: *3* c_oc.cutOutline
@g.commander_command('cut-node')
def cutOutline(self, event=None):
"""Delete the selected outline and send it to the clipboard."""
c = self
if c.canDeleteHeadline():
c.copyOutline()
c.deleteOutline(op_name="Cut Node")
c.recolor()
#@+node:ekr.20031218072017.1551: *3* c_oc.pasteOutline
@g.commander_command('paste-node')
def pasteOutline(self,
event=None,
redrawFlag=True,
s=None,
undoFlag=True
):
"""
Paste an outline into the present outline from the clipboard.
Nodes do *not* retain their original identity.
"""
c = self
if s is None:
s = g.app.gui.getTextFromClipboard()
c.endEditing()
if not s or not c.canPasteOutline(s):
return None # This should never happen.
isLeo = g.match(s, 0, g.app.prolog_prefix_string)
if not isLeo:
return None
# Get *position* to be pasted.
pasted = c.fileCommands.getLeoOutlineFromClipboard(s)
if not pasted:
# Leo no longer supports MORE outlines. Use import-MORE-files instead.
return None
# Validate.
c.validateOutline()
c.checkOutline()
# Handle the "before" data for undo.
if undoFlag:
undoData = c.undoer.beforeInsertNode(c.p,
pasteAsClone=False,
copiedBunchList=[],
)
# Paste the node into the outline.
c.selectPosition(pasted)
pasted.setDirty()
c.setChanged(redrawFlag=redrawFlag)
# Prevent flash when fixing #387.
back = pasted.back()
if back and back.hasChildren() and back.isExpanded():
pasted.moveToNthChildOf(back, 0)
# Finish the command.
if undoFlag:
c.undoer.afterInsertNode(pasted, 'Paste Node', undoData)
if redrawFlag:
c.redraw(pasted)
c.recolor()
return pasted
#@+node:EKR.20040610130943: *3* c_oc.pasteOutlineRetainingClones & helpers
@g.commander_command('paste-retaining-clones')
def pasteOutlineRetainingClones(self,
event=None,
redrawFlag=True,
s=None,
undoFlag=True,
):
"""
Paste an outline into the present outline from the clipboard.
Nodes *retain* their original identity.
"""
c = self
if s is None:
s = g.app.gui.getTextFromClipboard()
c.endEditing()
if not s or not c.canPasteOutline(s):
return None # This should never happen.
isLeo = g.match(s, 0, g.app.prolog_prefix_string)
if not isLeo:
return None
# Get *position* to be pasted.
pasted = c.fileCommands.getLeoOutlineFromClipboardRetainingClones(s)
if not pasted:
# Leo no longer supports MORE outlines. Use import-MORE-files instead.
return None
# Validate.
c.validateOutline()
c.checkOutline()
# Handle the "before" data for undo.
if undoFlag:
vnodeInfoDict = computeVnodeInfoDict(c)
undoData = c.undoer.beforeInsertNode(c.p,
pasteAsClone=True,
copiedBunchList=computeCopiedBunchList(c, pasted, vnodeInfoDict),
)
# Paste the node into the outline.
c.selectPosition(pasted)
pasted.setDirty()
c.setChanged(redrawFlag=redrawFlag)
# Prevent flash when fixing #387.
back = pasted.back()
if back and back.hasChildren() and back.isExpanded():
pasted.moveToNthChildOf(back, 0)
pasted.setDirty()
# Set dirty bits for ancestors of *all* pasted nodes.
for p in pasted.self_and_subtree():
p.setAllAncestorAtFileNodesDirty()
# Finish the command.
if undoFlag:
c.undoer.afterInsertNode(pasted, 'Paste As Clone', undoData)
if redrawFlag:
c.redraw(pasted)
c.recolor()
return pasted
#@+node:ekr.20050418084539.2: *4* def computeCopiedBunchList
def computeCopiedBunchList(c, pasted, vnodeInfoDict):
"""Create a dict containing only copied vnodes."""
d = {}
for p in pasted.self_and_subtree(copy=False):
d[p.v] = p.v
aList = []
for v in vnodeInfoDict:
if d.get(v):
bunch = vnodeInfoDict.get(v)
aList.append(bunch)
return aList
#@+node:ekr.20050418084539: *4* def computeVnodeInfoDict
def computeVnodeInfoDict(c):
"""
We don't know yet which nodes will be affected by the paste, so we remember
everything. This is expensive, but foolproof.
The alternative is to try to remember the 'before' values of nodes in the
FileCommands read logic. Several experiments failed, and the code is very ugly.
In short, it seems wise to do things the foolproof way.
"""
d = {}
for v in c.all_unique_nodes():
if v not in d:
d[v] = g.Bunch(v=v, head=v.h, body=v.b)
return d
#@+node:vitalije.20200529105105.1: *3* c_oc.pasteAsTemplate
@g.commander_command('paste-as-template')
def pasteAsTemplate(self, event=None):
c = self
p = c.p
#@+others
#@+node:vitalije.20200529112224.1: *4* skip_root
def skip_root(v):
'''
generates v nodes in the outline order
but skips a subtree of the node with root_gnx
'''
if v.gnx != root_gnx:
yield v
for ch in v.children:
yield from skip_root(ch)
#@+node:vitalije.20200529112459.1: *4* translate_gnx
def translate_gnx(gnx):
'''
allocates a new gnx for all nodes that
are not found outside the copied tree
'''
if gnx in outside:
return gnx
return g.app.nodeIndices.computeNewIndex()
#@+node:vitalije.20200529115141.1: *4* viter
def viter(parent_gnx, xv):
'''
iterates <v> nodes generating tuples:
(parent_gnx, child_gnx, headline, body)
skipping the descendants of already seen nodes.
'''
chgnx = xv.attrib.get('t')
b = bodies[chgnx]
gnx = translation.get(chgnx)
if gnx in seen:
yield parent_gnx, gnx, heads.get(gnx), b
else:
seen.add(gnx)
h = xv[0].text
heads[gnx] = h
yield parent_gnx, gnx, h, b
for xch in xv[1:]:
yield from viter(gnx, xch)
#@+node:vitalije.20200529114857.1: *4* getv
gnx2v = c.fileCommands.gnxDict
def getv(gnx):
'''
returns a pair (vnode, is_new) for the given gnx.
If the node doesn't exist, creates a new one.
'''
v = gnx2v.get(gnx)
if v is None:
return leoNodes.VNode(c, gnx), True
return v, False
#@+node:vitalije.20200529115539.1: *4* do_paste
def do_paste(vpar, index):
'''
pastes a new node as a child of vpar at given index
'''
vpargnx = vpar.gnx
# The first node is inserted at the given index,
# and the rest are simply appended to their parents' children.
# To achieve this we first create a generator object.
rows = viter(vpargnx, xvnodes[0])
# then we just take first tuple
pgnx, gnx, h, b = next(rows)
# create vnode
v, _ = getv(gnx)
v.h = h
v.b = b
# and finally insert it at the given index
vpar.children.insert(index, v)
v.parents.append(vpar)
pasted = v # remember the first node as a return value
# now we iterate the rest of tuples
for pgnx, gnx, h, b in rows:
# get or create a child `v`
v, isNew = getv(gnx)
if isNew:
v.h = h
v.b = b
ua = uas.get(gnx)
if ua:
v.unknownAttributes = ua
# get parent node `vpar`
vpar = getv(pgnx)[0]
# and link them
vpar.children.append(v)
v.parents.append(vpar)
return pasted
#@+node:vitalije.20200529120440.1: *4* undoHelper
def undoHelper():
v = vpar.children.pop(index)
v.parents.remove(vpar)
c.redraw(bunch.p)
#@+node:vitalije.20200529120537.1: *4* redoHelper
def redoHelper():
vpar.children.insert(index, pasted)
pasted.parents.append(vpar)
c.redraw(newp)
#@-others
xroot = ElementTree.fromstring(g.app.gui.getTextFromClipboard())
xvnodes = xroot.find('vnodes')
xtnodes = xroot.find('tnodes')
bodies, uas = leoFileCommands.FastRead(c, {}).scanTnodes(xtnodes)
root_gnx = xvnodes[0].attrib.get('t') # the gnx of copied node
outside = { x.gnx for x in skip_root(c.hiddenRootNode) }
# outside will contain gnxes of nodes that are outside the copied tree
translation = { x: translate_gnx(x) for x in bodies }
# we generate a new gnx for each node in the copied tree
seen = set(outside) # required for the treatment of local clones inside the copied tree
heads = {}
bunch = c.undoer.createCommonBunch(p)
#@+<< prepare destination data >>
#@+node:vitalije.20200529111500.1: *4* << prepare destination data >>
# destination data consists of
# 1. vpar --- parent v node that should receive pasted child
# 2. index --- at which pasted child will be
# 3. parStack --- a stack for creating new position of the pasted node
#
# the new position will be: Position(vpar.children[index], index, parStack)
# but it can't be calculated yet, before actual paste is done
if p.isExpanded():
# paste as a first child of current position
vpar = p.v
index = 0
parStack = p.stack + [(p.v, p._childIndex)]
else:
# paste after the current position
parStack = p.stack
vpar = p.stack[-1][0] if p.stack else c.hiddenRootNode
index = p._childIndex + 1
#@-<< prepare destination data >>
pasted = do_paste(vpar, index)
newp = leoNodes.Position(pasted, index, parStack)
bunch.undoHelper = undoHelper
bunch.redoHelper = redoHelper
bunch.undoType = 'paste-retaining-outside-clones'
newp.setDirty()
c.undoer.pushBead(bunch)
c.redraw(newp)
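# Sketch of the gnx-translation rule used above (hypothetical gnx strings,
# added for exposition): a gnx keeps its identity only when it already exists
# outside the copied tree; everything else gets a fresh gnx, which is what
# makes outside clones stay clones while inside nodes become new nodes.
def _demo_translate_gnxes(copied_gnxes, outside_gnxes):
    fresh = ('new.%d' % i for i, _ in enumerate(copied_gnxes))
    return {
        gnx: (gnx if gnx in outside_gnxes else next(fresh))
        for gnx in copied_gnxes
    }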
#@+node:ekr.20040412060927: ** c_oc.dumpOutline
@g.commander_command('dump-outline')
def dumpOutline(self, event=None):
""" Dump all nodes in the outline."""
c = self
seen = {}
print('')
print('=' * 40)
v = c.hiddenRootNode
v.dump()
seen[v] = True
for p in c.all_positions():
if p.v not in seen:
seen[p.v] = True
p.v.dump()
#@+node:ekr.20031218072017.2898: ** c_oc.Expand & contract commands
#@+node:ekr.20031218072017.2900: *3* c_oc.contract-all
@g.commander_command('contract-all')
def contractAllHeadlinesCommand(self, event=None, redrawFlag=True):
"""Contract all nodes in the outline."""
# The helper does all the work.
c = self
c.contractAllHeadlines(event=event, redrawFlag=redrawFlag)
#@+node:ekr.20080819075811.3: *3* c_oc.contractAllOtherNodes & helper
@g.commander_command('contract-all-other-nodes')
def contractAllOtherNodes(self, event=None):
"""
Contract all nodes except those needed to make the
presently selected node visible.
"""
c = self
leaveOpen = c.p
for p in c.rootPosition().self_and_siblings():
contractIfNotCurrent(c, p, leaveOpen)
c.redraw()
#@+node:ekr.20080819075811.7: *4* def contractIfNotCurrent
def contractIfNotCurrent(c, p, leaveOpen):
if p == leaveOpen or not p.isAncestorOf(leaveOpen):
p.contract()
for child in p.children():
if child != leaveOpen and child.isAncestorOf(leaveOpen):
contractIfNotCurrent(c, child, leaveOpen)
else:
for p2 in child.self_and_subtree():
p2.contract()
#@+node:ekr.20200824130837.1: *3* c_oc.contractAllSubheads (new)
@g.commander_command('contract-all-subheads')
def contractAllSubheads(self, event=None):
"""Contract all children of the presently selected node."""
c, p = self, self.p
if not p:
return
child = p.firstChild()
c.contractSubtree(p)
while child:
c.contractSubtree(child)
child = child.next()
c.redraw(p)
#@+node:ekr.20031218072017.2901: *3* c_oc.contractNode
@g.commander_command('contract-node')
def contractNode(self, event=None):
"""Contract the presently selected node."""
c = self
p = c.p
c.endEditing()
p.contract()
c.redraw_after_contract(p)
c.selectPosition(p)
#@+node:ekr.20040930064232: *3* c_oc.contractNodeOrGoToParent
@g.commander_command('contract-or-go-left')
def contractNodeOrGoToParent(self, event=None):
"""Simulate the left Arrow Key in folder of Windows Explorer."""
c, cc, p = self, self.chapterController, self.p
parent = p.parent()
redraw = False
# Bug fix: 2016/04/19: test p.v.isExpanded().
if p.hasChildren() and (p.v.isExpanded() or p.isExpanded()):
c.contractNode()
elif parent and parent.isVisible(c):
# Contract all children first.
if c.collapse_on_lt_arrow:
for child in parent.children():
if child.isExpanded():
child.contract()
if child.hasChildren():
redraw = True
if cc and cc.inChapter and parent.h.startswith('@chapter '):
pass
else:
c.goToParent()
if redraw:
# A *child* should be collapsed. Do a *full* redraw.
c.redraw()
#@+node:ekr.20031218072017.2902: *3* c_oc.contractParent
@g.commander_command('contract-parent')
def contractParent(self, event=None):
"""Contract the parent of the presently selected node."""
c = self
c.endEditing()
p = c.p
parent = p.parent()
if not parent: return
parent.contract()
c.redraw_after_contract(p=parent)
#@+node:ekr.20031218072017.2903: *3* c_oc.expandAllHeadlines
@g.commander_command('expand-all')
def expandAllHeadlines(self, event=None):
"""Expand all headlines.
Warning: this can take a long time for large outlines."""
c = self
c.endEditing()
p = c.rootPosition()
while p:
c.expandSubtree(p, redraw=False)
p.moveToNext()
c.redraw_after_expand(p=c.rootPosition())
c.expansionLevel = 0 # Reset expansion level.
#@+node:ekr.20031218072017.2904: *3* c_oc.expandAllSubheads
@g.commander_command('expand-all-subheads')
def expandAllSubheads(self, event=None):
"""Expand all children of the presently selected node."""
c = self; p = c.p
if not p: return
child = p.firstChild()
c.expandSubtree(p)
while child:
c.expandSubtree(child)
child = child.next()
c.redraw(p)
#@+node:ekr.20031218072017.2905: *3* c_oc.expandLevel1..9
@g.commander_command('expand-to-level-1')
def expandLevel1(self, event=None):
"""Expand the outline to level 1"""
self.expandToLevel(1)
@g.commander_command('expand-to-level-2')
def expandLevel2(self, event=None):
"""Expand the outline to level 2"""
self.expandToLevel(2)
@g.commander_command('expand-to-level-3')
def expandLevel3(self, event=None):
"""Expand the outline to level 3"""
self.expandToLevel(3)
@g.commander_command('expand-to-level-4')
def expandLevel4(self, event=None):
"""Expand the outline to level 4"""
self.expandToLevel(4)
@g.commander_command('expand-to-level-5')
def expandLevel5(self, event=None):
"""Expand the outline to level 5"""
self.expandToLevel(5)
@g.commander_command('expand-to-level-6')
def expandLevel6(self, event=None):
"""Expand the outline to level 6"""
self.expandToLevel(6)
@g.commander_command('expand-to-level-7')
def expandLevel7(self, event=None):
"""Expand the outline to level 7"""
self.expandToLevel(7)
@g.commander_command('expand-to-level-8')
def expandLevel8(self, event=None):
"""Expand the outline to level 8"""
self.expandToLevel(8)
@g.commander_command('expand-to-level-9')
def expandLevel9(self, event=None):
"""Expand the outline to level 9"""
self.expandToLevel(9)
#@+node:ekr.20031218072017.2906: *3* c_oc.expandNextLevel
@g.commander_command('expand-next-level')
def expandNextLevel(self, event=None):
"""
Increase the expansion level of the outline and
Expand all nodes at that level or lower.
"""
c = self
# Expansion levels are now local to a particular tree.
if c.expansionNode != c.p:
c.expansionLevel = 1
c.expansionNode = c.p.copy()
self.expandToLevel(c.expansionLevel + 1)
#@+node:ekr.20031218072017.2907: *3* c_oc.expandNode
@g.commander_command('expand-node')
def expandNode(self, event=None):
"""Expand the presently selected node."""
c = self
p = c.p
c.endEditing()
p.expand()
c.redraw_after_expand(p)
c.selectPosition(p)
#@+node:ekr.20040930064232.1: *3* c_oc.expandNodeAndGoToFirstChild
@g.commander_command('expand-and-go-right')
def expandNodeAndGoToFirstChild(self, event=None):
"""If a node has children, expand it if needed and go to the first child."""
c, p = self, self.p
c.endEditing()
if p.hasChildren():
if not p.isExpanded():
c.expandNode()
c.selectPosition(p.firstChild())
c.treeFocusHelper()
#@+node:ekr.20171125082744.1: *3* c_oc.expandNodeOrGoToFirstChild
@g.commander_command('expand-or-go-right')
def expandNodeOrGoToFirstChild(self, event=None):
"""
Simulate the Right Arrow Key in the folder pane of Windows Explorer.
If c.p has no children, do nothing.
Otherwise, if c.p is expanded, select the first child.
Otherwise, expand c.p.
"""
c, p = self, self.p
c.endEditing()
if p.hasChildren():
if p.isExpanded():
c.redraw_after_expand(p.firstChild())
else:
c.expandNode()
#@+node:ekr.20060928062431: *3* c_oc.expandOnlyAncestorsOfNode
@g.commander_command('expand-ancestors-only')
def expandOnlyAncestorsOfNode(self, event=None, p=None):
"""Contract all nodes in the outline."""
c = self
level = 1
if p: c.selectPosition(p) # 2013/12/25
root = c.p
for p in c.all_unique_positions():
p.v.expandedPositions = []
p.v.contract()
for p in root.parents():
p.expand()
level += 1
c.expansionLevel = level # Reset expansion level.
#@+node:ekr.20031218072017.2908: *3* c_oc.expandPrevLevel
@g.commander_command('expand-prev-level')
def expandPrevLevel(self, event=None):
"""Decrease the expansion level of the outline and
Expand all nodes at that level or lower."""
c = self
# Expansion levels are now local to a particular tree.
if c.expansionNode != c.p:
c.expansionLevel = 1
c.expansionNode = c.p.copy()
self.expandToLevel(max(1, c.expansionLevel - 1))
#@+node:ekr.20171124081846.1: ** c_oc.fullCheckOutline
@g.commander_command('check-outline')
def fullCheckOutline(self, event=None):
"""
Performs a full check of the consistency of a .leo file.
As of Leo 5.1, Leo performs checks of gnx's and outline structure
before writes and after reads, pastes and undo/redo.
"""
c = self
return c.checkOutline(check_links=True)
#@+node:ekr.20031218072017.2913: ** c_oc.Goto commands
#@+node:ekr.20071213123942: *3* c_oc.findNextClone
@g.commander_command('find-next-clone')
def findNextClone(self, event=None):
"""Select the next cloned node."""
c = self; p = c.p; cc = c.chapterController
if not p: return
if p.isCloned():
p.moveToThreadNext()
flag = False
while p:
if p.isCloned():
flag = True; break
else:
p.moveToThreadNext()
if flag:
if cc:
# name = cc.findChapterNameForPosition(p)
cc.selectChapterByName('main')
c.selectPosition(p)
c.redraw_after_select(p)
else:
g.blue('no more clones')
#@+node:ekr.20031218072017.1628: *3* c_oc.goNextVisitedNode
@g.commander_command('go-forward')
def goNextVisitedNode(self, event=None):
"""Select the next visited node."""
c = self
p = c.nodeHistory.goNext()
if p:
c.nodeHistory.skipBeadUpdate = True
try:
c.selectPosition(p)
finally:
c.nodeHistory.skipBeadUpdate = False
c.redraw_after_select(p)
#@+node:ekr.20031218072017.1627: *3* c_oc.goPrevVisitedNode
@g.commander_command('go-back')
def goPrevVisitedNode(self, event=None):
"""Select the previously visited node."""
c = self
p = c.nodeHistory.goPrev()
if p:
c.nodeHistory.skipBeadUpdate = True
try:
c.selectPosition(p)
finally:
c.nodeHistory.skipBeadUpdate = False
c.redraw_after_select(p)
#@+node:ekr.20031218072017.2914: *3* c_oc.goToFirstNode
@g.commander_command('goto-first-node')
def goToFirstNode(self, event=None):
"""Select the first node of the entire outline."""
c = self
p = c.rootPosition()
c.expandOnlyAncestorsOfNode(p=p)
c.redraw()
#@+node:ekr.20051012092453: *3* c_oc.goToFirstSibling
@g.commander_command('goto-first-sibling')
def goToFirstSibling(self, event=None):
"""Select the first sibling of the selected node."""
c = self; p = c.p
if p.hasBack():
while p.hasBack():
p.moveToBack()
c.treeSelectHelper(p)
#@+node:ekr.20070615070925: *3* c_oc.goToFirstVisibleNode
@g.commander_command('goto-first-visible-node')
def goToFirstVisibleNode(self, event=None):
"""Select the first visible node of the selected chapter or hoist."""
c = self
p = c.firstVisible()
if p:
c.expandOnlyAncestorsOfNode(p=p)
c.redraw()
#@+node:ekr.20031218072017.2915: *3* c_oc.goToLastNode
@g.commander_command('goto-last-node')
def goToLastNode(self, event=None):
"""Select the last node in the entire tree."""
c = self
p = c.rootPosition()
while p and p.hasThreadNext():
p.moveToThreadNext()
c.expandOnlyAncestorsOfNode(p=p)
c.redraw()
#@+node:ekr.20051012092847.1: *3* c_oc.goToLastSibling
@g.commander_command('goto-last-sibling')
def goToLastSibling(self, event=None):
"""Select the last sibling of the selected node."""
c = self; p = c.p
if p.hasNext():
while p.hasNext():
p.moveToNext()
c.treeSelectHelper(p)
#@+node:ekr.20050711153537: *3* c_oc.goToLastVisibleNode
@g.commander_command('goto-last-visible-node')
def goToLastVisibleNode(self, event=None):
"""Select the last visible node of selected chapter or hoist."""
c = self
p = c.lastVisible()
if p:
c.expandOnlyAncestorsOfNode(p=p)
c.redraw()
#@+node:ekr.20031218072017.2916: *3* c_oc.goToNextClone
@g.commander_command('goto-next-clone')
def goToNextClone(self, event=None):
"""
Select the next node that is a clone of the selected node.
If the selected node is not a clone, do find-next-clone.
"""
c, p = self, self.p
cc = c.chapterController; p = c.p
if not p:
return
if not p.isCloned():
c.findNextClone()
return
v = p.v
p.moveToThreadNext()
wrapped = False
while 1:
if p and p.v == v:
break
elif p:
p.moveToThreadNext()
elif wrapped:
break
else:
wrapped = True
p = c.rootPosition()
if p:
c.expandAllAncestors(p)
if cc:
# #252: goto-next-clone activates the chapter.
chapter = cc.getSelectedChapter()
old_name = chapter and chapter.name
new_name = cc.findChapterNameForPosition(p)
if new_name == old_name:
# Always do a full redraw.
c.redraw(p)
else:
if 1:
cc.selectChapterByName(new_name)
c.redraw(p)
else: # Old code.
c.selectPosition(p)
cc.selectChapterByName(new_name)
else:
# Always do a full redraw.
c.redraw(p)
else:
g.blue('done')
#@+node:ekr.20031218072017.2917: *3* c_oc.goToNextDirtyHeadline
@g.commander_command('goto-next-changed')
def goToNextDirtyHeadline(self, event=None):
"""Select the node that is marked as changed."""
c = self; p = c.p
if not p: return
p.moveToThreadNext()
wrapped = False
while 1:
if p and p.isDirty():
break
elif p:
p.moveToThreadNext()
elif wrapped:
break
else:
wrapped = True
p = c.rootPosition()
if not p: g.blue('done')
c.treeSelectHelper(p) # Sets focus.
#@+node:ekr.20031218072017.2918: *3* c_oc.goToNextMarkedHeadline
@g.commander_command('goto-next-marked')
def goToNextMarkedHeadline(self, event=None):
"""Select the next marked node."""
c = self; p = c.p
if not p: return
p.moveToThreadNext()
wrapped = False
while 1:
if p and p.isMarked():
break
elif p:
p.moveToThreadNext()
elif wrapped:
break
else:
wrapped = True
p = c.rootPosition()
if not p: g.blue('done')
c.treeSelectHelper(p) # Sets focus.
#@+node:ekr.20031218072017.2919: *3* c_oc.goToNextSibling
@g.commander_command('goto-next-sibling')
def goToNextSibling(self, event=None):
"""Select the next sibling of the selected node."""
c = self; p = c.p
c.treeSelectHelper(p and p.next())
#@+node:ekr.20031218072017.2920: *3* c_oc.goToParent
@g.commander_command('goto-parent')
def goToParent(self, event=None):
"""Select the parent of the selected node."""
c = self; p = c.p
c.treeSelectHelper(p and p.parent())
#@+node:ekr.20190211104913.1: *3* c_oc.goToPrevMarkedHeadline
@g.commander_command('goto-prev-marked')
def goToPrevMarkedHeadline(self, event=None):
"""Select the next marked node."""
c = self; p = c.p
if not p: return
p.moveToThreadBack()
wrapped = False
while 1:
if p and p.isMarked():
break
elif p:
p.moveToThreadBack()
elif wrapped:
break
else:
wrapped = True
p = c.rootPosition()
if not p: g.blue('done')
c.treeSelectHelper(p) # Sets focus.
#@+node:ekr.20031218072017.2921: *3* c_oc.goToPrevSibling
@g.commander_command('goto-prev-sibling')
def goToPrevSibling(self, event=None):
"""Select the previous sibling of the selected node."""
c = self; p = c.p
c.treeSelectHelper(p and p.back())
#@+node:ekr.20031218072017.2993: *3* c_oc.selectThreadBack
@g.commander_command('goto-prev-node')
def selectThreadBack(self, event=None):
"""Select the node preceding the selected node in outline order."""
c = self; p = c.p
if not p: return
p.moveToThreadBack()
c.treeSelectHelper(p)
#@+node:ekr.20031218072017.2994: *3* c_oc.selectThreadNext
@g.commander_command('goto-next-node')
def selectThreadNext(self, event=None):
"""Select the node following the selected node in outline order."""
c = self; p = c.p
if not p: return
p.moveToThreadNext()
c.treeSelectHelper(p)
#@+node:ekr.20031218072017.2995: *3* c_oc.selectVisBack
@g.commander_command('goto-prev-visible')
def selectVisBack(self, event=None):
"""Select the visible node preceding the presently selected node."""
# This has an up arrow for a control key.
c, p = self, self.p
if not p:
return
if c.canSelectVisBack():
p.moveToVisBack(c)
c.treeSelectHelper(p)
else:
c.endEditing() # 2011/05/28: A special case.
#@+node:ekr.20031218072017.2996: *3* c_oc.selectVisNext
@g.commander_command('goto-next-visible')
def selectVisNext(self, event=None):
"""Select the visible node following the presently selected node."""
c, p = self, self.p
if not p:
return
if c.canSelectVisNext():
p.moveToVisNext(c)
c.treeSelectHelper(p)
else:
c.endEditing() # 2011/05/28: A special case.
#@+node:ekr.20031218072017.2028: ** c_oc.hoist/dehoist/clearAllHoists
#@+node:ekr.20120308061112.9865: *3* c_oc.deHoist
@g.commander_command('de-hoist')
@g.commander_command('dehoist')
def dehoist(self, event=None):
"""Undo a previous hoist of an outline."""
c = self
if not c.p or not c.hoistStack:
return
# Don't de-hoist an @chapter node.
if c.chapterController and c.p.h.startswith('@chapter '):
if not g.unitTesting:
g.es('can not de-hoist an @chapter node.', color='blue')
return
bunch = c.hoistStack.pop()
p = bunch.p
if bunch.expanded: p.expand()
else: p.contract()
c.setCurrentPosition(p)
c.redraw()
c.frame.clearStatusLine()
c.frame.putStatusLine("De-Hoist: " + p.h)
c.undoer.afterDehoist(p, 'DeHoist')
g.doHook('hoist-changed', c=c)
#@+node:ekr.20120308061112.9866: *3* c_oc.clearAllHoists
@g.commander_command('clear-all-hoists')
def clearAllHoists(self, event=None):
"""Undo a previous hoist of an outline."""
c = self
c.hoistStack = []
c.frame.putStatusLine("Hoists cleared")
g.doHook('hoist-changed', c=c)
#@+node:ekr.20120308061112.9867: *3* c_oc.hoist
@g.commander_command('hoist')
def hoist(self, event=None):
"""Make only the selected outline visible."""
c = self
p = c.p
if not p:
return
# Don't hoist an @chapter node.
if c.chapterController and p.h.startswith('@chapter '):
if not g.unitTesting:
g.es('can not hoist an @chapter node.', color='blue')
return
# Remember the expansion state.
bunch = g.Bunch(p=p.copy(), expanded=p.isExpanded())
c.hoistStack.append(bunch)
p.expand()
c.redraw(p)
c.frame.clearStatusLine()
c.frame.putStatusLine("Hoist: " + p.h)
c.undoer.afterHoist(p, 'Hoist')
g.doHook('hoist-changed', c=c)
#@+node:ekr.20031218072017.1759: ** c_oc.Insert, Delete & Clone commands
#@+node:ekr.20031218072017.1762: *3* c_oc.clone
@g.commander_command('clone-node')
def clone(self, event=None):
"""Create a clone of the selected outline."""
c = self; u = c.undoer; p = c.p
if not p:
return None
undoData = c.undoer.beforeCloneNode(p)
c.endEditing() # Capture any changes to the headline.
clone = p.clone()
clone.setDirty()
c.setChanged()
if c.validateOutline():
u.afterCloneNode(clone, 'Clone Node', undoData)
c.redraw(clone)
c.treeWantsFocus()
return clone # For mod_labels and chapters plugins.
clone.doDelete()
c.setCurrentPosition(p)
return None
#@+node:ekr.20150630152607.1: *3* c_oc.cloneToAtSpot
@g.commander_command('clone-to-at-spot')
def cloneToAtSpot(self, event=None):
"""
Create a clone of the selected node and move it to the last @spot node
of the outline. Create the @spot node if necessary.
"""
c = self; u = c.undoer; p = c.p
if not p:
return
# 2015/12/27: fix bug 220: do not allow clone-to-at-spot on @spot node.
if p.h.startswith('@spot'):
g.es("can not clone @spot node", color='red')
return
last_spot = None
for p2 in c.all_positions():
if g.match_word(p2.h, 0, '@spot'):
last_spot = p2.copy()
if not last_spot:
last = c.lastTopLevel()
last_spot = last.insertAfter()
last_spot.h = '@spot'
undoData = c.undoer.beforeCloneNode(p)
c.endEditing() # Capture any changes to the headline.
clone = p.copy()
clone._linkAsNthChild(last_spot, n=last_spot.numberOfChildren())
clone.setDirty()
c.setChanged()
if c.validateOutline():
u.afterCloneNode(clone, 'Clone Node', undoData)
c.contractAllHeadlines()
c.redraw()
c.selectPosition(clone)
else:
clone.doDelete()
c.setCurrentPosition(p)
#@+node:ekr.20141023154408.5: *3* c_oc.cloneToLastNode
@g.commander_command('clone-node-to-last-node')
def cloneToLastNode(self, event=None):
"""
Clone the selected node and move it to the last node.
Do *not* change the selected node.
"""
c, p, u = self, self.p, self.undoer
if not p: return
prev = p.copy()
undoData = c.undoer.beforeCloneNode(p)
c.endEditing() # Capture any changes to the headline.
clone = p.clone()
last = c.rootPosition()
while last and last.hasNext():
last.moveToNext()
clone.moveAfter(last)
clone.setDirty()
c.setChanged()
u.afterCloneNode(clone, 'Clone Node To Last', undoData)
c.redraw(prev)
# return clone # For mod_labels and chapters plugins.
#@+node:ekr.20031218072017.1193: *3* c_oc.deleteOutline
@g.commander_command('delete-node')
def deleteOutline(self, event=None, op_name="Delete Node"):
"""Deletes the selected outline."""
c, u = self, self.undoer
p = c.p
if not p: return
c.endEditing() # Make sure we capture the headline for Undo.
if False: # c.config.getBool('select-next-after-delete'):
# #721: Optionally select next node after delete.
if p.hasVisNext(c): newNode = p.visNext(c)
elif p.hasParent(): newNode = p.parent()
else: newNode = p.back() # _not_ p.visBack(): we are at the top level.
else:
# Legacy: select previous node if possible.
if p.hasVisBack(c): newNode = p.visBack(c)
else: newNode = p.next() # _not_ p.visNext(): we are at the top level.
if not newNode: return
undoData = u.beforeDeleteNode(p)
p.setDirty()
p.doDelete(newNode)
c.setChanged()
u.afterDeleteNode(newNode, op_name, undoData)
c.redraw(newNode)
c.validateOutline()
#@+node:ekr.20071005173203.1: *3* c_oc.insertChild
@g.commander_command('insert-child')
def insertChild(self, event=None):
"""Insert a node after the presently selected node."""
c = self
return c.insertHeadline(event=event, op_name='Insert Child', as_child=True)
#@+node:ekr.20031218072017.1761: *3* c_oc.insertHeadline (insert-*)
@g.commander_command('insert-node')
def insertHeadline(self, event=None, op_name="Insert Node", as_child=False):
"""Insert a node after the presently selected node."""
c = self
# Fix #600.
return insertHeadlineHelper(c, event=event, as_child=as_child)
@g.commander_command('insert-as-first-child')
def insertNodeAsFirstChild(self, event=None):
"""Insert a node as the last child of the previous node."""
c = self
return insertHeadlineHelper(c, event=event, as_first_child=True)
@g.commander_command('insert-as-last-child')
def insertNodeAsLastChild(self, event=None):
"""Insert a node as the last child of the previous node."""
c = self
return insertHeadlineHelper(c, event=event, as_last_child=True)
#@+node:ekr.20171124091846.1: *4* def insertHeadlineHelper
def insertHeadlineHelper(c,
event=None,
op_name="Insert Node",
as_child=False,
as_first_child=False,
as_last_child=False,
):
"""Insert a node after the presently selected node."""
u = c.undoer
current = c.p
if not current:
return None
c.endEditing()
undoData = c.undoer.beforeInsertNode(current)
if as_first_child:
p = current.insertAsNthChild(0)
elif as_last_child:
p = current.insertAsLastChild()
elif (
as_child or
(current.hasChildren() and current.isExpanded()) or
(c.hoistStack and current == c.hoistStack[-1].p)
):
# Make sure the new node is visible when hoisting.
if c.config.getBool('insert-new-nodes-at-end'):
p = current.insertAsLastChild()
else:
p = current.insertAsNthChild(0)
else:
p = current.insertAfter()
g.doHook('create-node', c=c, p=p)
p.setDirty()
c.setChanged()
u.afterInsertNode(p, op_name, undoData)
c.redrawAndEdit(p, selectAll=True)
return p
#@+node:ekr.20130922133218.11540: *3* c_oc.insertHeadlineBefore
@g.commander_command('insert-node-before')
def insertHeadlineBefore(self, event=None):
"""Insert a node before the presently selected node."""
c, current, u = self, self.p, self.undoer
op_name = 'Insert Node Before'
if not current:
return None
# Can not insert before the base of a hoist.
if c.hoistStack and current == c.hoistStack[-1].p:
g.warning('can not insert a node before the base of a hoist')
return None
c.endEditing()
undoData = u.beforeInsertNode(current)
p = current.insertBefore()
g.doHook('create-node', c=c, p=p)
p.setDirty()
c.setChanged()
u.afterInsertNode(p, op_name, undoData)
c.redrawAndEdit(p, selectAll=True)
return p
#@+node:ekr.20031218072017.2922: ** c_oc.Mark commands
#@+node:ekr.20090905110447.6098: *3* c_oc.cloneMarked
@g.commander_command('clone-marked-nodes')
def cloneMarked(self, event=None):
"""Clone all marked nodes as children of a new node."""
c = self; u = c.undoer; p1 = c.p.copy()
# Create a new node to hold clones.
parent = p1.insertAfter()
parent.h = 'Clones of marked nodes'
cloned, n, p = [], 0, c.rootPosition()
while p:
# Careful: don't clone already-cloned nodes.
if p == parent:
p.moveToNodeAfterTree()
elif p.isMarked() and p.v not in cloned:
cloned.append(p.v)
if 0: # old code
# Calling p.clone would cause problems
p.clone().moveToLastChildOf(parent)
else: # New code.
# Create the clone directly as a child of parent.
p2 = p.copy()
n = parent.numberOfChildren()
p2._linkAsNthChild(parent, n)
p.moveToNodeAfterTree()
n += 1
else:
p.moveToThreadNext()
if n:
c.setChanged()
parent.expand()
c.selectPosition(parent)
u.afterCloneMarkedNodes(p1)
else:
parent.doDelete()
c.selectPosition(p1)
if not g.unitTesting:
g.blue(f"cloned {n} nodes")
c.redraw()
#@+node:ekr.20160502090456.1: *3* c_oc.copyMarked
@g.commander_command('copy-marked-nodes')
def copyMarked(self, event=None):
"""Copy all marked nodes as children of a new node."""
c = self; u = c.undoer; p1 = c.p.copy()
# Create a new node to hold clones.
parent = p1.insertAfter()
parent.h = 'Copies of marked nodes'
copied, n, p = [], 0, c.rootPosition()
while p:
# Careful: don't clone already-cloned nodes.
if p == parent:
p.moveToNodeAfterTree()
elif p.isMarked() and p.v not in copied:
copied.append(p.v)
p2 = p.copyWithNewVnodes(copyMarked=True)
p2._linkAsNthChild(parent, n)
p.moveToNodeAfterTree()
n += 1
else:
p.moveToThreadNext()
if n:
c.setChanged()
parent.expand()
c.selectPosition(parent)
u.afterCopyMarkedNodes(p1)
else:
parent.doDelete()
c.selectPosition(p1)
if not g.unitTesting:
g.blue(f"copied {n} nodes")
c.redraw()
#@+node:ekr.20111005081134.15540: *3* c_oc.deleteMarked
@g.commander_command('delete-marked-nodes')
def deleteMarked(self, event=None):
"""Delete all marked nodes."""
c = self; u = c.undoer; p1 = c.p.copy()
undo_data, p = [], c.rootPosition()
while p:
if p.isMarked():
undo_data.append(p.copy())
next = p.positionAfterDeletedTree()
p.doDelete()
p = next
else:
p.moveToThreadNext()
if undo_data:
u.afterDeleteMarkedNodes(undo_data, p1)
if not g.unitTesting:
g.blue(f"deleted {len(undo_data)} nodes")
c.setChanged()
# Don't even *think* about restoring the old position.
c.contractAllHeadlines()
c.selectPosition(c.rootPosition())
c.redraw()
#@+node:ekr.20111005081134.15539: *3* c_oc.moveMarked & helper
@g.commander_command('move-marked-nodes')
def moveMarked(self, event=None):
"""
Move all marked nodes as children of a new node.
This command is not undoable.
Consider using clone-marked-nodes, followed by copy/paste instead.
"""
c = self
p1 = c.p.copy()
# Check for marks.
for v in c.all_unique_nodes():
if v.isMarked():
break
else:
g.warning('no marked nodes')
return
result = g.app.gui.runAskYesNoDialog(c,
'Move Marked Nodes?',
message='move-marked-nodes is not undoable\nProceed?',
)
if result == 'no':
return
# Create a new *root* node to hold the moved nodes.
# This node's position remains stable while other nodes move.
parent = createMoveMarkedNode(c)
assert not parent.isMarked()
moved = []
p = c.rootPosition()
while p:
assert parent == c.rootPosition()
# Careful: don't move already-moved nodes.
if p.isMarked() and not parent.isAncestorOf(p):
moved.append(p.copy())
next = p.positionAfterDeletedTree()
p.moveToLastChildOf(parent)
# This does not change parent's position.
p = next
else:
p.moveToThreadNext()
if moved:
# Find a position p2 outside of parent's tree with p2.v == p1.v.
# Such a position may not exist.
p2 = c.rootPosition()
while p2:
if p2 == parent:
p2.moveToNodeAfterTree()
elif p2.v == p1.v:
break
else:
p2.moveToThreadNext()
else:
# Not found. Move to last top-level.
p2 = c.lastTopLevel()
parent.moveAfter(p2)
# u.afterMoveMarkedNodes(moved, p1)
if not g.unitTesting:
g.blue(f"moved {len(moved)} nodes")
c.setChanged()
# c.contractAllHeadlines()
# Causes problems when in a chapter.
c.selectPosition(parent)
c.redraw()
#@+node:ekr.20111005081134.15543: *4* def createMoveMarkedNode
def createMoveMarkedNode(c):
oldRoot = c.rootPosition()
p = oldRoot.insertAfter()
p.moveToRoot()
c.setHeadString(p, 'Moved marked nodes')
return p
#@+node:ekr.20031218072017.2923: *3* c_oc.markChangedHeadlines
@g.commander_command('mark-changed-items')
def markChangedHeadlines(self, event=None):
"""Mark all nodes that have been changed."""
c = self; u = c.undoer; undoType = 'Mark Changed'
current = c.p
c.endEditing()
u.beforeChangeGroup(current, undoType)
for p in c.all_unique_positions():
if p.isDirty() and not p.isMarked():
bunch = u.beforeMark(p, undoType)
# c.setMarked calls a hook.
c.setMarked(p)
p.setDirty()
c.setChanged()
u.afterMark(p, undoType, bunch)
u.afterChangeGroup(current, undoType)
if not g.unitTesting:
g.blue('done')
c.redraw_after_icons_changed()
#@+node:ekr.20031218072017.2924: *3* c_oc.markChangedRoots
def markChangedRoots(self, event=None):
"""Mark all changed @root nodes."""
c = self; u = c.undoer; undoType = 'Mark Changed'
current = c.p
c.endEditing()
u.beforeChangeGroup(current, undoType)
for p in c.all_unique_positions():
if p.isDirty() and not p.isMarked():
s = p.b
flag, i = g.is_special(s, "@root")
if flag:
bunch = u.beforeMark(p, undoType)
c.setMarked(p) # Calls a hook.
p.setDirty()
c.setChanged()
u.afterMark(p, undoType, bunch)
u.afterChangeGroup(current, undoType)
if not g.unitTesting:
g.blue('done')
c.redraw_after_icons_changed()
#@+node:ekr.20031218072017.2928: *3* c_oc.markHeadline
@g.commander_command('mark')
def markHeadline(self, event=None):
"""Toggle the mark of the selected node."""
c = self; u = c.undoer; p = c.p
if not p: return
c.endEditing()
undoType = 'Unmark' if p.isMarked() else 'Mark'
bunch = u.beforeMark(p, undoType)
# c.set/clearMarked call a hook.
if p.isMarked():
c.clearMarked(p)
else:
c.setMarked(p)
p.setDirty()
c.setChanged()
u.afterMark(p, undoType, bunch)
c.redraw_after_icons_changed()
#@+node:ekr.20031218072017.2929: *3* c_oc.markSubheads
@g.commander_command('mark-subheads')
def markSubheads(self, event=None):
"""Mark all children of the selected node as changed."""
c = self; u = c.undoer; undoType = 'Mark Subheads'
current = c.p
if not current: return
c.endEditing()
u.beforeChangeGroup(current, undoType)
for p in current.children():
if not p.isMarked():
bunch = u.beforeMark(p, undoType)
c.setMarked(p) # Calls a hook.
p.setDirty()
c.setChanged()
u.afterMark(p, undoType, bunch)
u.afterChangeGroup(current, undoType)
c.redraw_after_icons_changed()
#@+node:ekr.20031218072017.2930: *3* c_oc.unmarkAll
@g.commander_command('unmark-all')
def unmarkAll(self, event=None):
"""Unmark all nodes in the entire outline."""
c = self; u = c.undoer; undoType = 'Unmark All'
current = c.p
if not current: return
c.endEditing()
u.beforeChangeGroup(current, undoType)
changed = False
p = None # To keep pylint happy.
for p in c.all_unique_positions():
if p.isMarked():
bunch = u.beforeMark(p, undoType)
# c.clearMarked(p) # Very slow: calls a hook.
p.v.clearMarked()
p.setDirty()
u.afterMark(p, undoType, bunch)
changed = True
if changed:
g.doHook("clear-all-marks", c=c, p=p)
c.setChanged()
u.afterChangeGroup(current, undoType)
c.redraw_after_icons_changed()
#@+node:ekr.20031218072017.1766: ** c_oc.Move commands
#@+node:ekr.20031218072017.1767: *3* c_oc.demote
@g.commander_command('demote')
def demote(self, event=None):
"""Make all following siblings children of the selected node."""
c = self; u = c.undoer
p = c.p
if not p or not p.hasNext():
c.treeFocusHelper()
return
# Make sure all the moves will be valid.
next = p.next()
while next:
if not c.checkMoveWithParentWithWarning(next, p, True):
c.treeFocusHelper()
return
next.moveToNext()
c.endEditing()
parent_v = p._parentVnode()
n = p.childIndex()
followingSibs = parent_v.children[n + 1 :]
# Remove the moved nodes from the parent's children.
parent_v.children = parent_v.children[: n + 1]
# Add the moved nodes to p's children
p.v.children.extend(followingSibs)
# Adjust the parent links in the moved nodes.
# There is no need to adjust descendant links.
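    # (Each moved vnode's `parents` list holds only its direct parents, so
    # re-linking just the top-level moved vnodes here is sufficient.)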
for child in followingSibs:
child.parents.remove(parent_v)
child.parents.append(p.v)
p.expand()
p.setDirty()
c.setChanged()
u.afterDemote(p, followingSibs)
c.redraw(p)
c.updateSyntaxColorer(p) # Moving can change syntax coloring.
#@+node:ekr.20031218072017.1768: *3* c_oc.moveOutlineDown
@g.commander_command('move-outline-down')
def moveOutlineDown(self, event=None):
"""Move the selected node down."""
# Moving down is more tricky than moving up because we can't
# move p to be a child of itself.
#
# An important optimization:
# we don't have to call checkMoveWithParentWithWarning() if the parent of
# the moved node remains the same.
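    # Example: if p's visible successor has expanded children, p becomes that
    # node's first child (rather than its following sibling), which preserves
    # the visible order on screen.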
c = self; u = c.undoer; p = c.p
if not p:
return
if not c.canMoveOutlineDown():
if c.hoistStack: cantMoveMessage(c)
c.treeFocusHelper()
return
parent = p.parent()
next = p.visNext(c)
while next and p.isAncestorOf(next):
next = next.visNext(c)
if not next:
if c.hoistStack: cantMoveMessage(c)
c.treeFocusHelper()
return
c.endEditing()
undoData = u.beforeMoveNode(p)
#@+<< Move p down & set moved if successful >>
#@+node:ekr.20031218072017.1769: *4* << Move p down & set moved if successful >>
if next.hasChildren() and next.isExpanded():
# Attempt to move p to the first child of next.
moved = c.checkMoveWithParentWithWarning(p, next, True)
if moved:
p.setDirty()
p.moveToNthChildOf(next, 0)
else:
# Attempt to move p after next.
moved = c.checkMoveWithParentWithWarning(p, next.parent(), True)
if moved:
p.setDirty()
p.moveAfter(next)
# Patch by nh2: 0004-Add-bool-collapse_nodes_after_move-option.patch
if (
c.collapse_nodes_after_move
and moved and c.sparse_move
and parent and not parent.isAncestorOf(p)
):
# New in Leo 4.4.2: contract the old parent if it is no longer the parent of p.
parent.contract()
#@-<< Move p down & set moved if successful >>
if moved:
p.setDirty()
c.setChanged()
u.afterMoveNode(p, 'Move Down', undoData)
c.redraw(p)
c.updateSyntaxColorer(p) # Moving can change syntax coloring.
#@+node:ekr.20031218072017.1770: *3* c_oc.moveOutlineLeft
@g.commander_command('move-outline-left')
def moveOutlineLeft(self, event=None):
"""Move the selected node left if possible."""
c = self; u = c.undoer; p = c.p
if not p: return
if not c.canMoveOutlineLeft():
if c.hoistStack: cantMoveMessage(c)
c.treeFocusHelper()
return
if not p.hasParent():
c.treeFocusHelper()
return
parent = p.parent()
c.endEditing()
undoData = u.beforeMoveNode(p)
p.setDirty()
p.moveAfter(parent)
p.setDirty()
c.setChanged()
u.afterMoveNode(p, 'Move Left', undoData)
# Patch by nh2: 0004-Add-bool-collapse_nodes_after_move-option.patch
if c.collapse_nodes_after_move and c.sparse_move: # New in Leo 4.4.2
parent.contract()
c.redraw(p)
c.recolor() # Moving can change syntax coloring.
#@+node:ekr.20031218072017.1771: *3* c_oc.moveOutlineRight
@g.commander_command('move-outline-right')
def moveOutlineRight(self, event=None):
"""Move the selected node right if possible."""
c = self; u = c.undoer; p = c.p
if not p: return
if not c.canMoveOutlineRight(): # 11/4/03: Support for hoist.
if c.hoistStack: cantMoveMessage(c)
c.treeFocusHelper()
return
back = p.back()
if not back:
c.treeFocusHelper()
return
if not c.checkMoveWithParentWithWarning(p, back, True):
c.treeFocusHelper()
return
c.endEditing()
undoData = u.beforeMoveNode(p)
p.setDirty()
n = back.numberOfChildren()
p.moveToNthChildOf(back, n)
p.setDirty()
u.afterMoveNode(p, 'Move Right', undoData)
c.redraw(p)
c.recolor()
#@+node:ekr.20031218072017.1772: *3* c_oc.moveOutlineUp
@g.commander_command('move-outline-up')
def moveOutlineUp(self, event=None):
"""Move the selected node up if possible."""
c = self; u = c.undoer; p = c.p
if not p: return
if not c.canMoveOutlineUp(): # Support for hoist.
if c.hoistStack: cantMoveMessage(c)
c.treeFocusHelper()
return
back = p.visBack(c)
if not back:
return
back2 = back.visBack(c)
c.endEditing()
undoData = u.beforeMoveNode(p)
moved = False
#@+<< Move p up >>
#@+node:ekr.20031218072017.1773: *4* << Move p up >>
parent = p.parent()
if not back2:
if c.hoistStack: # hoist or chapter.
limit, limitIsVisible = c.visLimit()
assert limit
if limitIsVisible:
# canMoveOutlineUp should have caught this.
g.trace('can not happen. In hoist')
else:
moved = True
p.setDirty()
p.moveToFirstChildOf(limit)
else:
# p will be the new root node
p.setDirty()
p.moveToRoot()
moved = True
elif back2.hasChildren() and back2.isExpanded():
if c.checkMoveWithParentWithWarning(p, back2, True):
moved = True
p.setDirty()
p.moveToNthChildOf(back2, 0)
else:
if c.checkMoveWithParentWithWarning(p, back2.parent(), True):
moved = True
p.setDirty()
p.moveAfter(back2)
# Patch by nh2: 0004-Add-bool-collapse_nodes_after_move-option.patch
if (
c.collapse_nodes_after_move
and moved and c.sparse_move
and parent and not parent.isAncestorOf(p)
):
# New in Leo 4.4.2: contract the old parent if it is no longer the parent of p.
parent.contract()
#@-<< Move p up >>
if moved:
p.setDirty()
c.setChanged()
        u.afterMoveNode(p, 'Move Up', undoData)
c.redraw(p)
c.updateSyntaxColorer(p) # Moving can change syntax coloring.
#@+node:ekr.20031218072017.1774: *3* c_oc.promote
@g.commander_command('promote')
def promote(self, event=None, undoFlag=True, redrawFlag=True):
"""Make all children of the selected nodes siblings of the selected node."""
c = self; u = c.undoer; p = c.p
if not p or not p.hasChildren():
c.treeFocusHelper()
return
c.endEditing()
children = p.v.children # First, for undo.
p.promote()
c.setChanged()
if undoFlag:
p.setDirty()
u.afterPromote(p, children)
if redrawFlag:
c.redraw(p)
c.updateSyntaxColorer(p) # Moving can change syntax coloring.
#@+node:ekr.20071213185710: *3* c_oc.toggleSparseMove
@g.commander_command('toggle-sparse-move')
def toggleSparseMove(self, event=None):
"""Toggle whether moves collapse the outline."""
c = self
c.sparse_move = not c.sparse_move
if not g.unitTesting:
g.blue(f"sparse-move: {c.sparse_move}")
#@+node:ekr.20080425060424.1: ** c_oc.Sort commands
#@+node:ekr.20050415134809: *3* c_oc.sortChildren
@g.commander_command('sort-children')
def sortChildren(self, event=None, key=None, reverse=False):
"""Sort the children of a node."""
# This method no longer supports the 'cmp' keyword arg.
c = self; p = c.p
if p and p.hasChildren():
c.sortSiblings(p=p.firstChild(), sortChildren=True, key=key, reverse=reverse)
#@+node:ekr.20050415134809.1: *3* c_oc.sortSiblings
@g.commander_command('sort-siblings')
def sortSiblings(self, event=None,
# cmp keyword is no longer supported.
key=None,
p=None,
sortChildren=False,
reverse=False
):
"""Sort the siblings of a node."""
c = self; u = c.undoer
if not p: p = c.p
if not p: return
c.endEditing()
undoType = 'Sort Children' if sortChildren else 'Sort Siblings'
parent_v = p._parentVnode()
oldChildren = parent_v.children[:]
newChildren = parent_v.children[:]
    if key is None:
        def lowerKey(v):
            # Default: sort case-insensitively on each vnode's headline.
            return v.h.lower()
        key = lowerKey
newChildren.sort(key=key, reverse=reverse)
if oldChildren == newChildren:
return
# 2010/01/20. Fix bug 510148.
c.setChanged()
bunch = u.beforeSort(p, undoType, oldChildren, newChildren, sortChildren)
parent_v.children = newChildren
u.afterSort(p, bunch)
# Sorting destroys position p, and possibly the root position.
p = c.setPositionAfterSort(sortChildren)
if p.parent():
p.parent().setDirty()
c.redraw(p)
#@+node:ekr.20070420092425: ** def cantMoveMessage
def cantMoveMessage(c):
h = c.rootPosition().h
kind = 'chapter' if h.startswith('@chapter') else 'hoist'
g.warning("can't move node out of", kind)
#@+node:ekr.20180201040936.1: ** count-children
@g.command('count-children')
def count_children(event=None):
c = event and event.get('c')
if c:
g.es_print(f"{c.p.numberOfChildren()} children")
#@-others
#@-leo
| 34.114873
| 91
| 0.629816
|
98e9c33334ef004ee1dc5561ff2459a9fce4fa31
| 1,884
|
py
|
Python
|
tools/download.py
|
dkratzert/FinalCif
|
07ca23dbb4e7439b108a906521a118cdb876d97e
|
[
"Beerware"
] | 13
|
2020-01-14T16:23:48.000Z
|
2022-02-16T18:02:08.000Z
|
tools/download.py
|
dkratzert/FinalCif
|
07ca23dbb4e7439b108a906521a118cdb876d97e
|
[
"Beerware"
] | 24
|
2021-04-21T05:30:42.000Z
|
2022-03-31T20:07:29.000Z
|
tools/download.py
|
dkratzert/FinalCif
|
07ca23dbb4e7439b108a906521a118cdb876d97e
|
[
"Beerware"
] | 1
|
2021-08-09T16:48:33.000Z
|
2021-08-09T16:48:33.000Z
|
import sys
import requests
from PyQt5.QtCore import QThread, pyqtSignal
from tools.version import VERSION
class MyDownloader(QThread):
progress = pyqtSignal(str)
failed = pyqtSignal(int)
finished = pyqtSignal(bytes)
def __init__(self, parent, url: str):
super().__init__(parent)
self.url = url
def run(self):
self.download(self.url)
def print_status(self, status: str) -> None:
print(status)
def failed_to_download(self, status_code: int):
print('Failed to download: {}'.format(self.url))
print('HTTP status was {}'.format(status_code))
def download(self, full_url: str, user_agent=None) -> bytes:
user_agent = user_agent if user_agent else 'FinalCif v{}'.format(VERSION)
headers = {
'User-Agent': user_agent,
}
        # self.progress.emit('Starting download: {}'.format(full_url))
        response = requests.get(full_url, stream=True, headers=headers)
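        # Note: stream=True defers downloading the body, but accessing
        # response.content below still reads the whole payload into memory.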
if response.status_code != 200:
# noinspection PyUnresolvedReferences
self.failed.emit(response.status_code)
# noinspection PyUnresolvedReferences
self.finished.emit(b'')
return b''
# noinspection PyUnresolvedReferences
self.finished.emit(response.content)
return response.content
if __name__ == "__main__":
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QApplication
app = QApplication(sys.argv)
w = QWidget()
w.show()
def foo(bar: bytes):
print(bar.decode('ascii'))
upd = MyDownloader(None, "https://dkratzert.de/files/finalcif/version.txt")
upd.finished.connect(foo)
upd.failed.connect(upd.failed_to_download)
upd.progress.connect(upd.print_status)
upd.start()
sys.exit(app.exec_())
| 28.545455
| 81
| 0.654459
|
3680312fbb6ced5fd0870a2c981abc389f11dc15
| 257
|
py
|
Python
|
BOJ9461.py
|
INYEONGKIM/BOJ
|
5e83d77a92d18b0d20d26645c7cfe4ba3e2d25bc
|
[
"MIT"
] | 2
|
2019-03-05T15:42:46.000Z
|
2019-07-24T15:52:36.000Z
|
BOJ9461.py
|
INYEONGKIM/BOJ
|
5e83d77a92d18b0d20d26645c7cfe4ba3e2d25bc
|
[
"MIT"
] | null | null | null |
BOJ9461.py
|
INYEONGKIM/BOJ
|
5e83d77a92d18b0d20d26645c7cfe4ba3e2d25bc
|
[
"MIT"
] | null | null | null |
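# BOJ 9461 (Padovan sequence): P(1)=P(2)=P(3)=1, with P(n)=P(n-1)+P(n-5) thereafter.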
res=""
p=[1]*100
p[3]=2;p[4]=2;p[5]=3;p[6]=4;p[7]=5;p[8]=7;p[9]=9
for i in range(10, 100):
p[i] = p[i-1] + p[i-5]
for _ in range(int(__import__('sys').stdin.readline())):
res += f'{p[int(__import__("sys").stdin.readline())-1]}\n'
print(res.strip())
| 28.555556
| 62
| 0.552529
|
ed97cbd51cef903155f6a2f10eda555b0a620b5e
| 176
|
py
|
Python
|
tests/test_cipher_xs2428_mds.py
|
QMSS-G5072-2021/cipher_Shan_Kimberly
|
ec258e785048f43b95fdc2297ae269c4217ee462
|
[
"MIT"
] | null | null | null |
tests/test_cipher_xs2428_mds.py
|
QMSS-G5072-2021/cipher_Shan_Kimberly
|
ec258e785048f43b95fdc2297ae269c4217ee462
|
[
"MIT"
] | null | null | null |
tests/test_cipher_xs2428_mds.py
|
QMSS-G5072-2021/cipher_Shan_Kimberly
|
ec258e785048f43b95fdc2297ae269c4217ee462
|
[
"MIT"
] | null | null | null |
from cipher_xs2428_mds import cipher_xs2428_mds
def test_cipher():
expected = 'Bqqmf'
actual = cipher_xs2428_mds.cipher('Apple', 1, True)
assert actual == expected
| 29.333333
| 55
| 0.738636
|
cff78eb03f96b757913b6202c1a90195680a6fc4
| 868
|
py
|
Python
|
ML1-Supervised-Learning/ML1.1-Classification/cl5_overfitting_underfitting.py
|
ridhanf/machine-learning-datacamp
|
2c2c0f5dfcf9df315488cdef8eabd1d4f9fbd0e8
|
[
"MIT"
] | null | null | null |
ML1-Supervised-Learning/ML1.1-Classification/cl5_overfitting_underfitting.py
|
ridhanf/machine-learning-datacamp
|
2c2c0f5dfcf9df315488cdef8eabd1d4f9fbd0e8
|
[
"MIT"
] | null | null | null |
ML1-Supervised-Learning/ML1.1-Classification/cl5_overfitting_underfitting.py
|
ridhanf/machine-learning-datacamp
|
2c2c0f5dfcf9df315488cdef8eabd1d4f9fbd0e8
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
# Note: X_train, X_test, y_train, y_test are assumed to be provided by the
# exercise environment (e.g. from sklearn.model_selection.train_test_split).
# Setup arrays to store train and test accuracies
neighbors = np.arange(1, 9)
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))
# Loop over different values of k
for i, k in enumerate(neighbors):
# Setup a k-NN Classifier with k neighbors: knn
knn = KNeighborsClassifier(n_neighbors=k)
# Fit the classifier to the training data
knn.fit(X_train, y_train)
#Compute accuracy on the training set
train_accuracy[i] = knn.score(X_train, y_train)
#Compute accuracy on the testing set
test_accuracy[i] = knn.score(X_test, y_test)
# Generate plot
plt.title('k-NN: Varying Number of Neighbors')
plt.plot(neighbors, test_accuracy, label = 'Testing Accuracy')
plt.plot(neighbors, train_accuracy, label = 'Training Accuracy')
plt.legend()
plt.xlabel('Number of Neighbors')
plt.ylabel('Accuracy')
plt.show()
| 31
| 64
| 0.733871
|
def351d8b98d300e8b5a6f9280a5e56c9e9adbdc
| 12,412
|
py
|
Python
|
haystack/file_converter/pdf.py
|
SjSnowball/haystack
|
bb066c0a2c10253cf2bf7eb8cc829f1a0edde84d
|
[
"Apache-2.0"
] | 4,544
|
2019-11-14T11:57:49.000Z
|
2022-03-31T17:41:18.000Z
|
haystack/file_converter/pdf.py
|
SjSnowball/haystack
|
bb066c0a2c10253cf2bf7eb8cc829f1a0edde84d
|
[
"Apache-2.0"
] | 1,679
|
2020-01-14T15:55:58.000Z
|
2022-03-31T20:55:25.000Z
|
haystack/file_converter/pdf.py
|
SjSnowball/haystack
|
bb066c0a2c10253cf2bf7eb8cc829f1a0edde84d
|
[
"Apache-2.0"
] | 820
|
2019-11-27T13:01:42.000Z
|
2022-03-31T12:54:34.000Z
|
import logging
import subprocess
from pathlib import Path
import tempfile
import os
from typing import List, Optional, Dict, Any
from pdf2image import convert_from_path, convert_from_bytes
from haystack.file_converter.base import BaseConverter
from haystack.file_converter.image import ImageToTextConverter
logger = logging.getLogger(__name__)
class PDFToTextConverter(BaseConverter):
def __init__(
self,
remove_numeric_tables: bool = False,
valid_languages: Optional[List[str]] = None,
):
"""
        :param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables.
                                      The tabular structures in documents might be noise for the reader model if it
                                      does not have table parsing capability for finding answers. However, tables
                                      may also have long strings that could be possible candidates for searching answers.
                                      The rows containing strings are thus retained in this option.
        :param valid_languages: validate languages from a list of languages specified in the ISO 639-1
                                (https://en.wikipedia.org/wiki/ISO_639-1) format.
                                This option can be used to add a test for encoding errors. If the extracted text is
                                not one of the valid languages, then it is likely an encoding error resulting
                                in garbled text.
"""
# save init parameters to enable export of component config as YAML
self.set_config(
remove_numeric_tables=remove_numeric_tables, valid_languages=valid_languages
)
verify_installation = subprocess.run(["pdftotext -v"], shell=True)
if verify_installation.returncode == 127:
raise Exception(
"""pdftotext is not installed. It is part of xpdf or poppler-utils software suite.
Installation on Linux:
wget --no-check-certificate https://dl.xpdfreader.com/xpdf-tools-linux-4.03.tar.gz &&
tar -xvf xpdf-tools-linux-4.03.tar.gz && sudo cp xpdf-tools-linux-4.03/bin64/pdftotext /usr/local/bin
Installation on MacOS:
brew install xpdf
You can find more details here: https://www.xpdfreader.com
"""
)
super().__init__(
remove_numeric_tables=remove_numeric_tables, valid_languages=valid_languages
)
def convert(
self,
file_path: Path,
meta: Optional[Dict[str, str]] = None,
remove_numeric_tables: Optional[bool] = None,
valid_languages: Optional[List[str]] = None,
encoding: Optional[str] = "Latin1",
) -> Dict[str, Any]:
"""
Extract text from a .pdf file using the pdftotext library (https://www.xpdfreader.com/pdftotext-man.html)
:param file_path: Path to the .pdf file you want to convert
:param meta: Optional dictionary with metadata that shall be attached to all resulting documents.
Can be any custom keys and values.
        :param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables.
                                      The tabular structures in documents might be noise for the reader model if it
                                      does not have table parsing capability for finding answers. However, tables
                                      may also have long strings that could be possible candidates for searching answers.
                                      The rows containing strings are thus retained in this option.
        :param valid_languages: validate languages from a list of languages specified in the ISO 639-1
                                (https://en.wikipedia.org/wiki/ISO_639-1) format.
                                This option can be used to add a test for encoding errors. If the extracted text is
                                not one of the valid languages, then it is likely an encoding error resulting
                                in garbled text.
:param encoding: Encoding that will be passed as -enc parameter to pdftotext. "Latin 1" is the default encoding
of pdftotext. While this works well on many PDFs, it might be needed to switch to "UTF-8" or
others if your doc contains special characters (e.g. German Umlauts, Cyrillic characters ...).
Note: With "UTF-8" we experienced cases, where a simple "fi" gets wrongly parsed as
"xef\xac\x81c" (see test cases). That's why we keep "Latin 1" as default here.
(See list of available encodings by running `pdftotext -listenc` in the terminal)
"""
pages = self._read_pdf(file_path, layout=False, encoding=encoding)
if remove_numeric_tables is None:
remove_numeric_tables = self.remove_numeric_tables
if valid_languages is None:
valid_languages = self.valid_languages
cleaned_pages = []
for page in pages:
# pdftotext tool provides an option to retain the original physical layout of a PDF page. This behaviour
# can be toggled by using the layout param.
            # layout=True
            #     + table structures are retained better
            #     - multi-column pages (e.g., research papers) get extracted with text from multiple columns on the same line
            # layout=False
            #     + keeps strings in content stream order, hence multi-column layouts work well
            #     - cells of tables get split across lines
#
# Here, as a "safe" default, layout is turned off.
lines = page.splitlines()
cleaned_lines = []
for line in lines:
words = line.split()
digits = [word for word in words if any(i.isdigit() for i in word)]
# remove lines having > 40% of words as digits AND not ending with a period(.)
if remove_numeric_tables:
if (
words
and len(digits) / len(words) > 0.4
and not line.strip().endswith(".")
):
logger.debug(f"Removing line '{line}' from {file_path}")
continue
cleaned_lines.append(line)
page = "\n".join(cleaned_lines)
cleaned_pages.append(page)
if valid_languages:
document_text = "".join(cleaned_pages)
if not self.validate_language(document_text):
logger.warning(
f"The language for {file_path} is not one of {self.valid_languages}. The file may not have "
f"been decoded in the correct text format."
)
text = "\f".join(cleaned_pages)
document = {"content": text, "content_type": "text", "meta": meta}
return document
def _read_pdf(
self, file_path: Path, layout: bool, encoding: Optional[str] = "Latin1"
) -> List[str]:
"""
Extract pages from the pdf file at file_path.
:param file_path: path of the pdf file
:param layout: whether to retain the original physical layout for a page. If disabled, PDF pages are read in
the content stream order.
"""
if layout:
command = ["pdftotext", "-enc", encoding, "-layout", str(file_path), "-"]
else:
command = ["pdftotext", "-enc", encoding, str(file_path), "-"]
output = subprocess.run(command, stdout=subprocess.PIPE, shell=False) # type: ignore
document = output.stdout.decode(errors="ignore")
pages = document.split("\f")
pages = pages[:-1] # the last page in the split is always empty.
return pages
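# Example usage (sketch; assumes pdftotext is installed and "sample.pdf" exists locally):
#   converter = PDFToTextConverter(remove_numeric_tables=True, valid_languages=["en"])
#   doc = converter.convert(file_path=Path("sample.pdf"), meta={"name": "sample.pdf"})
#   print(doc["content"][:300])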
class PDFToTextOCRConverter(BaseConverter):
def __init__(
self,
remove_numeric_tables: bool = False,
valid_languages: Optional[List[str]] = ["eng"],
):
"""
Extract text from image file using the pytesseract library (https://github.com/madmaze/pytesseract)
        :param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables.
                                      The tabular structures in documents might be noise for the reader model if it
                                      does not have table parsing capability for finding answers. However, tables
                                      may also have long strings that could be possible candidates for searching answers.
                                      The rows containing strings are thus retained in this option.
        :param valid_languages: validate languages from a list of languages supported by tesseract
                                (https://tesseract-ocr.github.io/tessdoc/Data-Files-in-different-versions.html).
                                This option can be used to add a test for encoding errors. If the extracted text is
                                not one of the valid languages, then it is likely an encoding error resulting
                                in garbled text.
"""
# init image to text instance
self.image_2_text = ImageToTextConverter(remove_numeric_tables, valid_languages)
# save init parameters to enable export of component config as YAML
self.set_config(
remove_numeric_tables=remove_numeric_tables, valid_languages=valid_languages
)
super().__init__(
remove_numeric_tables=remove_numeric_tables, valid_languages=valid_languages
)
def convert(
self,
file_path: Path,
meta: Optional[Dict[str, str]] = None,
remove_numeric_tables: Optional[bool] = None,
valid_languages: Optional[List[str]] = None,
encoding: Optional[str] = "utf-8",
) -> Dict[str, Any]:
"""
        Convert a file to a dictionary containing the text and any associated meta data.
        File converters may extract file meta like name or size. In addition, user-supplied
        meta data like author, url, or external IDs can be passed as a dictionary.
:param file_path: path of the file to convert
:param meta: dictionary of meta data key-value pairs to append in the returned document.
        :param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables.
                                      The tabular structures in documents might be noise for the reader model if it
                                      does not have table parsing capability for finding answers. However, tables
                                      may also have long strings that could be possible candidates for searching answers.
                                      The rows containing strings are thus retained in this option.
        :param valid_languages: validate languages from a list of languages specified in the ISO 639-1
                                (https://en.wikipedia.org/wiki/ISO_639-1) format.
                                This option can be used to add a test for encoding errors. If the extracted text is
                                not one of the valid languages, then it is likely an encoding error resulting
                                in garbled text.
:param encoding: Select the file encoding (default is `utf-8`)
"""
pages = []
try:
images = convert_from_path(file_path)
for image in images:
temp_img = tempfile.NamedTemporaryFile(
dir=os.path.dirname(os.path.realpath(__file__)), suffix=".jpeg"
)
image.save(temp_img.name)
pages.append(self.image_2_text.convert(temp_img.name)["content"])
except Exception as exception:
logger.error(f"File {file_path} has an error \n {exception}")
raw_text = "\f".join(pages)
document = {"content": raw_text, "meta": meta}
return document
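# Example usage (sketch; assumes poppler (for pdf2image) and tesseract are installed):
#   ocr_converter = PDFToTextOCRConverter(valid_languages=["eng"])
#   doc = ocr_converter.convert(file_path=Path("scanned.pdf"))
#   print(doc["content"][:300])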
| 52.151261
| 120
| 0.592008
|
305abee0bcd92aa93cc3c354d2952b0d6c9d3490
| 4,647
|
py
|
Python
|
topi/python/topi/bifrost/depthwise_conv2d.py
|
robo-corg/incubator-tvm
|
4ddfdb4b15d05a5bf85a984837967d004efee5dd
|
[
"Apache-2.0"
] | 3
|
2021-02-23T22:06:01.000Z
|
2021-09-30T09:59:17.000Z
|
topi/python/topi/bifrost/depthwise_conv2d.py
|
robo-corg/incubator-tvm
|
4ddfdb4b15d05a5bf85a984837967d004efee5dd
|
[
"Apache-2.0"
] | 4
|
2021-03-30T11:59:59.000Z
|
2022-03-12T00:40:23.000Z
|
topi/python/topi/bifrost/depthwise_conv2d.py
|
robo-corg/incubator-tvm
|
4ddfdb4b15d05a5bf85a984837967d004efee5dd
|
[
"Apache-2.0"
] | 3
|
2021-07-20T07:40:15.000Z
|
2021-08-03T08:39:17.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument
"""depthwise_conv2d schedule on ARM Mali GPU"""
from __future__ import absolute_import as _abs
import tvm
from .. import generic
from .. import util
from .. import tag
@generic.schedule_depthwise_conv2d_nchw.register(["bifrost"])
def schedule_depthwise_conv2d_nchw(outs):
"""Schedule for depthwise_conv2d nchw forward.
Parameters
----------
outs: Array of Tensor
The computation graph description of depthwise_conv2d
in the format of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for depthwise_conv2d nchw.
"""
outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
s = tvm.create_schedule([x.op for x in outs])
def _schedule(pad_data, kernel, conv):
raw_data = s[pad_data].op.input_tensors[0]
if conv.op not in s.outputs: # has bias or relu
output = outs[0]
else: # no bias or relu
output = conv
def tile_and_bind3d(tensor, z, y, x, z_factor=2, y_factor=None, x_factor=None):
""" tile and bind 3d """
y_factor = y_factor or z_factor
x_factor = x_factor or y_factor
zo, zi = s[tensor].split(z, z_factor)
yo, yi = s[tensor].split(y, y_factor)
xo, xi = s[tensor].split(x, x_factor)
s[tensor].bind(zo, tvm.thread_axis("blockIdx.z"))
s[tensor].bind(zi, tvm.thread_axis("threadIdx.z"))
s[tensor].bind(yo, tvm.thread_axis("blockIdx.y"))
s[tensor].bind(yi, tvm.thread_axis("threadIdx.y"))
s[tensor].bind(xo, tvm.thread_axis("blockIdx.x"))
s[tensor].bind(xi, tvm.thread_axis("threadIdx.x"))
return zo, zi, yo, yi, xo, xi
# set tunable parameters
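        # VH/VW are the per-thread output tile height/width (VW also becomes the
        # vectorization width below); num_thread is bound along the channel axis.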
VH = 1
VW = 1
num_thread = 4
while util.get_const_int(conv.shape[3]) % (VW * 2) == 0 and VW * 2 <= 4:
VW = VW * 2
while util.get_const_int(conv.shape[2]) % (VH * 2) == 0 and VH * 2 <= 2:
VH = VH * 2
if raw_data.dtype == 'float16':
if util.get_const_int(conv.shape[3]) % (VW * 2) == 0:
VW *= 2
num_thread *= 2
else:
num_thread *= 2
# schedule padding
_, c, y, x = s[pad_data].op.axis
tile_and_bind3d(pad_data, c, y, x, num_thread, 1, 1)
# schedule conv
di, dj = s[conv].op.reduce_axis
s[conv].unroll(di)
s[conv].unroll(dj)
_, c, y, x = s[output].op.axis
y, x, yi, xi = s[output].tile(y, x, VH, VW)
s[output].unroll(yi)
s[output].vectorize(xi)
_, _, _, _, _, ji = tile_and_bind3d(output, c, y, x, num_thread, 1, 1)
if conv.op not in s.outputs:
_, c, y, x = s[conv].op.axis
y, x, yi, xi = s[conv].tile(y, x, VH, VW)
s[conv].unroll(yi)
s[conv].vectorize(xi)
s[conv].compute_at(s[output], ji)
def traverse(op):
"""Internal traverse function"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(op.tag):
if op not in s.outputs:
s[op].compute_inline()
for tensor in op.input_tensors:
if tensor.op.input_tensors:
traverse(tensor.op)
# schedule depthwise_conv2d
if op.tag == 'depthwise_conv2d_nchw':
pad_data = op.input_tensors[0]
kernel = op.input_tensors[1]
if isinstance(kernel.op, tvm.tensor.ComputeOp) and 'dilate' in kernel.op.tag:
s[kernel].compute_inline()
conv = op.output(0)
_schedule(pad_data, kernel, conv)
traverse(outs[0].op)
return s
| 36.590551
| 89
| 0.594362
|
ea631f74beead1c09594c9d2aacf772853e8f8d6
| 2,566
|
py
|
Python
|
customuser/forms.py
|
vitaliyharchenko/Stepee
|
1daadc19e7538562a91fec01668e1d5791869af8
|
[
"MIT"
] | null | null | null |
customuser/forms.py
|
vitaliyharchenko/Stepee
|
1daadc19e7538562a91fec01668e1d5791869af8
|
[
"MIT"
] | null | null | null |
customuser/forms.py
|
vitaliyharchenko/Stepee
|
1daadc19e7538562a91fec01668e1d5791869af8
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from models import User
class UserCreationForm(forms.ModelForm):
error_messages = {
'duplicate_email': "A user with that email already exists.",
'password_mismatch': "The two password fields didn't match.",
}
password1 = forms.CharField(
label="Password",
widget=forms.PasswordInput)
password2 = forms.CharField(
label="Password confirmation",
widget=forms.PasswordInput,
help_text="Enter the same password as above, for verification.")
class Meta:
model = User
fields = ('email', 'grade')
def clean_email(self):
email = self.cleaned_data["email"]
try:
get_user_model().objects.get(email=email)
except get_user_model().DoesNotExist:
return email
raise forms.ValidationError(
self.error_messages['duplicate_email'],
code='duplicate_email',
)
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
return password2
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
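# Example usage (sketch; field values are illustrative):
#   form = UserCreationForm({'email': 'user@example.com', 'grade': 1,
#                            'password1': 'secret', 'password2': 'secret'})
#   if form.is_valid():
#       user = form.save()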
class UserChangeForm(forms.ModelForm):
password = ReadOnlyPasswordHashField(label="Password", help_text=
"Raw passwords are not stored, so there is no way to see "
"this user's password, but you can change the password "
"using <a href=\"password/\">this form</a>.")
class Meta:
model = User
exclude = ()
def __init__(self, *args, **kwargs):
super(UserChangeForm, self).__init__(*args, **kwargs)
f = self.fields.get('user_permissions', None)
if f is not None:
f.queryset = f.queryset.select_related('content_type')
def clean_password(self):
return self.initial["password"]
class UserLoginForm(forms.Form):
email = forms.EmailField(label='Email', max_length=150)
# TODO: label localization
    password = forms.CharField(label='Password', widget=forms.PasswordInput())
| 32.481013
| 76
| 0.644193
|
62608e2f17267c6945fe7819bcca54f7fdfacc59
| 57,680
|
py
|
Python
|
pandas/tests/io/formats/style/test_style.py
|
dorothykiz1/pandas
|
6033ed4b3383d874ee4a8a461724c0b8c2ca968d
|
[
"BSD-3-Clause"
] | 1
|
2022-03-29T01:38:03.000Z
|
2022-03-29T01:38:03.000Z
|
pandas/tests/io/formats/style/test_style.py
|
dorothykiz1/pandas
|
6033ed4b3383d874ee4a8a461724c0b8c2ca968d
|
[
"BSD-3-Clause"
] | 1
|
2022-03-08T02:15:07.000Z
|
2022-03-08T02:15:07.000Z
|
pandas/tests/io/formats/style/test_style.py
|
dorothykiz1/pandas
|
6033ed4b3383d874ee4a8a461724c0b8c2ca968d
|
[
"BSD-3-Clause"
] | 1
|
2022-03-22T11:50:25.000Z
|
2022-03-22T11:50:25.000Z
|
import copy
import re
from textwrap import dedent
import numpy as np
import pytest
from pandas import (
Categorical,
DataFrame,
IndexSlice,
MultiIndex,
Series,
option_context,
)
import pandas._testing as tm
jinja2 = pytest.importorskip("jinja2")
from pandas.io.formats.style import ( # isort:skip
Styler,
)
from pandas.io.formats.style_render import (
_get_level_lengths,
_get_trimming_maximums,
maybe_convert_css_to_tuples,
non_reducing_slice,
)
@pytest.fixture
def mi_df():
return DataFrame(
[[1, 2], [3, 4]],
index=MultiIndex.from_product([["i0"], ["i1_a", "i1_b"]]),
columns=MultiIndex.from_product([["c0"], ["c1_a", "c1_b"]]),
dtype=int,
)
@pytest.fixture
def mi_styler(mi_df):
return Styler(mi_df, uuid_len=0)
@pytest.fixture
def mi_styler_comp(mi_styler):
# comprehensively add features to mi_styler
mi_styler = mi_styler._copy(deepcopy=True)
mi_styler.css = {**mi_styler.css, **{"row": "ROW", "col": "COL"}}
mi_styler.uuid_len = 5
mi_styler.uuid = "abcde"
mi_styler.set_caption("capt")
mi_styler.set_table_styles([{"selector": "a", "props": "a:v;"}])
mi_styler.hide(axis="columns")
mi_styler.hide([("c0", "c1_a")], axis="columns", names=True)
mi_styler.hide(axis="index")
mi_styler.hide([("i0", "i1_a")], axis="index", names=True)
mi_styler.set_table_attributes('class="box"')
other = mi_styler.data.agg(["mean"])
other.index = MultiIndex.from_product([[""], other.index])
mi_styler.concat(other.style)
mi_styler.format(na_rep="MISSING", precision=3)
mi_styler.format_index(precision=2, axis=0)
mi_styler.format_index(precision=4, axis=1)
mi_styler.highlight_max(axis=None)
mi_styler.applymap_index(lambda x: "color: white;", axis=0)
mi_styler.applymap_index(lambda x: "color: black;", axis=1)
mi_styler.set_td_classes(
DataFrame(
[["a", "b"], ["a", "c"]], index=mi_styler.index, columns=mi_styler.columns
)
)
mi_styler.set_tooltips(
DataFrame(
[["a2", "b2"], ["a2", "c2"]],
index=mi_styler.index,
columns=mi_styler.columns,
)
)
return mi_styler
@pytest.mark.parametrize(
"sparse_columns, exp_cols",
[
(
True,
[
{"is_visible": True, "attributes": 'colspan="2"', "value": "c0"},
{"is_visible": False, "attributes": "", "value": "c0"},
],
),
(
False,
[
{"is_visible": True, "attributes": "", "value": "c0"},
{"is_visible": True, "attributes": "", "value": "c0"},
],
),
],
)
def test_mi_styler_sparsify_columns(mi_styler, sparse_columns, exp_cols):
exp_l1_c0 = {"is_visible": True, "attributes": "", "display_value": "c1_a"}
exp_l1_c1 = {"is_visible": True, "attributes": "", "display_value": "c1_b"}
ctx = mi_styler._translate(True, sparse_columns)
assert exp_cols[0].items() <= ctx["head"][0][2].items()
assert exp_cols[1].items() <= ctx["head"][0][3].items()
assert exp_l1_c0.items() <= ctx["head"][1][2].items()
assert exp_l1_c1.items() <= ctx["head"][1][3].items()
@pytest.mark.parametrize(
"sparse_index, exp_rows",
[
(
True,
[
{"is_visible": True, "attributes": 'rowspan="2"', "value": "i0"},
{"is_visible": False, "attributes": "", "value": "i0"},
],
),
(
False,
[
{"is_visible": True, "attributes": "", "value": "i0"},
{"is_visible": True, "attributes": "", "value": "i0"},
],
),
],
)
def test_mi_styler_sparsify_index(mi_styler, sparse_index, exp_rows):
exp_l1_r0 = {"is_visible": True, "attributes": "", "display_value": "i1_a"}
exp_l1_r1 = {"is_visible": True, "attributes": "", "display_value": "i1_b"}
ctx = mi_styler._translate(sparse_index, True)
assert exp_rows[0].items() <= ctx["body"][0][0].items()
assert exp_rows[1].items() <= ctx["body"][1][0].items()
assert exp_l1_r0.items() <= ctx["body"][0][1].items()
assert exp_l1_r1.items() <= ctx["body"][1][1].items()
def test_mi_styler_sparsify_options(mi_styler):
with option_context("styler.sparse.index", False):
html1 = mi_styler.to_html()
with option_context("styler.sparse.index", True):
html2 = mi_styler.to_html()
assert html1 != html2
with option_context("styler.sparse.columns", False):
html1 = mi_styler.to_html()
with option_context("styler.sparse.columns", True):
html2 = mi_styler.to_html()
assert html1 != html2
@pytest.mark.parametrize(
"rn, cn, max_els, max_rows, max_cols, exp_rn, exp_cn",
[
(100, 100, 100, None, None, 12, 6), # reduce to (12, 6) < 100 elements
(1000, 3, 750, None, None, 250, 3), # dynamically reduce rows to 250, keep cols
(4, 1000, 500, None, None, 4, 125), # dynamically reduce cols to 125, keep rows
(1000, 3, 750, 10, None, 10, 3), # overwrite above dynamics with max_row
(4, 1000, 500, None, 5, 4, 5), # overwrite above dynamics with max_col
(100, 100, 700, 50, 50, 25, 25), # rows cols below given maxes so < 700 elmts
],
)
def test_trimming_maximum(rn, cn, max_els, max_rows, max_cols, exp_rn, exp_cn):
rn, cn = _get_trimming_maximums(
rn, cn, max_els, max_rows, max_cols, scaling_factor=0.5
)
assert (rn, cn) == (exp_rn, exp_cn)
@pytest.mark.parametrize(
"option, val",
[
("styler.render.max_elements", 6),
("styler.render.max_rows", 3),
],
)
def test_render_trimming_rows(option, val):
# test auto and specific trimming of rows
df = DataFrame(np.arange(120).reshape(60, 2))
with option_context(option, val):
ctx = df.style._translate(True, True)
assert len(ctx["head"][0]) == 3 # index + 2 data cols
assert len(ctx["body"]) == 4 # 3 data rows + trimming row
assert len(ctx["body"][0]) == 3 # index + 2 data cols
@pytest.mark.parametrize(
"option, val",
[
("styler.render.max_elements", 6),
("styler.render.max_columns", 2),
],
)
def test_render_trimming_cols(option, val):
# test auto and specific trimming of cols
df = DataFrame(np.arange(30).reshape(3, 10))
with option_context(option, val):
ctx = df.style._translate(True, True)
assert len(ctx["head"][0]) == 4 # index + 2 data cols + trimming col
assert len(ctx["body"]) == 3 # 3 data rows
assert len(ctx["body"][0]) == 4 # index + 2 data cols + trimming col
def test_render_trimming_mi():
midx = MultiIndex.from_product([[1, 2], [1, 2, 3]])
df = DataFrame(np.arange(36).reshape(6, 6), columns=midx, index=midx)
with option_context("styler.render.max_elements", 4):
ctx = df.style._translate(True, True)
assert len(ctx["body"][0]) == 5 # 2 indexes + 2 data cols + trimming row
assert {"attributes": 'rowspan="2"'}.items() <= ctx["body"][0][0].items()
assert {"class": "data row0 col_trim"}.items() <= ctx["body"][0][4].items()
assert {"class": "data row_trim col_trim"}.items() <= ctx["body"][2][4].items()
assert len(ctx["body"]) == 3 # 2 data rows + trimming row
def test_render_empty_mi():
# GH 43305
df = DataFrame(index=MultiIndex.from_product([["A"], [0, 1]], names=[None, "one"]))
expected = dedent(
"""\
>
<thead>
<tr>
<th class="index_name level0" > </th>
<th class="index_name level1" >one</th>
</tr>
</thead>
"""
)
assert expected in df.style.to_html()
@pytest.mark.parametrize("comprehensive", [True, False])
@pytest.mark.parametrize("render", [True, False])
@pytest.mark.parametrize("deepcopy", [True, False])
def test_copy(comprehensive, render, deepcopy, mi_styler, mi_styler_comp):
styler = mi_styler_comp if comprehensive else mi_styler
styler.uuid_len = 5
s2 = copy.deepcopy(styler) if deepcopy else copy.copy(styler) # make copy and check
assert s2 is not styler
if render:
styler.to_html()
excl = [
"na_rep", # deprecated
"precision", # deprecated
"cellstyle_map", # render time vars..
"cellstyle_map_columns",
"cellstyle_map_index",
"template_latex", # render templates are class level
"template_html",
"template_html_style",
"template_html_table",
]
if not deepcopy: # check memory locations are equal for all included attributes
for attr in [a for a in styler.__dict__ if (not callable(a) and a not in excl)]:
assert id(getattr(s2, attr)) == id(getattr(styler, attr))
else: # check memory locations are different for nested or mutable vars
shallow = [
"data",
"columns",
"index",
"uuid_len",
"uuid",
"caption",
"cell_ids",
"hide_index_",
"hide_columns_",
"hide_index_names",
"hide_column_names",
"table_attributes",
]
for attr in shallow:
assert id(getattr(s2, attr)) == id(getattr(styler, attr))
for attr in [
a
for a in styler.__dict__
if (not callable(a) and a not in excl and a not in shallow)
]:
if getattr(s2, attr) is None:
assert id(getattr(s2, attr)) == id(getattr(styler, attr))
else:
assert id(getattr(s2, attr)) != id(getattr(styler, attr))
def test_clear(mi_styler_comp):
# NOTE: if this test fails for new features then 'mi_styler_comp' should be updated
# to ensure proper testing of the 'copy', 'clear', 'export' methods with new feature
# GH 40675
styler = mi_styler_comp
styler._compute() # execute applied methods
clean_copy = Styler(styler.data, uuid=styler.uuid)
excl = [
"data",
"index",
"columns",
"uuid",
"uuid_len", # uuid is set to be the same on styler and clean_copy
"cell_ids",
"cellstyle_map", # execution time only
"cellstyle_map_columns", # execution time only
"cellstyle_map_index", # execution time only
"precision", # deprecated
"na_rep", # deprecated
"template_latex", # render templates are class level
"template_html",
"template_html_style",
"template_html_table",
]
    # test that vars do not have the same values on obj and clean copy before clear (except for excl)
for attr in [a for a in styler.__dict__ if not (callable(a) or a in excl)]:
res = getattr(styler, attr) == getattr(clean_copy, attr)
if hasattr(res, "__iter__") and len(res) > 0:
assert not all(res) # some element in iterable differs
elif hasattr(res, "__iter__") and len(res) == 0:
pass # empty array
else:
assert not res # explicit var differs
    # test vars have same values on obj and clean copy after clearing
styler.clear()
for attr in [a for a in styler.__dict__ if not (callable(a))]:
res = getattr(styler, attr) == getattr(clean_copy, attr)
assert all(res) if hasattr(res, "__iter__") else res
def test_export(mi_styler_comp, mi_styler):
exp_attrs = [
"_todo",
"hide_index_",
"hide_index_names",
"hide_columns_",
"hide_column_names",
"table_attributes",
"table_styles",
"css",
]
for attr in exp_attrs:
check = getattr(mi_styler, attr) == getattr(mi_styler_comp, attr)
assert not (
all(check) if (hasattr(check, "__iter__") and len(check) > 0) else check
)
export = mi_styler_comp.export()
used = mi_styler.use(export)
for attr in exp_attrs:
check = getattr(used, attr) == getattr(mi_styler_comp, attr)
assert all(check) if (hasattr(check, "__iter__") and len(check) > 0) else check
used.to_html()
def test_hide_raises(mi_styler):
msg = "`subset` and `level` cannot be passed simultaneously"
with pytest.raises(ValueError, match=msg):
mi_styler.hide(axis="index", subset="something", level="something else")
msg = "`level` must be of type `int`, `str` or list of such"
with pytest.raises(ValueError, match=msg):
mi_styler.hide(axis="index", level={"bad": 1, "type": 2})
@pytest.mark.parametrize("level", [1, "one", [1], ["one"]])
def test_hide_index_level(mi_styler, level):
mi_styler.index.names, mi_styler.columns.names = ["zero", "one"], ["zero", "one"]
ctx = mi_styler.hide(axis="index", level=level)._translate(False, True)
assert len(ctx["head"][0]) == 3
assert len(ctx["head"][1]) == 3
assert len(ctx["head"][2]) == 4
assert ctx["head"][2][0]["is_visible"]
assert not ctx["head"][2][1]["is_visible"]
assert ctx["body"][0][0]["is_visible"]
assert not ctx["body"][0][1]["is_visible"]
assert ctx["body"][1][0]["is_visible"]
assert not ctx["body"][1][1]["is_visible"]
@pytest.mark.parametrize("level", [1, "one", [1], ["one"]])
@pytest.mark.parametrize("names", [True, False])
def test_hide_columns_level(mi_styler, level, names):
mi_styler.columns.names = ["zero", "one"]
if names:
mi_styler.index.names = ["zero", "one"]
ctx = mi_styler.hide(axis="columns", level=level)._translate(True, False)
assert len(ctx["head"]) == (2 if names else 1)
@pytest.mark.parametrize("method", ["applymap", "apply"])
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_apply_map_header(method, axis):
# GH 41893
df = DataFrame({"A": [0, 0], "B": [1, 1]}, index=["C", "D"])
func = {
"apply": lambda s: ["attr: val" if ("A" in v or "C" in v) else "" for v in s],
"applymap": lambda v: "attr: val" if ("A" in v or "C" in v) else "",
}
# test execution added to todo
result = getattr(df.style, f"{method}_index")(func[method], axis=axis)
assert len(result._todo) == 1
assert len(getattr(result, f"ctx_{axis}")) == 0
# test ctx object on compute
result._compute()
expected = {
(0, 0): [("attr", "val")],
}
assert getattr(result, f"ctx_{axis}") == expected
@pytest.mark.parametrize("method", ["apply", "applymap"])
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_apply_map_header_mi(mi_styler, method, axis):
# GH 41893
func = {
"apply": lambda s: ["attr: val;" if "b" in v else "" for v in s],
"applymap": lambda v: "attr: val" if "b" in v else "",
}
result = getattr(mi_styler, f"{method}_index")(func[method], axis=axis)._compute()
expected = {(1, 1): [("attr", "val")]}
assert getattr(result, f"ctx_{axis}") == expected
def test_apply_map_header_raises(mi_styler):
# GH 41893
with pytest.raises(ValueError, match="No axis named bad for object type DataFrame"):
mi_styler.applymap_index(lambda v: "attr: val;", axis="bad")._compute()
class TestStyler:
def setup_method(self):
np.random.seed(24)
self.s = DataFrame({"A": np.random.permutation(range(6))})
self.df = DataFrame({"A": [0, 1], "B": np.random.randn(2)})
self.f = lambda x: x
self.g = lambda x: x
def h(x, foo="bar"):
return Series(f"color: {foo}", index=x.index, name=x.name)
self.h = h
self.styler = Styler(self.df)
self.attrs = DataFrame({"A": ["color: red", "color: blue"]})
self.dataframes = [
self.df,
DataFrame({"f": [1.0, 2.0], "o": ["a", "b"], "c": Categorical(["a", "b"])}),
]
self.blank_value = " "
def test_init_non_pandas(self):
msg = "``data`` must be a Series or DataFrame"
with pytest.raises(TypeError, match=msg):
Styler([1, 2, 3])
def test_init_series(self):
result = Styler(Series([1, 2]))
assert result.data.ndim == 2
def test_repr_html_ok(self):
self.styler._repr_html_()
def test_repr_html_mathjax(self):
# gh-19824 / 41395
assert "tex2jax_ignore" not in self.styler._repr_html_()
with option_context("styler.html.mathjax", False):
assert "tex2jax_ignore" in self.styler._repr_html_()
def test_update_ctx(self):
self.styler._update_ctx(self.attrs)
expected = {(0, 0): [("color", "red")], (1, 0): [("color", "blue")]}
assert self.styler.ctx == expected
def test_update_ctx_flatten_multi_and_trailing_semi(self):
attrs = DataFrame({"A": ["color: red; foo: bar", "color:blue ; foo: baz;"]})
self.styler._update_ctx(attrs)
expected = {
(0, 0): [("color", "red"), ("foo", "bar")],
(1, 0): [("color", "blue"), ("foo", "baz")],
}
assert self.styler.ctx == expected
def test_render(self):
df = DataFrame({"A": [0, 1]})
style = lambda x: Series(["color: red", "color: blue"], name=x.name)
s = Styler(df, uuid="AB").apply(style)
s.to_html()
# it worked?
def test_multiple_render(self):
# GH 39396
s = Styler(self.df, uuid_len=0).applymap(lambda x: "color: red;", subset=["A"])
s.to_html() # do 2 renders to ensure css styles not duplicated
assert (
'<style type="text/css">\n#T__row0_col0, #T__row1_col0 {\n'
" color: red;\n}\n</style>" in s.to_html()
)
def test_render_empty_dfs(self):
empty_df = DataFrame()
es = Styler(empty_df)
es.to_html()
# An index but no columns
DataFrame(columns=["a"]).style.to_html()
# A column but no index
DataFrame(index=["a"]).style.to_html()
# No IndexError raised?
def test_render_double(self):
df = DataFrame({"A": [0, 1]})
style = lambda x: Series(
["color: red; border: 1px", "color: blue; border: 2px"], name=x.name
)
s = Styler(df, uuid="AB").apply(style)
s.to_html()
# it worked?
def test_set_properties(self):
df = DataFrame({"A": [0, 1]})
result = df.style.set_properties(color="white", size="10px")._compute().ctx
# order is deterministic
v = [("color", "white"), ("size", "10px")]
expected = {(0, 0): v, (1, 0): v}
assert result.keys() == expected.keys()
for v1, v2 in zip(result.values(), expected.values()):
assert sorted(v1) == sorted(v2)
def test_set_properties_subset(self):
df = DataFrame({"A": [0, 1]})
result = (
df.style.set_properties(subset=IndexSlice[0, "A"], color="white")
._compute()
.ctx
)
expected = {(0, 0): [("color", "white")]}
assert result == expected
def test_empty_index_name_doesnt_display(self):
# https://github.com/pandas-dev/pandas/pull/12090#issuecomment-180695902
df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
result = df.style._translate(True, True)
assert len(result["head"]) == 1
expected = {
"class": "blank level0",
"type": "th",
"value": self.blank_value,
"is_visible": True,
"display_value": self.blank_value,
}
assert expected.items() <= result["head"][0][0].items()
def test_index_name(self):
# https://github.com/pandas-dev/pandas/issues/11655
df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
result = df.set_index("A").style._translate(True, True)
expected = {
"class": "index_name level0",
"type": "th",
"value": "A",
"is_visible": True,
"display_value": "A",
}
assert expected.items() <= result["head"][1][0].items()
def test_numeric_columns(self):
# https://github.com/pandas-dev/pandas/issues/12125
# smoke test for _translate
df = DataFrame({0: [1, 2, 3]})
df.style._translate(True, True)
def test_apply_axis(self):
df = DataFrame({"A": [0, 0], "B": [1, 1]})
f = lambda x: [f"val: {x.max()}" for v in x]
result = df.style.apply(f, axis=1)
assert len(result._todo) == 1
assert len(result.ctx) == 0
result._compute()
expected = {
(0, 0): [("val", "1")],
(0, 1): [("val", "1")],
(1, 0): [("val", "1")],
(1, 1): [("val", "1")],
}
assert result.ctx == expected
result = df.style.apply(f, axis=0)
expected = {
(0, 0): [("val", "0")],
(0, 1): [("val", "1")],
(1, 0): [("val", "0")],
(1, 1): [("val", "1")],
}
result._compute()
assert result.ctx == expected
result = df.style.apply(f) # default
result._compute()
assert result.ctx == expected
@pytest.mark.parametrize("axis", [0, 1])
def test_apply_series_return(self, axis):
# GH 42014
df = DataFrame([[1, 2], [3, 4]], index=["X", "Y"], columns=["X", "Y"])
# test Series return where len(Series) < df.index or df.columns but labels OK
func = lambda s: Series(["color: red;"], index=["Y"])
result = df.style.apply(func, axis=axis)._compute().ctx
assert result[(1, 1)] == [("color", "red")]
assert result[(1 - axis, axis)] == [("color", "red")]
# test Series return where labels align but different order
func = lambda s: Series(["color: red;", "color: blue;"], index=["Y", "X"])
result = df.style.apply(func, axis=axis)._compute().ctx
assert result[(0, 0)] == [("color", "blue")]
assert result[(1, 1)] == [("color", "red")]
assert result[(1 - axis, axis)] == [("color", "red")]
assert result[(axis, 1 - axis)] == [("color", "blue")]
@pytest.mark.parametrize("index", [False, True])
@pytest.mark.parametrize("columns", [False, True])
def test_apply_dataframe_return(self, index, columns):
# GH 42014
df = DataFrame([[1, 2], [3, 4]], index=["X", "Y"], columns=["X", "Y"])
idxs = ["X", "Y"] if index else ["Y"]
cols = ["X", "Y"] if columns else ["Y"]
df_styles = DataFrame("color: red;", index=idxs, columns=cols)
result = df.style.apply(lambda x: df_styles, axis=None)._compute().ctx
assert result[(1, 1)] == [("color", "red")] # (Y,Y) styles always present
assert (result[(0, 1)] == [("color", "red")]) is index # (X,Y) only if index
assert (result[(1, 0)] == [("color", "red")]) is columns # (Y,X) only if cols
assert (result[(0, 0)] == [("color", "red")]) is (index and columns) # (X,X)
@pytest.mark.parametrize(
"slice_",
[
IndexSlice[:],
IndexSlice[:, ["A"]],
IndexSlice[[1], :],
IndexSlice[[1], ["A"]],
IndexSlice[:2, ["A", "B"]],
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_apply_subset(self, slice_, axis):
result = (
self.df.style.apply(self.h, axis=axis, subset=slice_, foo="baz")
._compute()
.ctx
)
expected = {
(r, c): [("color", "baz")]
for r, row in enumerate(self.df.index)
for c, col in enumerate(self.df.columns)
if row in self.df.loc[slice_].index and col in self.df.loc[slice_].columns
}
assert result == expected
@pytest.mark.parametrize(
"slice_",
[
IndexSlice[:],
IndexSlice[:, ["A"]],
IndexSlice[[1], :],
IndexSlice[[1], ["A"]],
IndexSlice[:2, ["A", "B"]],
],
)
def test_applymap_subset(self, slice_):
result = (
self.df.style.applymap(lambda x: "color:baz;", subset=slice_)._compute().ctx
)
expected = {
(r, c): [("color", "baz")]
for r, row in enumerate(self.df.index)
for c, col in enumerate(self.df.columns)
if row in self.df.loc[slice_].index and col in self.df.loc[slice_].columns
}
assert result == expected
@pytest.mark.parametrize(
"slice_",
[
IndexSlice[:, IndexSlice["x", "A"]],
IndexSlice[:, IndexSlice[:, "A"]],
IndexSlice[:, IndexSlice[:, ["A", "C"]]], # missing col element
IndexSlice[IndexSlice["a", 1], :],
IndexSlice[IndexSlice[:, 1], :],
IndexSlice[IndexSlice[:, [1, 3]], :], # missing row element
IndexSlice[:, ("x", "A")],
IndexSlice[("a", 1), :],
],
)
def test_applymap_subset_multiindex(self, slice_):
# GH 19861
# edited for GH 33562
warn = None
msg = "indexing on a MultiIndex with a nested sequence of labels"
if (
isinstance(slice_[-1], tuple)
and isinstance(slice_[-1][-1], list)
and "C" in slice_[-1][-1]
):
warn = FutureWarning
elif (
isinstance(slice_[0], tuple)
and isinstance(slice_[0][1], list)
and 3 in slice_[0][1]
):
warn = FutureWarning
idx = MultiIndex.from_product([["a", "b"], [1, 2]])
col = MultiIndex.from_product([["x", "y"], ["A", "B"]])
df = DataFrame(np.random.rand(4, 4), columns=col, index=idx)
with tm.assert_produces_warning(warn, match=msg):
df.style.applymap(lambda x: "color: red;", subset=slice_).to_html()
def test_applymap_subset_multiindex_code(self):
# https://github.com/pandas-dev/pandas/issues/25858
        # Checks styler.applymap works with multiindex when codes are provided
codes = np.array([[0, 0, 1, 1], [0, 1, 0, 1]])
columns = MultiIndex(
levels=[["a", "b"], ["%", "#"]], codes=codes, names=["", ""]
)
df = DataFrame(
[[1, -1, 1, 1], [-1, 1, 1, 1]], index=["hello", "world"], columns=columns
)
pct_subset = IndexSlice[:, IndexSlice[:, "%":"%"]]
def color_negative_red(val):
color = "red" if val < 0 else "black"
return f"color: {color}"
df.loc[pct_subset]
df.style.applymap(color_negative_red, subset=pct_subset)
def test_empty(self):
df = DataFrame({"A": [1, 0]})
s = df.style
s.ctx = {(0, 0): [("color", "red")], (1, 0): [("", "")]}
result = s._translate(True, True)["cellstyle"]
expected = [
{"props": [("color", "red")], "selectors": ["row0_col0"]},
{"props": [("", "")], "selectors": ["row1_col0"]},
]
assert result == expected
def test_duplicate(self):
df = DataFrame({"A": [1, 0]})
s = df.style
s.ctx = {(0, 0): [("color", "red")], (1, 0): [("color", "red")]}
result = s._translate(True, True)["cellstyle"]
expected = [
{"props": [("color", "red")], "selectors": ["row0_col0", "row1_col0"]}
]
assert result == expected
def test_init_with_na_rep(self):
# GH 21527 28358
df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])
ctx = Styler(df, na_rep="NA")._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "NA"
assert ctx["body"][0][2]["display_value"] == "NA"
def test_caption(self):
styler = Styler(self.df, caption="foo")
result = styler.to_html()
assert all(["caption" in result, "foo" in result])
styler = self.df.style
result = styler.set_caption("baz")
assert styler is result
assert styler.caption == "baz"
def test_uuid(self):
styler = Styler(self.df, uuid="abc123")
result = styler.to_html()
assert "abc123" in result
styler = self.df.style
result = styler.set_uuid("aaa")
assert result is styler
assert result.uuid == "aaa"
def test_unique_id(self):
# See https://github.com/pandas-dev/pandas/issues/16780
df = DataFrame({"a": [1, 3, 5, 6], "b": [2, 4, 12, 21]})
result = df.style.to_html(uuid="test")
assert "test" in result
ids = re.findall('id="(.*?)"', result)
assert np.unique(ids).size == len(ids)
def test_table_styles(self):
style = [{"selector": "th", "props": [("foo", "bar")]}] # default format
styler = Styler(self.df, table_styles=style)
result = " ".join(styler.to_html().split())
assert "th { foo: bar; }" in result
styler = self.df.style
result = styler.set_table_styles(style)
assert styler is result
assert styler.table_styles == style
# GH 39563
style = [{"selector": "th", "props": "foo:bar;"}] # css string format
styler = self.df.style.set_table_styles(style)
result = " ".join(styler.to_html().split())
assert "th { foo: bar; }" in result
def test_table_styles_multiple(self):
ctx = self.df.style.set_table_styles(
[
{"selector": "th,td", "props": "color:red;"},
{"selector": "tr", "props": "color:green;"},
]
)._translate(True, True)["table_styles"]
assert ctx == [
{"selector": "th", "props": [("color", "red")]},
{"selector": "td", "props": [("color", "red")]},
{"selector": "tr", "props": [("color", "green")]},
]
def test_table_styles_dict_multiple_selectors(self):
# GH 44011
result = self.df.style.set_table_styles(
{
"B": [
{"selector": "th,td", "props": [("border-left", "2px solid black")]}
]
}
)._translate(True, True)["table_styles"]
expected = [
{"selector": "th.col1", "props": [("border-left", "2px solid black")]},
{"selector": "td.col1", "props": [("border-left", "2px solid black")]},
]
assert result == expected
def test_maybe_convert_css_to_tuples(self):
expected = [("a", "b"), ("c", "d e")]
assert maybe_convert_css_to_tuples("a:b;c:d e;") == expected
assert maybe_convert_css_to_tuples("a: b ;c: d e ") == expected
expected = []
assert maybe_convert_css_to_tuples("") == expected
def test_maybe_convert_css_to_tuples_err(self):
msg = "Styles supplied as string must follow CSS rule formats"
with pytest.raises(ValueError, match=msg):
maybe_convert_css_to_tuples("err")
def test_table_attributes(self):
attributes = 'class="foo" data-bar'
styler = Styler(self.df, table_attributes=attributes)
result = styler.to_html()
assert 'class="foo" data-bar' in result
result = self.df.style.set_table_attributes(attributes).to_html()
assert 'class="foo" data-bar' in result
def test_apply_none(self):
def f(x):
return DataFrame(
np.where(x == x.max(), "color: red", ""),
index=x.index,
columns=x.columns,
)
result = DataFrame([[1, 2], [3, 4]]).style.apply(f, axis=None)._compute().ctx
assert result[(1, 1)] == [("color", "red")]
def test_trim(self):
result = self.df.style.to_html() # trim=True
assert result.count("#") == 0
result = self.df.style.highlight_max().to_html()
assert result.count("#") == len(self.df.columns)
def test_export(self):
f = lambda x: "color: red" if x > 0 else "color: blue"
g = lambda x, z: f"color: {z}" if x > 0 else f"color: {z}"
style1 = self.styler
style1.applymap(f).applymap(g, z="b").highlight_max()._compute() # = render
result = style1.export()
style2 = self.df.style
style2.use(result)
assert style1._todo == style2._todo
style2.to_html()
def test_bad_apply_shape(self):
df = DataFrame([[1, 2], [3, 4]], index=["A", "B"], columns=["X", "Y"])
msg = "resulted in the apply method collapsing to a Series."
with pytest.raises(ValueError, match=msg):
df.style._apply(lambda x: "x")
msg = "created invalid {} labels"
with pytest.raises(ValueError, match=msg.format("index")):
df.style._apply(lambda x: [""])
with pytest.raises(ValueError, match=msg.format("index")):
df.style._apply(lambda x: ["", "", "", ""])
with pytest.raises(ValueError, match=msg.format("index")):
df.style._apply(lambda x: Series(["a:v;", ""], index=["A", "C"]), axis=0)
with pytest.raises(ValueError, match=msg.format("columns")):
df.style._apply(lambda x: ["", "", ""], axis=1)
with pytest.raises(ValueError, match=msg.format("columns")):
df.style._apply(lambda x: Series(["a:v;", ""], index=["X", "Z"]), axis=1)
msg = "returned ndarray with wrong shape"
with pytest.raises(ValueError, match=msg):
df.style._apply(lambda x: np.array([[""], [""]]), axis=None)
def test_apply_bad_return(self):
def f(x):
return ""
df = DataFrame([[1, 2], [3, 4]])
msg = (
"must return a DataFrame or ndarray when passed to `Styler.apply` "
"with axis=None"
)
with pytest.raises(TypeError, match=msg):
df.style._apply(f, axis=None)
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_apply_bad_labels(self, axis):
def f(x):
return DataFrame(**{axis: ["bad", "labels"]})
df = DataFrame([[1, 2], [3, 4]])
msg = f"created invalid {axis} labels."
with pytest.raises(ValueError, match=msg):
df.style._apply(f, axis=None)
def test_get_level_lengths(self):
index = MultiIndex.from_product([["a", "b"], [0, 1, 2]])
expected = {
(0, 0): 3,
(0, 3): 3,
(1, 0): 1,
(1, 1): 1,
(1, 2): 1,
(1, 3): 1,
(1, 4): 1,
(1, 5): 1,
}
result = _get_level_lengths(index, sparsify=True, max_index=100)
tm.assert_dict_equal(result, expected)
expected = {
(0, 0): 1,
(0, 1): 1,
(0, 2): 1,
(0, 3): 1,
(0, 4): 1,
(0, 5): 1,
(1, 0): 1,
(1, 1): 1,
(1, 2): 1,
(1, 3): 1,
(1, 4): 1,
(1, 5): 1,
}
result = _get_level_lengths(index, sparsify=False, max_index=100)
tm.assert_dict_equal(result, expected)
def test_get_level_lengths_un_sorted(self):
index = MultiIndex.from_arrays([[1, 1, 2, 1], ["a", "b", "b", "d"]])
expected = {
(0, 0): 2,
(0, 2): 1,
(0, 3): 1,
(1, 0): 1,
(1, 1): 1,
(1, 2): 1,
(1, 3): 1,
}
result = _get_level_lengths(index, sparsify=True, max_index=100)
tm.assert_dict_equal(result, expected)
expected = {
(0, 0): 1,
(0, 1): 1,
(0, 2): 1,
(0, 3): 1,
(1, 0): 1,
(1, 1): 1,
(1, 2): 1,
(1, 3): 1,
}
result = _get_level_lengths(index, sparsify=False, max_index=100)
tm.assert_dict_equal(result, expected)
def test_mi_sparse_index_names(self):
# Test that the class names and displayed values are correct when rendering MI names
df = DataFrame(
{"A": [1, 2]},
index=MultiIndex.from_arrays(
[["a", "a"], [0, 1]], names=["idx_level_0", "idx_level_1"]
),
)
result = df.style._translate(True, True)
head = result["head"][1]
expected = [
{
"class": "index_name level0",
"display_value": "idx_level_0",
"is_visible": True,
},
{
"class": "index_name level1",
"display_value": "idx_level_1",
"is_visible": True,
},
{
"class": "blank col0",
"display_value": self.blank_value,
"is_visible": True,
},
]
for i, expected_dict in enumerate(expected):
assert expected_dict.items() <= head[i].items()
def test_mi_sparse_column_names(self):
df = DataFrame(
np.arange(16).reshape(4, 4),
index=MultiIndex.from_arrays(
[["a", "a", "b", "a"], [0, 1, 1, 2]],
names=["idx_level_0", "idx_level_1"],
),
columns=MultiIndex.from_arrays(
[["C1", "C1", "C2", "C2"], [1, 0, 1, 0]], names=["colnam_0", "colnam_1"]
),
)
result = Styler(df, cell_ids=False)._translate(True, True)
for level in [0, 1]:
head = result["head"][level]
expected = [
{
"class": "blank",
"display_value": self.blank_value,
"is_visible": True,
},
{
"class": f"index_name level{level}",
"display_value": f"colnam_{level}",
"is_visible": True,
},
]
for i, expected_dict in enumerate(expected):
assert expected_dict.items() <= head[i].items()
def test_hide_column_headers(self):
ctx = self.styler.hide(axis="columns")._translate(True, True)
assert len(ctx["head"]) == 0 # no header entries with an unnamed index
self.df.index.name = "some_name"
ctx = self.df.style.hide(axis="columns")._translate(True, True)
assert len(ctx["head"]) == 1
# index names still visible, changed in #42101, reverted in #43404
def test_hide_single_index(self):
# GH 14194
# single unnamed index
ctx = self.df.style._translate(True, True)
assert ctx["body"][0][0]["is_visible"]
assert ctx["head"][0][0]["is_visible"]
ctx2 = self.df.style.hide(axis="index")._translate(True, True)
assert not ctx2["body"][0][0]["is_visible"]
assert not ctx2["head"][0][0]["is_visible"]
# single named index
ctx3 = self.df.set_index("A").style._translate(True, True)
assert ctx3["body"][0][0]["is_visible"]
assert len(ctx3["head"]) == 2 # 2 header levels
assert ctx3["head"][0][0]["is_visible"]
ctx4 = self.df.set_index("A").style.hide(axis="index")._translate(True, True)
assert not ctx4["body"][0][0]["is_visible"]
assert len(ctx4["head"]) == 1  # only 1 header level
assert not ctx4["head"][0][0]["is_visible"]
def test_hide_multiindex(self):
# GH 14194
df = DataFrame(
{"A": [1, 2], "B": [1, 2]},
index=MultiIndex.from_arrays(
[["a", "a"], [0, 1]], names=["idx_level_0", "idx_level_1"]
),
)
ctx1 = df.style._translate(True, True)
# tests for 'a' and '0'
assert ctx1["body"][0][0]["is_visible"]
assert ctx1["body"][0][1]["is_visible"]
# check for blank header rows
assert len(ctx1["head"][0]) == 4 # two visible indexes and two data columns
ctx2 = df.style.hide(axis="index")._translate(True, True)
# tests for 'a' and '0'
assert not ctx2["body"][0][0]["is_visible"]
assert not ctx2["body"][0][1]["is_visible"]
# check for blank header rows
assert len(ctx2["head"][0]) == 3 # one hidden (col name) and two data columns
assert not ctx2["head"][0][0]["is_visible"]
def test_hide_columns_single_level(self):
# GH 14194
# test hiding single column
ctx = self.df.style._translate(True, True)
assert ctx["head"][0][1]["is_visible"]
assert ctx["head"][0][1]["display_value"] == "A"
assert ctx["head"][0][2]["is_visible"]
assert ctx["head"][0][2]["display_value"] == "B"
assert ctx["body"][0][1]["is_visible"] # col A, row 1
assert ctx["body"][1][2]["is_visible"] # col B, row 1
ctx = self.df.style.hide("A", axis="columns")._translate(True, True)
assert not ctx["head"][0][1]["is_visible"]
assert not ctx["body"][0][1]["is_visible"] # col A, row 1
assert ctx["body"][1][2]["is_visible"] # col B, row 1
# test hiding multiple columns
ctx = self.df.style.hide(["A", "B"], axis="columns")._translate(True, True)
assert not ctx["head"][0][1]["is_visible"]
assert not ctx["head"][0][2]["is_visible"]
assert not ctx["body"][0][1]["is_visible"] # col A, row 1
assert not ctx["body"][1][2]["is_visible"] # col B, row 1
def test_hide_columns_index_mult_levels(self):
# GH 14194
# setup dataframe with multiple column levels and indices
i1 = MultiIndex.from_arrays(
[["a", "a"], [0, 1]], names=["idx_level_0", "idx_level_1"]
)
i2 = MultiIndex.from_arrays(
[["b", "b"], [0, 1]], names=["col_level_0", "col_level_1"]
)
df = DataFrame([[1, 2], [3, 4]], index=i1, columns=i2)
ctx = df.style._translate(True, True)
# column headers
assert ctx["head"][0][2]["is_visible"]
assert ctx["head"][1][2]["is_visible"]
assert ctx["head"][1][3]["display_value"] == "1"
# indices
assert ctx["body"][0][0]["is_visible"]
# data
assert ctx["body"][1][2]["is_visible"]
assert ctx["body"][1][2]["display_value"] == "3"
assert ctx["body"][1][3]["is_visible"]
assert ctx["body"][1][3]["display_value"] == "4"
# hide top column level, which hides both columns
ctx = df.style.hide("b", axis="columns")._translate(True, True)
assert not ctx["head"][0][2]["is_visible"] # b
assert not ctx["head"][1][2]["is_visible"] # 0
assert not ctx["body"][1][2]["is_visible"] # 3
assert ctx["body"][0][0]["is_visible"] # index
# hide first column only
ctx = df.style.hide([("b", 0)], axis="columns")._translate(True, True)
assert not ctx["head"][0][2]["is_visible"] # b
assert ctx["head"][0][3]["is_visible"] # b
assert not ctx["head"][1][2]["is_visible"] # 0
assert not ctx["body"][1][2]["is_visible"] # 3
assert ctx["body"][1][3]["is_visible"]
assert ctx["body"][1][3]["display_value"] == "4"
# hide second column and index
ctx = df.style.hide([("b", 1)], axis=1).hide(axis=0)._translate(True, True)
assert not ctx["body"][0][0]["is_visible"] # index
assert len(ctx["head"][0]) == 3
assert ctx["head"][0][1]["is_visible"] # b
assert ctx["head"][1][1]["is_visible"] # 0
assert not ctx["head"][1][2]["is_visible"] # 1
assert not ctx["body"][1][3]["is_visible"] # 4
assert ctx["body"][1][2]["is_visible"]
assert ctx["body"][1][2]["display_value"] == "3"
# hide top row level, which hides both rows so body empty
ctx = df.style.hide("a", axis="index")._translate(True, True)
assert ctx["body"] == []
# hide first row only
ctx = df.style.hide(("a", 0), axis="index")._translate(True, True)
for i in [0, 1, 2, 3]:
assert "row1" in ctx["body"][0][i]["class"] # row0 not included in body
assert ctx["body"][0][i]["is_visible"]
def test_pipe(self):
def set_caption_from_template(styler, a, b):
return styler.set_caption(f"Dataframe with a = {a} and b = {b}")
styler = self.df.style.pipe(set_caption_from_template, "A", b="B")
assert "Dataframe with a = A and b = B" in styler.to_html()
# Test with an argument that is a (callable, keyword_name) pair.
def f(a, b, styler):
return (a, b, styler)
styler = self.df.style
result = styler.pipe((f, "styler"), a=1, b=2)
assert result == (1, 2, styler)
def test_no_cell_ids(self):
# GH 35588
# GH 35663
df = DataFrame(data=[[0]])
styler = Styler(df, uuid="_", cell_ids=False)
styler.to_html()
s = styler.to_html() # render twice to ensure ctx is not updated
assert s.find('<td class="data row0 col0" >') != -1
@pytest.mark.parametrize(
"classes",
[
DataFrame(
data=[["", "test-class"], [np.nan, None]],
columns=["A", "B"],
index=["a", "b"],
),
DataFrame(data=[["test-class"]], columns=["B"], index=["a"]),
DataFrame(data=[["test-class", "unused"]], columns=["B", "C"], index=["a"]),
],
)
def test_set_data_classes(self, classes):
# GH 36159
df = DataFrame(data=[[0, 1], [2, 3]], columns=["A", "B"], index=["a", "b"])
s = Styler(df, uuid_len=0, cell_ids=False).set_td_classes(classes).to_html()
assert '<td class="data row0 col0" >0</td>' in s
assert '<td class="data row0 col1 test-class" >1</td>' in s
assert '<td class="data row1 col0" >2</td>' in s
assert '<td class="data row1 col1" >3</td>' in s
# GH 39317
s = Styler(df, uuid_len=0, cell_ids=True).set_td_classes(classes).to_html()
assert '<td id="T__row0_col0" class="data row0 col0" >0</td>' in s
assert '<td id="T__row0_col1" class="data row0 col1 test-class" >1</td>' in s
assert '<td id="T__row1_col0" class="data row1 col0" >2</td>' in s
assert '<td id="T__row1_col1" class="data row1 col1" >3</td>' in s
def test_set_data_classes_reindex(self):
# GH 39317
df = DataFrame(
data=[[0, 1, 2], [3, 4, 5], [6, 7, 8]], columns=[0, 1, 2], index=[0, 1, 2]
)
classes = DataFrame(
data=[["mi", "ma"], ["mu", "mo"]],
columns=[0, 2],
index=[0, 2],
)
s = Styler(df, uuid_len=0).set_td_classes(classes).to_html()
assert '<td id="T__row0_col0" class="data row0 col0 mi" >0</td>' in s
assert '<td id="T__row0_col2" class="data row0 col2 ma" >2</td>' in s
assert '<td id="T__row1_col1" class="data row1 col1" >4</td>' in s
assert '<td id="T__row2_col0" class="data row2 col0 mu" >6</td>' in s
assert '<td id="T__row2_col2" class="data row2 col2 mo" >8</td>' in s
def test_chaining_table_styles(self):
# GH 35607
df = DataFrame(data=[[0, 1], [1, 2]], columns=["A", "B"])
styler = df.style.set_table_styles(
[{"selector": "", "props": [("background-color", "yellow")]}]
).set_table_styles(
[{"selector": ".col0", "props": [("background-color", "blue")]}],
overwrite=False,
)
assert len(styler.table_styles) == 2
def test_column_and_row_styling(self):
# GH 35607
df = DataFrame(data=[[0, 1], [1, 2]], columns=["A", "B"])
s = Styler(df, uuid_len=0)
s = s.set_table_styles({"A": [{"selector": "", "props": [("color", "blue")]}]})
assert "#T_ .col0 {\n color: blue;\n}" in s.to_html()
s = s.set_table_styles(
{0: [{"selector": "", "props": [("color", "blue")]}]}, axis=1
)
assert "#T_ .row0 {\n color: blue;\n}" in s.to_html()
@pytest.mark.parametrize("len_", [1, 5, 32, 33, 100])
def test_uuid_len(self, len_):
# GH 36345
df = DataFrame(data=[["A"]])
s = Styler(df, uuid_len=len_, cell_ids=False).to_html()
strt = s.find('id="T_')
end = s[strt + 6 :].find('"')
if len_ > 32:
assert end == 32
else:
assert end == len_
@pytest.mark.parametrize("len_", [-2, "bad", None])
def test_uuid_len_raises(self, len_):
# GH 36345
df = DataFrame(data=[["A"]])
msg = "``uuid_len`` must be an integer in range \\[0, 32\\]."
with pytest.raises(TypeError, match=msg):
Styler(df, uuid_len=len_, cell_ids=False).to_html()
@pytest.mark.parametrize(
"slc",
[
IndexSlice[:, :],
IndexSlice[:, 1],
IndexSlice[1, :],
IndexSlice[[1], [1]],
IndexSlice[1, [1]],
IndexSlice[[1], 1],
IndexSlice[1],
IndexSlice[1, 1],
slice(None, None, None),
[0, 1],
np.array([0, 1]),
Series([0, 1]),
],
)
def test_non_reducing_slice(self, slc):
df = DataFrame([[0, 1], [2, 3]])
tslice_ = non_reducing_slice(slc)
assert isinstance(df.loc[tslice_], DataFrame)
@pytest.mark.parametrize("box", [list, Series, np.array])
def test_list_slice(self, box):
# like dataframe getitem
subset = box(["A"])
df = DataFrame({"A": [1, 2], "B": [3, 4]}, index=["A", "B"])
expected = IndexSlice[:, ["A"]]
result = non_reducing_slice(subset)
tm.assert_frame_equal(df.loc[result], df.loc[expected])
def test_non_reducing_slice_on_multiindex(self):
# GH 19861
dic = {
("a", "d"): [1, 4],
("a", "c"): [2, 3],
("b", "c"): [3, 2],
("b", "d"): [4, 1],
}
df = DataFrame(dic, index=[0, 1])
idx = IndexSlice
slice_ = idx[:, idx["b", "d"]]
tslice_ = non_reducing_slice(slice_)
result = df.loc[tslice_]
expected = DataFrame({("b", "d"): [4, 1]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"slice_",
[
IndexSlice[:, :],
# check cols
IndexSlice[:, IndexSlice[["a"]]], # inferred deeper need list
IndexSlice[:, IndexSlice[["a"], ["c"]]], # inferred deeper need list
IndexSlice[:, IndexSlice["a", "c", :]],
IndexSlice[:, IndexSlice["a", :, "e"]],
IndexSlice[:, IndexSlice[:, "c", "e"]],
IndexSlice[:, IndexSlice["a", ["c", "d"], :]], # check list
IndexSlice[:, IndexSlice["a", ["c", "d", "-"], :]], # allow missing
IndexSlice[:, IndexSlice["a", ["c", "d", "-"], "e"]], # no slice
# check rows
IndexSlice[IndexSlice[["U"]], :], # inferred deeper need list
IndexSlice[IndexSlice[["U"], ["W"]], :], # inferred deeper need list
IndexSlice[IndexSlice["U", "W", :], :],
IndexSlice[IndexSlice["U", :, "Y"], :],
IndexSlice[IndexSlice[:, "W", "Y"], :],
IndexSlice[IndexSlice[:, "W", ["Y", "Z"]], :], # check list
IndexSlice[IndexSlice[:, "W", ["Y", "Z", "-"]], :], # allow missing
IndexSlice[IndexSlice["U", "W", ["Y", "Z", "-"]], :], # no slice
# check simultaneous
IndexSlice[IndexSlice[:, "W", "Y"], IndexSlice["a", "c", :]],
],
)
def test_non_reducing_multi_slice_on_multiindex(self, slice_):
# GH 33562
cols = MultiIndex.from_product([["a", "b"], ["c", "d"], ["e", "f"]])
idxs = MultiIndex.from_product([["U", "V"], ["W", "X"], ["Y", "Z"]])
df = DataFrame(np.arange(64).reshape(8, 8), columns=cols, index=idxs)
msg = "indexing on a MultiIndex with a nested sequence of labels"
warn = None
for lvl in [0, 1]:
key = slice_[lvl]
if isinstance(key, tuple):
for subkey in key:
if isinstance(subkey, list) and "-" in subkey:
# "-" is not present in the index level: ignored now, will raise in future
warn = FutureWarning
with tm.assert_produces_warning(warn, match=msg):
expected = df.loc[slice_]
with tm.assert_produces_warning(warn, match=msg):
result = df.loc[non_reducing_slice(slice_)]
tm.assert_frame_equal(result, expected)
def test_hidden_index_names(mi_df):
mi_df.index.names = ["Lev0", "Lev1"]
mi_styler = mi_df.style
ctx = mi_styler._translate(True, True)
assert len(ctx["head"]) == 3 # 2 column index levels + 1 index names row
mi_styler.hide(axis="index", names=True)
ctx = mi_styler._translate(True, True)
assert len(ctx["head"]) == 2 # index names row is unparsed
for i in range(4):
assert ctx["body"][0][i]["is_visible"] # 2 index levels + 2 data values visible
mi_styler.hide(axis="index", level=1)
ctx = mi_styler._translate(True, True)
assert len(ctx["head"]) == 2 # index names row is still hidden
assert ctx["body"][0][0]["is_visible"] is True
assert ctx["body"][0][1]["is_visible"] is False
def test_hidden_column_names(mi_df):
mi_df.columns.names = ["Lev0", "Lev1"]
mi_styler = mi_df.style
ctx = mi_styler._translate(True, True)
assert ctx["head"][0][1]["display_value"] == "Lev0"
assert ctx["head"][1][1]["display_value"] == "Lev1"
mi_styler.hide(names=True, axis="columns")
ctx = mi_styler._translate(True, True)
assert ctx["head"][0][1]["display_value"] == " "
assert ctx["head"][1][1]["display_value"] == " "
mi_styler.hide(level=0, axis="columns")
ctx = mi_styler._translate(True, True)
assert len(ctx["head"]) == 1 # no index names and only one visible column headers
assert ctx["head"][0][1]["display_value"] == " "
@pytest.mark.parametrize("caption", [1, ("a", "b", "c"), (1, "s")])
def test_caption_raises(mi_styler, caption):
msg = "`caption` must be either a string or 2-tuple of strings."
with pytest.raises(ValueError, match=msg):
mi_styler.set_caption(caption)
def test_hiding_headers_over_index_no_sparsify():
# GH 43464
midx = MultiIndex.from_product([[1, 2], ["a", "a", "b"]])
df = DataFrame(9, index=midx, columns=[0])
ctx = df.style._translate(False, False)
assert len(ctx["body"]) == 6
ctx = df.style.hide((1, "a"), axis=0)._translate(False, False)
assert len(ctx["body"]) == 4
assert "row2" in ctx["body"][0][0]["class"]
def test_hiding_headers_over_columns_no_sparsify():
# GH 43464
midx = MultiIndex.from_product([[1, 2], ["a", "a", "b"]])
df = DataFrame(9, columns=midx, index=[0])
ctx = df.style._translate(False, False)
for ix in [(0, 1), (0, 2), (1, 1), (1, 2)]:
assert ctx["head"][ix[0]][ix[1]]["is_visible"] is True
ctx = df.style.hide((1, "a"), axis="columns")._translate(False, False)
for ix in [(0, 1), (0, 2), (1, 1), (1, 2)]:
assert ctx["head"][ix[0]][ix[1]]["is_visible"] is False
def test_get_level_lengths_mi_hidden():
# GH 43464
index = MultiIndex.from_arrays([[1, 1, 1, 2, 2, 2], ["a", "a", "b", "a", "a", "b"]])
expected = {
(0, 2): 1,
(0, 3): 1,
(0, 4): 1,
(0, 5): 1,
(1, 2): 1,
(1, 3): 1,
(1, 4): 1,
(1, 5): 1,
}
result = _get_level_lengths(
index,
sparsify=False,
max_index=100,
hidden_elements=[0, 1, 0, 1], # hidden element can repeat if duplicated index
)
tm.assert_dict_equal(result, expected)
def test_row_trimming_hide_index():
# gh 43703
df = DataFrame([[1], [2], [3], [4], [5]])
with option_context("styler.render.max_rows", 2):
ctx = df.style.hide([0, 1], axis="index")._translate(True, True)
assert len(ctx["body"]) == 3
for r, val in enumerate(["3", "4", "..."]):
assert ctx["body"][r][1]["display_value"] == val
def test_row_trimming_hide_index_mi():
# gh 44247
df = DataFrame([[1], [2], [3], [4], [5]])
df.index = MultiIndex.from_product([[0], [0, 1, 2, 3, 4]])
with option_context("styler.render.max_rows", 2):
ctx = df.style.hide([(0, 0), (0, 1)], axis="index")._translate(True, True)
assert len(ctx["body"]) == 3
# level 0 index headers (sparsified)
assert {"value": 0, "attributes": 'rowspan="2"', "is_visible": True}.items() <= ctx[
"body"
][0][0].items()
assert {"value": 0, "attributes": "", "is_visible": False}.items() <= ctx["body"][
1
][0].items()
assert {"value": "...", "is_visible": True}.items() <= ctx["body"][2][0].items()
for r, val in enumerate(["2", "3", "..."]):
assert ctx["body"][r][1]["display_value"] == val # level 1 index headers
for r, val in enumerate(["3", "4", "..."]):
assert ctx["body"][r][2]["display_value"] == val # data values
def test_col_trimming_hide_columns():
# gh 44272
df = DataFrame([[1, 2, 3, 4, 5]])
with option_context("styler.render.max_columns", 2):
ctx = df.style.hide([0, 1], axis="columns")._translate(True, True)
assert len(ctx["head"][0]) == 6 # blank, [0, 1 (hidden)], [2 ,3 (visible)], + trim
for c, vals in enumerate([(1, False), (2, True), (3, True), ("...", True)]):
assert ctx["head"][0][c + 2]["value"] == vals[0]
assert ctx["head"][0][c + 2]["is_visible"] == vals[1]
assert len(ctx["body"][0]) == 6 # index + 2 hidden + 2 visible + trimming col
def test_no_empty_apply(mi_styler):
# 45313
mi_styler.apply(lambda s: ["a:v;"] * 2, subset=[False, False])
mi_styler._compute()
| 36.879795
| 88
| 0.541609
|
016928020bd777ff5e6b1b0fa8fae3fe3fd4d165
| 9,032
|
py
|
Python
|
robosuite/devices/spacemouse.py
|
ElieAljalbout/robosuite
|
5092e805157ca7a255d3692f34a4d51599e72699
|
[
"MIT"
] | null | null | null |
robosuite/devices/spacemouse.py
|
ElieAljalbout/robosuite
|
5092e805157ca7a255d3692f34a4d51599e72699
|
[
"MIT"
] | null | null | null |
robosuite/devices/spacemouse.py
|
ElieAljalbout/robosuite
|
5092e805157ca7a255d3692f34a4d51599e72699
|
[
"MIT"
] | null | null | null |
"""Driver class for SpaceMouse controller.
This class provides driver support for the SpaceMouse on macOS.
In particular, we assume you are using a SpaceMouse Wireless by default.
To set up a new SpaceMouse controller:
1. Download and install driver from https://www.3dconnexion.com/service/drivers.html
2. Install the hidapi library through pip
(make sure you run `pip uninstall hid` first if it is installed).
3. Make sure SpaceMouse is connected before running the script
4. (Optional) Based on the model of SpaceMouse, you might need to change the
vendor id and product id that correspond to the device.
For Linux support, you can find open-source Linux drivers and SDKs online.
See http://spacenav.sourceforge.net/
"""
import threading
import time
from collections import namedtuple
import numpy as np
try:
import hid
except ModuleNotFoundError as exc:
raise ImportError(
"Unable to load module hid, required to interface with SpaceMouse. "
"Only macOS is officially supported. Install the additional "
"requirements with `pip install -r requirements-extra.txt`"
) from exc
from robosuite.devices import Device
from robosuite.utils.transform_utils import rotation_matrix
AxisSpec = namedtuple("AxisSpec", ["channel", "byte1", "byte2", "scale"])
SPACE_MOUSE_SPEC = {
"x": AxisSpec(channel=1, byte1=1, byte2=2, scale=1),
"y": AxisSpec(channel=1, byte1=3, byte2=4, scale=-1),
"z": AxisSpec(channel=1, byte1=5, byte2=6, scale=-1),
"roll": AxisSpec(channel=1, byte1=7, byte2=8, scale=-1),
"pitch": AxisSpec(channel=1, byte1=9, byte2=10, scale=-1),
"yaw": AxisSpec(channel=1, byte1=11, byte2=12, scale=1),
}
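# Illustrative sketch of how SPACE_MOUSE_SPEC maps a raw 13-byte HID report
# ``d`` to one axis value using the ``convert`` helper defined below (note the
# ``run()`` loop in this file hardcodes its own byte offsets and signs, so this
# shows the spec's intent rather than the exact runtime path):
#
#     spec = SPACE_MOUSE_SPEC["x"]
#     x = convert(d[spec.byte1], d[spec.byte2]) * spec.scale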
def to_int16(y1, y2):
"""
Convert two 8 bit bytes to a signed 16 bit integer.
Args:
y1 (int): 8-bit byte
y2 (int): 8-bit byte
Returns:
int: 16-bit integer
"""
x = (y1) | (y2 << 8)
if x >= 32768:
x = -(65536 - x)
return x
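# Worked example of the two's-complement decoding above: bytes (0x00, 0xFE)
# combine to 0xFE00 = 65024, which is >= 32768 and therefore decodes to
# 65024 - 65536 = -512:
#
#     assert to_int16(0x00, 0xFE) == -512
#     assert to_int16(0xFF, 0x7F) == 32767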
def scale_to_control(x, axis_scale=350.0, min_v=-1.0, max_v=1.0):
"""
Normalize raw HID readings to target range.
Args:
x (int): Raw reading from HID
axis_scale (float): (Inverted) scaling factor for mapping raw input value
min_v (float): Minimum limit after scaling
max_v (float): Maximum limit after scaling
Returns:
float: Clipped, scaled input from HID
"""
x = x / axis_scale
x = min(max(x, min_v), max_v)
return x
def convert(b1, b2):
"""
Converts SpaceMouse message to commands.
Args:
b1 (int): 8-bit byte
b2 (int): 8-bit byte
Returns:
float: Scaled value from Spacemouse message
"""
return scale_to_control(to_int16(b1, b2))
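# Putting the helpers together: a raw byte pair is decoded to a signed 16-bit
# integer, divided by axis_scale (350 by default) and clipped to [-1, 1]:
#
#     assert convert(0x00, 0xFE) == -1.0   # -512 / 350 = -1.46... clips to -1.0
#     assert scale_to_control(175) == 0.5  # 175 / 350 = 0.5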
class SpaceMouse(Device):
"""
A minimalistic driver class for SpaceMouse with HID library.
Note: Use hid.enumerate() to view all USB human interface devices (HID).
Make sure SpaceMouse is detected before running the script.
You can look up its vendor/product id from this method.
Args:
vendor_id (int): HID device vendor id
product_id (int): HID device product id
pos_sensitivity (float): Magnitude of input position command scaling
rot_sensitivity (float): Magnitude of input rotation command scaling
"""
def __init__(self, vendor_id=9583, product_id=50735, pos_sensitivity=1.0, rot_sensitivity=1.0):
print("Opening SpaceMouse device")
self.device = hid.device()
self.device.open(vendor_id, product_id) # SpaceMouse
self.pos_sensitivity = pos_sensitivity
self.rot_sensitivity = rot_sensitivity
print("Manufacturer: %s" % self.device.get_manufacturer_string())
print("Product: %s" % self.device.get_product_string())
# 6-DOF variables
self.x, self.y, self.z = 0, 0, 0
self.roll, self.pitch, self.yaw = 0, 0, 0
self._display_controls()
self.single_click_and_hold = False
self._control = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
self._reset_state = 0
self.rotation = np.array([[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]])
self._enabled = False
# launch a new listener thread to listen to SpaceMouse
self.thread = threading.Thread(target=self.run)
self.thread.daemon = True
self.thread.start()
@staticmethod
def _display_controls():
"""
Method to pretty print controls.
"""
def print_command(char, info):
char += " " * (30 - len(char))
print("{}\t{}".format(char, info))
print("")
print_command("Control", "Command")
print_command("Right button", "reset simulation")
print_command("Left button (hold)", "close gripper")
print_command("Move mouse laterally", "move arm horizontally in x-y plane")
print_command("Move mouse vertically", "move arm vertically")
print_command("Twist mouse about an axis", "rotate arm about a corresponding axis")
print_command("ESC", "quit")
print("")
def _reset_internal_state(self):
"""
Resets internal state of controller, except for the reset signal.
"""
self.rotation = np.array([[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]])
# Reset 6-DOF variables
self.x, self.y, self.z = 0, 0, 0
self.roll, self.pitch, self.yaw = 0, 0, 0
# Reset control
self._control = np.zeros(6)
# Reset grasp
self.single_click_and_hold = False
def start_control(self):
"""
Method that should be called externally before the controller can
start receiving commands.
"""
self._reset_internal_state()
self._reset_state = 0
self._enabled = True
def get_controller_state(self):
"""
Grabs the current state of the 3D mouse.
Returns:
dict: A dictionary containing dpos, orn, unmodified orn, grasp, and reset
"""
dpos = self.control[:3] * 0.005 * self.pos_sensitivity
roll, pitch, yaw = self.control[3:] * 0.005 * self.rot_sensitivity
# convert RPY to an absolute orientation
drot1 = rotation_matrix(angle=-pitch, direction=[1.0, 0, 0], point=None)[:3, :3]
drot2 = rotation_matrix(angle=roll, direction=[0, 1.0, 0], point=None)[:3, :3]
drot3 = rotation_matrix(angle=yaw, direction=[0, 0, 1.0], point=None)[:3, :3]
self.rotation = self.rotation.dot(drot1.dot(drot2.dot(drot3)))
return dict(
dpos=dpos,
rotation=self.rotation,
raw_drotation=np.array([roll, pitch, yaw]),
grasp=self.control_gripper,
reset=self._reset_state,
)
def run(self):
"""Listener method that keeps pulling new messages."""
t_last_click = -1
while True:
d = self.device.read(13)
if d is not None and self._enabled:
if d[0] == 1: ## readings from 6-DoF sensor
self.y = convert(d[1], d[2])
self.x = convert(d[3], d[4])
self.z = convert(d[5], d[6]) * -1.0
self.roll = convert(d[7], d[8])
self.pitch = convert(d[9], d[10])
self.yaw = convert(d[11], d[12])
self._control = [
self.x,
self.y,
self.z,
self.roll,
self.pitch,
self.yaw,
]
elif d[0] == 3: ## readings from the side buttons
# press left button
if d[1] == 1:
t_click = time.time()
elapsed_time = t_click - t_last_click
t_last_click = t_click
self.single_click_and_hold = True
# release left button
if d[1] == 0:
self.single_click_and_hold = False
# right button is for reset
if d[1] == 2:
self._reset_state = 1
self._enabled = False
self._reset_internal_state()
@property
def control(self):
"""
Grabs the current pose of the SpaceMouse.
Returns:
np.array: 6-DoF control value
"""
return np.array(self._control)
@property
def control_gripper(self):
"""
Maps internal states into gripper commands.
Returns:
float: Whether we're using single click and hold or not
"""
if self.single_click_and_hold:
return 1.0
return 0.0
if __name__ == "__main__":
space_mouse = SpaceMouse()
for i in range(100):
print(space_mouse.control, space_mouse.control_gripper)
time.sleep(0.02)
| 31.691228
| 99
| 0.580824
|
86c7e380cf0bfb30359cea5a8391517b944e4168
| 798
|
py
|
Python
|
source/FAST/Examples/Python/stream_from_clarius_ultrasound_scanner.py
|
andreped/FAST
|
361819190ea0ae5a2f068e7bd808a1c70af5a171
|
[
"BSD-2-Clause"
] | null | null | null |
source/FAST/Examples/Python/stream_from_clarius_ultrasound_scanner.py
|
andreped/FAST
|
361819190ea0ae5a2f068e7bd808a1c70af5a171
|
[
"BSD-2-Clause"
] | null | null | null |
source/FAST/Examples/Python/stream_from_clarius_ultrasound_scanner.py
|
andreped/FAST
|
361819190ea0ae5a2f068e7bd808a1c70af5a171
|
[
"BSD-2-Clause"
] | null | null | null |
## @example stream_from_clarius_ultrasound_scanner.py
# This example will stream images from a Clarius ultrasound scanner, apply a non-local-means filter
# and display both the raw and filtered images in real-time. Note: to run this example you need:
# * A Clarius ultrasound probe running with the research cast API enabled.
# * The ultrasound probe unfrozen.
# * Your machine connected to the Clarius probe's wifi.
# * On Windows, the firewall disabled or an exception added for it.
import fast
streamer = fast.ClariusStreamer.create()
filter = fast.NonLocalMeans.create().connect(streamer)
renderer = fast.ImageRenderer.create().connect(streamer)
renderer2 = fast.ImageRenderer.create().connect(filter)
fast.DualViewWindow2D.create()\
.connectLeft(renderer)\
.connectRight(renderer2)\
.run()
| 38
| 99
| 0.769424
|
a80383a78d9daf42b5f9c8b91a96a2b29f3c09a0
| 81
|
py
|
Python
|
0/3/3004/3004.py
|
chr0m3/boj-codes
|
d71d0a22d0a3ae62c225f382442461275f56fe8f
|
[
"MIT"
] | 3
|
2017-07-08T16:29:06.000Z
|
2020-07-20T00:17:45.000Z
|
0/3/3004/3004.py
|
chr0m3/boj-codes
|
d71d0a22d0a3ae62c225f382442461275f56fe8f
|
[
"MIT"
] | null | null | null |
0/3/3004/3004.py
|
chr0m3/boj-codes
|
d71d0a22d0a3ae62c225f382442461275f56fe8f
|
[
"MIT"
] | 2
|
2017-11-20T14:06:06.000Z
|
2020-07-20T00:17:47.000Z
|
import math
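# The formula below appears to compute the maximum number of pieces obtained
# from c straight cuts by splitting them as evenly as possible between the two
# directions: pieces = (floor(c/2) + 1) * (ceil(c/2) + 1). With n = c - 1 this
# is exactly (2 + n // 2) * (1 + ceil(n / 2)); e.g. c = 4 cuts (2 horizontal,
# 2 vertical) yield 3 * 3 = 9 pieces.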
n = int(input()) - 1
print((2 + (n // 2)) * (1 + math.ceil(n / 2)))
| 16.2
| 46
| 0.481481
|
3bd2e3468710e0566903e1522adcec563f2b7035
| 22,509
|
py
|
Python
|
spyne/server/twisted/http.py
|
florath/spyne-1
|
88d7b9b9020d5067b80927a30e229c3c5d64af85
|
[
"BSD-3-Clause"
] | null | null | null |
spyne/server/twisted/http.py
|
florath/spyne-1
|
88d7b9b9020d5067b80927a30e229c3c5d64af85
|
[
"BSD-3-Clause"
] | null | null | null |
spyne/server/twisted/http.py
|
florath/spyne-1
|
88d7b9b9020d5067b80927a30e229c3c5d64af85
|
[
"BSD-3-Clause"
] | null | null | null |
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
"""The ``spyne.server.twisted`` module contains a server transport compatible
with the Twisted event loop. It uses the TwistedWebResource object as transport.
Also see the twisted examples in the examples directory of the source
distribution.
If you want to have a hard-coded URL in the wsdl document, this is how to do
it: ::
resource = TwistedWebResource(...)
resource.http_transport.doc.wsdl11.build_interface_document("http://example.com")
This is not strictly necessary. If you don't do this, Spyne will get the URL
from the first request, build the wsdl on-the-fly and cache it as a string in
memory for later requests. However, if you want to make sure the WSDL only ever
advertises this URL, this is how to do it. Note that if your client takes the
information in the WSDL seriously, all requests will go to the designated URL
above, which can make testing a bit difficult. Use in moderation.
"""
from __future__ import absolute_import
import logging
logger = logging.getLogger(__name__)
import re
import cgi
import threading
from os import fstat
from mmap import mmap
from inspect import isclass
from collections import namedtuple
from twisted.web import static
from twisted.web.server import NOT_DONE_YET, Request
from twisted.web.resource import Resource, NoResource, ForbiddenResource
from twisted.web.static import getTypeAndEncoding
from twisted.python.log import err
from twisted.python.failure import Failure
from twisted.internet import reactor
from twisted.internet.task import deferLater
from twisted.internet.defer import Deferred
from twisted.internet.threads import deferToThread
from spyne import Redirect
from spyne.application import logger_server
from spyne.application import get_fault_string_from_exception
from spyne.error import InternalError
from spyne.auxproc import process_contexts
from spyne.const.ansi_color import LIGHT_GREEN
from spyne.const.ansi_color import END_COLOR
from spyne.const.http import HTTP_404, HTTP_200
from spyne.model import PushBase, File, ComplexModelBase
from spyne.model.fault import Fault
from spyne.protocol.http import HttpRpc
from spyne.server.http import HttpBase
from spyne.server.http import HttpMethodContext
from spyne.server.http import HttpTransportContext
from spyne.server.twisted._base import Producer
from spyne.util.six import text_type, string_types
from spyne.util.six.moves.urllib.parse import unquote
def _render_file(file, request):
"""
Begin sending the contents of this L{File} (or a subset of the
contents, based on the 'range' header) to the given request.
"""
file.restat(False)
if file.type is None:
file.type, file.encoding = getTypeAndEncoding(file.basename(),
file.contentTypes,
file.contentEncodings,
file.defaultType)
if not file.exists():
return file.childNotFound.render(request)
if file.isdir():
return file.redirect(request)
request.setHeader('accept-ranges', 'bytes')
try:
fileForReading = file.openForReading()
except IOError as e:
import errno
if e.errno == errno.EACCES:
return ForbiddenResource().render(request)
else:
raise
#if request.setLastModified(file.getmtime()) is CACHED:
# return ''
producer = file.makeProducer(request, fileForReading)
if request.method == 'HEAD':
return ''
producer.start()
# and make sure the connection doesn't get closed
return NOT_DONE_YET
def _set_response_headers(request, headers):
retval = []
for k, v in headers.items():
if isinstance(v, (list, tuple)):
request.responseHeaders.setRawHeaders(k, v)
else:
request.responseHeaders.setRawHeaders(k, [v])
return retval
def _reconstruct_url(request):
server_name = request.getRequestHostname()
server_port = request.getHost().port
if (bool(request.isSecure()), server_port) not in [(True, 443), (False, 80)]:
server_name = '%s:%d' % (server_name, server_port)
if request.isSecure():
url_scheme = 'https'
else:
url_scheme = 'http'
return ''.join([url_scheme, "://", server_name, request.uri])
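# Worked example of the reconstruction above: a secure request for
# ``/foo?a=1`` on host ``example.com`` and non-default port 8443 yields
# ``https://example.com:8443/foo?a=1``; on the default ports (80 for http,
# 443 for https) the port suffix is omitted.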
class TwistedHttpTransportContext(HttpTransportContext):
def set_mime_type(self, what):
if isinstance(what, text_type):
what = what.encode('ascii', errors='replace')
super(TwistedHttpTransportContext, self).set_mime_type(what)
self.req.setHeader('Content-Type', what)
def get_cookie(self, key):
return self.req.getCookie(key)
def get_path(self):
return self.req.URLPath().path
def get_path_and_qs(self):
return self.req.uri
def get_request_method(self):
return self.req.method
def get_request_content_type(self):
return self.req.getHeader("Content-Type")
class TwistedHttpMethodContext(HttpMethodContext):
default_transport_context = TwistedHttpTransportContext
class TwistedHttpTransport(HttpBase):
def __init__(self, app, chunked=False, max_content_length=2 * 1024 * 1024,
block_length=8 * 1024):
super(TwistedHttpTransport, self).__init__(app, chunked=chunked,
max_content_length=max_content_length, block_length=block_length)
self.reactor_thread = None
def _cb():
self.reactor_thread = threading.current_thread()
deferLater(reactor, 0, _cb)
def pusher_init(self, p_ctx, gen, _cb_push_finish, pusher, interim):
if pusher.orig_thread != self.reactor_thread:
return deferToThread(super(TwistedHttpTransport, self).pusher_init,
p_ctx, gen, _cb_push_finish, pusher, interim)
return super(TwistedHttpTransport, self).pusher_init(
p_ctx, gen, _cb_push_finish, pusher, interim)
@staticmethod
def set_out_document_push(ctx):
class _ISwearImAGenerator(object):
def send(self, data):
if not data: return
ctx.out_stream.write(data)
ctx.out_document = _ISwearImAGenerator()
def pusher_try_close(self, ctx, pusher, retval):
# the whole point of this function is to call ctx.out_stream.finish()
# when a *root* pusher has no more data to send. interim pushers don't
# have to close anything.
if isinstance(retval, Deferred):
def _eb_push_close(f):
assert isinstance(f, Failure)
logger.error(f.getTraceback())
subretval = super(TwistedHttpTransport, self) \
.pusher_try_close(ctx, pusher, retval)
if not pusher.interim:
ctx.out_stream.finish()
return subretval
def _cb_push_close(r):
def _eb_inner(f):
if not pusher.interim:
ctx.out_stream.finish()
return f
if not isinstance(r, Deferred):
retval = super(TwistedHttpTransport, self) \
.pusher_try_close(ctx, pusher, r)
if not pusher.interim:
ctx.out_stream.finish()
return retval
return r \
.addCallback(_cb_push_close) \
.addErrback(_eb_inner)
return retval \
.addCallback(_cb_push_close) \
.addErrback(_eb_push_close) \
.addErrback(err)
if not pusher.interim:
retval = ctx.out_stream.finish()
super(TwistedHttpTransport, self).pusher_try_close(ctx, pusher, retval)
return retval
def decompose_incoming_envelope(self, prot, ctx, message):
"""This function is only called by the HttpRpc protocol to have the
twisted web's Request object is parsed into ``ctx.in_body_doc`` and
``ctx.in_header_doc``.
"""
request = ctx.in_document
assert isinstance(request, Request)
ctx.in_header_doc = dict(request.requestHeaders.getAllRawHeaders())
ctx.in_body_doc = request.args
for fi in ctx.transport.file_info:
assert isinstance(fi, _FileInfo)
data = request.args.get(fi.field_name, None)
if data is not None and fi.file_name is not None:
ctx.in_body_doc[fi.field_name] = \
[File.Value(
name=fi.file_name,
type=fi.file_type,
data=fi.data)]
# this is a huge hack because twisted seems to take the slashes in urls
# too seriously.
postpath = getattr(request, 'realpostpath', None)
if postpath is None:
postpath = request.path
params = self.match_pattern(ctx, request.method, postpath,
request.getHeader('Host'))
if ctx.method_request_string is None: # no pattern match
ctx.method_request_string = '{%s}%s' % (self.app.interface.get_tns(),
request.path.rsplit('/', 1)[-1])
logger.debug("%sMethod name: %r%s" % (LIGHT_GREEN,
ctx.method_request_string, END_COLOR))
for k, v in params.items():
val = ctx.in_body_doc.get(k, [])
val.extend(v)
ctx.in_body_doc[k] = val
r = {}
for k,v in ctx.in_body_doc.items():
l = []
for v2 in v:
if isinstance(v2, string_types):
l.append(unquote(v2))
else:
l.append(v2)
r[k] = l
ctx.in_body_doc = r
# This is consistent with what server.wsgi does.
if request.method in ('POST', 'PUT', 'PATCH'):
for k, v in ctx.in_body_doc.items():
if v == ['']:
ctx.in_body_doc[k] = [None]
FIELD_NAME_RE = re.compile(r'name="([^"]+)"')
FILE_NAME_RE = re.compile(r'filename="([^"]+)"')
_FileInfo = namedtuple("_FileInfo", "field_name file_name file_type data")
def _get_file_info(ctx):
"""We need this hack because twisted doesn't offer a way to get file name
from Content-Disposition header.
"""
retval = []
request = ctx.transport.req
headers = request.getAllHeaders()
content_type = headers.get('content-type', None)
if content_type is None:
return retval
img = cgi.FieldStorage(
fp=request.content,
headers=ctx.in_header_doc,
environ={
'REQUEST_METHOD': request.method,
'CONTENT_TYPE': content_type,
}
)
try:
keys = img.keys()
except TypeError:
return retval
for k in keys:
field = img[k]
file_type = field.type
file_name = field.disposition_options.get('filename', None)
if file_name is not None:
retval.append(_FileInfo(k, file_name, file_type,
[mmap(field.file.fileno(), 0)]))
return retval
def _has_fd(istr):
if not hasattr(istr, 'fileno'):
return False
try:
istr.fileno()
except IOError:
return False
else:
return True
class TwistedWebResource(Resource):
"""A server transport that exposes the application as a twisted web
Resource.
"""
def __init__(self, app, chunked=False, max_content_length=2 * 1024 * 1024,
block_length=8 * 1024, prepath=None):
Resource.__init__(self)
self.app = app
self.http_transport = TwistedHttpTransport(app, chunked,
max_content_length, block_length)
self._wsdl = None
self.prepath = prepath
def getChildWithDefault(self, path, request):
# this hack is necessary because twisted takes the slash character in
# http requests too seriously. i.e. it insists that a leaf node can only
# handle the last path fragment.
if self.prepath is None:
request.realprepath = '/' + '/'.join(request.prepath)
else:
if not self.prepath.startswith('/'):
request.realprepath = '/' + self.prepath
else:
request.realprepath = self.prepath
if path in self.children:
retval = self.children[path]
else:
retval = self.getChild(path, request)
if isinstance(retval, NoResource):
retval = self
else:
request.realpostpath = request.path[len(request.realprepath):]
return retval
def render(self, request):
if request.method == 'GET' and (
request.uri.endswith('.wsdl') or request.uri.endswith('?wsdl')):
return self.__handle_wsdl_request(request)
return self.handle_rpc(request)
def handle_rpc_error(self, p_ctx, others, error, request):
logger.error(error)
resp_code = p_ctx.transport.resp_code
# If user code set its own response code, don't touch it.
if resp_code is None:
resp_code = p_ctx.out_protocol.fault_to_http_response_code(error)
request.setResponseCode(int(resp_code[:3]))
_set_response_headers(request, p_ctx.transport.resp_headers)
# In case user code set its own out_* attributes before failing.
p_ctx.out_document = None
p_ctx.out_string = None
p_ctx.out_object = error
self.http_transport.get_out_string(p_ctx)
retval = ''.join(p_ctx.out_string)
p_ctx.close()
process_contexts(self.http_transport, others, p_ctx, error=error)
return retval
def handle_rpc(self, request):
initial_ctx = TwistedHttpMethodContext(self.http_transport, request,
self.http_transport.app.out_protocol.mime_type)
if _has_fd(request.content):
f = request.content
# it's best to avoid empty mappings.
if fstat(f.fileno()).st_size == 0:
initial_ctx.in_string = ['']
else:
initial_ctx.in_string = [mmap(f.fileno(), 0)]
else:
request.content.seek(0)
initial_ctx.in_string = [request.content.read()]
initial_ctx.transport.file_info = _get_file_info(initial_ctx)
contexts = self.http_transport.generate_contexts(initial_ctx)
p_ctx, others = contexts[0], contexts[1:]
p_ctx.active = True
p_ctx.out_stream = request
# TODO: Rate limiting
if p_ctx.in_error:
return self.handle_rpc_error(p_ctx, others, p_ctx.in_error, request)
else:
self.http_transport.get_in_object(p_ctx)
if p_ctx.in_error:
return self.handle_rpc_error(p_ctx, others, p_ctx.in_error,
request)
self.http_transport.get_out_object(p_ctx)
if p_ctx.out_error:
return self.handle_rpc_error(p_ctx, others, p_ctx.out_error,
request)
ret = p_ctx.out_object[0]
retval = NOT_DONE_YET
if isinstance(ret, Deferred):
ret.addCallback(_cb_deferred, request, p_ctx, others, resource=self)
ret.addErrback(_eb_deferred, request, p_ctx, others, resource=self)
elif isinstance(ret, PushBase):
self.http_transport.init_root_push(ret, p_ctx, others)
else:
try:
retval = _cb_deferred(p_ctx.out_object, request, p_ctx, others,
self, cb=False)
except Exception as e:
logger_server.exception(e)
_eb_deferred(Failure(), request, p_ctx, others, resource=self)
return retval
def __handle_wsdl_request(self, request):
ctx = TwistedHttpMethodContext(self.http_transport, request,
"text/xml; charset=utf-8")
url = _reconstruct_url(request)
if self.http_transport.doc.wsdl11 is None:
return HTTP_404
if self._wsdl is None:
self._wsdl = self.http_transport.doc.wsdl11.get_interface_document()
ctx.transport.wsdl = self._wsdl
_set_response_headers(request, ctx.transport.resp_headers)
try:
if self._wsdl is None:
self.http_transport.doc.wsdl11.build_interface_document(url)
ctx.transport.wsdl = self._wsdl = \
self.http_transport.doc.wsdl11.get_interface_document()
assert ctx.transport.wsdl is not None
self.http_transport.event_manager.fire_event('wsdl', ctx)
return ctx.transport.wsdl
except Exception as e:
ctx.transport.wsdl_error = e
self.http_transport.event_manager.fire_event('wsdl_exception', ctx)
raise
finally:
ctx.close()
def _cb_request_finished(retval, request, p_ctx):
request.finish()
p_ctx.close()
def _eb_request_finished(retval, request, p_ctx):
err(request)
p_ctx.close()
request.finish()
def _cb_deferred(ret, request, p_ctx, others, resource, cb=True):
### set response headers
resp_code = p_ctx.transport.resp_code
# If user code set its own response code, don't touch it.
if resp_code is None:
resp_code = HTTP_200
request.setResponseCode(int(resp_code[:3]))
_set_response_headers(request, p_ctx.transport.resp_headers)
### normalize response data
om = p_ctx.descriptor.out_message
single_class = None
if cb:
if p_ctx.descriptor.is_out_bare():
p_ctx.out_object = [ret]
elif (not issubclass(om, ComplexModelBase)) or len(om._type_info) <= 1:
p_ctx.out_object = [ret]
if len(om._type_info) == 1:
single_class, = om._type_info.values()
else:
p_ctx.out_object = ret
else:
p_ctx.out_object = ret
### start response
retval = NOT_DONE_YET
if isinstance(ret, PushBase):
pass
elif ((isclass(om) and issubclass(om, File)) or
(isclass(single_class) and issubclass(single_class, File))) and \
isinstance(p_ctx.out_protocol, HttpRpc) and \
getattr(ret, 'abspath', None) is not None:
file = static.File(ret.abspath,
defaultType=str(ret.type) or 'application/octet-stream')
retval = _render_file(file, request)
if retval != NOT_DONE_YET and cb:
request.write(retval)
request.finish()
p_ctx.close()
else:
def _close_only_context(ret):
p_ctx.close()
request.notifyFinish().addCallback(_close_only_context)
request.notifyFinish().addErrback(_eb_request_finished, request, p_ctx)
else:
ret = resource.http_transport.get_out_string(p_ctx)
if not isinstance(ret, Deferred):
producer = Producer(p_ctx.out_string, request)
producer.deferred.addCallback(_cb_request_finished, request, p_ctx)
producer.deferred.addErrback(_eb_request_finished, request, p_ctx)
try:
request.registerProducer(producer, False)
except Exception as e:
logger_server.exception(e)
_eb_deferred(Failure(), request, p_ctx, others, resource)
else:
def _cb(ret):
if isinstance(ret, Deferred):
return ret \
.addCallback(_cb) \
.addErrback(_eb_request_finished, request, p_ctx)
else:
return _cb_request_finished(ret, request, p_ctx)
ret \
.addCallback(_cb) \
.addErrback(_eb_request_finished, request, p_ctx)
process_contexts(resource.http_transport, others, p_ctx)
return retval
def _eb_deferred(ret, request, p_ctx, others, resource):
app = p_ctx.app
# DRY this with what's in Application.process_request
if issubclass(ret.type, Redirect):
try:
ret.value.do_redirect()
# Now that the processing is switched to the outgoing message,
# point ctx.protocol to ctx.out_protocol
p_ctx.protocol = p_ctx.outprot_ctx
_cb_deferred(None, request, p_ctx, others, resource, cb=False)
p_ctx.fire_event('method_redirect')
except Exception as e:
logger_server.exception(e)
p_ctx.out_error = Fault('Server', get_fault_string_from_exception(e))
p_ctx.fire_event('method_redirect_exception')
elif issubclass(ret.type, Fault):
p_ctx.out_error = ret.value
ret = resource.handle_rpc_error(p_ctx, others, p_ctx.out_error, request)
p_ctx.fire_event('method_exception_object')
request.write(ret)
else:
p_ctx.out_error = ret.value
ret.printTraceback()
p_ctx.out_error = InternalError(ret.value)
p_ctx.fire_event('method_exception_object')
request.finish()
| 33.101471
| 85
| 0.613444
|
616dd0b2d21a5988d748bfc9eb80b60f013b6394
| 1,369
|
py
|
Python
|
src/drivetools.py
|
jhanc97/backend
|
e747aa2d779b8856f37d0ca400164791ee45b61a
|
[
"MIT"
] | null | null | null |
src/drivetools.py
|
jhanc97/backend
|
e747aa2d779b8856f37d0ca400164791ee45b61a
|
[
"MIT"
] | null | null | null |
src/drivetools.py
|
jhanc97/backend
|
e747aa2d779b8856f37d0ca400164791ee45b61a
|
[
"MIT"
] | 1
|
2021-06-02T07:23:51.000Z
|
2021-06-02T07:23:51.000Z
|
def driveIter(root, drive):
params = {
"pageToken": None,
"supportsAllDrives": True,
"includeItemsFromAllDrives": True,
"fields": "files(id,name,mimeType), incompleteSearch, nextPageToken",
"q": "'%s' in parents and trashed = false and (mimeType = 'application/vnd.google-apps.folder' or mimeType contains 'video')"
% (root["id"]),
"orderBy": "name",
}
while True:
response = drive.files().list(**params).execute()
for file in response["files"]:
yield file
try:
params["pageToken"] = response["nextPageToken"]
except KeyError:
return
def driveWalk(root, drive, walk):
if root["mimeType"] == "application/vnd.google-apps.folder":
for item in driveIter(root, drive):
driveWalk(item, drive, walk)
elif "video" in root["mimeType"]:
root["type"] = "file"
walk["children"].append(root)
else:
return
return walk
def driveTree(root, drive):
if root["mimeType"] == "application/vnd.google-apps.folder":
tree = root
tree["type"] = "directory"
tree["children"] = [driveTree(item, drive) for item in driveIter(root, drive)]
elif "video" in root["mimeType"]:
tree = root
tree["type"] = "file"
else:
return
return tree
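# Minimal usage sketch (assumes an authenticated googleapiclient Drive v3
# service object named ``drive`` and a folder id; both names are illustrative):
#
#     root = drive.files().get(
#         fileId="<folder-id>", fields="id,name,mimeType"
#     ).execute()
#     tree = driveTree(root, drive)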
| 31.113636
| 133
| 0.577064
|
76ea90726c47ae425d358ce1a5376063f23c5cc3
| 6,886
|
py
|
Python
|
tests/test_searchpath.py
|
nfwprod/diffjson
|
ad7ea15c42e25f16f610491e95da3d0b2e35d654
|
[
"MIT"
] | 1
|
2021-02-13T08:24:52.000Z
|
2021-02-13T08:24:52.000Z
|
tests/test_searchpath.py
|
nfwstg/diffjson
|
52cb24e5629797b32c23e7971d36d111a6817121
|
[
"MIT"
] | 3
|
2021-03-13T06:44:27.000Z
|
2021-04-21T16:45:43.000Z
|
tests/test_searchpath.py
|
nfwstg/diffjson
|
52cb24e5629797b32c23e7971d36d111a6817121
|
[
"MIT"
] | 4
|
2021-03-13T06:39:30.000Z
|
2021-04-24T04:52:36.000Z
|
import pytest
import yaml
import diffjson
class TestSearchpathClasses(object):
def test_init_NodenameRoot(self):
n = diffjson.NodenameRoot()
expected = '/'
assert str(n) == expected
def test_init_NodenameAsterisk(self):
n = diffjson.NodenameAsterisk()
expected = '*'
assert str(n) == expected
def test_init_NodenameDescendant(self):
n = diffjson.NodenameDescendant()
expected = ''
assert str(n) == expected
def test_init_NodenameParent(self):
n = diffjson.NodenameParent()
expected = '..'
assert str(n) == expected
def test_init_NodenameSelf(self):
n = diffjson.NodenameSelf()
expected = '.'
assert str(n) == expected
def test_init_NodenameKey(self):
n = diffjson.NodenameKey('key')
expected = 'key'
assert str(n) == expected
def test_init_NodenameIndex(self):
n = diffjson.NodenameIndex(1)
expected = '[1]'
assert str(n) == expected
def test_init_LocationStep(self):
l = diffjson.LocationStep(diffjson.NodenameRoot())
expected = '/'
assert str(l) == expected
def test_init_LocationPath(self):
lp = diffjson.LocationPath(
[diffjson.LocationStep(diffjson.NodenameRoot())])
expected = '/'
assert str(lp) == expected
def test_init_Predicate(self):
plp = diffjson.LocationPath(
[diffjson.LocationStep(diffjson.NodenameKey('key'))])
p = diffjson.Predicate(plp, 'predicate')
expected = 'key=predicate'
assert str(p) == expected
def test_init_LocationStepWithPredicates(self):
plp = diffjson.LocationPath(
[diffjson.LocationStep(diffjson.NodenameKey('key'))])
p = diffjson.Predicate(plp, 'predicate')
ps = diffjson.Predicates([p])
l = diffjson.LocationStep(diffjson.NodenameRoot(), ps)
expected = '/[key=predicate]'
assert str(l) == expected
# Functions
def test_init_LocationPath_current(self):
lp = diffjson.LocationPath([
diffjson.LocationStep(diffjson.NodenameRoot()),
diffjson.LocationStep(diffjson.NodenameKey('key01')),
diffjson.LocationStep(diffjson.NodenameKey('key02')),
])
expected = '/'
assert str(lp.current()) == expected
def test_init_LocationPath_branch(self):
lp = diffjson.LocationPath([
diffjson.LocationStep(diffjson.NodenameRoot()),
diffjson.LocationStep(diffjson.NodenameKey('key01')),
diffjson.LocationStep(diffjson.NodenameKey('key02')),
])
expected = 'key01/key02'
assert str(lp.branch()) == expected
class TestSearchpathParses(object):
def test_parse_simple01(self):
"""
The parser must parse the input string and output the appropriate LocationPath instance.
"""
expected = diffjson.LocationPath([
diffjson.LocationStep(diffjson.NodenameRoot()),
diffjson.LocationStep(diffjson.NodenameKey('branch01')),
diffjson.LocationStep(diffjson.NodenameKey('b01-01')),
])
pathstring = '/branch01/b01-01'
p = diffjson.parse(pathstring)
assert p == expected
assert str(p) == pathstring
def test_parse_complex01(self):
"""
The parser must parse the input string and output the appropriate LocationPath instance.
"""
expected = diffjson.LocationPath([
diffjson.LocationStep(diffjson.NodenameRoot()),
diffjson.LocationStep(diffjson.NodenameKey('branch.01-02_03@example.net')),
diffjson.LocationStep(diffjson.NodenameKey('b01-01')),
])
pathstring = '/branch.01-02_03@example.net/b01-01'
p = diffjson.parse(pathstring)
assert p == expected
assert str(p) == pathstring
def test_parse_with_index(self):
"""
Parser with NodeIndex.
"""
expected = diffjson.LocationPath([
diffjson.LocationStep(diffjson.NodenameRoot()),
diffjson.LocationStep(diffjson.NodenameKey('branch01')),
diffjson.LocationStep(diffjson.NodenameKey('b01-01')),
diffjson.LocationStep(diffjson.NodenameIndex(1)),
diffjson.LocationStep(diffjson.NodenameKey('b01-01-01')),
])
pathstring = '/branch01/b01-01/[1]/b01-01-01'
p = diffjson.parse(pathstring)
assert p == expected
assert str(p) == pathstring
def test_parse_with_asterisk(self):
"""
Parser with Asterisk.
"""
expected = diffjson.LocationPath([
diffjson.LocationStep(diffjson.NodenameRoot()),
diffjson.LocationStep(diffjson.NodenameKey('branch01')),
diffjson.LocationStep(diffjson.NodenameAsterisk()),
diffjson.LocationStep(diffjson.NodenameKey('b01-01-01')),
])
pathstring = '/branch01/*/b01-01-01'
p = diffjson.parse(pathstring)
assert p == expected
assert str(p) == pathstring
def test_parse_with_descendant(self):
"""
Parser with Descendant.
"""
expected = diffjson.LocationPath([
diffjson.LocationStep(diffjson.NodenameRoot()),
diffjson.LocationStep(diffjson.NodenameKey('branch01')),
diffjson.LocationStep(diffjson.NodenameDescendant()),
diffjson.LocationStep(diffjson.NodenameKey('branch01-01-01')),
])
pathstring = '/branch01//branch01-01-01'
p = diffjson.parse(pathstring)
assert p == expected
assert str(p) == pathstring
def test_parse_with_parent(self):
"""
Parser with Parent.
"""
expected = diffjson.LocationPath([
diffjson.LocationStep(diffjson.NodenameRoot()),
diffjson.LocationStep(diffjson.NodenameKey('branch01')),
diffjson.LocationStep(diffjson.NodenameKey('b01-01')),
diffjson.LocationStep(diffjson.NodenameParent()),
diffjson.LocationStep(diffjson.NodenameKey('branch01-02')),
])
pathstring = '/branch01/b01-01/../branch01-02'
p = diffjson.parse(pathstring)
assert p == expected
assert str(p) == pathstring
def test_parse_with_self(self):
"""
Parser with Self.
"""
expected = diffjson.LocationPath([
diffjson.LocationStep(diffjson.NodenameRoot()),
diffjson.LocationStep(diffjson.NodenameKey('branch01')),
diffjson.LocationStep(diffjson.NodenameSelf()),
diffjson.LocationStep(diffjson.NodenameKey('b01-01')),
])
pathstring = '/branch01/./b01-01'
p = diffjson.parse(pathstring)
assert p == expected
assert str(p) == pathstring
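    # Illustrative sketch (not part of the original suite): the syntax
    # elements exercised above are assumed to compose under the same
    # parse/str round-trip contract demonstrated by the tests above.
    def test_parse_combined_sketch(self):
        pathstring = '/branch01/*/../b01-01'
        p = diffjson.parse(pathstring)
        assert str(p) == pathstring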
| 32.027907
| 87
| 0.612257
|
45e18cfce83011f2251c5fb2bcd1403b35c6595b
| 16,500
|
py
|
Python
|
lightning_base.py
|
ksboy/ccks3
|
c500af33b6b879751ea04ce5fab456b01db9868c
|
[
"Apache-2.0"
] | 1
|
2021-07-14T06:30:20.000Z
|
2021-07-14T06:30:20.000Z
|
lightning_base.py
|
ksboy/ccks3
|
c500af33b6b879751ea04ce5fab456b01db9868c
|
[
"Apache-2.0"
] | null | null | null |
lightning_base.py
|
ksboy/ccks3
|
c500af33b6b879751ea04ce5fab456b01db9868c
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
logger = logging.getLogger(__name__)
MODEL_MODES = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeq2SeqLM,
"translation": AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
def __init__(
self,
hparams: argparse.Namespace,
num_labels=None,
mode="base",
config=None,
tokenizer=None,
model=None,
**config_kwargs
):
"""Initialize a model, tokenizer and config."""
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(hparams)
self.step_count = 0
self.output_dir = Path(self.hparams.output_dir)
cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
self.config = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
**({"num_labels": num_labels} if num_labels is not None else {}),
cache_dir=cache_dir,
**config_kwargs,
)
else:
self.config: PretrainedConfig = config
extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(self.hparams, p, None):
assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
setattr(self.config, p, getattr(self.hparams, p))
if tokenizer is None:
self.tokenizer = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
cache_dir=cache_dir,
)
else:
self.tokenizer: PreTrainedTokenizer = tokenizer
self.model_type = MODEL_MODES[mode]
if model is None:
self.model = self.model_type.from_pretrained(
self.hparams.model_name_or_path,
from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
config=self.config,
cache_dir=cache_dir,
)
else:
self.model = model
def load_hf_checkpoint(self, *args, **kwargs):
self.model = self.model_type.from_pretrained(*args, **kwargs)
def get_lr_scheduler(self):
get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
scheduler = get_schedule_func(
self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps
)
scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return scheduler
def configure_optimizers(self):
"""Prepare optimizer and schedule (linear warmup and decay)"""
model = self.model
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.hparams.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
if self.hparams.adafactor:
optimizer = Adafactor(
optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
)
else:
optimizer = AdamW(
optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
)
self.opt = optimizer
scheduler = self.get_lr_scheduler()
return [optimizer], [scheduler]
def test_step(self, batch, batch_nb):
return self.validation_step(batch, batch_nb)
def test_epoch_end(self, outputs):
return self.validation_end(outputs)
@property
def total_steps(self) -> int:
"""The number of total training steps that will be run. Used for lr scheduler purposes."""
num_devices = max(1, self.hparams.gpus) # TODO: consider num_tpu_cores
effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
dataset_size = len(self.train_loader.dataset)
        return int((dataset_size / effective_batch_size) * self.hparams.max_epochs)
def setup(self, stage):
if stage == "fit":
self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
def get_dataloader(self, type_path, batch_size, shuffle=False):
raise NotImplementedError("You must implement this for your task")
def train_dataloader(self):
return self.train_loader
def val_dataloader(self):
return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)
def test_dataloader(self):
return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)
def _feature_file(self, mode):
return os.path.join(
self.hparams.data_dir,
"cached_{}_{}_{}".format(
mode,
list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
str(self.hparams.max_seq_length),
),
)
@pl.utilities.rank_zero_only
def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
save_path = self.output_dir.joinpath("checkpoint-best")
self.model.config.save_step = self.step_count
self.model.save_pretrained(save_path)
self.tokenizer.save_pretrained(save_path)
@staticmethod
def add_model_specific_args(parser, root_dir):
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models",
)
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--encoder_layerdrop",
type=float,
help="Encoder layer dropout probability (Optional). Goes into model.config",
)
parser.add_argument(
"--decoder_layerdrop",
type=float,
help="Decoder layer dropout probability (Optional). Goes into model.config",
)
parser.add_argument(
"--dropout",
type=float,
help="Dropout probability (Optional). Goes into model.config",
)
parser.add_argument(
"--attention_dropout",
type=float,
help="Attention dropout probability (Optional). Goes into model.config",
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument(
"--lr_scheduler",
default="linear",
choices=arg_to_scheduler_choices,
metavar=arg_to_scheduler_metavar,
type=str,
help="Learning rate scheduler",
)
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
parser.add_argument("--train_batch_size", "--per_gpu_train_batch_size", default=32, type=int)
parser.add_argument("--eval_batch_size", "--per_gpu_eval_batch_size", default=32, type=int)
parser.add_argument("--adafactor", action="store_true")
class LoggingCallback(pl.Callback):
def on_batch_end(self, trainer, pl_module):
lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
pl_module.logger.log_metrics(lrs)
def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
rank_zero_info("***** Validation results *****")
metrics = trainer.callback_metrics
# Log results
# checkpoint_best_path = os.path.join(pl_module.hparams.output_dir, "checkpoint-best")
# if not os.path.exists(checkpoint_best_path): os.makedirs(checkpoint_best_path)
# output_eval_results_file = os.path.join(pl_module.hparams.output_dir, "eval_results.txt")
# with open(output_eval_results_file, "w") as writer:
for key in sorted(metrics):
if key not in ["log", "progress_bar"]:
rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
# writer.write("{} = {}\n".format(key, str(metrics[key])))
def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
rank_zero_info("***** Test results *****")
metrics = trainer.callback_metrics
# Log and save results to file
# checkpoint_best_path = os.path.join(pl_module.hparams.output_dir, "checkpoint-best")
# if not os.path.exists(checkpoint_best_path): os.makedirs(checkpoint_best_path)
# output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
# with open(output_test_results_file, "w") as writer:
for key in sorted(metrics):
if key not in ["log", "progress_bar"]:
rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
# writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
# TODO(SS): allow all pl args? parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O2",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
parser.add_argument(
"--gradient_accumulation_steps",
dest="accumulate_grad_batches",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
)
    def IntorFloat(string):
        if '.' in string:
            return float(string)
        return int(string)
parser.add_argument(
# "--every_n_train_steps",
"--val_check_interval",
"--logging_steps",
"--save_steps",
default=1.0,
        type=IntorFloat,
        help="How often to check the validation set. Use a float to check within a "
             "training epoch, an int to check every n steps (batches)."
)
parser.add_argument(
"--gpus",
default=1,
type=int,
help="The number of GPUs allocated for this, it is by default 0 meaning none",
)
parser.add_argument(
"--monitor",
default='f1',
type=str,
help="CallBacks monitor Metric.",
)
parser.add_argument("--early_stop", default=4, type=int,
help="early stop when metric does not increases any more")
def generic_train(
model: BaseTransformer,
args: argparse.Namespace,
logger=True, # can pass WandbLogger() here
extra_callbacks=[],
checkpoint_callback=None,
early_stopping_callback=None,
logging_callback=None,
**extra_train_kwargs
):
pl.seed_everything(args.seed)
# init model
odir = Path(model.hparams.output_dir)
odir.mkdir(exist_ok=True)
# add custom checkpoints
if checkpoint_callback is None:
checkpoint_callback = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir, prefix="checkpoint", monitor=args.monitor, mode="max", save_top_k=1
)
if logging_callback is None:
logging_callback = LoggingCallback()
if early_stopping_callback is None:
early_stopping_callback = pl.callbacks.EarlyStopping(patience=args.early_stop, monitor=args.monitor, mode="max")
train_params = {}
# TODO: remove with PyTorch 1.6 since pl uses native amp
if args.fp16:
train_params["precision"] = 16
train_params["amp_level"] = args.fp16_opt_level
if args.gpus > 1:
train_params["distributed_backend"] = "ddp"
train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
train_params["val_check_interval"] = args.val_check_interval
trainer = pl.Trainer.from_argparse_args(
args,
default_root_dir=odir,
weights_summary=None,
callbacks=[logging_callback, early_stopping_callback] + extra_callbacks,
logger=logger,
checkpoint_callback=checkpoint_callback,
**train_params,
)
if args.do_train:
trainer.fit(model)
return trainer
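# Illustrative usage sketch (not part of the upstream file). A real task
# subclasses BaseTransformer and implements get_dataloader(); instantiating
# BaseTransformer directly will raise NotImplementedError once training asks
# for a dataloader, so treat this only as a wiring example.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    BaseTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = BaseTransformer(args, num_labels=2, mode="sequence-classification")
    generic_train(model, args)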
| 38.732394
| 120
| 0.647697
|
4c8514cd2a30866d29e83ae612c6a0cf14d32abd
| 5,759
|
py
|
Python
|
config_dialog.py
|
shuming2/ONU_POWER_ANALYSIS
|
faa6603d3f5b7cd6e6d3dd9851e4d1b61bd7e8fd
|
[
"Apache-2.0"
] | null | null | null |
config_dialog.py
|
shuming2/ONU_POWER_ANALYSIS
|
faa6603d3f5b7cd6e6d3dd9851e4d1b61bd7e8fd
|
[
"Apache-2.0"
] | 2
|
2018-08-16T04:26:39.000Z
|
2018-08-18T12:33:37.000Z
|
config_dialog.py
|
shuming2/ONU_POWER_ANALYSIS
|
faa6603d3f5b7cd6e6d3dd9851e4d1b61bd7e8fd
|
[
"Apache-2.0"
] | null | null | null |
import os
import re
import sys
import tkinter
from tkinter import ttk, Spinbox
class ConfigDialog(tkinter.Toplevel):
def __init__(self, parent):
super().__init__()
self.transient(parent)
self.grab_set()
self.geometry("+%d+%d" % (parent.winfo_rootx() + 50, parent.winfo_rooty() + 50))
self.resizable(0, 0)
icopath = self._resource_path(r'pic/panda.ico')
if os.path.exists(icopath):
self.iconbitmap(icopath)
self.frame = ttk.Frame(self)
        self.ok_button = tkinter.Button(self.frame, text="确定")  # "OK"
        self.cancel_button = tkinter.Button(self.frame, text="取消", command=self.destroy)  # "Cancel"
def _set_config(self, config_dict=None):
if config_dict:
config_path = self._resource_path(r'config.py')
with open(config_path, 'r') as config_file:
lines = config_file.readlines()
with open(config_path, 'w') as new_config_file:
for line in lines:
config_name = re.sub(r' = .*', '', line.strip())
lower_config_name = str.lower(config_name)
if lower_config_name in config_dict:
new_config_file.write('{} = {}\n'.format(config_name, config_dict[lower_config_name]))
else:
new_config_file.write(line)
self.destroy()
def _get_config(self, config_name_lst=None):
config_value_lst = []
config_path = self._resource_path(r'config.py')
with open(config_path, 'r') as config_file:
for line in config_file:
line = line.strip()
config_name = re.sub(r' = .*', '', line)
if config_name_lst:
lower_config_name = str.lower(config_name)
if lower_config_name in config_name_lst:
config_value = line.split('=')[1].strip().strip("'")
config_value_lst.append(config_value)
return config_value_lst
@staticmethod
def _resource_path(relative):
if hasattr(sys, "_MEIPASS"):
return os.path.join(sys._MEIPASS, relative)
return os.path.join(relative)
class AlertConfigDialog(ConfigDialog):
def __init__(self, parent):
super().__init__(parent)
        self.title('告警设置')  # "Alert settings"
        self.alert_threshold_label = ttk.Label(self.frame, text='告警阈值:')  # "Alert threshold:"
self.alert_threshold_value = tkinter.StringVar()
self.alert_threshold_spinbox = Spinbox(self.frame, from_=0, to=10, width=5, bd=1,
textvariable=self.alert_threshold_value)
self.alert_threshold_value.set(self._get_config(['alert_threshold'])[0])
self.ok_button.configure(command=self._update_alert_threshold)
def gui_arrang(self):
self.frame.grid(row=0, column=0, padx=10, pady=5)
self.alert_threshold_label.grid(row=0, column=0, sticky='W')
self.alert_threshold_spinbox.grid(row=0, column=1)
self.ok_button.grid(row=2, column=3, pady=5)
self.cancel_button.grid(row=2, column=4, pady=5)
self.focus()
def _update_alert_threshold(self):
self._set_config({'alert_threshold': self.alert_threshold_value.get()})
class DBConfigDialog(ConfigDialog):
def __init__(self, parent):
super().__init__(parent)
        self.title('数据库设置')  # "Database settings"
self.config_names = ['ip', 'username', 'pwd', 'db_name']
        self.ip_label = ttk.Label(self.frame, text='IP地址:')  # "IP address:"
        self.username_label = ttk.Label(self.frame, text='用户名:')  # "Username:"
        self.pwd_label = ttk.Label(self.frame, text='密码:')  # "Password:"
        self.db_name_label = ttk.Label(self.frame, text='数据库名:')  # "Database name:"
        # self.table_name_label = ttk.Label(self.frame, text='表名:')  # "Table name:"
self.ui_labels = [self.ip_label, self.username_label, self.pwd_label, self.db_name_label]
self.ip_value = tkinter.StringVar()
self.username_value = tkinter.StringVar()
self.pwd_value = tkinter.StringVar()
self.db_name_value = tkinter.StringVar()
# self.table_name_value = tkinter.StringVar()
self.ui_values = [self.ip_value, self.username_value, self.pwd_value, self.db_name_value]
self.ip_entry = tkinter.Entry(self.frame, width=20, textvariable=self.ip_value)
self.username_entry = tkinter.Entry(self.frame, width=20, textvariable=self.username_value)
self.pwd_entry = tkinter.Entry(self.frame, width=20, textvariable=self.pwd_value)
self.db_name_entry = tkinter.Entry(self.frame, width=20, textvariable=self.db_name_value)
# self.table_name_entry = tkinter.Entry(self.frame, width=20, textvariable=self.table_name_value)
self.ui_entries = [self.ip_entry, self.username_entry, self.pwd_entry, self.db_name_entry]
self.pwd_entry['show'] = '*'
config_values = self._get_config(self.config_names)
for i in range(len(self.config_names)):
self.ui_values[i].set(config_values[i])
        self.ok_button.configure(command=self._update_db_config, text='连接')  # "Connect"
def gui_arrang(self):
self.frame.grid(row=0, column=0, padx=10, pady=5)
for i in range(len(self.config_names)):
self.ui_labels[i].grid(row=i, column=0, sticky='W')
self.ui_entries[i].grid(row=i, column=1)
self.ok_button.grid(row=len(self.config_names), column=3, pady=5)
self.cancel_button.grid(row=len(self.config_names), column=4, pady=5)
self.focus()
def _update_db_config(self):
config_dict = {}
for i in range(len(self.config_names)):
config_dict[self.config_names[i]] = "'{}'".format(self.ui_values[i].get())
self._set_config(config_dict)
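# Illustrative usage sketch (not part of the original file): opening one of
# the dialogs from a Tk root window. Assumes the config.py file the dialogs
# read is present next to the script.
if __name__ == '__main__':
    root = tkinter.Tk()
    dialog = AlertConfigDialog(root)
    dialog.gui_arrang()
    root.mainloop()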
| 42.977612
| 110
| 0.632054
|
feef4705641ebc11180c7fd6fd308efaa6aff874
| 64
|
py
|
Python
|
Calc.py
|
danielwilson2017/Calculus-Project-Q2
|
d41c2c79c30d8d42444af4980422f19b1484d2cb
|
[
"MIT"
] | null | null | null |
Calc.py
|
danielwilson2017/Calculus-Project-Q2
|
d41c2c79c30d8d42444af4980422f19b1484d2cb
|
[
"MIT"
] | null | null | null |
Calc.py
|
danielwilson2017/Calculus-Project-Q2
|
d41c2c79c30d8d42444af4980422f19b1484d2cb
|
[
"MIT"
] | null | null | null |
nox = int(input("What is the value of x? "))
if nox:
    print(nox)
| 21.333333
| 55
| 0.6875
|
d4719ccb2e4442b7738b0a6bfc737f37ae882c9b
| 470
|
py
|
Python
|
apps/core/views/viewsets/best_search.py
|
sparcs-kaist/new-ara-api
|
63998da575cb148347708199fe1345c4e7ee3e1b
|
[
"MIT"
] | 19
|
2017-09-13T07:51:58.000Z
|
2022-03-28T11:04:03.000Z
|
apps/core/views/viewsets/best_search.py
|
sparcs-kaist/new-ara-api
|
63998da575cb148347708199fe1345c4e7ee3e1b
|
[
"MIT"
] | 147
|
2017-09-14T13:45:30.000Z
|
2022-03-14T15:54:09.000Z
|
apps/core/views/viewsets/best_search.py
|
sparcs-kaist/new-ara-api
|
63998da575cb148347708199fe1345c4e7ee3e1b
|
[
"MIT"
] | 5
|
2019-08-31T13:13:30.000Z
|
2021-03-26T15:46:38.000Z
|
from rest_framework import viewsets, permissions
from ara.classes.viewset import ActionAPIViewSet
from apps.core.serializers.best_search import BestSearchSerializer
from apps.core.models import BestSearch
class BestSearchViewSet(viewsets.ReadOnlyModelViewSet, ActionAPIViewSet):
queryset = BestSearch.objects.all()
filterset_fields = ['latest']
serializer_class = BestSearchSerializer
permission_classes = (
permissions.IsAuthenticated,
)
| 31.333333
| 73
| 0.8
|
f33b6b926d31d0e6838d7afca730fca5ae8b5668
| 679
|
py
|
Python
|
picorss/src/infrastructure/models/meta.py
|
rok-povsic/picorss
|
7c182953868e56389d5c080f3c0b75d7c0fafa74
|
[
"MIT"
] | null | null | null |
picorss/src/infrastructure/models/meta.py
|
rok-povsic/picorss
|
7c182953868e56389d5c080f3c0b75d7c0fafa74
|
[
"MIT"
] | null | null | null |
picorss/src/infrastructure/models/meta.py
|
rok-povsic/picorss
|
7c182953868e56389d5c080f3c0b75d7c0fafa74
|
[
"MIT"
] | null | null | null |
from sqlalchemy import schema
from sqlalchemy.ext import declarative
# Recommended naming convention used by Alembic, as various different database
# providers will autogenerate vastly different names making migrations more
# difficult. See: http://alembic.zzzcomputing.com/en/latest/naming.html
NAMING_CONVENTION = {
"ix": "ix_%(column_0_label)s",
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
metadata = schema.MetaData(naming_convention=NAMING_CONVENTION)
Base = declarative.declarative_base(metadata=metadata)
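# Illustrative model sketch (added for clarity; `Feed` and its columns are
# hypothetical): models pick up the shared metadata and naming convention by
# subclassing Base.
#
#   from sqlalchemy import Column, Integer, Text
#
#   class Feed(Base):
#       __tablename__ = 'feed'
#       id = Column(Integer, primary_key=True)
#       url = Column(Text, nullable=False)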
| 39.941176
| 78
| 0.764359
|
76dbfbcc891c69b629d44dbf36122b9b7ed21dbb
| 366
|
py
|
Python
|
djlang/views.py
|
Joneswn/Baloti
|
c499666dd9e2553fac88130dea2b6e9df8278234
|
[
"MIT"
] | 1
|
2022-02-24T17:30:53.000Z
|
2022-02-24T17:30:53.000Z
|
djlang/views.py
|
Joneswn/Baloti
|
c499666dd9e2553fac88130dea2b6e9df8278234
|
[
"MIT"
] | null | null | null |
djlang/views.py
|
Joneswn/Baloti
|
c499666dd9e2553fac88130dea2b6e9df8278234
|
[
"MIT"
] | 2
|
2021-10-06T11:52:41.000Z
|
2022-01-20T11:07:27.000Z
|
import json
from django.http import HttpResponse
from django.urls import path
from .models import Text
def text_view(request):
data = list(Text.objects.order_by('id', 'key').distinct('id', 'key').values())
return HttpResponse(json.dumps(data, ensure_ascii=False), content_type='application/json')
urlpatterns = [
path('', text_view, name='text')
]
| 21.529412
| 94
| 0.718579
|
88c77a5110793edeb73475372e23f5f19b694a29
| 1,997
|
py
|
Python
|
django/utils/translation/trans_null.py
|
ericholscher/django
|
b9a90b371c90a987ed57f7a4a7cc1274c432b438
|
[
"BSD-3-Clause"
] | 1
|
2015-11-08T11:42:08.000Z
|
2015-11-08T11:42:08.000Z
|
django/utils/translation/trans_null.py
|
ericholscher/django
|
b9a90b371c90a987ed57f7a4a7cc1274c432b438
|
[
"BSD-3-Clause"
] | null | null | null |
django/utils/translation/trans_null.py
|
ericholscher/django
|
b9a90b371c90a987ed57f7a4a7cc1274c432b438
|
[
"BSD-3-Clause"
] | null | null | null |
# These are versions of the functions in django.utils.translation.trans_real
# that don't actually do anything. This is purely for performance, so that
# settings.USE_I18N = False can use this module rather than trans_real.py.
from django.conf import settings
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe, SafeData
def ngettext(singular, plural, number):
if number == 1:
return singular
return plural
ngettext_lazy = ngettext
def ungettext(singular, plural, number):
return force_text(ngettext(singular, plural, number))
def pgettext(context, message):
return ugettext(message)
def npgettext(context, singular, plural, number):
return ungettext(singular, plural, number)
activate = lambda x: None
deactivate = deactivate_all = lambda: None
get_language = lambda: settings.LANGUAGE_CODE
get_language_bidi = lambda: settings.LANGUAGE_CODE in settings.LANGUAGES_BIDI
check_for_language = lambda x: True
# Date formats shouldn't be translated through gettext anymore. This
# is kept for backward compatibility.
TECHNICAL_ID_MAP = {
"DATE_WITH_TIME_FULL": settings.DATETIME_FORMAT,
"DATE_FORMAT": settings.DATE_FORMAT,
"DATETIME_FORMAT": settings.DATETIME_FORMAT,
"TIME_FORMAT": settings.TIME_FORMAT,
"YEAR_MONTH_FORMAT": settings.YEAR_MONTH_FORMAT,
"MONTH_DAY_FORMAT": settings.MONTH_DAY_FORMAT,
}
def gettext(message):
result = TECHNICAL_ID_MAP.get(message, message)
if isinstance(message, SafeData):
return mark_safe(result)
return result
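# Worked example of the mapping above (illustrative; actual values depend on
# the project's settings module):
#   gettext("DATE_FORMAT") -> settings.DATE_FORMAT  (e.g. "N j, Y")
#   gettext("hello world") -> "hello world"         (unknown ids pass through)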
def ugettext(message):
return force_text(gettext(message))
gettext_noop = gettext_lazy = _ = gettext
def to_locale(language):
p = language.find('-')
if p >= 0:
return language[:p].lower()+'_'+language[p+1:].upper()
else:
return language.lower()
def get_language_from_request(request, check_path=False):
return settings.LANGUAGE_CODE
def get_language_from_path(request, supported=None):
return None
| 31.203125
| 77
| 0.755133
|
b80c82b923ed59f76053d23109369b83e38effe1
| 2,114
|
py
|
Python
|
wizard/core/custom_logger.py
|
Wizard-collab/wizard_2
|
a2cb23362e178a0205f6dd0b9b4328c329b5b142
|
[
"MIT"
] | 1
|
2021-10-13T15:07:32.000Z
|
2021-10-13T15:07:32.000Z
|
wizard/core/custom_logger.py
|
Wizard-collab/wizard_2
|
a2cb23362e178a0205f6dd0b9b4328c329b5b142
|
[
"MIT"
] | null | null | null |
wizard/core/custom_logger.py
|
Wizard-collab/wizard_2
|
a2cb23362e178a0205f6dd0b9b4328c329b5b142
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# Author: Leo BRUNEL
# Contact: contact@leobrunel.com
# This file is part of Wizard
# MIT License
# Copyright (c) 2021 Leo brunel
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Python modules
import logging
import os
import sys
# Wizard modules
from wizard.vars import user_vars
from wizard.core import path_utils
def get_root_logger():
create_prefs_folder()
root_logger = logging.getLogger()
if 'DEBUG' in sys.argv:
root_logger.setLevel(logging.DEBUG)
else:
root_logger.setLevel(logging.INFO)
file_handler = logging.FileHandler(user_vars._user_logger_file_)
file_handler.setFormatter(logging.Formatter('%(asctime)s [%(name)-23.23s] [%(levelname)-5.5s] %(message)s'))
root_logger.addHandler(file_handler)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter('%(asctime)s [%(name)-23.23s] [%(levelname)-5.5s] %(message)s'))
root_logger.addHandler(stream_handler)
def create_prefs_folder():
if not path_utils.isdir(user_vars._user_path_):
path_utils.makedirs(user_vars._user_path_)
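# Illustrative usage (not part of the original module; assumes wizard's user
# prefs paths resolve): install the handlers once at startup, then log
# through the standard logging API.
if __name__ == '__main__':
    get_root_logger()
    logging.getLogger('wizard.demo').info('logger initialised')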
| 38.436364
| 114
| 0.76017
|
f3b2c12f4c9acb5877fb1ddd88506f8d3b37d1c2
| 2,241
|
py
|
Python
|
samba_edit/add_option.py
|
tosbaa/samba-liman
|
ddc76b8ab322a07a8d918a9c581440984939d7fe
|
[
"MIT"
] | 1
|
2020-07-03T06:04:36.000Z
|
2020-07-03T06:04:36.000Z
|
samba_edit/add_option.py
|
tosbaa/samba-liman
|
ddc76b8ab322a07a8d918a9c581440984939d7fe
|
[
"MIT"
] | null | null | null |
samba_edit/add_option.py
|
tosbaa/samba-liman
|
ddc76b8ab322a07a8d918a9c581440984939d7fe
|
[
"MIT"
] | null | null | null |
import sys
import configparser
import os
import subprocess
SAMBA_CONFIG_PARSER = configparser.ConfigParser()
SAMBA_FILE_PATH = '../smb.conf'
SAMBA_CONFIG_PARSER.read(SAMBA_FILE_PATH)
SECTION_NAME = sys.argv[2]
OPTION_NAME = sys.argv[3]
VALUE = sys.argv[4]
def section_exist(section_name):
""" Takes section_name(str) argument and returns True if the given name exist """
return SAMBA_CONFIG_PARSER.has_section(section_name)
def option_exist(section_name, option_name):
""" Checks section_name(str) and option_name(str) and returns True if option exist """
return SAMBA_CONFIG_PARSER.has_option(section_name, option_name)
def add_option(section_name, option_name, value):
    # Append "option = value" right below the [section] header via sed.
    sed_script = r"sed -i '/\[{:s}\]/a {:s} \= {:s}' {:s}".format(
        section_name, option_name, value, SAMBA_FILE_PATH)
    subprocess.Popen(sed_script, shell=True)
def get_option_value(section_name, option_name):
return SAMBA_CONFIG_PARSER.get(section_name, option_name)
def make_bash_call(stage_name):
bash = ['python3.7', __file__, stage_name, sys.argv[2], sys.argv[3], sys.argv[4]]
output = subprocess.Popen(bash, stdout=subprocess.PIPE).stdout
return output.read().decode('utf-8')
def automate():
before_output = make_bash_call('before')
if before_output != "ok\n":
print(before_output)
exit()
print('before ok')
make_bash_call('run')
after_output = make_bash_call('after')
if after_output != 'ok\n':
print(after_output)
exit()
print('after ok')
def before():
if not section_exist(SECTION_NAME):
        print('Section : {:s} does not exist'.format(SECTION_NAME))
exit()
if option_exist(SECTION_NAME, OPTION_NAME):
print('Option : {:s} is already defined'.format(OPTION_NAME))
exit()
print('ok')
def run():
add_option(SECTION_NAME, OPTION_NAME, VALUE)
def after():
if not option_exist(SECTION_NAME, OPTION_NAME):
        print('Option : {:s} could not be created'.format(OPTION_NAME))
exit()
if VALUE != get_option_value(SECTION_NAME, OPTION_NAME):
        print('Value : {:s} could not be assigned'.format(VALUE))
exit()
print('ok')
if __name__ == "__main__":
globals()[sys.argv[1]]()
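# Illustrative invocation (section, option and value are hypothetical).
# argv[1] selects a stage: automate (runs before/run/after), before, run,
# or after; the remaining arguments are section, option and value.
#
#   python3.7 add_option.py automate global "log level" 2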
| 28.0125
| 114
| 0.683623
|
f9a315bde2ba93a5c1aea438076f1fcd3a123828
| 2,512
|
py
|
Python
|
src/models/user_serviceprovider.py
|
kumardeepak/user-mgmt
|
43f37fd1bf0a21ae3d17126b21e0b906145ecaf6
|
[
"MIT"
] | null | null | null |
src/models/user_serviceprovider.py
|
kumardeepak/user-mgmt
|
43f37fd1bf0a21ae3d17126b21e0b906145ecaf6
|
[
"MIT"
] | null | null | null |
src/models/user_serviceprovider.py
|
kumardeepak/user-mgmt
|
43f37fd1bf0a21ae3d17126b21e0b906145ecaf6
|
[
"MIT"
] | null | null | null |
import json
from datetime import datetime
class UserServiceProvider:
def __init__(self, args, ignore_username=False):
        if not ignore_username:
self.username = args['username']
self.role = args['role']
self.first_name = args['first_name']
self.last_name = args['last_name']
self.contact_mobile_number = args['contact_mobile_number']
self.contact_email_address = args['contact_email_address']
self.address = args['address']
self.country = args['country']
self.state = args['state']
self.zip_code = args['zip_code']
else:
self.username = ''
self.role = ''
self.first_name = ''
self.last_name = ''
self.contact_mobile_number = ''
self.contact_email_address = ''
self.address = ''
self.country = ''
self.state = ''
self.zip_code = ''
self.app_key = args['app_key']
self.app_secret = args['app_secret']
self.created_on = datetime.utcnow()
self.modified_on = datetime.utcnow()
self.created_by = ''
def to_dict(self):
user = {
'username': self.username,
'role': self.role,
'first_name': self.first_name,
'last_name': self.last_name,
'contact_mobile_number': self.contact_mobile_number,
'contact_email_address': self.contact_email_address,
'address': self.address,
'country': self.country,
'state': self.state,
'zip_code': self.zip_code,
'created_on': str(self.created_on),
'modified_on': str(self.modified_on),
'created_by': self.created_by,
'app_key': self.app_key,
'app_secret': self.app_secret,
}
return user
def get_role(self):
return 'service_provider'
| 43.310345
| 71
| 0.435111
|
4905650b3f0609f8b2ceb77a96da114c500071e1
| 624
|
py
|
Python
|
manage.py
|
SoftwareDevTest/softwaredapi
|
44feb67c7b3f7cdfa05d0e81aca5013e71b999b5
|
[
"Apache-2.0"
] | 5
|
2019-12-10T05:20:59.000Z
|
2019-12-10T09:59:34.000Z
|
manage.py
|
SoftwareDevTest/softwaredapi
|
44feb67c7b3f7cdfa05d0e81aca5013e71b999b5
|
[
"Apache-2.0"
] | 6
|
2020-02-12T02:30:31.000Z
|
2021-06-09T18:46:58.000Z
|
manage.py
|
SoftwareDevTest/softwaredapi
|
44feb67c7b3f7cdfa05d0e81aca5013e71b999b5
|
[
"Apache-2.0"
] | 1
|
2019-12-10T05:21:02.000Z
|
2019-12-10T05:21:02.000Z
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dapi.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.363636
| 73
| 0.68109
|
bb0c46c27910ba2d06b5a99b2ca7480d34f23af4
| 8,493
|
py
|
Python
|
h1/model/iso.py
|
hyperonecom/h1-client-python
|
4ce355852ba3120ec1b8f509ab5894a5c08da730
|
[
"MIT"
] | null | null | null |
h1/model/iso.py
|
hyperonecom/h1-client-python
|
4ce355852ba3120ec1b8f509ab5894a5c08da730
|
[
"MIT"
] | null | null | null |
h1/model/iso.py
|
hyperonecom/h1-client-python
|
4ce355852ba3120ec1b8f509ab5894a5c08da730
|
[
"MIT"
] | null | null | null |
"""
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from h1.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from h1.model.iso_metadata import IsoMetadata
from h1.model.tag import Tag
globals()['IsoMetadata'] = IsoMetadata
globals()['Tag'] = Tag
class Iso(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('state',): {
'UPLOADING': "Uploading",
'ONLINE': "Online",
'UNKNOWN': "Unknown",
'PROCESSING': "Processing",
'NOTCREATED': "NotCreated",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'id': (str,), # noqa: E501
'name': (str,), # noqa: E501
'flavour': (str,), # noqa: E501
'modified_on': (datetime,), # noqa: E501
'modified_by': (str,), # noqa: E501
'created_on': (datetime,), # noqa: E501
'created_by': (str,), # noqa: E501
'state': (str,), # noqa: E501
'project': (str,), # noqa: E501
'uri': (str,), # noqa: E501
'size': (float,), # noqa: E501
'metadata': (IsoMetadata,), # noqa: E501
'tag': ([Tag],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'id': 'id', # noqa: E501
'name': 'name', # noqa: E501
'flavour': 'flavour', # noqa: E501
'modified_on': 'modifiedOn', # noqa: E501
'modified_by': 'modifiedBy', # noqa: E501
'created_on': 'createdOn', # noqa: E501
'created_by': 'createdBy', # noqa: E501
'state': 'state', # noqa: E501
'project': 'project', # noqa: E501
'uri': 'uri', # noqa: E501
'size': 'size', # noqa: E501
'metadata': 'metadata', # noqa: E501
'tag': 'tag', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""Iso - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): [optional] # noqa: E501
name (str): [optional] # noqa: E501
flavour (str): [optional] # noqa: E501
modified_on (datetime): [optional] # noqa: E501
modified_by (str): [optional] # noqa: E501
created_on (datetime): [optional] # noqa: E501
created_by (str): [optional] # noqa: E501
state (str): [optional] # noqa: E501
project (str): [optional] # noqa: E501
uri (str): [optional] # noqa: E501
size (float): [optional] # noqa: E501
metadata (IsoMetadata): [optional] # noqa: E501
tag ([Tag]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| 39.138249
| 110
| 0.554221
|
d6a52dc1101c14a2ba5c46cdf21a7dc4477789cc
| 15,659
|
py
|
Python
|
timeseriesql_matplotlib/__init__.py
|
mbeale/tiemseriesql-matplotlib
|
5eb388fd1224790dfb69690e0be289a10f82c2e0
|
[
"BSD-2-Clause"
] | 5
|
2020-01-28T18:56:08.000Z
|
2020-05-25T22:30:08.000Z
|
timeseriesql_matplotlib/__init__.py
|
mbeale/tiemseriesql-matplotlib
|
5eb388fd1224790dfb69690e0be289a10f82c2e0
|
[
"BSD-2-Clause"
] | null | null | null |
timeseriesql_matplotlib/__init__.py
|
mbeale/tiemseriesql-matplotlib
|
5eb388fd1224790dfb69690e0be289a10f82c2e0
|
[
"BSD-2-Clause"
] | null | null | null |
import matplotlib.pyplot as plt
from matplotlib.dates import (
DayLocator,
HourLocator,
MonthLocator,
YearLocator,
MinuteLocator,
DateFormatter,
)
import numpy as np
import math
from timeseriesql.plot import Plot
DEFAULT_FIGURE_SIZE = (20, 10)
FIGURE_COUNTER = 1
def init_ax_if_none(func):
def inner(*args, **kwargs):
        if 'ax' not in kwargs or kwargs['ax'] is None:
global FIGURE_COUNTER
fig = plt.figure(FIGURE_COUNTER, figsize=DEFAULT_FIGURE_SIZE)
fig.autofmt_xdate()
ax = fig.add_subplot(111)
kwargs['ax'] = ax
FIGURE_COUNTER += 1
return func(*args, **kwargs)
return inner
def auto_plot(ts):
    """Pick a timebox bucket size from the span of the series (seconds)."""
    diff = ts.time[-1] - ts.time[0]
    if diff > 31536000:  # span > 1 year; NB: "y" has no entry in timebox_funcs below
        return "y"
    elif diff > 2678400:  # span > 31 days -> month buckets
        return "mth"
    elif diff > 604800:  # span > 7 days -> day-of-week buckets
        return "dow"
    elif diff > 86400:  # span > 1 day -> hour buckets
        return "h"
    elif diff > 3600:  # span > 1 hour -> minute buckets
        return "m"
    return "s"
timebox_funcs = {
"s": ["second", 60, None],
"m": ["minute", 60, None],
"h": ["hour", 24, None],
"dow": ["weekday", 7, ["Mon", "Tues", "Wed", "Thu", "Fri", "Sat", "Sun"]],
"d": ["day", 31, None],
"mth": [
"month",
12,
[
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
],
],
}
class MatplotlibTQL(Plot):
def _create_xaxis_date(self, ax, date_index):
""" Uses buest guess for x axis labels """
window = (date_index[-1] - date_index[0]).astype(int)
interval=5
xlabel = ""
if window <= 3600:
minor_locator = MinuteLocator(interval=interval)
minor_formatter = DateFormatter("%M")
major_locator = HourLocator()
major_formatter = DateFormatter("\n%Y-%m-%d %H:%M")
xlabel = "Minute"
elif window <= 86400:
minor_locator = HourLocator(interval=interval)
minor_formatter = DateFormatter("%H:%M")
major_locator = DayLocator()
major_formatter = DateFormatter("\n%Y-%m-%d")
xlabel = "Hour"
elif window <= (7 * 86400):
minor_locator = HourLocator(interval=6)
minor_formatter = DateFormatter("%H:%M")
major_locator = DayLocator()
major_formatter = DateFormatter("\n%Y-%m-%d")
xlabel = "Hour"
elif window <= (60 * 86400):
#if len(date_index) > 30:
# interval = 2
minor_locator = DayLocator(interval=interval)
minor_formatter = DateFormatter("%m-%d")
major_locator = YearLocator()
major_formatter = DateFormatter("\n%Y")
xlabel = "Day"
else:
minor_locator = MonthLocator(interval=interval)
minor_formatter = DateFormatter("%B")
major_locator = YearLocator()
major_formatter = DateFormatter("\n%Y")
xlabel = "Month"
ax.xaxis.set_minor_locator(minor_locator)
ax.xaxis.set_minor_formatter(minor_formatter)
ax.xaxis.set_major_locator(major_locator)
ax.xaxis.set_major_formatter(major_formatter)
ax.fmt_xdata = DateFormatter("%Y-%m-%d %H:%M:%S")
ax.set_xlabel(xlabel)
def subplot(self, func, ts, **kwargs):
"""
        Plot each column in its own chart
        func: function
            The plotting function
        ts: TimeSeries
            the time series
        kwargs: argument list
            these will be passed to the plotting function
Example
-------
subplot(dist_plot,ts, percentiles=[50, 99])
>>>
"""
size = ts.shape[1]
fig, ax = plt.subplots(
size, 1, figsize=DEFAULT_FIGURE_SIZE, sharex="col", sharey="row"
)
for i, col in enumerate(ax):
func(ts[:, i], ax=col, **kwargs)
@init_ax_if_none
def line_plot(self, ts, ax=None, legend=True, labels=None, ylabel=None, **kwargs):
"""Plot charts using sane time series defaults with Matplotlib.
Parameters
----------
ts: TimeSeries
time series to plot
ax : matplotlib.axes.Axes
an Axes to plot against. One will be created if not included
legend: Boolean
Decision to generate a legend
labels: list
A list of labels to use instead of generated labels
ylabel: string
yaxis_label
Returns
-------
None
Example
-------
line_plot(ts)
>>>
"""
ylabel = self.ylabel_func(ts) if ylabel is None else ylabel
date_index = ts.time.dt
self._create_xaxis_date(ax, date_index)
ax.set_title(self.title_func(ts), fontsize=18)
ax.plot(date_index, ts.data, **kwargs)
ax.set_ylabel(ylabel)
if legend:
if not labels:
labels = self.legend_labels_func(ts)
if labels != [""]:
ax.legend(title="Streams", labels=labels[:5])
@init_ax_if_none
def dist_plot(self, ts, ax=None, percentiles=None, xlabel=None, **kwargs):
"""
Create a distribution plot
Parameters
----------
ts: TimeSeries
time series to plot
ax : matplotlib.axes.Axes
an Axes to plot against. One will be created if not included
percentiles: list
an array of percentiles to plot
xlabel: string
xaxis_label
Returns
-------
None
Example
-------
dist_plot(ts, percentiles=[50, 99])
>>>
"""
xlabel = self.xlabel_func(ts) if xlabel is None else xlabel
hist = ax.hist(ts.data.flatten(), bins="auto", rwidth=0.99)
ax.set_title("Distribution for " + self.title_func(ts), fontsize=18)
ax.set_ylabel("Count")
ax.set_xlabel(xlabel)
if percentiles:
m = max(hist[0])
for p in percentiles:
value = np.percentile(ts.data.flatten(), p)
ax.axvline(value, color="r", linestyle="--", ymax=0.95)
ax.text(x=value, y=m, s=f"p{p}")
@init_ax_if_none
def stacked_plot(self, ts, ax=None, ylabel=None, **kwargs):
"""Plot stacked charts using sane time series defaults with Matplotlib.
Parameters
----------
ts: TimeSeries
time series to plot
ax : matplotlib.axes.Axes
an Axes to plot against. One will be created if not included
ylabel: string
yaxis_label
Returns
-------
None
Example
-------
stacked_plot(ts)
>>>
"""
ylabel = self.ylabel_func(ts) if ylabel is None else ylabel
date_index = ts.time.dt
self._create_xaxis_date(ax, date_index)
labels = self.legend_labels_func(ts)
ax.set_title(self.title_func(ts), fontsize=18)
ax.stackplot(date_index, ts.data.T, labels=labels[:5])
ax.set_ylabel(ylabel)
ax.legend(title="Streams", labels=labels[:5])
@init_ax_if_none
def timebox_plot(self, ts, ax=None, plot="auto", ylabel=None, **kwargs):
"""
A time boxplot for time series EDA.
plot: string
options
-------
auto - find the best possible time range
s - second buckets
m - minute buckets
h - hour buckets
d - day buckets
mth - month buckets
y - year buckets
ax: axes
to use for plotting. One is generated if not passed
ylabel: string
yaxis_label
kwargs: kwargs
pass to the axes as options
"""
ylabel = self.ylabel_func(ts) if ylabel is None else ylabel
if plot == "auto":
plot = auto_plot(ts)
func, max_size, labels = timebox_funcs[plot]
dates = ts.time.dt
if plot == "dow": # <-----:puke:
dates = np.array([getattr(x.tolist(), func)() for x in dates])
else:
dates = np.array([getattr(x.tolist(), func) for x in dates])
        max_count = 0
        for i in range(max_size):
            bucket_len = len(np.argwhere(dates == i))
            if bucket_len > max_count:
                max_count = bucket_len
data = np.empty((max_count, max_size))
data[:] = np.nan
for i in range(max_size):
row_slice = slice(None, None, None)
temp = ts.data[np.argwhere(dates == i)[:, 0]][:, 0]
if temp.shape != data[:, i].shape:
row_slice = slice(None, temp.shape[0], None)
data[row_slice, i] = temp
# drop nan
new_data = []
for col in data.T:
new_data.append(col[~np.isnan(col)])
ax.boxplot(new_data, **kwargs)
ax.set_ylabel(ylabel)
ax.set_xlabel(func[0].upper() + func[1:])
ax.set_title(self.title_func(ts), fontsize=18)
if labels:
ax.set_xticklabels(labels)
@init_ax_if_none
def correlogram_plot(self, ts, ax=None, **kwargs):
"""Plot stacked charts using sane time series defaults with Matplotlib.
Parameters
----------
ts: TimeSeries
time series to plot
ax : matplotlib.axes.Axes
an Axes to plot against. One will be created if not included
Returns
-------
None
Example
-------
correlogram_plot(ts)
>>>
"""
ax.acorr(ts.data.flatten(), usevlines=True, normed=True, lw=2, **kwargs)
ax.set_ylabel("Correlation")
ax.set_xlabel("Lag")
def lag_plot(self, ts, lags=12, max_per_row=3, **kwargs):
"""Plot lag plots
Parameters
----------
ts: TimeSeries
time series to plot
lags: int or iterable
            the lags to plot. If an iterable, a custom lag configuration will be used
ax : matplotlib.axes.Axes
an Axes to plot against. One will be created if not included
max_per_row : int
the maximum charts in a row
Returns
-------
None
Example
-------
lag_plot(ts)
>>>
"""
if isinstance(lags, int):
obj = range(1, lags+1)
else:
obj = lags
try:
iter(obj)
except TypeError:
raise TypeError("Expecting lags to be either an int or an iterable")
s = {}
l = len(ts)
for i in obj:
if isinstance(i, int):
original_slice = slice(0, -(i))
lag_slice = slice(i, l)
s[i] = [original_slice, lag_slice]
size = len(s)
fig, ax = plt.subplots(
math.ceil(size / max_per_row), max_per_row, figsize=DEFAULT_FIGURE_SIZE, sharex="col", sharey="row"
)
for index, (key, (original,compare)) in enumerate(s.items()):
if max_per_row == 1:
ax[index].scatter(ts.data[original],ts.data[compare])
ax[index].set_title(f"lag -{key}")
else:
ax[index // max_per_row][index % max_per_row].scatter(ts.data[original],ts.data[compare])
ax[index // max_per_row][index % max_per_row].set_title(f"lag -{key}")
return None
@init_ax_if_none
def heatmap_plot(self, ts, ax=None, plot="auto", cmap="Blues", title="", ylabel=None, **kwargs):
"""
A heatmap for time series EDA.
plot: string
options
-------
auto - find the best possible time range
s - second buckets
m - minute buckets
h - hour buckets
d - day buckets
mth - month buckets
y - year buckets
ax: axes
to use for plotting. One is generated if not passed
cmap : string
the value to use for the coloring. see https://matplotlib.org/tutorials/colors/colormaps.html
ylabel: string
yaxis_label
kwargs: kwargs
pass to the axes as options
"""
ylabel = self.ylabel_func(ts) if ylabel is None else ylabel
if plot == "auto":
plot = auto_plot(ts)
func, max_size, labels = timebox_funcs[plot]
hist, bins = np.histogram(ts)
new_ts = np.zeros((len(bins)-1, max_size))
dates = ts.time.dt
if plot == "dow": # <-----:puke:
dates = np.array([getattr(x.tolist(), func)() for x in dates])
else:
dates = np.array([getattr(x.tolist(), func) for x in dates])
for i in range(max_size):
new_ts[:,i] = np.histogram(ts[np.argwhere(dates==i)], bins)[0]
im = ax.imshow(new_ts, cmap=cmap, interpolation='nearest')
plt.colorbar(im, ax=ax)
ax.set_ylabel(ylabel)
ax.set_xlabel(func[0].upper() + func[1:])
ax.set_title(self.title_func(ts), fontsize=18)
if labels:
labels = [labels[int(x)] for x in ax.get_xticks() if x < len(labels)]
ax.set_xticklabels(labels)
ax.set_yticklabels([math.ceil(x) for x in bins])
return None
@init_ax_if_none
def text_plot(self, value, ax=None, title="", fontsize=48, thresholds=None, **kwargs):
"""
Plot a single value
Parameters
----------
value: number
a value to display
ax : matplotlib.axes.Axes
an Axes to plot against. One will be created if not included
title: string
title for box
fontsize: int
size for the text
thresholds: list(tuple)
            tuples that colorize the text and background color based on the value of the number.
The format is (threshold, background color, font color). Based on the color palette at:
https://matplotlib.org/3.1.0/gallery/color/named_colors.html
If threshold is None, then the condition is considered met. The default background color
is 'white' and the font color is 'black'
Returns
-------
None
Example
-------
text_plot(value, title="A Nice Text Box", thresholds=[(0, 'green', 'white'), (20, 'cornflowerblue', 'white'), (None, 'darkorange', 'white')])
>>>
"""
fontcolor = 'black'
facecolor = 'white'
if thresholds:
for t in thresholds:
if t[0] is not None:
if value < t[0]:
if t[1]:
facecolor = t[1]
if t[2]:
fontcolor = t[2]
break
else:
if t[1]:
facecolor = t[1]
if t[2]:
fontcolor = t[2]
break
ax.text(0.5, 0.5, "%.2f" % (value), va="center", ha="center", fontsize=fontsize, color=fontcolor)
ax.set_title(title)
ax.set_facecolor(facecolor)
ax.tick_params(labelbottom=False, labelleft=False)
ax.grid(False)
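# Illustrative usage sketch (not part of the original module), assuming a
# timeseriesql TimeSeries `ts` is in scope and Plot needs no constructor
# arguments:
#
#   plotter = MatplotlibTQL()
#   plotter.line_plot(ts)
#   plotter.subplot(plotter.dist_plot, ts, percentiles=[50, 99])
#   plt.show()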
| 30.524366
| 149
| 0.520723
|
6d45cb282714357f188f75097614850eca42e10a
| 3,774
|
py
|
Python
|
org-watch.py
|
NicholasTaylor/ticket-watch
|
75db54ef060a4aa0d0944898865ba32b08e4d57c
|
[
"MIT"
] | null | null | null |
org-watch.py
|
NicholasTaylor/ticket-watch
|
75db54ef060a4aa0d0944898865ba32b08e4d57c
|
[
"MIT"
] | null | null | null |
org-watch.py
|
NicholasTaylor/ticket-watch
|
75db54ef060a4aa0d0944898865ba32b08e4d57c
|
[
"MIT"
] | null | null | null |
import requests, functions, config, json
from classes import Event
def getOrgs():
from pathlib import Path
return json.loads(Path('orgs.json').read_text())
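# Example orgs.json entry (illustrative values) that getOrgs() is expected to
# return; latestPage and events are optional and backfilled by orgValidate().
#
#   [{"orgId": 1234567890, "name": "Example Org", "latestPage": 1, "events": []}]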
def getOrgStatus(org,latestPage,events):
page = latestPage
    multipage = bool(latestPage and latestPage > 1)
url = 'https://www.eventbriteapi.com/v3/organizers/' +str(org) +'/events?page=' +str(latestPage) if multipage else 'https://www.eventbriteapi.com/v3/organizers/' +str(org) +'/events/?page=1'
headers = {
'Authorization': 'Bearer ' +config.event_auth
}
r = requests.get(url, headers=headers)
eventsList = []
jsonOutput = r.json()
print('Org: %i' % (org))
    try:
        print('Reading from page ' + str(jsonOutput['pagination']['page_number']) + '.')
    except KeyError:
        pass
    outputs = jsonOutput['events']
    continuation = None  # stays None when the response carries no pagination token
    try:
        continuation = jsonOutput['pagination']['continuation']
    except KeyError:
        pass
has_more_items = jsonOutput['pagination']['has_more_items']
while has_more_items and continuation:
page += 1
print('More data available. Pulling from next page.')
url2 = url +'&continuation=' +continuation
headers2 = {
'Authorization': 'Bearer ' +config.event_auth
}
r2 = requests.get(url2, headers=headers2)
json2 = r2.json()
print('Reading from page ' +str(json2['pagination']['page_number']) +'.')
outputs += json2['events']
        try:
            continuation = json2['pagination']['continuation']
        except KeyError:
            continuation = None
has_more_items = json2['pagination']['has_more_items']
for event in outputs:
candidate = Event(event['id'],event['name']['text'],event['start']['utc'],event['url'])
if candidate.isFuture() and candidate.id not in events:
eventsList.append(candidate)
return page, eventsList
def orgValidate(orgs):
for org in orgs:
try:
checks = [(str(org['orgId']), 'Organization ID'), (org['name'], 'Organization Name')]
for check in checks:
cField, cName = check
functions.configCheck(cField, cName)
except AttributeError:
print('Your orgs file is missing certain required key/value pairs. Check orgs.json. Exiting.')
# Checking if latestPage exists
try:
print(str(org['latestPage']))
except KeyError:
print('No latestPage found. Setting default to 1. Backfilling from beginning.')
org['latestPage'] = 1
# Checking if events array exists
try:
print(str(org['events']))
except KeyError:
print('No events array found. Not filtering for this run.')
org['events'] = []
def main(orgs):
contacts = functions.genContacts(config.twilio_contacts)
for org in orgs:
newPage, newEventsList = (getOrgStatus(org['orgId'],org['latestPage'],org['events']))
org['latestPage'] = newPage
contacts_batch = []
for contact in contacts:
if contact.is_optin(org['orgId']):
contacts_batch.append(contact.number)
if len(newEventsList) > 0:
plural = 's' if len(newEventsList) > 1 else ''
msg = 'Heads up. New event' +plural +' from ' +org['name'] +':\n'
for event in newEventsList:
msg += event.name +': ' +event.url +'\n'
org['events'].append(event.id)
if len(contacts_batch) > 0:
for number in contacts_batch:
print(functions.sendTxt(number,msg))
with open('orgs.json', 'w+') as writeFile:
json.dump(orgs, writeFile)
functions.validate()
orgs = getOrgs()
orgValidate(orgs)
main(orgs)
| 38.907216
| 194
| 0.595919
|
1fc3f520fd7266f1340185f8a44a05e7a7f33dee
| 1,714
|
py
|
Python
|
tacker/sol_refactored/objects/v1/placement_constraint.py
|
h1r0mu/tacker
|
8c69dda51fcfe215c4878a86b82018d2b96e5561
|
[
"Apache-2.0"
] | 116
|
2015-10-18T02:57:08.000Z
|
2022-03-15T04:09:18.000Z
|
tacker/sol_refactored/objects/v1/placement_constraint.py
|
h1r0mu/tacker
|
8c69dda51fcfe215c4878a86b82018d2b96e5561
|
[
"Apache-2.0"
] | 6
|
2016-11-07T22:15:54.000Z
|
2021-05-09T06:13:08.000Z
|
tacker/sol_refactored/objects/v1/placement_constraint.py
|
h1r0mu/tacker
|
8c69dda51fcfe215c4878a86b82018d2b96e5561
|
[
"Apache-2.0"
] | 166
|
2015-10-20T15:31:52.000Z
|
2021-11-12T08:39:49.000Z
|
# Copyright (C) 2021 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tacker.sol_refactored.objects import base
from tacker.sol_refactored.objects import fields
# NFV-SOL 003
# - v2.6.1 9.5.3.6 (API version: 1.3.0)
# - v2.7.1 9.5.3.6 (API version: 1.3.0)
# - v2.8.1 9.5.3.6 (API version: 1.3.0)
# - v3.3.1 9.5.3.6 (API version: 1.4.0)
@base.TackerObjectRegistry.register
class PlacementConstraintV1(base.TackerObject, base.TackerObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'affinityOrAntiAffinity': fields.EnumField(
valid_values=[
'AFFINITY',
'ANTI_AFFINITY',
],
nullable=False
),
'scope': fields.EnumField(
valid_values=[
'NFVI_POP',
'ZONE',
'ZONE_GROUP',
'NFVI_NODE',
],
nullable=False,
),
'resource': fields.ListOfObjectsField(
'ConstraintResourceRefV1', nullable=False),
'fallbackBestEffort': fields.BooleanField(nullable=True),
}
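    # A hedged construction sketch (an assumption for exposition, not part of
    # the original module): Tacker's versioned objects generally accept their
    # fields as keyword arguments, so a SOL003 anti-affinity constraint over
    # two resources would look roughly like:
    #
    #   PlacementConstraintV1(
    #       affinityOrAntiAffinity='ANTI_AFFINITY',
    #       scope='NFVI_NODE',
    #       resource=[ref_a, ref_b],  # ConstraintResourceRefV1 instances
    #       fallbackBestEffort=True)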
| 32.961538
| 78
| 0.627188
|
66946f3e174b00e1a68bd347aa535c685b1e8b8f
| 6,939
|
py
|
Python
|
examples/pwr_run/checkpointing/debug/k80_only/job48.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
examples/pwr_run/checkpointing/debug/k80_only/job48.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
examples/pwr_run/checkpointing/debug/k80_only/job48.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
"""
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.resnet import ResNet50, ResNet101, ResNet152
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 32
args_lr = 0.0014
args_model = 'resnet101'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_k80_only/' + job_name + '*'
total_epochs = 134
starting_epoch = 0
if args.resume:
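    # The checkpoint filename encodes the epoch as '<job_name>_<epoch>.h5'
    # (see terminateProcess below), so parse the resume epoch out of the path.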
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '50' in args_model:
base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '101' in args_model:
base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '152' in args_model:
base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
epoch_waste_dict = {}
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
epoch_waste_dict[job_name] += epoch_waste_time
json_file3 = json.dumps(epoch_waste_dict)
with open('epoch_waste.json', 'w') as fp:
fp.write(json_file3)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_k80_only/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
checkpoint_dict[job_name] = 1
json_file3 = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file3)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| 31.684932
| 118
| 0.702407
|
3f7020ba676457ca01fc3888aa5bf0a94acb05e0
| 3,450
|
py
|
Python
|
lib/common.py
|
haribommi/vaapi-fits
|
cbf2a463bd3b2c9af5c45a1376b0bde2b703ed23
|
[
"BSD-3-Clause"
] | null | null | null |
lib/common.py
|
haribommi/vaapi-fits
|
cbf2a463bd3b2c9af5c45a1376b0bde2b703ed23
|
[
"BSD-3-Clause"
] | null | null | null |
lib/common.py
|
haribommi/vaapi-fits
|
cbf2a463bd3b2c9af5c45a1376b0bde2b703ed23
|
[
"BSD-3-Clause"
] | null | null | null |
###
### Copyright (C) 2018-2019 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
import os
import slash
import subprocess
import threading
import time
class memoize:
def __init__(self, function):
self.function = function
self.memoized = {}
def __call__(self, *args):
try:
return self.memoized[args]
except KeyError:
r = self.function(*args)
self.memoized[args] = r
return r
def __repr__(self):
return str(self.function.__name__)
@memoize
def get_media():
return slash.plugins.manager.get_plugin("media")
def killproc(proc):
result = proc.poll()
if result is not None:
return result
# try to 'gently' terminate proc
proc.terminate()
  for i in range(5):
result = proc.poll()
if result is not None:
return result
time.sleep(1) # wait a little longer for proc to terminate
# failed to terminate proc, so kill it
proc.kill()
  for i in range(10):
result = proc.poll()
if result is not None:
return result
time.sleep(1) # give system more time to kill proc
# failed to kill proc
if result is None:
slash.logger.warn('Failed to kill process with pid {}'.format(proc.pid))
return result
def call(command, withSlashLogger = True):
calls_allowed = get_media()._calls_allowed()
assert calls_allowed, "call refused"
if withSlashLogger:
logger = slash.logger.info
else:
logger = lambda x: None
def readproc(proc):
for line in iter(proc.stdout.readline, ''):
readproc.output += line
logger(line.rstrip('\n'))
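  # Function attributes double as shared state between the worker threads
  # spawned below and the enclosing call().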
readproc.output = ""
def timeout(proc):
timeout.triggered = proc.poll() is None
killproc(proc)
timeout.triggered = False
error = False
message = ""
# Without "exec", the shell will launch the "command" in a child process and
# proc.pid will represent the shell (not the "command"). And therefore, the
# "command" will not get killed with proc.terminate() or proc.kill().
#
# When we use "exec" to run the "command". This will cause the "command" to
# inherit the shell process and proc.pid will represent the actual "command".
proc = subprocess.Popen(
"exec " + command,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT,
shell = True)
logger("CALL: {} (pid: {})".format(command, proc.pid))
reader = threading.Thread(target = readproc, args = [proc])
timer = threading.Timer(get_media().call_timeout, timeout, [proc])
reader.daemon = True
timer.daemon = True
reader.start()
timer.start()
try: # in case of user interrupt
proc.wait()
timer.cancel()
except:
killproc(proc)
raise
finally:
timer.cancel()
timer.join(30)
reader.join(30)
if timeout.triggered:
error = True
get_media()._report_call_timeout()
message = "CALL TIMEOUT: timeout after {} seconds (pid: {}).".format(
get_media().call_timeout, proc.pid)
elif proc.returncode != 0:
error = True
message = "CALL ERROR: failed with exitcode {} (pid: {})".format(proc.returncode, proc.pid)
assert not error, message
return readproc.output
def try_call(command):
try:
subprocess.check_output(command, stderr = subprocess.STDOUT, shell = True)
except:
return False
return True
def mapRange(value, srcRange, destRange):
(smin, smax), (dmin, dmax) = srcRange, destRange
return dmin + ((value - smin) * (dmax - dmin) / (smax - smin))
| 24.642857
| 95
| 0.666667
|
d75b375e44855e0f0428ce2891275fc67b0cdd33
| 2,654
|
py
|
Python
|
tests/test_fits.py
|
dennisvang/imageio
|
54e957a8d4c5faa90c6dd16ae3f64346a5ceafa4
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_fits.py
|
dennisvang/imageio
|
54e957a8d4c5faa90c6dd16ae3f64346a5ceafa4
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_fits.py
|
dennisvang/imageio
|
54e957a8d4c5faa90c6dd16ae3f64346a5ceafa4
|
[
"BSD-2-Clause"
] | null | null | null |
""" Test fits plugin functionality.
"""
import pytest
import imageio.v2 as iio
from imageio.core import Request
import numpy as np
from conftest import deprecated_test
fits = pytest.importorskip("astropy.io.fits", reason="astropy is not installed")
@deprecated_test
def setup_module():
# During this test, pretend that FITS is the default format
iio.formats.sort("FITS")
@deprecated_test
def teardown_module():
# Set back to normal
iio.formats.sort()
@pytest.fixture
def normal_plugin_order():
# Use fixture to temporarily set context of normal plugin order for
# tests and return to module setup afterwards
teardown_module()
yield
setup_module()
@deprecated_test
def test_fits_format(test_images):
# Test selection
for name in ["fits", ".fits"]:
format = iio.formats["fits"]
assert format.name == "FITS"
assert format.__module__.endswith(".fits")
# Test cannot read
png = test_images / "chelsea.png"
assert not format.can_read(Request(png, "ri"))
assert not format.can_write(Request(png, "wi"))
def test_fits_reading(test_images):
"""Test reading fits"""
simple = test_images / "simple.fits"
multi = test_images / "multi.fits"
compressed = test_images / "compressed.fits.fz"
# One image
im = iio.imread(simple, format="fits")
ims = iio.mimread(simple, format="fits")
assert (im == ims[0]).all()
assert len(ims) == 1
# Multiple images
ims = iio.mimread(multi, format="fits")
assert len(ims) == 3
R = iio.read(multi, format="fits")
assert R.format.name == "FITS"
ims = list(R) # == [im for im in R]
assert len(ims) == 3
# Fail
raises = pytest.raises
raises(IndexError, R.get_data, -1)
raises(IndexError, R.get_data, 3)
raises(RuntimeError, R.get_meta_data, None) # no meta data support
raises(RuntimeError, R.get_meta_data, 0) # no meta data support
# Compressed image
im = iio.imread(compressed, format="fits")
assert im.shape == (2042, 3054)
def test_fits_get_reader(normal_plugin_order, tmp_path):
"""Test reading fits with get_reader method
This is a regression test that closes GitHub issue #636
"""
sigma = 10
xx, yy = np.meshgrid(np.arange(512), np.arange(512))
z = (1 / (2 * np.pi * (sigma**2))) * np.exp(
-((xx**2) + (yy**2)) / (2 * (sigma**2))
)
img = np.log(z, where=z != 0, out=np.zeros_like(z))
phdu = fits.PrimaryHDU()
ihdu = fits.ImageHDU(img)
hdul = fits.HDUList([phdu, ihdu])
hdul.writeto(tmp_path / "test.fits")
iio.get_reader(tmp_path / "test.fits", format="fits")
| 26.54
| 80
| 0.653353
|
c2d465d84b58928e4efc3321e844c1e2b270b96e
| 97
|
py
|
Python
|
flow/__init__.py
|
nschloe/flow
|
ef45bdd4181d385b1b01042e9ce0b48e4cdc2318
|
[
"MIT"
] | 6
|
2018-05-01T18:04:03.000Z
|
2020-12-29T08:05:53.000Z
|
flow/__init__.py
|
nschloe/flow
|
ef45bdd4181d385b1b01042e9ce0b48e4cdc2318
|
[
"MIT"
] | 1
|
2018-08-09T07:17:59.000Z
|
2018-08-09T07:52:18.000Z
|
flow/__init__.py
|
nschloe/flow
|
ef45bdd4181d385b1b01042e9ce0b48e4cdc2318
|
[
"MIT"
] | 4
|
2018-01-29T18:46:12.000Z
|
2020-12-27T11:44:05.000Z
|
# -*- coding: utf-8 -*-
#
from . import message
from . import navier_stokes
from . import stokes
| 16.166667
| 27
| 0.680412
|
0f6bab726e1761e0c8a1e579432081ffda8e4fac
| 5,047
|
py
|
Python
|
tests/test_describe.py
|
andras-tim/sadisplay
|
0f63a67b7a831fce812c662da8b7ca92407c7376
|
[
"BSD-3-Clause"
] | 1
|
2021-11-06T17:13:58.000Z
|
2021-11-06T17:13:58.000Z
|
tests/test_describe.py
|
andras-tim/sadisplay
|
0f63a67b7a831fce812c662da8b7ca92407c7376
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_describe.py
|
andras-tim/sadisplay
|
0f63a67b7a831fce812c662da8b7ca92407c7376
|
[
"BSD-3-Clause"
] | 1
|
2021-11-06T17:13:48.000Z
|
2021-11-06T17:13:48.000Z
|
# -*- coding: utf-8 -*-
from nose.tools import assert_equal
import sadisplay
import model
class TestDescribe(object):
def test_single_mapper(self):
objects, relations, inherits = sadisplay.describe([model.User])
assert len(objects) == 1
assert relations == []
assert inherits == []
assert_equal(
objects[0],
{
'name': model.User.__name__,
'cols': [
('INTEGER', 'id', 'pk'),
('VARCHAR(50)', 'name', None),
],
'props': ['address', 'books', ],
'methods': ['login', ],
}
)
def test_single_table(self):
objects, relations, inherits = sadisplay.describe([model.notes])
assert len(objects) == 1
assert relations == []
assert inherits == []
assert_equal(
objects[0],
{
'name': model.notes.name,
'cols': [
('INTEGER', 'id', 'pk'),
('INTEGER', 'user_id', 'fk'),
('VARCHAR(50)', 'name', None),
],
'props': [],
'methods': [],
}
)
def test_inherits(self):
objects, relations, inherits = sadisplay \
.describe([model.User, model.Admin, model.Manager])
assert len(relations) == 0
assert len(objects) == 3
assert len(inherits) == 2
assert_equal(
objects[1],
{
'name': model.Admin.__name__,
'cols': [
('INTEGER', 'id', 'pk'),
('VARCHAR(50)', 'name', None),
('VARCHAR(50)', 'phone', None),
],
'props': ['address', 'books', ],
'methods': ['permissions', ],
}
)
assert_equal(
inherits[0],
{
'child': model.Admin.__name__,
'parent': model.User.__name__,
}
)
def test_relation(self):
objects, relations, inherits = sadisplay \
.describe([model.User, model.Address])
assert len(objects) == 2
assert_equal(
objects[1],
{
'name': model.Address.__name__,
'cols': [
('INTEGER', 'id', 'pk'),
('INTEGER', 'user_id', 'fk'),
],
'props': ['user'],
'methods': [],
}
)
assert len(inherits) == 0
assert_equal(
relations[0],
{
'from': model.Address.__name__,
'to': model.User.__name__,
'by': 'user_id',
}
)
def test_table(self):
objects, relations, inherits = sadisplay \
.describe([model.Book])
assert len(objects) == 1
assert_equal(
objects[0],
{
'name': model.Book.__name__,
'cols': [
('INTEGER', 'id', 'pk'),
('INTEGER', 'user_id', 'fk'),
('VARCHAR(50)', 'title', None),
],
'props': ['user'],
'methods': [],
}
)
objects, relations, inherits = sadisplay \
.describe([model.Book, model.books])
assert len(objects) == 1
assert_equal(
objects[0],
{
'name': model.Book.__name__,
'cols': [
('INTEGER', 'id', 'pk'),
('INTEGER', 'user_id', 'fk'),
('VARCHAR(50)', 'title', None),
],
'props': ['user'],
'methods': [],
}
)
objects, relations, inherits = sadisplay \
.describe([model.books])
assert len(objects) == 1
assert_equal(
objects[0],
{
'name': model.books.name,
'cols': [
('INTEGER', 'id', 'pk'),
('INTEGER', 'user_id', 'fk'),
('VARCHAR(50)', 'title', None),
],
'props': [],
'methods': [],
}
)
def test_column_property(self):
objects, relations, inherits = sadisplay \
.describe([model.Employee])
assert_equal(len(objects), 1)
assert_equal(
objects[0],
{
'name': model.Employee.__name__,
'cols': [
('INTEGER', 'id', 'pk'),
('INTEGER', 'manager_id', 'fk'),
('VARCHAR(50)', 'name', None),
('VARCHAR(50)', 'rank', None),
],
'props': ['address', 'books', 'department'],
'methods': [],
})
| 27.429348
| 72
| 0.385377
|
3cce34f2a12d3d5b8569dbb303beaa191fb0fc7e
| 271
|
py
|
Python
|
venv/Lib/site-packages/mcipc/rcon/je/commands/spectate.py
|
Svesnav2/Discord-Bot-Minecraft-server-status
|
ee34948e741930567a3adb557197523f9d32ace1
|
[
"Unlicense"
] | null | null | null |
venv/Lib/site-packages/mcipc/rcon/je/commands/spectate.py
|
Svesnav2/Discord-Bot-Minecraft-server-status
|
ee34948e741930567a3adb557197523f9d32ace1
|
[
"Unlicense"
] | null | null | null |
venv/Lib/site-packages/mcipc/rcon/je/commands/spectate.py
|
Svesnav2/Discord-Bot-Minecraft-server-status
|
ee34948e741930567a3adb557197523f9d32ace1
|
[
"Unlicense"
] | null | null | null |
"""Implementation of the spectate command."""
from mcipc.rcon.client import Client
__all__ = ['spectate']
def spectate(self: Client, target: str = None, player: str = None) -> str:
"""Start or stop spectating."""
return self.run('spectate', target, player)
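# A hedged usage sketch (host, port and password below are placeholders, not
# values from this module, and the exact login step may differ by mcipc
# version):
#
#   with Client('127.0.0.1', 25575, passwd='secret') as client:
#       client.spectate(target='SomePlayer', player='SpectatingPlayer')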
| 20.846154
| 74
| 0.678967
|
394f33a8496008d2a0589e40471ec10911f80978
| 8,386
|
py
|
Python
|
moe/optimal_learning/python/interfaces/expected_improvement_interface.py
|
misokg/Cornell-MOE
|
1547d6b168b7fc70857d522baa0d5d45c41d3cdf
|
[
"Apache-2.0"
] | 218
|
2017-10-14T03:54:00.000Z
|
2022-03-25T14:48:38.000Z
|
moe/optimal_learning/python/interfaces/expected_improvement_interface.py
|
Tracy3370/Cornell-MOE
|
df299d1be882d2af9796d7a68b3f9505cac7a53e
|
[
"Apache-2.0"
] | 45
|
2017-09-27T14:33:31.000Z
|
2020-12-16T09:32:50.000Z
|
moe/optimal_learning/python/interfaces/expected_improvement_interface.py
|
Tracy3370/Cornell-MOE
|
df299d1be882d2af9796d7a68b3f9505cac7a53e
|
[
"Apache-2.0"
] | 63
|
2017-09-25T14:23:57.000Z
|
2022-03-17T01:41:42.000Z
|
# -*- coding: utf-8 -*-
r"""Interface for computation of the Expected Improvement at points sampled from a GaussianProcess.
.. NOTE:: These comments were copied from the file comments in gpp_math.cpp.
See the package docs (:mod:`moe.optimal_learning.python.interfaces`) for the basics of expected improvement and the definition of the q,p-EI problem.
Then the improvement for this single sample is:
``I = { best_known - min(y) if (best_known - min(y) > 0) (Equation 5)``
`` { 0 else``
where y is a particular prediction from the underlying Gaussian Process and best_known is the best observed value (min) so far.
And the expected improvement, EI, can be computed by averaging repeated computations of I; i.e., monte-carlo integration.
This is done in :mod:`moe.optimal_learning.python.interfaces.expected_improvement_interface.ExpectedImprovementInterface.compute_expected_improvement`;
we can also compute the gradient. This computation is needed in the optimization of q,p-EI.
There is also a special, analytic case of EI computation that does not require monte-carlo integration. This special
case can only be used to compute 1,0-EI (and its gradient). Still this can be very useful (e.g., the heuristic
optimization in gpp_heuristic_expected_improvement_optimization.hpp estimates q,0-EI by repeatedly solving
1,0-EI).
From there, since EI is taken from a sum of gaussians, we expect it to be reasonably smooth
and apply multistart, restarted gradient descent to find the optimum. The use of gradient descent
implies the need for all of the various "grad" functions, e.g., gaussian_process.compute_grad_mean_of_points().
This is handled by coupling an implementation of
:class:`moe.optimal_learning.python.interfaces.expected_improvement_interface.ExpectedImprovementInterface`
to an optimizer (:mod:`moe.optimal_learning.python.interfaces.optimization_interface`).
"""
from builtins import object
from abc import ABCMeta, abstractmethod, abstractproperty
from future.utils import with_metaclass
class ExpectedImprovementInterface(with_metaclass(ABCMeta, object)):
r"""Interface for Expected Improvement computation: EI and its gradient at specified point(s) sampled from a GaussianProcess.
A class to encapsulate the computation of expected improvement and its spatial gradient using points sampled from an
associated GaussianProcess. The general EI computation requires monte-carlo integration; it can support q,p-EI optimization.
It is designed to work with any GaussianProcess.
See file docs for a description of what EI is and an overview of how it can be computed.
Implementers are responsible for dealing with PRNG state for any randomness needed in EI computation.
Implementers are also responsible for storing ``points_to_sample`` and ``points_being_sampled``:
:param points_to_sample: points at which to evaluate EI and/or its gradient to check their value in future experiments (i.e., "q" in q,p-EI)
:type points_to_sample: array of float64 with shape (num_to_sample, dim)
:param points_being_sampled: points being sampled in concurrent experiments (i.e., "p" in q,p-EI)
:type points_being_sampled: array of float64 with shape (num_being_sampled, dim)
"""
@abstractproperty
def dim(self):
"""Return the number of spatial dimensions."""
pass
@abstractproperty
def num_to_sample(self):
"""Number of points at which to compute/optimize EI, aka potential points to sample in future experiments; i.e., the ``q`` in ``q,p-EI``."""
pass
@abstractproperty
def num_being_sampled(self):
"""Number of points being sampled in concurrent experiments; i.e., the ``p`` in ``q,p-EI``."""
pass
@abstractmethod
def compute_expected_improvement(self, **kwargs):
r"""Compute the expected improvement at ``points_to_sample``, with ``points_being_sampled`` concurrent points being sampled.
.. NOTE:: These comments were copied from ExpectedImprovementEvaluator::ComputeExpectedImprovement in gpp_math.hpp.
and duplicated in
:meth:`moe.optimal_learning.python.cpp_wrappers.expected_improvement.ExpectedImprovement.compute_expected_improvement` and
:meth:`moe.optimal_learning.python.python_version.expected_improvement.ExpectedImprovement.compute_expected_improvement`
``points_to_sample`` is the "q" and ``points_being_sampled`` is the "p" in q,p-EI.
Computes the expected improvement ``EI(Xs) = E_n[[f^*_n(X) - min(f(Xs_1),...,f(Xs_m))]^+]``, where ``Xs``
are potential points to sample (union of ``points_to_sample`` and ``points_being_sampled``) and ``X`` are
already sampled points. The ``^+`` indicates that the expression in the expectation evaluates to 0 if it
is negative. ``f^*(X)`` is the MINIMUM over all known function evaluations (``points_sampled_value``),
whereas ``f(Xs)`` are *GP-predicted* function evaluations.
In words, we are computing the expected improvement (over the current ``best_so_far``, best known
objective function value) that would result from sampling (aka running new experiments) at
``points_to_sample`` with ``points_being_sampled`` concurrent/ongoing experiments.
In general, the EI expression is complex and difficult to evaluate; hence we use Monte-Carlo simulation to approximate it.
When faster (e.g., analytic) techniques are available, we will prefer them.
The idea of the MC approach is to repeatedly sample at the union of ``points_to_sample`` and
``points_being_sampled``. This is analogous to gaussian_process_interface.sample_point_from_gp,
but we sample ``num_union`` points at once:
``y = \mu + Lw``
where ``\mu`` is the GP-mean, ``L`` is the ``chol_factor(GP-variance)`` and ``w`` is a vector
of ``num_union`` draws from N(0, 1). Then:
``improvement_per_step = max(max(best_so_far - y), 0.0)``
Observe that the inner ``max`` means only the smallest component of ``y`` contributes in each iteration.
We compute the improvement over many random draws and average.
:return: the expected improvement from sampling ``points_to_sample`` with ``points_being_sampled`` concurrent experiments
:rtype: float64
"""
pass
@abstractmethod
def compute_grad_expected_improvement(self, **kwargs):
r"""Compute the gradient of expected improvement at ``points_to_sample`` wrt ``points_to_sample``, with ``points_being_sampled`` concurrent samples.
.. NOTE:: These comments were copied from ExpectedImprovementEvaluator::ComputeGradExpectedImprovement in gpp_math.hpp
        and duplicated in
:meth:`moe.optimal_learning.python.cpp_wrappers.expected_improvement.ExpectedImprovement.compute_grad_expected_improvement` and
:meth:`moe.optimal_learning.python.python_version.expected_improvement.ExpectedImprovement.compute_grad_expected_improvement`
``points_to_sample`` is the "q" and ``points_being_sampled`` is the "p" in q,p-EI.
In general, the expressions for gradients of EI are complex and difficult to evaluate; hence we use
Monte-Carlo simulation to approximate it. When faster (e.g., analytic) techniques are available, we will prefer them.
        The MC computation of grad EI is similar to the computation of EI (described in
compute_expected_improvement). We differentiate ``y = \mu + Lw`` wrt ``points_to_sample``;
only terms from the gradient of ``\mu`` and ``L`` contribute. In EI, we computed:
``improvement_per_step = max(max(best_so_far - y), 0.0)``
and noted that only the smallest component of ``y`` may contribute (if it is > 0.0).
Call this index ``winner``. Thus in computing grad EI, we only add gradient terms
that are attributable to the ``winner``-th component of ``y``.
:return: gradient of EI, ``\pderiv{EI(Xq \cup Xp)}{Xq_{i,d}}`` where ``Xq`` is ``points_to_sample``
and ``Xp`` is ``points_being_sampled`` (grad EI from sampling ``points_to_sample`` with
``points_being_sampled`` concurrent experiments wrt each dimension of the points in ``points_to_sample``)
:rtype: array of float64 with shape (num_to_sample, dim)
"""
pass
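# Illustrative only -- a minimal NumPy sketch of the Monte-Carlo estimator
# described in the docstrings above; the helper below is an assumption for
# exposition and is not part of this interface. ``mu`` is the GP mean over
# the union of points and ``chol_cov`` the lower Cholesky factor of the GP
# variance, both assumed to be precomputed from a GaussianProcess.
import numpy

def _monte_carlo_ei_sketch(mu, chol_cov, best_so_far, num_mc_iterations=10000):
    draws = numpy.random.randn(num_mc_iterations, mu.shape[0])
    y = mu + draws.dot(chol_cov.T)  # y = mu + L*w, one joint GP sample per row
    improvement = numpy.maximum(best_so_far - y.min(axis=1), 0.0)
    return improvement.mean()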
| 60.330935
| 156
| 0.72788
|
45349fefd8152498d09e20e2c45957e59446778f
| 1,242
|
py
|
Python
|
TCP/TCP_S.py
|
WilliamWuLH/Network_Basic_Programming
|
284f8d3664340b0270271da5c50d5b8bb7ce8534
|
[
"MIT"
] | 1
|
2020-11-29T14:56:22.000Z
|
2020-11-29T14:56:22.000Z
|
TCP/TCP_S.py
|
WilliamWuLH/Network_Basic_Programming
|
284f8d3664340b0270271da5c50d5b8bb7ce8534
|
[
"MIT"
] | null | null | null |
TCP/TCP_S.py
|
WilliamWuLH/Network_Basic_Programming
|
284f8d3664340b0270271da5c50d5b8bb7ce8534
|
[
"MIT"
] | null | null | null |
import socket
import threading
import time
import sys
IP = '127.0.0.1'
PORT = 5050
SER_ADDR = IP,PORT
BUFFSIZE = 1024
def server_socket():
try:
sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sk.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sk.bind(SER_ADDR)
sk.listen(10)
except socket.error as msg:
print(msg)
sys.exit(1)
print('server waiting...')
while True:
conn,addr = sk.accept()
t = threading.Thread(target = handle_data, args = (conn,addr))
t.start()
def handle_data(conn, addr):
print('Accept new connection from {0}'.format(addr))
conn.send(('Hi, Welcome to the server!').encode())
while True:
client_data = conn.recv(BUFFSIZE)
        print('{0} client sent: {1}'.format(addr, client_data.decode()))
time.sleep(1)
if client_data.decode() == 'exit' or not client_data:
print('{0} connection close'.format(addr))
conn.send( ('Connection closed!').encode() )
break
        message = input("Server : ").strip()
        conn.send(("Server : " + message).encode())
conn.close()
if __name__ == "__main__":
server_socket()
#socket server
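# A minimal companion client sketch (an illustration, not part of the original
# file): it connects to SER_ADDR, prints the server's greeting, then relays
# console input until the user types 'exit'.
def client_socket():
    sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sk.connect((IP, PORT))
    print(sk.recv(BUFFSIZE).decode())  # greeting sent by handle_data
    while True:
        msg = input("Client : ").strip()
        sk.send(msg.encode())
        print(sk.recv(BUFFSIZE).decode())  # server's reply
        if msg == 'exit':
            break
    sk.close()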
| 24.84
| 79
| 0.605475
|
0df2781e7962d8d02dca4d1d2ed83132ca287e7e
| 1,192
|
py
|
Python
|
solutions/codeforces/1425Adp.py
|
forxhunter/ComputingIntro
|
50fa2ac030748626c694ec5c884c5ac32f0b42a8
|
[
"Apache-2.0"
] | 1
|
2021-01-02T04:31:34.000Z
|
2021-01-02T04:31:34.000Z
|
solutions/codeforces/1425Adp.py
|
forxhunter/ComputingIntro
|
50fa2ac030748626c694ec5c884c5ac32f0b42a8
|
[
"Apache-2.0"
] | null | null | null |
solutions/codeforces/1425Adp.py
|
forxhunter/ComputingIntro
|
50fa2ac030748626c694ec5c884c5ac32f0b42a8
|
[
"Apache-2.0"
] | null | null | null |
'''
1425A. Arena of Greed
games, greedy, 1400
https://codeforces.com/problemset/problem/1425/A
'''
# get the input at once
t = int(input())
golds = []
for _ in range(t):
golds.append(int(input()))
# save the solved puzzle
dp = {}
# save the special case when n == 4
dp[4] = [3, 1]
def optimum(n):
left = n
greedy_first = 0
greedy_second = 0
    # fs tracks whether the first player still leads the even subgame; an odd
    # pile forces the first player to take one coin, handing the even
    # subgame's first move to the opponent
    fs = True
    if left & 1 == 1:
        left -= 1
        fs = False
while left:
        # reuse a memoized split for the remaining (even) pile
        if left in dp:
            greedy_first += dp[left][0]
            greedy_second += dp[left][1]
            left -= (dp[left][0] + dp[left][1])
            break
else:
if (left //2) & 1:
greedy_first += left // 2
greedy_second += 1
left = (left//2) - 1
else:
greedy_first += 1
greedy_second += 1
left -= 2
dp[n] = [greedy_first, greedy_second]
return [greedy_second+1,greedy_first][fs]
# find the answer
ans = []
for gold in golds:
if gold == 1:
ans.append(1)
else:
ans.append(optimum(gold))
print(*ans,sep='\n')
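# Worked example, consistent with the logic above: optimum(5) == 2.
# The pile is odd, so the first player must take one coin; dp[4] == [3, 1]
# then splits the remaining even subgame 3-1 in the opponent's favour,
# leaving the first player with 1 + 1 = 2 coins.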
| 21.285714
| 48
| 0.5
|
139da666ed14015ffb6dd0c13b72c5836ffcd054
| 5,866
|
py
|
Python
|
hack/gen-content.py
|
berendt/fluxcd-website
|
fc8470f0e4cd4c9dfd8dd629257b69b999c7d885
|
[
"Apache-2.0"
] | 6
|
2019-08-14T05:48:27.000Z
|
2020-06-16T22:07:39.000Z
|
hack/gen-content.py
|
berendt/fluxcd-website
|
fc8470f0e4cd4c9dfd8dd629257b69b999c7d885
|
[
"Apache-2.0"
] | 22
|
2019-08-15T06:22:15.000Z
|
2020-10-30T15:15:37.000Z
|
hack/gen-content.py
|
berendt/fluxcd-website
|
fc8470f0e4cd4c9dfd8dd629257b69b999c7d885
|
[
"Apache-2.0"
] | 10
|
2019-08-13T17:42:09.000Z
|
2020-10-05T17:50:13.000Z
|
#!/usr/bin/env python3
# This was inspired by
# http://sigs.k8s.io/contributor-site/hack/gen-content.sh
# Check out
# external-sources/README.md for some instructions on how
# the file format works.
import csv
import os
import re
import shutil
import sys
import subprocess
import tempfile
# Workaround to make this work in Netlify...
LOCAL_PY_PATH = '/opt/buildhome/python3.8/lib/python3.8/site-packages/'
if LOCAL_PY_PATH not in sys.path:
sys.path.append(LOCAL_PY_PATH)
import pathlib
global link_mapping
link_mapping = []
'''
Adds basic front matter to each imported file.
Imported `docs` pages must not keep their own front matter or a leading
# (h1) heading, so the h1 is stripped below and front matter is generated here.
'''
def rewrite_header(out_file, title=None, docs=False, weight=None):
lines = open(out_file, 'r').readlines()
if not title or title == '-':
title = os.path.basename(out_file).split('.md')[0].title()
header_lines = [
'---\n',
'title: {}\n'.format(title),
'importedDoc: true\n'
]
if docs:
header_lines += ['type: docs\n']
if weight:
header_lines += ['weight: {}\n'.format(weight)]
header_lines += [
'---\n',
'\n'
]
file_desc = open(out_file, 'w')
file_desc.writelines(header_lines)
    for idx, line in enumerate(lines):
        # skip a leading h1 on docs pages; lines.index() would mis-handle
        # duplicated lines, so track the position with enumerate instead
        if not docs or not line.startswith('# ') or idx >= 4:
            if not line.startswith('<!-- '):  # drop stray HTML comments
                file_desc.write(line)
file_desc.close()
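# For example (tracing header_lines above), rewrite_header(path,
# title='Get Started', docs=True, weight=10) prepends:
#
#   ---
#   title: Get Started
#   importedDoc: true
#   type: docs
#   weight: 10
#   ---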
class Repo():
def __init__(self, external_sources_dir, repo_fn):
self.temp_dir = tempfile.mkdtemp()
self.repo_id = repo_fn.split(external_sources_dir)[1][1:]
self.gh_source = 'https://github.com/{}/'.format(self.repo_id).strip('/')
self.repo_fn = repo_fn
self.dest = os.path.join(self.temp_dir, self.repo_id)
self.file_list = []
global link_mapping
with open(self.repo_fn, 'r') as file_desc:
csv_reader = csv.reader(file_desc)
for line in csv_reader:
self.file_list += [[entry.strip('/') for entry in line]]
link_mapping += [[self.gh_source]+[entry.strip('/') for entry in line]]
def __del__(self):
shutil.rmtree(self.temp_dir)
def clone_repo(self):
subprocess.call([
'git', 'clone', '--depth=1', '-q',
self.gh_source, self.dest])
def rewrite_links(self, out_file):
'''
Types of links in Markdown
1: <url> # hard to find - we have markdown that's mixed with html
2: [label](url)
3: [url]
4: [label]: url together with [label]
'''
with open(out_file, 'r') as file_desc:
content = file_desc.read()
# links_1 = re.findall(r'\<.+?\>', content, re.MULTILINE)
links_2 = re.findall(r'\[.+?\]\((.+?)\)', content, re.MULTILINE)
links_3 = re.findall(r'\[(.+?)]\s+', content, re.MULTILINE)
links_4 = re.findall(r'\[(.+?)\]\:\s+.+?', content, re.MULTILINE)
# set() because we only want to review every link just once
# str.split('#')[0] because we ignore anchors
links = set([a.split('#')[0]
for a in links_2+links_3+links_4
if not a.startswith('#')])
link_rewrites = {}
for link in links:
if not link:
continue
global link_mapping
for entry in link_mapping:
if link == entry[1]:
link_rewrites[link] = '/{}'.format(entry[2].lower().split('.md')[0])
elif link.startswith(self.gh_source) and link.endswith(entry[1]) and \
self.gh_source == entry[0]:
link_rewrites[link] = '/{}'.format(entry[2].lower().split('.md')[0])
if link not in link_rewrites and os.path.exists(os.path.join(self.dest, link)) and \
not link.startswith('https://') and not link.startswith('http://'):
link_rewrites[link] = os.path.join(self.gh_source, 'blob/main', link)
for key in link_rewrites:
content = content.replace(key, link_rewrites[key])
with open(out_file, 'w') as file_desc:
file_desc.write(content)
def copy_files(self, content_dir):
for entry in self.file_list:
out_file = os.path.join(content_dir, entry[1])
shutil.copyfile(
os.path.join(self.dest, entry[0]),
out_file)
docs = entry[1].startswith('docs/') or entry[1].startswith('legacy/') or \
entry[1].startswith('flagger/')
title = None
weight = None
if len(entry) == 4:
weight = entry[3]
if len(entry) >= 3:
title = entry[2]
rewrite_header(out_file, title=title, docs=docs, weight=weight)
self.rewrite_links(out_file)
def get_repo_list(external_sources_dir):
repos = []
file_refs = pathlib.Path(external_sources_dir).glob('*/*')
for file in file_refs:
repo_fn = str(file)
if os.path.isfile(repo_fn):
repos += [Repo(external_sources_dir, repo_fn)]
return repos
def main():
repo_root = os.path.realpath(
os.path.join(os.path.dirname(__file__), '..'))
if os.getcwd() != repo_root:
print('Please run this script from top-level of the repository.')
sys.exit(1)
content_dir = os.path.join(repo_root, 'content/en')
external_sources_dir = os.path.join(repo_root, 'external-sources')
repos = get_repo_list(external_sources_dir)
for repo in repos:
repo.clone_repo()
for repo in repos:
repo.copy_files(content_dir)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print("Aborted.", sys.stderr)
sys.exit(1)
| 32.230769
| 96
| 0.573645
|
ba6c89d444d8505bfe5837a9cb06454a2c8a0d73
| 557
|
py
|
Python
|
Blips Server/alembic/versions/331f3604a692_altered_timelines.py
|
andrewapperley/Blips-Server
|
136a68ca41cec682a727cf3df20d0d8b1fc35526
|
[
"MIT"
] | null | null | null |
Blips Server/alembic/versions/331f3604a692_altered_timelines.py
|
andrewapperley/Blips-Server
|
136a68ca41cec682a727cf3df20d0d8b1fc35526
|
[
"MIT"
] | null | null | null |
Blips Server/alembic/versions/331f3604a692_altered_timelines.py
|
andrewapperley/Blips-Server
|
136a68ca41cec682a727cf3df20d0d8b1fc35526
|
[
"MIT"
] | null | null | null |
"""altered_timelines
Revision ID: 331f3604a692
Revises: 58972abd14d9
Create Date: 2014-06-04 23:07:22.652321
"""
# revision identifiers, used by Alembic.
revision = '331f3604a692'
down_revision = '58972abd14d9'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.drop_column("notification_users", "notification_receiver_id")
op.drop_column("timelines", "description")
op.drop_column("timelines", "cover_image")
op.drop_column("timelines", "start_date")
op.drop_column("timelines", "title")
def downgrade():
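    # The dropped columns (and their data) cannot be recovered, so this
    # migration is effectively irreversible.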
pass
| 20.62963
| 68
| 0.736086
|
c52066be3cc8098d9be3c3c613baf4d682334e3c
| 26,676
|
py
|
Python
|
Lib/test/test_contextlib.py
|
Ricky-Wilson/Python
|
9896d7a9901dabea4b3d555af471577a624d1b95
|
[
"PSF-2.0"
] | 6
|
2016-03-15T19:07:14.000Z
|
2018-08-15T13:11:03.000Z
|
Lib/test/test_contextlib.py
|
Ricky-Wilson/Python
|
9896d7a9901dabea4b3d555af471577a624d1b95
|
[
"PSF-2.0"
] | null | null | null |
Lib/test/test_contextlib.py
|
Ricky-Wilson/Python
|
9896d7a9901dabea4b3d555af471577a624d1b95
|
[
"PSF-2.0"
] | 2
|
2016-08-24T14:29:14.000Z
|
2021-12-29T19:27:06.000Z
|
"""Unit tests for contextlib.py, and other context managers."""
import io
import sys
import tempfile
import unittest
from contextlib import * # Tests __all__
from test import support
try:
import threading
except ImportError:
threading = None
class ContextManagerTestCase(unittest.TestCase):
def test_contextmanager_plain(self):
state = []
@contextmanager
def woohoo():
state.append(1)
yield 42
state.append(999)
with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
self.assertEqual(state, [1, 42, 999])
def test_contextmanager_finally(self):
state = []
@contextmanager
def woohoo():
state.append(1)
try:
yield 42
finally:
state.append(999)
with self.assertRaises(ZeroDivisionError):
with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
raise ZeroDivisionError()
self.assertEqual(state, [1, 42, 999])
def test_contextmanager_no_reraise(self):
@contextmanager
def whee():
yield
ctx = whee()
ctx.__enter__()
# Calling __exit__ should not result in an exception
self.assertFalse(ctx.__exit__(TypeError, TypeError("foo"), None))
def test_contextmanager_trap_yield_after_throw(self):
@contextmanager
def whoo():
try:
yield
except:
yield
ctx = whoo()
ctx.__enter__()
self.assertRaises(
RuntimeError, ctx.__exit__, TypeError, TypeError("foo"), None
)
def test_contextmanager_except(self):
state = []
@contextmanager
def woohoo():
state.append(1)
try:
yield 42
except ZeroDivisionError as e:
state.append(e.args[0])
self.assertEqual(state, [1, 42, 999])
with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
raise ZeroDivisionError(999)
self.assertEqual(state, [1, 42, 999])
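    # Helper: builds a @contextmanager-decorated function that carries custom
    # attributes and a docstring, used to check that they survive the wrapping.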
def _create_contextmanager_attribs(self):
def attribs(**kw):
def decorate(func):
for k,v in kw.items():
setattr(func,k,v)
return func
return decorate
@contextmanager
@attribs(foo='bar')
def baz(spam):
"""Whee!"""
return baz
def test_contextmanager_attribs(self):
baz = self._create_contextmanager_attribs()
self.assertEqual(baz.__name__,'baz')
self.assertEqual(baz.foo, 'bar')
@support.requires_docstrings
def test_contextmanager_doc_attrib(self):
baz = self._create_contextmanager_attribs()
self.assertEqual(baz.__doc__, "Whee!")
@support.requires_docstrings
def test_instance_docstring_given_cm_docstring(self):
baz = self._create_contextmanager_attribs()(None)
self.assertEqual(baz.__doc__, "Whee!")
class ClosingTestCase(unittest.TestCase):
@support.requires_docstrings
def test_instance_docs(self):
# Issue 19330: ensure context manager instances have good docstrings
cm_docstring = closing.__doc__
obj = closing(None)
self.assertEqual(obj.__doc__, cm_docstring)
def test_closing(self):
state = []
class C:
def close(self):
state.append(1)
x = C()
self.assertEqual(state, [])
with closing(x) as y:
self.assertEqual(x, y)
self.assertEqual(state, [1])
def test_closing_error(self):
state = []
class C:
def close(self):
state.append(1)
x = C()
self.assertEqual(state, [])
with self.assertRaises(ZeroDivisionError):
with closing(x) as y:
self.assertEqual(x, y)
1 / 0
self.assertEqual(state, [1])
class FileContextTestCase(unittest.TestCase):
def testWithOpen(self):
tfn = tempfile.mktemp()
try:
f = None
with open(tfn, "w") as f:
self.assertFalse(f.closed)
f.write("Booh\n")
self.assertTrue(f.closed)
f = None
with self.assertRaises(ZeroDivisionError):
with open(tfn, "r") as f:
self.assertFalse(f.closed)
self.assertEqual(f.read(), "Booh\n")
1 / 0
self.assertTrue(f.closed)
finally:
support.unlink(tfn)
@unittest.skipUnless(threading, 'Threading required for this test.')
class LockContextTestCase(unittest.TestCase):
def boilerPlate(self, lock, locked):
self.assertFalse(locked())
with lock:
self.assertTrue(locked())
self.assertFalse(locked())
with self.assertRaises(ZeroDivisionError):
with lock:
self.assertTrue(locked())
1 / 0
self.assertFalse(locked())
def testWithLock(self):
lock = threading.Lock()
self.boilerPlate(lock, lock.locked)
def testWithRLock(self):
lock = threading.RLock()
self.boilerPlate(lock, lock._is_owned)
def testWithCondition(self):
lock = threading.Condition()
def locked():
return lock._is_owned()
self.boilerPlate(lock, locked)
def testWithSemaphore(self):
lock = threading.Semaphore()
def locked():
if lock.acquire(False):
lock.release()
return False
else:
return True
self.boilerPlate(lock, locked)
def testWithBoundedSemaphore(self):
lock = threading.BoundedSemaphore()
def locked():
if lock.acquire(False):
lock.release()
return False
else:
return True
self.boilerPlate(lock, locked)
class mycontext(ContextDecorator):
"""Example decoration-compatible context manager for testing"""
started = False
exc = None
catch = False
def __enter__(self):
self.started = True
return self
def __exit__(self, *exc):
self.exc = exc
return self.catch
class TestContextDecorator(unittest.TestCase):
@support.requires_docstrings
def test_instance_docs(self):
# Issue 19330: ensure context manager instances have good docstrings
cm_docstring = mycontext.__doc__
obj = mycontext()
self.assertEqual(obj.__doc__, cm_docstring)
def test_contextdecorator(self):
context = mycontext()
with context as result:
self.assertIs(result, context)
self.assertTrue(context.started)
self.assertEqual(context.exc, (None, None, None))
def test_contextdecorator_with_exception(self):
context = mycontext()
with self.assertRaisesRegex(NameError, 'foo'):
with context:
raise NameError('foo')
self.assertIsNotNone(context.exc)
self.assertIs(context.exc[0], NameError)
context = mycontext()
context.catch = True
with context:
raise NameError('foo')
self.assertIsNotNone(context.exc)
self.assertIs(context.exc[0], NameError)
def test_decorator(self):
context = mycontext()
@context
def test():
self.assertIsNone(context.exc)
self.assertTrue(context.started)
test()
self.assertEqual(context.exc, (None, None, None))
def test_decorator_with_exception(self):
context = mycontext()
@context
def test():
self.assertIsNone(context.exc)
self.assertTrue(context.started)
raise NameError('foo')
with self.assertRaisesRegex(NameError, 'foo'):
test()
self.assertIsNotNone(context.exc)
self.assertIs(context.exc[0], NameError)
def test_decorating_method(self):
context = mycontext()
class Test(object):
@context
def method(self, a, b, c=None):
self.a = a
self.b = b
self.c = c
# these tests are for argument passing when used as a decorator
test = Test()
test.method(1, 2)
self.assertEqual(test.a, 1)
self.assertEqual(test.b, 2)
self.assertEqual(test.c, None)
test = Test()
test.method('a', 'b', 'c')
self.assertEqual(test.a, 'a')
self.assertEqual(test.b, 'b')
self.assertEqual(test.c, 'c')
test = Test()
test.method(a=1, b=2)
self.assertEqual(test.a, 1)
self.assertEqual(test.b, 2)
def test_typo_enter(self):
class mycontext(ContextDecorator):
def __unter__(self):
pass
def __exit__(self, *exc):
pass
with self.assertRaises(AttributeError):
with mycontext():
pass
def test_typo_exit(self):
class mycontext(ContextDecorator):
def __enter__(self):
pass
def __uxit__(self, *exc):
pass
with self.assertRaises(AttributeError):
with mycontext():
pass
def test_contextdecorator_as_mixin(self):
class somecontext(object):
started = False
exc = None
def __enter__(self):
self.started = True
return self
def __exit__(self, *exc):
self.exc = exc
class mycontext(somecontext, ContextDecorator):
pass
context = mycontext()
@context
def test():
self.assertIsNone(context.exc)
self.assertTrue(context.started)
test()
self.assertEqual(context.exc, (None, None, None))
def test_contextmanager_as_decorator(self):
@contextmanager
def woohoo(y):
state.append(y)
yield
state.append(999)
state = []
@woohoo(1)
def test(x):
self.assertEqual(state, [1])
state.append(x)
test('something')
self.assertEqual(state, [1, 'something', 999])
# Issue #11647: Ensure the decorated function is 'reusable'
state = []
test('something else')
self.assertEqual(state, [1, 'something else', 999])
class TestExitStack(unittest.TestCase):
@support.requires_docstrings
def test_instance_docs(self):
# Issue 19330: ensure context manager instances have good docstrings
cm_docstring = ExitStack.__doc__
obj = ExitStack()
self.assertEqual(obj.__doc__, cm_docstring)
def test_no_resources(self):
with ExitStack():
pass
def test_callback(self):
expected = [
((), {}),
((1,), {}),
((1,2), {}),
((), dict(example=1)),
((1,), dict(example=1)),
((1,2), dict(example=1)),
]
result = []
def _exit(*args, **kwds):
"""Test metadata propagation"""
result.append((args, kwds))
with ExitStack() as stack:
for args, kwds in reversed(expected):
if args and kwds:
f = stack.callback(_exit, *args, **kwds)
elif args:
f = stack.callback(_exit, *args)
elif kwds:
f = stack.callback(_exit, **kwds)
else:
f = stack.callback(_exit)
self.assertIs(f, _exit)
for wrapper in stack._exit_callbacks:
self.assertIs(wrapper.__wrapped__, _exit)
self.assertNotEqual(wrapper.__name__, _exit.__name__)
self.assertIsNone(wrapper.__doc__, _exit.__doc__)
self.assertEqual(result, expected)
def test_push(self):
exc_raised = ZeroDivisionError
def _expect_exc(exc_type, exc, exc_tb):
self.assertIs(exc_type, exc_raised)
def _suppress_exc(*exc_details):
return True
def _expect_ok(exc_type, exc, exc_tb):
self.assertIsNone(exc_type)
self.assertIsNone(exc)
self.assertIsNone(exc_tb)
class ExitCM(object):
def __init__(self, check_exc):
self.check_exc = check_exc
def __enter__(self):
self.fail("Should not be called!")
def __exit__(self, *exc_details):
self.check_exc(*exc_details)
with ExitStack() as stack:
stack.push(_expect_ok)
self.assertIs(stack._exit_callbacks[-1], _expect_ok)
cm = ExitCM(_expect_ok)
stack.push(cm)
self.assertIs(stack._exit_callbacks[-1].__self__, cm)
stack.push(_suppress_exc)
self.assertIs(stack._exit_callbacks[-1], _suppress_exc)
cm = ExitCM(_expect_exc)
stack.push(cm)
self.assertIs(stack._exit_callbacks[-1].__self__, cm)
stack.push(_expect_exc)
self.assertIs(stack._exit_callbacks[-1], _expect_exc)
stack.push(_expect_exc)
self.assertIs(stack._exit_callbacks[-1], _expect_exc)
1/0
def test_enter_context(self):
class TestCM(object):
def __enter__(self):
result.append(1)
def __exit__(self, *exc_details):
result.append(3)
result = []
cm = TestCM()
with ExitStack() as stack:
@stack.callback # Registered first => cleaned up last
def _exit():
result.append(4)
self.assertIsNotNone(_exit)
stack.enter_context(cm)
self.assertIs(stack._exit_callbacks[-1].__self__, cm)
result.append(2)
self.assertEqual(result, [1, 2, 3, 4])
def test_close(self):
result = []
with ExitStack() as stack:
@stack.callback
def _exit():
result.append(1)
self.assertIsNotNone(_exit)
stack.close()
result.append(2)
self.assertEqual(result, [1, 2])
def test_pop_all(self):
result = []
with ExitStack() as stack:
@stack.callback
def _exit():
result.append(3)
self.assertIsNotNone(_exit)
new_stack = stack.pop_all()
result.append(1)
result.append(2)
new_stack.close()
self.assertEqual(result, [1, 2, 3])
def test_exit_raise(self):
with self.assertRaises(ZeroDivisionError):
with ExitStack() as stack:
stack.push(lambda *exc: False)
1/0
def test_exit_suppress(self):
with ExitStack() as stack:
stack.push(lambda *exc: True)
1/0
def test_exit_exception_chaining_reference(self):
# Sanity check to make sure that ExitStack chaining matches
# actual nested with statements
class RaiseExc:
def __init__(self, exc):
self.exc = exc
def __enter__(self):
return self
def __exit__(self, *exc_details):
raise self.exc
class RaiseExcWithContext:
def __init__(self, outer, inner):
self.outer = outer
self.inner = inner
def __enter__(self):
return self
def __exit__(self, *exc_details):
try:
raise self.inner
except:
raise self.outer
class SuppressExc:
def __enter__(self):
return self
def __exit__(self, *exc_details):
type(self).saved_details = exc_details
return True
try:
with RaiseExc(IndexError):
with RaiseExcWithContext(KeyError, AttributeError):
with SuppressExc():
with RaiseExc(ValueError):
1 / 0
except IndexError as exc:
self.assertIsInstance(exc.__context__, KeyError)
self.assertIsInstance(exc.__context__.__context__, AttributeError)
# Inner exceptions were suppressed
self.assertIsNone(exc.__context__.__context__.__context__)
else:
self.fail("Expected IndexError, but no exception was raised")
# Check the inner exceptions
inner_exc = SuppressExc.saved_details[1]
self.assertIsInstance(inner_exc, ValueError)
self.assertIsInstance(inner_exc.__context__, ZeroDivisionError)
def test_exit_exception_chaining(self):
# Ensure exception chaining matches the reference behaviour
def raise_exc(exc):
raise exc
saved_details = None
def suppress_exc(*exc_details):
nonlocal saved_details
saved_details = exc_details
return True
try:
with ExitStack() as stack:
stack.callback(raise_exc, IndexError)
stack.callback(raise_exc, KeyError)
stack.callback(raise_exc, AttributeError)
stack.push(suppress_exc)
stack.callback(raise_exc, ValueError)
1 / 0
except IndexError as exc:
self.assertIsInstance(exc.__context__, KeyError)
self.assertIsInstance(exc.__context__.__context__, AttributeError)
# Inner exceptions were suppressed
self.assertIsNone(exc.__context__.__context__.__context__)
else:
self.fail("Expected IndexError, but no exception was raised")
# Check the inner exceptions
inner_exc = saved_details[1]
self.assertIsInstance(inner_exc, ValueError)
self.assertIsInstance(inner_exc.__context__, ZeroDivisionError)
def test_exit_exception_non_suppressing(self):
# http://bugs.python.org/issue19092
def raise_exc(exc):
raise exc
def suppress_exc(*exc_details):
return True
try:
with ExitStack() as stack:
stack.callback(lambda: None)
stack.callback(raise_exc, IndexError)
except Exception as exc:
self.assertIsInstance(exc, IndexError)
else:
self.fail("Expected IndexError, but no exception was raised")
try:
with ExitStack() as stack:
stack.callback(raise_exc, KeyError)
stack.push(suppress_exc)
stack.callback(raise_exc, IndexError)
except Exception as exc:
self.assertIsInstance(exc, KeyError)
else:
self.fail("Expected KeyError, but no exception was raised")
def test_exit_exception_with_correct_context(self):
# http://bugs.python.org/issue20317
@contextmanager
def gets_the_context_right(exc):
try:
yield
finally:
raise exc
exc1 = Exception(1)
exc2 = Exception(2)
exc3 = Exception(3)
exc4 = Exception(4)
# The contextmanager already fixes the context, so prior to the
# fix, ExitStack would try to fix it *again* and get into an
# infinite self-referential loop
try:
with ExitStack() as stack:
stack.enter_context(gets_the_context_right(exc4))
stack.enter_context(gets_the_context_right(exc3))
stack.enter_context(gets_the_context_right(exc2))
raise exc1
except Exception as exc:
self.assertIs(exc, exc4)
self.assertIs(exc.__context__, exc3)
self.assertIs(exc.__context__.__context__, exc2)
self.assertIs(exc.__context__.__context__.__context__, exc1)
self.assertIsNone(
exc.__context__.__context__.__context__.__context__)
def test_exit_exception_with_existing_context(self):
# Addresses a lack of test coverage discovered after checking in a
# fix for issue 20317 that still contained debugging code.
def raise_nested(inner_exc, outer_exc):
try:
raise inner_exc
finally:
raise outer_exc
exc1 = Exception(1)
exc2 = Exception(2)
exc3 = Exception(3)
exc4 = Exception(4)
exc5 = Exception(5)
try:
with ExitStack() as stack:
stack.callback(raise_nested, exc4, exc5)
stack.callback(raise_nested, exc2, exc3)
raise exc1
except Exception as exc:
self.assertIs(exc, exc5)
self.assertIs(exc.__context__, exc4)
self.assertIs(exc.__context__.__context__, exc3)
self.assertIs(exc.__context__.__context__.__context__, exc2)
self.assertIs(
exc.__context__.__context__.__context__.__context__, exc1)
self.assertIsNone(
exc.__context__.__context__.__context__.__context__.__context__)
def test_body_exception_suppress(self):
def suppress_exc(*exc_details):
return True
try:
with ExitStack() as stack:
stack.push(suppress_exc)
1/0
except IndexError as exc:
self.fail("Expected no exception, got IndexError")
def test_exit_exception_chaining_suppress(self):
with ExitStack() as stack:
stack.push(lambda *exc: True)
stack.push(lambda *exc: 1/0)
stack.push(lambda *exc: {}[1])
def test_excessive_nesting(self):
# The original implementation would die with RecursionError here
with ExitStack() as stack:
for i in range(10000):
stack.callback(int)
def test_instance_bypass(self):
class Example(object): pass
cm = Example()
cm.__exit__ = object()
stack = ExitStack()
self.assertRaises(AttributeError, stack.enter_context, cm)
stack.push(cm)
self.assertIs(stack._exit_callbacks[-1], cm)
class TestRedirectStream:
redirect_stream = None
orig_stream = None
@support.requires_docstrings
def test_instance_docs(self):
# Issue 19330: ensure context manager instances have good docstrings
cm_docstring = self.redirect_stream.__doc__
obj = self.redirect_stream(None)
self.assertEqual(obj.__doc__, cm_docstring)
def test_no_redirect_in_init(self):
orig_stdout = getattr(sys, self.orig_stream)
self.redirect_stream(None)
self.assertIs(getattr(sys, self.orig_stream), orig_stdout)
def test_redirect_to_string_io(self):
f = io.StringIO()
msg = "Consider an API like help(), which prints directly to stdout"
orig_stdout = getattr(sys, self.orig_stream)
with self.redirect_stream(f):
print(msg, file=getattr(sys, self.orig_stream))
self.assertIs(getattr(sys, self.orig_stream), orig_stdout)
s = f.getvalue().strip()
self.assertEqual(s, msg)
def test_enter_result_is_target(self):
f = io.StringIO()
with self.redirect_stream(f) as enter_result:
self.assertIs(enter_result, f)
def test_cm_is_reusable(self):
f = io.StringIO()
write_to_f = self.redirect_stream(f)
orig_stdout = getattr(sys, self.orig_stream)
with write_to_f:
print("Hello", end=" ", file=getattr(sys, self.orig_stream))
with write_to_f:
print("World!", file=getattr(sys, self.orig_stream))
self.assertIs(getattr(sys, self.orig_stream), orig_stdout)
s = f.getvalue()
self.assertEqual(s, "Hello World!\n")
def test_cm_is_reentrant(self):
f = io.StringIO()
write_to_f = self.redirect_stream(f)
orig_stdout = getattr(sys, self.orig_stream)
with write_to_f:
print("Hello", end=" ", file=getattr(sys, self.orig_stream))
with write_to_f:
print("World!", file=getattr(sys, self.orig_stream))
self.assertIs(getattr(sys, self.orig_stream), orig_stdout)
s = f.getvalue()
self.assertEqual(s, "Hello World!\n")
class TestRedirectStdout(TestRedirectStream, unittest.TestCase):
redirect_stream = redirect_stdout
orig_stream = "stdout"
class TestRedirectStderr(TestRedirectStream, unittest.TestCase):
redirect_stream = redirect_stderr
orig_stream = "stderr"
class TestSuppress(unittest.TestCase):
@support.requires_docstrings
def test_instance_docs(self):
# Issue 19330: ensure context manager instances have good docstrings
cm_docstring = suppress.__doc__
obj = suppress()
self.assertEqual(obj.__doc__, cm_docstring)
def test_no_result_from_enter(self):
with suppress(ValueError) as enter_result:
self.assertIsNone(enter_result)
def test_no_exception(self):
with suppress(ValueError):
self.assertEqual(pow(2, 5), 32)
def test_exact_exception(self):
with suppress(TypeError):
len(5)
def test_exception_hierarchy(self):
with suppress(LookupError):
'Hello'[50]
def test_other_exception(self):
with self.assertRaises(ZeroDivisionError):
with suppress(TypeError):
1/0
def test_no_args(self):
with self.assertRaises(ZeroDivisionError):
with suppress():
1/0
def test_multiple_exception_args(self):
with suppress(ZeroDivisionError, TypeError):
1/0
with suppress(ZeroDivisionError, TypeError):
len(5)
def test_cm_is_reentrant(self):
ignore_exceptions = suppress(Exception)
with ignore_exceptions:
pass
with ignore_exceptions:
len(5)
with ignore_exceptions:
1/0
with ignore_exceptions: # Check nested usage
len(5)
if __name__ == "__main__":
unittest.main()
| 31.569231
| 80
| 0.575611
|
bf24cd43748302e2843970cdd696ef350068adcd
| 566
|
py
|
Python
|
packages/server/build.py
|
arraypad/automuse
|
09aa75375048c4175950eb91a32d114cbc22940a
|
[
"Unlicense"
] | 1
|
2021-03-13T17:22:36.000Z
|
2021-03-13T17:22:36.000Z
|
packages/server/build.py
|
arraypad/automuse
|
09aa75375048c4175950eb91a32d114cbc22940a
|
[
"Unlicense"
] | null | null | null |
packages/server/build.py
|
arraypad/automuse
|
09aa75375048c4175950eb91a32d114cbc22940a
|
[
"Unlicense"
] | 1
|
2021-02-22T17:08:17.000Z
|
2021-02-22T17:08:17.000Z
|
#!/usr/bin/env python
import os
import glob
import json
from shutil import copyfile
def read(path):
with open(path, 'r') as f:
return f.read()
templates = {
os.path.splitext(os.path.basename(f))[0]: read(f) \
for f in glob.glob('../ui/example/*.js')
}
index = read('src/index.js').replace(
'templates = {}',
'templates = {}'.format(json.dumps(templates))
).replace(
'uiDist = ``',
'uiDist = {}'.format(json.dumps(read('../ui/dist/index.js'))),
)
with open('dist/index.js', 'w') as f:
f.write(index)
copyfile('src/templates.js', 'dist/templates.js')
| 19.517241
| 63
| 0.64311
|
8f1be471721acb22dda30cab8c39b00d21f8b5df
| 21,298
|
py
|
Python
|
printrun/stlplater.py
|
aurelianammon/flask-socketio-printer
|
addd318d1468891fdf46adb1f01f319ae33f2044
|
[
"MIT"
] | null | null | null |
printrun/stlplater.py
|
aurelianammon/flask-socketio-printer
|
addd318d1468891fdf46adb1f01f319ae33f2044
|
[
"MIT"
] | null | null | null |
printrun/stlplater.py
|
aurelianammon/flask-socketio-printer
|
addd318d1468891fdf46adb1f01f319ae33f2044
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import os
# Set up Internationalization using gettext
# searching for installed locales on /usr/share; uses relative folder if not found (windows)
from .utils import install_locale
install_locale('pronterface')
import wx
import time
import logging
import threading
import math
import sys
import re
import traceback
import subprocess
from copy import copy
from printrun import stltool
from printrun.objectplater import make_plater, PlaterPanel
glview = '--no-gl' not in sys.argv
if glview:
try:
from printrun import stlview
except:
glview = False
logging.warning("Could not load 3D viewer for plater:"
+ "\n" + traceback.format_exc())
def evalme(s):
return eval(s[s.find("(") + 1:s.find(")")])
def transformation_matrix(model):
matrix = stltool.I
if any(model.centeroffset):
matrix = model.translation_matrix(model.centeroffset).dot(matrix)
if model.rot:
matrix = model.rotation_matrix([0, 0, model.rot]).dot(matrix)
if any(model.offsets):
matrix = model.translation_matrix(model.offsets).dot(matrix)
return matrix
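# Illustrative note (not part of the original Printrun source): the matrix is
# composed right-to-left, so a model vertex is first shifted by its center
# offset, then rotated about Z, then translated to its plate position.
# Assuming the stltool helpers used above, the equivalent explicit chain is:
#     matrix = T(offsets) @ R([0, 0, rot]) @ T(centeroffset) @ I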
class showstl(wx.Window):
def __init__(self, parent, size, pos):
super().__init__(parent, size = size, pos = pos)
self.i = 0
self.parent = parent
self.previ = 0
self.Bind(wx.EVT_MOUSEWHEEL, self.rot)
self.Bind(wx.EVT_MOUSE_EVENTS, self.move)
self.Bind(wx.EVT_PAINT, self.repaint)
self.Bind(wx.EVT_KEY_DOWN, self.keypress)
self.triggered = 0
self.initpos = None
self.prevsel = -1
def prepare_model(self, m, scale):
m.bitmap = wx.Bitmap(800, 800, 32)
dc = wx.MemoryDC()
dc.SelectObject(m.bitmap)
dc.SetBackground(wx.Brush((0, 0, 0, 0)))
dc.SetBrush(wx.Brush((0, 0, 0, 255)))
dc.SetBrush(wx.Brush(wx.Colour(128, 255, 128)))
dc.SetPen(wx.Pen(wx.Colour(128, 128, 128)))
for i in m.facets:
dc.DrawPolygon([wx.Point(400 + scale * p[0], (400 - scale * p[1])) for p in i[1]])
dc.SelectObject(wx.NullBitmap)
m.bitmap.SetMask(wx.Mask(m.bitmap, wx.Colour(0, 0, 0, 255)))
def move_shape(self, delta):
"""moves shape (selected in l, which is list ListBox of shapes)
by an offset specified in tuple delta.
Positive numbers move to (rigt, down)"""
name = self.parent.l.GetSelection()
if name == wx.NOT_FOUND:
return False
name = self.parent.l.GetString(name)
model = self.parent.models[name]
model.offsets = [model.offsets[0] + delta[0],
model.offsets[1] + delta[1],
model.offsets[2]
]
self.Refresh()
return True
def move(self, event):
if event.ButtonUp(wx.MOUSE_BTN_LEFT):
if self.initpos is not None:
currentpos = event.GetPosition()
delta = (0.5 * (currentpos[0] - self.initpos[0]),
-0.5 * (currentpos[1] - self.initpos[1])
)
self.move_shape(delta)
self.Refresh()
self.initpos = None
elif event.ButtonDown(wx.MOUSE_BTN_RIGHT):
self.parent.right(event)
elif event.Dragging():
if self.initpos is None:
self.initpos = event.GetPosition()
self.Refresh()
dc = wx.ClientDC(self)
p = event.GetPosition()
dc.DrawLine(self.initpos[0], self.initpos[1], p[0], p[1])
del dc
else:
event.Skip()
def rotate_shape(self, angle):
"""rotates active shape
positive angle is clockwise
"""
self.i += angle
if not self.triggered:
self.triggered = 1
threading.Thread(target = self.cr).start()
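        # Note (added for clarity): rotate_shape only accumulates the requested
        # angle in self.i; self.cr debounces bursts of wheel events by sleeping
        # briefly and then applying the net rotation once, on the UI thread,
        # via wx.CallAfter(self.rotateafter).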
def keypress(self, event):
"""gets keypress events and moves/rotates active shape"""
keycode = event.GetKeyCode()
step = 5
angle = 18
if event.ControlDown():
step = 1
angle = 1
# h
if keycode == 72:
self.move_shape((-step, 0))
# l
if keycode == 76:
self.move_shape((step, 0))
        # k
        if keycode == 75:
            self.move_shape((0, step))
        # j
        if keycode == 74:
            self.move_shape((0, -step))
# [
if keycode == 91:
self.rotate_shape(-angle)
# ]
if keycode == 93:
self.rotate_shape(angle)
event.Skip()
def rotateafter(self):
if self.i != self.previ:
i = self.parent.l.GetSelection()
if i != wx.NOT_FOUND:
self.parent.models[self.parent.l.GetString(i)].rot -= 5 * (self.i - self.previ)
self.previ = self.i
self.Refresh()
def cr(self):
time.sleep(0.01)
wx.CallAfter(self.rotateafter)
self.triggered = 0
def rot(self, event):
z = event.GetWheelRotation()
s = self.parent.l.GetSelection()
if self.prevsel != s:
self.i = 0
self.prevsel = s
self.rotate_shape(-1 if z < 0 else 1) #TEST
def repaint(self, event):
dc = wx.PaintDC(self)
self.paint(dc = dc)
def paint(self, coord1 = "x", coord2 = "y", dc = None):
if dc is None:
dc = wx.ClientDC(self)
scale = 2
dc.SetPen(wx.Pen(wx.Colour(100, 100, 100)))
for i in range(20):
dc.DrawLine(0, i * scale * 10, 400, i * scale * 10)
dc.DrawLine(i * scale * 10, 0, i * scale * 10, 400)
dc.SetPen(wx.Pen(wx.Colour(0, 0, 0)))
for i in range(4):
dc.DrawLine(0, i * scale * 50, 400, i * scale * 50)
dc.DrawLine(i * scale * 50, 0, i * scale * 50, 400)
dc.SetBrush(wx.Brush(wx.Colour(128, 255, 128)))
dc.SetPen(wx.Pen(wx.Colour(128, 128, 128)))
dcs = wx.MemoryDC()
for m in self.parent.models.values():
b = m.bitmap
im = b.ConvertToImage()
imgc = wx.Point(im.GetWidth() / 2, im.GetHeight() / 2)
im = im.Rotate(math.radians(m.rot), imgc, 0)
            bm = wx.Bitmap(im)
dcs.SelectObject(bm)
bsz = bm.GetSize()
dc.Blit(scale * m.offsets[0] - bsz[0] / 2, 400 - (scale * m.offsets[1] + bsz[1] / 2), bsz[0], bsz[1], dcs, 0, 0, useMask = 1)
del dc
class StlPlaterPanel(PlaterPanel):
load_wildcard = _("STL files (*.stl;*.STL)|*.stl;*.STL|OpenSCAD files (*.scad)|*.scad")
save_wildcard = _("STL files (*.stl;*.STL)|*.stl;*.STL")
def prepare_ui(self, filenames = [], callback = None,
parent = None, build_dimensions = None, circular_platform = False,
simarrange_path = None, antialias_samples = 0):
super().prepare_ui(filenames, callback, parent, build_dimensions)
self.cutting = False
self.cutting_axis = None
self.cutting_dist = None
if glview:
viewer = stlview.StlViewPanel(self, wx.DefaultSize,
build_dimensions = self.build_dimensions,
circular = circular_platform,
antialias_samples = antialias_samples)
# Cutting tool
nrows = self.menusizer.GetRows()
self.menusizer.Add(wx.StaticText(self.menupanel, -1, _("Cut along:")),
pos = (nrows, 0), span = (1, 1), flag = wx.ALIGN_CENTER)
cutconfirmbutton = wx.Button(self.menupanel, label = _("Confirm cut"))
cutconfirmbutton.Bind(wx.EVT_BUTTON, self.cut_confirm)
cutconfirmbutton.Disable()
self.cutconfirmbutton = cutconfirmbutton
self.menusizer.Add(cutconfirmbutton, pos = (nrows, 1), span = (1, 1), flag = wx.EXPAND)
cutpanel = wx.Panel(self.menupanel)
cutsizer = self.cutsizer = wx.BoxSizer(wx.HORIZONTAL)
cutpanel.SetSizer(cutsizer)
cutxplusbutton = wx.ToggleButton(cutpanel, label = _(">X"), style = wx.BU_EXACTFIT)
cutxplusbutton.Bind(wx.EVT_TOGGLEBUTTON, lambda event: self.start_cutting_tool(event, "x", 1))
cutsizer.Add(cutxplusbutton, 1, flag = wx.EXPAND)
cutzplusbutton = wx.ToggleButton(cutpanel, label = _(">Y"), style = wx.BU_EXACTFIT)
cutzplusbutton.Bind(wx.EVT_TOGGLEBUTTON, lambda event: self.start_cutting_tool(event, "y", 1))
cutsizer.Add(cutzplusbutton, 1, flag = wx.EXPAND)
cutzplusbutton = wx.ToggleButton(cutpanel, label = _(">Z"), style = wx.BU_EXACTFIT)
cutzplusbutton.Bind(wx.EVT_TOGGLEBUTTON, lambda event: self.start_cutting_tool(event, "z", 1))
cutsizer.Add(cutzplusbutton, 1, flag = wx.EXPAND)
cutxminusbutton = wx.ToggleButton(cutpanel, label = _("<X"), style = wx.BU_EXACTFIT)
cutxminusbutton.Bind(wx.EVT_TOGGLEBUTTON, lambda event: self.start_cutting_tool(event, "x", -1))
cutsizer.Add(cutxminusbutton, 1, flag = wx.EXPAND)
cutzminusbutton = wx.ToggleButton(cutpanel, label = _("<Y"), style = wx.BU_EXACTFIT)
cutzminusbutton.Bind(wx.EVT_TOGGLEBUTTON, lambda event: self.start_cutting_tool(event, "y", -1))
cutsizer.Add(cutzminusbutton, 1, flag = wx.EXPAND)
cutzminusbutton = wx.ToggleButton(cutpanel, label = _("<Z"), style = wx.BU_EXACTFIT)
cutzminusbutton.Bind(wx.EVT_TOGGLEBUTTON, lambda event: self.start_cutting_tool(event, "z", -1))
cutsizer.Add(cutzminusbutton, 1, flag = wx.EXPAND)
self.menusizer.Add(cutpanel, pos = (nrows + 1, 0), span = (1, 2), flag = wx.EXPAND)
else:
viewer = showstl(self, (580, 580), (0, 0))
self.simarrange_path = simarrange_path
self.set_viewer(viewer)
def start_cutting_tool(self, event, axis, direction):
toggle = event.EventObject
self.cutting = toggle.Value
if toggle.Value:
# Disable the other toggles
for child in self.cutsizer.Children:
child = child.Window
if child != toggle:
child.Value = False
self.cutting_axis = axis
self.cutting_direction = direction
else:
self.cutting_axis = None
self.cutting_direction = None
self.cutting_dist = None
def cut_confirm(self, event):
name = self.l.GetSelection()
name = self.l.GetString(name)
model = self.models[name]
transformation = transformation_matrix(model)
transformed = model.transform(transformation)
logging.info(_("Cutting %s alongside %s axis") % (name, self.cutting_axis))
axes = ["x", "y", "z"]
cut = transformed.cut(axes.index(self.cutting_axis),
self.cutting_direction,
self.cutting_dist)
cut.offsets = [0, 0, 0]
cut.rot = 0
cut.scale = model.scale
cut.filename = model.filename
cut.centeroffset = [0, 0, 0]
self.s.prepare_model(cut, 2)
self.models[name] = cut
self.cutconfirmbutton.Disable()
self.cutting = False
self.cutting_axis = None
self.cutting_dist = None
self.cutting_direction = None
for child in self.cutsizer.GetChildren():
child = child.GetWindow()
child.SetValue(False)
def clickcb(self, event, single = False):
if not isinstance(self.s, stlview.StlViewPanel):
return
if self.cutting:
self.clickcb_cut(event)
else:
self.clickcb_rebase(event)
def clickcb_cut(self, event):
axis = self.cutting_axis
self.cutting_dist, _, _ = self.s.get_cutting_plane(axis, None,
local_transform = True)
if self.cutting_dist is not None:
self.cutconfirmbutton.Enable()
def clickcb_rebase(self, event):
x, y = event.GetPosition()
ray_near, ray_far = self.s.mouse_to_ray(x, y, local_transform = True)
best_match = None
best_facet = None
best_dist = float("inf")
for key, model in self.models.items():
transformation = transformation_matrix(model)
transformed = model.transform(transformation)
if not transformed.intersect_box(ray_near, ray_far):
logging.debug("Skipping %s for rebase search" % key)
continue
facet, facet_dist = transformed.intersect(ray_near, ray_far)
if facet is not None and facet_dist < best_dist:
best_match = key
best_facet = facet
best_dist = facet_dist
if best_match is not None:
logging.info("Rebasing %s" % best_match)
model = self.models[best_match]
newmodel = model.rebase(best_facet)
newmodel.offsets = list(model.offsets)
newmodel.rot = 0
newmodel.scale = model.scale
newmodel.filename = model.filename
newmodel.centeroffset = [-(newmodel.dims[1] + newmodel.dims[0]) / 2,
-(newmodel.dims[3] + newmodel.dims[2]) / 2,
0]
self.s.prepare_model(newmodel, 2)
self.models[best_match] = newmodel
wx.CallAfter(self.Refresh)
def done(self, event, cb):
if not os.path.exists("tempstl"):
os.mkdir("tempstl")
name = "tempstl/" + str(int(time.time()) % 10000) + ".stl"
self.export_to(name)
if cb is not None:
cb(name)
if self.destroy_on_done:
self.Destroy()
def load_file(self, filename):
if filename.lower().endswith(".stl"):
try:
self.load_stl(filename)
except:
dlg = wx.MessageDialog(self, _("Loading STL file failed"),
_("Error:") + traceback.format_exc(),
wx.OK)
dlg.ShowModal()
logging.error(_("Loading STL file failed:")
+ "\n" + traceback.format_exc())
elif filename.lower().endswith(".scad"):
try:
self.load_scad(filename)
except:
dlg = wx.MessageDialog(self, _("Loading OpenSCAD file failed"),
_("Error:") + traceback.format_exc(),
wx.OK)
dlg.ShowModal()
logging.error(_("Loading OpenSCAD file failed:")
+ "\n" + traceback.format_exc())
def load_scad(self, name):
lf = open(name)
s = [i.replace("\n", "").replace("\r", "").replace(";", "") for i in lf if "stl" in i]
lf.close()
for i in s:
parts = i.split()
for part in parts:
if 'translate' in part:
translate_list = evalme(part)
for part in parts:
if 'rotate' in part:
rotate_list = evalme(part)
for part in parts:
if 'import' in part:
stl_file = evalme(part)
newname = os.path.split(stl_file.lower())[1]
c = 1
while newname in self.models:
newname = os.path.split(stl_file.lower())[1]
newname = newname + "(%d)" % c
c += 1
stl_path = os.path.join(os.path.split(name)[0:len(os.path.split(stl_file)) - 1])
stl_full_path = os.path.join(stl_path[0], str(stl_file))
self.load_stl_into_model(stl_full_path, stl_file, translate_list, rotate_list[2])
def load_stl(self, name):
if not os.path.exists(name):
logging.error(_("Couldn't load non-existing file %s") % name)
return
path = os.path.split(name)[0]
self.basedir = path
if name.lower().endswith(".stl"):
for model in self.models.values():
if model.filename == name:
newmodel = copy(model)
newmodel.offsets = list(model.offsets)
newmodel.rot = model.rot
newmodel.scale = list(model.scale)
self.add_model(name, newmodel)
self.s.prepare_model(newmodel, 2)
break
else:
# Filter out the path, just show the STL filename.
self.load_stl_into_model(name, name)
wx.CallAfter(self.Refresh)
def load_stl_into_model(self, path, name, offset = None, rotation = 0, scale = [1.0, 1.0, 1.0]):
model = stltool.stl(path)
if offset is None:
offset = [self.build_dimensions[3], self.build_dimensions[4], 0]
model.offsets = list(offset)
model.rot = rotation
model.scale = list(scale)
model.filename = name
self.add_model(name, model)
model.centeroffset = [-(model.dims[1] + model.dims[0]) / 2,
-(model.dims[3] + model.dims[2]) / 2,
0]
self.s.prepare_model(model, 2)
def export_to(self, name):
with open(name.replace(".", "_") + ".scad", "w") as sf:
facets = []
for model in self.models.values():
r = model.rot
o = model.offsets
co = model.centeroffset
sf.write("translate([%s, %s, %s])"
"rotate([0, 0, %s])"
"translate([%s, %s, %s])"
"import(\"%s\");\n" % (o[0], o[1], o[2],
r,
co[0], co[1], co[2],
model.filename))
model = model.transform(transformation_matrix(model))
facets += model.facets
stltool.emitstl(name, facets, "plater_export")
logging.info(_("Wrote plate to %s") % name)
def autoplate(self, event = None):
if self.simarrange_path:
try:
self.autoplate_simarrange()
except Exception as e:
logging.warning(_("Failed to use simarrange for plating, "
"falling back to the standard method. "
"The error was: ") + e)
super(StlPlaterPanel, self).autoplate()
else:
super(StlPlaterPanel, self).autoplate()
def autoplate_simarrange(self):
logging.info(_("Autoplating using simarrange"))
models = dict(self.models)
files = [model.filename for model in models.values()]
command = [self.simarrange_path, "--dryrun",
"-m", # Pack around center
"-x", str(int(self.build_dimensions[0])),
"-y", str(int(self.build_dimensions[1]))] + files
p = subprocess.Popen(command, stdout = subprocess.PIPE, universal_newlines = True)
pos_regexp = re.compile("File: (.*) minx: ([0-9]+), miny: ([0-9]+), minrot: ([0-9]+)")
for line in p.stdout:
line = line.rstrip()
if "Generating plate" in line:
plateid = int(line.split()[-1])
if plateid > 0:
logging.error(_("Plate full, please remove some objects"))
break
if "File:" in line:
bits = pos_regexp.match(line).groups()
filename = bits[0]
x = float(bits[1])
y = float(bits[2])
rot = -float(bits[3])
for name, model in list(models.items()):
                # FIXME: not sure this is going to work super well with utf8
if model.filename == filename:
model.offsets[0] = x + self.build_dimensions[3]
model.offsets[1] = y + self.build_dimensions[4]
model.rot = rot
del models[name]
break
if p.wait() != 0:
raise RuntimeError(_("simarrange failed"))
StlPlater = make_plater(StlPlaterPanel)
| 40.957692
| 137
| 0.5416
|
133b42d22c9f7356dfd6b17ce9a10517755aed30
| 650
|
py
|
Python
|
examples/16_inverse_design/inverse_spar_design.py
|
johnjasa/WISDEM
|
a4571e71cb5b9869c81790f8abb1bb7fba8fdb02
|
[
"Apache-2.0"
] | null | null | null |
examples/16_inverse_design/inverse_spar_design.py
|
johnjasa/WISDEM
|
a4571e71cb5b9869c81790f8abb1bb7fba8fdb02
|
[
"Apache-2.0"
] | null | null | null |
examples/16_inverse_design/inverse_spar_design.py
|
johnjasa/WISDEM
|
a4571e71cb5b9869c81790f8abb1bb7fba8fdb02
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import time
from wisdem.commonse.mpi_tools import MPI
from wisdem.glue_code.runWISDEM import run_wisdem
## File management
run_dir = os.path.dirname(os.path.realpath(__file__)) + os.sep
wisdem_examples = os.path.dirname(os.path.dirname(run_dir))
fname_wt_input_oc3 = os.path.join(wisdem_examples, "09_floating", "nrel5mw-spar_oc3.yaml")
fname_modeling_options = os.path.join(wisdem_examples, "09_floating", "modeling_options_noRNA.yaml")
fname_analysis_options = run_dir + os.sep + "analysis_options.yaml"
wt_opt, modeling_options, opt_options = run_wisdem(fname_wt_input_oc3, fname_modeling_options, fname_analysis_options)
| 38.235294
| 118
| 0.818462
|
78916087743484625c09bb1a4f5249d6c4693fad
| 13,366
|
py
|
Python
|
tods/feature_analysis/StatisticalSkew.py
|
ZhuangweiKang/tods
|
fe3f55f8ccb306dd292c668e0f1154f1afdfa556
|
[
"Apache-2.0"
] | 544
|
2020-09-21T06:02:33.000Z
|
2022-03-27T07:16:32.000Z
|
tods/feature_analysis/StatisticalSkew.py
|
ZhuangweiKang/tods
|
fe3f55f8ccb306dd292c668e0f1154f1afdfa556
|
[
"Apache-2.0"
] | 35
|
2020-09-21T06:33:13.000Z
|
2022-03-11T14:20:21.000Z
|
tods/feature_analysis/StatisticalSkew.py
|
ZhuangweiKang/tods
|
fe3f55f8ccb306dd292c668e0f1154f1afdfa556
|
[
"Apache-2.0"
] | 86
|
2020-09-21T16:44:33.000Z
|
2022-03-11T18:20:22.000Z
|
import os
from typing import Any,Optional,List
import statsmodels.api as sm
import numpy as np
from d3m import container, utils as d3m_utils
from d3m import utils
from numpy import ndarray
from collections import OrderedDict
from scipy import sparse
import os
from scipy import stats
import numpy
import typing
import time
import uuid
from d3m import container
from d3m.primitive_interfaces import base, transformer
from d3m.container import DataFrame as d3m_dataframe
from d3m.metadata import hyperparams, params, base as metadata_base
from d3m.base import utils as base_utils
from d3m.exceptions import PrimitiveNotFittedError
from ..common.TODSBasePrimitives import TODSTransformerPrimitiveBase
__all__ = ('StatisticalSkewPrimitive',)
Inputs = container.DataFrame
Outputs = container.DataFrame
class Params(params.Params):
    # TODO: how to make params dynamic
use_column_names: Optional[Any]
class Hyperparams(hyperparams.Hyperparams):
    # Tuning parameter
    # default -1: the entire time series is considered
    window_size = hyperparams.Hyperparameter(default=-1, semantic_types=[
        'https://metadata.datadrivendiscovery.org/types/TuningParameter',
    ], description="Window size for skew computation")
#control parameter
use_columns = hyperparams.Set(
elements=hyperparams.Hyperparameter[int](-1),
default=(),
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="A set of column indices to force primitive to operate on. If any specified column cannot be parsed, it is skipped.",
)
exclude_columns = hyperparams.Set(
elements=hyperparams.Hyperparameter[int](-1),
default=(),
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="A set of column indices to not operate on. Applicable only if \"use_columns\" is not provided.",
)
return_result = hyperparams.Enumeration(
values=['append', 'replace', 'new'],
default='append',
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="Should parsed columns be appended, should they replace original columns, or should only parsed columns be returned? This hyperparam is ignored if use_semantic_types is set to false.",
)
use_semantic_types = hyperparams.UniformBool(
default=False,
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="Controls whether semantic_types metadata will be used for filtering columns in input dataframe. Setting this to false makes the code ignore return_result and will produce only the output dataframe"
)
add_index_columns = hyperparams.UniformBool(
default=False,
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="Also include primary index columns if input data has them. Applicable only if \"return_result\" is set to \"new\".",
)
error_on_no_input = hyperparams.UniformBool(
default=True,
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="Throw an exception if no input column is selected/provided. Defaults to true to behave like sklearn. To prevent pipelines from breaking set this to False.",
)
return_semantic_type = hyperparams.Enumeration[str](
values=['https://metadata.datadrivendiscovery.org/types/Attribute',
'https://metadata.datadrivendiscovery.org/types/ConstructedAttribute'],
default='https://metadata.datadrivendiscovery.org/types/Attribute',
description='Decides what semantic type to attach to generated attributes',
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']
)
class StatisticalSkewPrimitive(TODSTransformerPrimitiveBase[Inputs, Outputs, Hyperparams]):
"""
Primitive to find skew of time series
"""
metadata = metadata_base.PrimitiveMetadata({
"__author__": "DATA Lab @ Texas A&M University",
        'name': 'Time Series Skew',
'python_path': 'd3m.primitives.tods.feature_analysis.statistical_skew',
'keywords': ['Time Series','Skew'],
'source': {
'name': 'DATA Lab at Texas A&M University',
'contact': 'mailto:khlai037@tamu.edu'
},
'version': '0.1.0',
"hyperparams_to_tune": ['window_size'],
'algorithm_types': [
metadata_base.PrimitiveAlgorithmType.TODS_PRIMITIVE,
],
'primitive_family': metadata_base.PrimitiveFamily.FEATURE_CONSTRUCTION,
'id': str(uuid.uuid3(uuid.NAMESPACE_DNS, 'StatisticalSkewPrimitive')),
})
def _produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> base.CallResult[Outputs]:
"""
Args:
inputs: Container DataFrame
timeout: Default
iterations: Default
Returns:
Container DataFrame containing skew of time series
"""
self.logger.info('Statistical Skew Primitive called')
# Get cols to fit.
self._fitted = False
self._training_inputs, self._training_indices = self._get_columns_to_fit(inputs, self.hyperparams)
self._input_column_names = self._training_inputs.columns
if len(self._training_indices) > 0:
# self._clf.fit(self._training_inputs)
self._fitted = True
else: # pragma: no cover
if self.hyperparams['error_on_no_input']:
raise RuntimeError("No input columns were selected")
self.logger.warn("No input columns were selected")
if not self._fitted:
raise PrimitiveNotFittedError("Primitive not fitted.")
statistical_skew_input = inputs
if self.hyperparams['use_semantic_types']:
statistical_skew_input = inputs.iloc[:, self._training_indices]
output_columns = []
if len(self._training_indices) > 0:
statistical_skew_output = self._skew(statistical_skew_input,self.hyperparams["window_size"])
if sparse.issparse(statistical_skew_output):
statistical_skew_output = statistical_skew_output.toarray()
outputs = self._wrap_predictions(inputs, statistical_skew_output)
#if len(outputs.columns) == len(self._input_column_names):
# outputs.columns = self._input_column_names
output_columns = [outputs]
else: # pragma: no cover
if self.hyperparams['error_on_no_input']:
raise RuntimeError("No input columns were selected")
self.logger.warn("No input columns were selected")
outputs = base_utils.combine_columns(return_result=self.hyperparams['return_result'],
add_index_columns=self.hyperparams['add_index_columns'],
inputs=inputs, column_indices=self._training_indices,
columns_list=output_columns)
self.logger.info('Statistical Skew Primitive returned')
return base.CallResult(outputs)
@classmethod
def _get_columns_to_fit(cls, inputs: Inputs, hyperparams: Hyperparams):
"""
Select columns to fit.
Args:
inputs: Container DataFrame
hyperparams: d3m.metadata.hyperparams.Hyperparams
Returns:
list
"""
if not hyperparams['use_semantic_types']:
return inputs, list(range(len(inputs.columns)))
inputs_metadata = inputs.metadata
def can_produce_column(column_index: int) -> bool:
return cls._can_produce_column(inputs_metadata, column_index, hyperparams)
use_columns = hyperparams['use_columns']
exclude_columns = hyperparams['exclude_columns']
columns_to_produce, columns_not_to_produce = base_utils.get_columns_to_use(inputs_metadata,
use_columns=use_columns,
exclude_columns=exclude_columns,
can_use_column=can_produce_column)
return inputs.iloc[:, columns_to_produce], columns_to_produce
# return columns_to_produce
@classmethod
def _can_produce_column(cls, inputs_metadata: metadata_base.DataMetadata, column_index: int,
hyperparams: Hyperparams) -> bool:
"""
Output whether a column can be processed.
Args:
inputs_metadata: d3m.metadata.base.DataMetadata
column_index: int
Returns:
bool
"""
column_metadata = inputs_metadata.query((metadata_base.ALL_ELEMENTS, column_index))
accepted_structural_types = (int, float, numpy.integer, numpy.float64)
accepted_semantic_types = set()
accepted_semantic_types.add("https://metadata.datadrivendiscovery.org/types/Attribute")
if not issubclass(column_metadata['structural_type'], accepted_structural_types):
return False
semantic_types = set(column_metadata.get('semantic_types', []))
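        # NOTE (added for clarity): the early return below bypasses the
        # semantic-type filtering that follows, so any column with an accepted
        # structural type is treated as producible; the remaining checks in
        # this method are unreachable as written.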
return True
if len(semantic_types) == 0:
cls.logger.warning("No semantic types found in column metadata")
return False
# Making sure all accepted_semantic_types are available in semantic_types
if len(accepted_semantic_types - semantic_types) == 0:
return True
return False
@classmethod
def _update_predictions_metadata(cls, inputs_metadata: metadata_base.DataMetadata, outputs: Optional[Outputs],
target_columns_metadata: List[OrderedDict]) -> metadata_base.DataMetadata:
"""
Updata metadata for selected columns.
Args:
inputs_metadata: metadata_base.DataMetadata
outputs: Container Dataframe
target_columns_metadata: list
Returns:
d3m.metadata.base.DataMetadata
"""
outputs_metadata = metadata_base.DataMetadata().generate(value=outputs)
for column_index, column_metadata in enumerate(target_columns_metadata):
column_metadata.pop("structural_type", None)
outputs_metadata = outputs_metadata.update_column(column_index, column_metadata)
return outputs_metadata
def _wrap_predictions(self, inputs: Inputs, predictions: ndarray) -> Outputs:
"""
Wrap predictions into dataframe
Args:
inputs: Container Dataframe
predictions: array-like data (n_samples, n_features)
Returns:
Dataframe
"""
outputs = d3m_dataframe(predictions, generate_metadata=True)
target_columns_metadata = self._add_target_columns_metadata(outputs.metadata, self.hyperparams)
outputs.metadata = self._update_predictions_metadata(inputs.metadata, outputs, target_columns_metadata)
return outputs
@classmethod
def _add_target_columns_metadata(cls, outputs_metadata: metadata_base.DataMetadata, hyperparams):
"""
Add target columns metadata
Args:
outputs_metadata: metadata.base.DataMetadata
hyperparams: d3m.metadata.hyperparams.Hyperparams
Returns:
List[OrderedDict]
"""
outputs_length = outputs_metadata.query((metadata_base.ALL_ELEMENTS,))['dimension']['length']
target_columns_metadata: List[OrderedDict] = []
for column_index in range(outputs_length):
# column_name = "output_{}".format(column_index)
column_metadata = OrderedDict()
semantic_types = set()
semantic_types.add(hyperparams["return_semantic_type"])
column_metadata['semantic_types'] = list(semantic_types)
# column_metadata["name"] = str(column_name)
target_columns_metadata.append(column_metadata)
return target_columns_metadata
def _write(self, inputs: Inputs): # pragma: no cover
inputs.to_csv(str(time.time()) + '.csv')
    def _skew(self, X, window_size):
        """Statistical skew of a time series sequence.
        Args:
            X : DataFrame
                Time series.
            window_size : int
                Rolling window length; -1 uses the entire series.
        Returns:
            DataFrame
                A DataFrame with the rolling skew of each column.
        """
        if window_size == -1:
            window_size = len(X)
        transformed_X = utils.pandas.DataFrame()
        for column in X.columns:
            column_value = X[column].values
            column_skew = np.zeros(len(column_value))
            for end in range(window_size - 1, len(column_value)):
                sequence = column_value[end - window_size + 1:end + 1]
                column_skew[end] = round(stats.skew(sequence), 4)
            column_skew[:window_size - 1] = column_skew[window_size - 1]
            transformed_X[column + "_skew"] = column_skew
return transformed_X
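# Usage sketch (illustrative, not part of the original TODS source). With the
# default window_size of -1, every row of a column receives the skew of the
# whole column; with a positive window_size each row receives the skew of the
# window ending at that row, and the first window_size - 1 rows are back-filled
# with the first full-window value:
#     import pandas as pd
#     from scipy import stats
#     X = pd.DataFrame({"value": [1.0, 2.0, 2.0, 3.0, 10.0]})
#     print(round(stats.skew(X["value"].values), 4))  # skew of the full series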
| 41.509317
| 221
| 0.654496
|
d07299313299db4ad90e7916c7f0d8c28fd5e6d0
| 21,702
|
py
|
Python
|
salt/modules/boto_asg.py
|
bogdanr/salt
|
4f198525873a1b7da3fbb9994dbb40d381494922
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/boto_asg.py
|
bogdanr/salt
|
4f198525873a1b7da3fbb9994dbb40d381494922
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/boto_asg.py
|
bogdanr/salt
|
4f198525873a1b7da3fbb9994dbb40d381494922
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon Autoscale Groups
.. versionadded:: 2014.7.0
:configuration: This module accepts explicit autoscale credentials but can also
    utilize IAM roles assigned to the instance through Instance Profiles.
    Dynamic credentials are then automatically obtained from the AWS API and no
    further configuration is necessary. More information is available at::
        http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
    If IAM roles are not used, you need to specify the credentials either in a
    pillar or in the minion's config file::
asg.keyid: GKTADJGHEIQSXMKKRBJ08H
asg.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration::
asg.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto
'''
# keep lint from choking on _get_conn and _cache_id
#pylint: disable=E0602
# Import Python libs
from __future__ import absolute_import
import logging
import json
import sys
import email.mime.multipart
import email.mime.text
log = logging.getLogger(__name__)
# Import third party libs
import yaml
import salt.ext.six as six
try:
import boto
import boto.ec2
import boto.ec2.instance
import boto.ec2.blockdevicemapping as blockdevicemapping
import boto.ec2.autoscale as autoscale
logging.getLogger('boto').setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# Import Salt libs
import salt.utils.odict as odict
def __virtual__():
'''
Only load if boto libraries exist.
'''
if not HAS_BOTO:
return False
__utils__['boto.assign_funcs'](__name__, 'asg', module='ec2.autoscale')
setattr(sys.modules[__name__], '_get_ec2_conn',
__utils__['boto.get_connection_func']('ec2'))
return True
def exists(name, region=None, key=None, keyid=None, profile=None):
'''
Check to see if an autoscale group exists.
CLI example::
salt myminion boto_asg.exists myasg region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
_conn = conn.get_all_groups(names=[name])
if _conn:
return True
else:
msg = 'The autoscale group does not exist in region {0}'.format(region)
log.debug(msg)
return False
except boto.exception.BotoServerError as e:
log.debug(e)
return False
def get_config(name, region=None, key=None, keyid=None, profile=None):
'''
Get the configuration for an autoscale group.
CLI example::
salt myminion boto_asg.get_config myasg region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
asg = conn.get_all_groups(names=[name])
if asg:
asg = asg[0]
else:
return {}
ret = odict.OrderedDict()
attrs = ['name', 'availability_zones', 'default_cooldown',
'desired_capacity', 'health_check_period',
'health_check_type', 'launch_config_name', 'load_balancers',
'max_size', 'min_size', 'placement_group',
'vpc_zone_identifier', 'tags', 'termination_policies',
'suspended_processes']
for attr in attrs:
# Tags are objects, so we need to turn them into dicts.
if attr == 'tags':
_tags = []
for tag in asg.tags:
_tag = odict.OrderedDict()
_tag['key'] = tag.key
_tag['value'] = tag.value
_tag['propagate_at_launch'] = tag.propagate_at_launch
_tags.append(_tag)
ret['tags'] = _tags
# Boto accepts a string or list as input for vpc_zone_identifier,
# but always returns a comma separated list. We require lists in
# states.
elif attr == 'vpc_zone_identifier':
ret[attr] = getattr(asg, attr).split(',')
# convert SuspendedProcess objects to names
elif attr == 'suspended_processes':
suspended_processes = getattr(asg, attr)
ret[attr] = sorted([x.process_name for x in suspended_processes])
else:
ret[attr] = getattr(asg, attr)
# scaling policies
policies = conn.get_all_policies(as_group=name)
ret["scaling_policies"] = []
for policy in policies:
ret["scaling_policies"].append(
dict([
("name", policy.name),
("adjustment_type", policy.adjustment_type),
("scaling_adjustment", policy.scaling_adjustment),
("min_adjustment_step", policy.min_adjustment_step),
("cooldown", policy.cooldown)
])
)
return ret
except boto.exception.BotoServerError as e:
log.debug(e)
return {}
def create(name, launch_config_name, availability_zones, min_size, max_size,
desired_capacity=None, load_balancers=None, default_cooldown=None,
health_check_type=None, health_check_period=None,
placement_group=None, vpc_zone_identifier=None, tags=None,
termination_policies=None, suspended_processes=None,
scaling_policies=None, region=None,
notification_arn=None, notification_types=None,
key=None, keyid=None, profile=None):
'''
Create an autoscale group.
CLI example::
        salt myminion boto_asg.create myasg mylc '["us-east-1a", "us-east-1e"]' 1 10 load_balancers='["myelb", "myelb2"]' tags='[{"key": "Name", "value": "myasg", "propagate_at_launch": true}]'
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if isinstance(availability_zones, six.string_types):
availability_zones = json.loads(availability_zones)
if isinstance(load_balancers, six.string_types):
load_balancers = json.loads(load_balancers)
if isinstance(vpc_zone_identifier, six.string_types):
vpc_zone_identifier = json.loads(vpc_zone_identifier)
if isinstance(tags, six.string_types):
tags = json.loads(tags)
# Make a list of tag objects from the dict.
_tags = []
if tags:
for tag in tags:
            try:
                key = tag['key']
            except KeyError:
                log.error('Tag missing key.')
                return False
            try:
                value = tag['value']
            except KeyError:
                log.error('Tag missing value.')
                return False
propagate_at_launch = tag.get('propagate_at_launch', False)
_tag = autoscale.Tag(key=key, value=value, resource_id=name,
propagate_at_launch=propagate_at_launch)
_tags.append(_tag)
if isinstance(termination_policies, six.string_types):
termination_policies = json.loads(termination_policies)
if isinstance(suspended_processes, six.string_types):
suspended_processes = json.loads(suspended_processes)
try:
_asg = autoscale.AutoScalingGroup(
name=name, launch_config=launch_config_name,
availability_zones=availability_zones,
min_size=min_size, max_size=max_size,
desired_capacity=desired_capacity, load_balancers=load_balancers,
default_cooldown=default_cooldown,
health_check_type=health_check_type,
health_check_period=health_check_period,
placement_group=placement_group, tags=_tags,
vpc_zone_identifier=vpc_zone_identifier,
termination_policies=termination_policies,
suspended_processes=suspended_processes)
conn.create_auto_scaling_group(_asg)
# create scaling policies
_create_scaling_policies(conn, name, scaling_policies)
# create notifications
if notification_arn and notification_types:
conn.put_notification_configuration(_asg, notification_arn, notification_types)
log.info('Created ASG {0}'.format(name))
return True
except boto.exception.BotoServerError as e:
log.debug(e)
msg = 'Failed to create ASG {0}'.format(name)
log.error(msg)
return False
def update(name, launch_config_name, availability_zones, min_size, max_size,
desired_capacity=None, load_balancers=None, default_cooldown=None,
health_check_type=None, health_check_period=None,
placement_group=None, vpc_zone_identifier=None, tags=None,
termination_policies=None, suspended_processes=None,
scaling_policies=None,
notification_arn=None, notification_types=None,
region=None, key=None, keyid=None, profile=None):
'''
Update an autoscale group.
CLI example::
        salt myminion boto_asg.update myasg mylc '["us-east-1a", "us-east-1e"]' 1 10 load_balancers='["myelb", "myelb2"]' tags='[{"key": "Name", "value": "myasg", "propagate_at_launch": true}]'
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return False, "failed to connect to AWS"
if isinstance(availability_zones, six.string_types):
availability_zones = json.loads(availability_zones)
if isinstance(load_balancers, six.string_types):
load_balancers = json.loads(load_balancers)
if isinstance(vpc_zone_identifier, six.string_types):
vpc_zone_identifier = json.loads(vpc_zone_identifier)
if isinstance(tags, six.string_types):
tags = json.loads(tags)
# Make a list of tag objects from the dict.
_tags = []
if tags:
for tag in tags:
            try:
                key = tag['key']
            except KeyError:
                log.error('Tag missing key.')
                return False, "Tag {0} missing key".format(tag)
            try:
                value = tag['value']
            except KeyError:
                log.error('Tag missing value.')
                return False, "Tag {0} missing value".format(tag)
propagate_at_launch = tag.get('propagate_at_launch', False)
_tag = autoscale.Tag(key=key, value=value, resource_id=name,
propagate_at_launch=propagate_at_launch)
_tags.append(_tag)
if isinstance(termination_policies, six.string_types):
termination_policies = json.loads(termination_policies)
if isinstance(suspended_processes, six.string_types):
suspended_processes = json.loads(suspended_processes)
try:
_asg = autoscale.AutoScalingGroup(
connection=conn,
name=name, launch_config=launch_config_name,
availability_zones=availability_zones,
min_size=min_size, max_size=max_size,
desired_capacity=desired_capacity, load_balancers=load_balancers,
default_cooldown=default_cooldown,
health_check_type=health_check_type,
health_check_period=health_check_period,
placement_group=placement_group, tags=_tags,
vpc_zone_identifier=vpc_zone_identifier,
termination_policies=termination_policies)
if notification_arn and notification_types:
conn.put_notification_configuration(_asg, notification_arn, notification_types)
_asg.update()
# Seems the update call doesn't handle tags, so we'll need to update
# that separately.
if _tags:
conn.create_or_update_tags(_tags)
# update doesn't handle suspended_processes either
# Resume all processes
_asg.resume_processes()
# suspend any that are specified. Note that the boto default of empty
# list suspends all; don't do that.
if suspended_processes is not None and len(suspended_processes) > 0:
_asg.suspend_processes(suspended_processes)
log.info('Updated ASG {0}'.format(name))
# ### scaling policies
# delete all policies, then recreate them
for policy in conn.get_all_policies(as_group=name):
conn.delete_policy(policy.name, autoscale_group=name)
_create_scaling_policies(conn, name, scaling_policies)
return True, ''
except boto.exception.BotoServerError as e:
log.debug(e)
msg = 'Failed to update ASG {0}'.format(name)
log.error(msg)
return False, str(e)
def _create_scaling_policies(conn, as_name, scaling_policies):
'helper function to create scaling policies'
if scaling_policies:
for policy in scaling_policies:
policy = autoscale.policy.ScalingPolicy(
name=policy["name"],
as_name=as_name,
adjustment_type=policy["adjustment_type"],
scaling_adjustment=policy["scaling_adjustment"],
min_adjustment_step=policy.get("min_adjustment_step", None),
cooldown=policy["cooldown"])
conn.create_scaling_policy(policy)
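# Illustrative scaling_policies structure (inferred from the keys used above;
# not part of the original module documentation):
#     [{'name': 'scale-up', 'adjustment_type': 'ChangeInCapacity',
#       'scaling_adjustment': 1, 'cooldown': 300}]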
def delete(name, force=False, region=None, key=None, keyid=None, profile=None):
'''
Delete an autoscale group.
CLI example::
salt myminion boto_asg.delete myasg region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.delete_auto_scaling_group(name, force)
msg = 'Deleted autoscale group {0}.'.format(name)
log.info(msg)
return True
except boto.exception.BotoServerError as e:
log.debug(e)
msg = 'Failed to delete autoscale group {0}'.format(name)
log.error(msg)
return False
def get_cloud_init_mime(cloud_init):
'''
Get a mime multipart encoded string from a cloud-init dict. Currently
supports scripts and cloud-config.
CLI Example:
.. code-block:: bash
salt myminion boto.get_cloud_init_mime <cloud init>
'''
if isinstance(cloud_init, six.string_types):
cloud_init = json.loads(cloud_init)
_cloud_init = email.mime.multipart.MIMEMultipart()
if 'scripts' in cloud_init:
for script_name, script in six.iteritems(cloud_init['scripts']):
_script = email.mime.text.MIMEText(script, 'x-shellscript')
_cloud_init.attach(_script)
if 'cloud-config' in cloud_init:
cloud_config = cloud_init['cloud-config']
_cloud_config = email.mime.text.MIMEText(_safe_dump(cloud_config),
'cloud-config')
_cloud_init.attach(_cloud_config)
return _cloud_init.as_string()
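# Example input shape for get_cloud_init_mime (illustrative): a dict with
# optional 'scripts' and 'cloud-config' keys, e.g.
#     {'scripts': {'bootstrap': '#!/bin/bash\necho hi'},
#      'cloud-config': {'runcmd': [['touch', '/tmp/ok']]}}
# Each script becomes a text/x-shellscript MIME part and the cloud-config
# dict is YAML-dumped into a text/cloud-config part.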
def _safe_dump(data):
def ordered_dict_presenter(dumper, data):
return dumper.represent_dict(six.iteritems(data))
yaml.add_representer(odict.OrderedDict, ordered_dict_presenter,
Dumper=yaml.dumper.SafeDumper)
return yaml.safe_dump(data, default_flow_style=False)
def launch_configuration_exists(name, region=None, key=None, keyid=None,
profile=None):
'''
Check for a launch configuration's existence.
CLI example::
salt myminion boto_asg.launch_configuration_exists mylc
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
lc = conn.get_all_launch_configurations(names=[name])
if lc:
return True
else:
msg = 'The launch configuration does not exist in region {0}'.format(region)
log.debug(msg)
return False
except boto.exception.BotoServerError as e:
log.debug(e)
return False
def create_launch_configuration(name, image_id, key_name=None,
security_groups=None, user_data=None,
instance_type='m1.small', kernel_id=None,
ramdisk_id=None, block_device_mappings=None,
instance_monitoring=False, spot_price=None,
instance_profile_name=None,
ebs_optimized=False,
associate_public_ip_address=None,
volume_type=None, delete_on_termination=True,
iops=None, use_block_device_types=False,
region=None, key=None, keyid=None,
profile=None):
'''
Create a launch configuration.
CLI example::
salt myminion boto_asg.create_launch_configuration mylc image_id=ami-0b9c9f62 key_name='mykey' security_groups='["mygroup"]' instance_type='c3.2xlarge'
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if isinstance(security_groups, six.string_types):
security_groups = json.loads(security_groups)
if isinstance(block_device_mappings, six.string_types):
block_device_mappings = json.loads(block_device_mappings)
_bdms = []
if block_device_mappings:
# Boto requires objects for the mappings and the devices.
_block_device_map = blockdevicemapping.BlockDeviceMapping()
for block_device_dict in block_device_mappings:
for block_device, attributes in six.iteritems(block_device_dict):
_block_device = blockdevicemapping.EBSBlockDeviceType()
for attribute, value in six.iteritems(attributes):
setattr(_block_device, attribute, value)
_block_device_map[block_device] = _block_device
_bdms = [_block_device_map]
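    # Illustrative block_device_mappings structure (inferred from the loop
    # above): a list of {device: {EBS attribute: value}} dicts, e.g.
    #     [{'/dev/sdb': {'size': 100, 'volume_type': 'gp2'}}]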
lc = autoscale.LaunchConfiguration(
name=name, image_id=image_id, key_name=key_name,
security_groups=security_groups, user_data=user_data,
instance_type=instance_type, kernel_id=kernel_id,
ramdisk_id=ramdisk_id, block_device_mappings=_bdms,
instance_monitoring=instance_monitoring, spot_price=spot_price,
instance_profile_name=instance_profile_name,
ebs_optimized=ebs_optimized,
associate_public_ip_address=associate_public_ip_address,
volume_type=volume_type, delete_on_termination=delete_on_termination,
iops=iops, use_block_device_types=use_block_device_types)
try:
conn.create_launch_configuration(lc)
log.info('Created LC {0}'.format(name))
return True
except boto.exception.BotoServerError as e:
log.debug(e)
msg = 'Failed to create LC {0}'.format(name)
log.error(msg)
return False
def delete_launch_configuration(name, region=None, key=None, keyid=None,
profile=None):
'''
Delete a launch configuration.
CLI example::
salt myminion boto_asg.delete_launch_configuration mylc
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.delete_launch_configuration(name)
log.info('Deleted LC {0}'.format(name))
return True
except boto.exception.BotoServerError as e:
log.debug(e)
msg = 'Failed to delete LC {0}'.format(name)
log.error(msg)
return False
def get_scaling_policy_arn(as_group, scaling_policy_name, region=None,
key=None, keyid=None, profile=None):
'''
Return the arn for a scaling policy in a specific autoscale group or None
if not found. Mainly used as a helper method for boto_cloudwatch_alarm, for
linking alarms to scaling policies.
CLI Example::
salt '*' boto_asg.get_scaling_policy_arn mygroup mypolicy
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
policies = conn.get_all_policies(as_group=as_group)
for policy in policies:
if policy.name == scaling_policy_name:
return policy.policy_arn
    log.error('Could not find policy {0} in autoscale group {1}'.format(scaling_policy_name, as_group))
return None
def get_instances(name, lifecycle_state="InService", health_status="Healthy", attribute="private_ip_address", region=None, key=None, keyid=None, profile=None):
"""return attribute of all instances in the named autoscale group.
CLI example::
salt-call boto_asg.get_instances my_autoscale_group_name
"""
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
ec2_conn = _get_ec2_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
asgs = conn.get_all_groups(names=[name])
except boto.exception.BotoServerError as e:
log.debug(e)
return False
if len(asgs) != 1:
log.debug("name '{0}' returns multiple ASGs: {1}".format(name, [asg.name for asg in asgs]))
return False
asg = asgs[0]
instance_ids = []
# match lifecycle_state and health_status
for i in asg.instances:
if lifecycle_state is not None and i.lifecycle_state != lifecycle_state:
continue
if health_status is not None and i.health_status != health_status:
continue
instance_ids.append(i.instance_id)
# get full instance info, so that we can return the attribute
instances = ec2_conn.get_only_instances(instance_ids=instance_ids)
return [getattr(instance, attribute).encode("ascii") for instance in instances]
| 39.60219
| 190
| 0.646945
|
328e84854a1a976f05c957b09e55654232d32f6e
| 3,547
|
py
|
Python
|
rally/plugins/openstack/scenarios/neutron/security_groups.py
|
LorenzoBianconi/rally
|
2bbd7ee590cca048fb4ad6a8eefc484989979ff8
|
[
"Apache-2.0"
] | null | null | null |
rally/plugins/openstack/scenarios/neutron/security_groups.py
|
LorenzoBianconi/rally
|
2bbd7ee590cca048fb4ad6a8eefc484989979ff8
|
[
"Apache-2.0"
] | 1
|
2020-07-14T11:29:31.000Z
|
2020-07-14T11:29:31.000Z
|
rally/plugins/openstack/scenarios/neutron/security_groups.py
|
LorenzoBianconi/rally
|
2bbd7ee590cca048fb4ad6a8eefc484989979ff8
|
[
"Apache-2.0"
] | 1
|
2020-07-02T01:33:48.000Z
|
2020-07-02T01:33:48.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally import consts
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.neutron import utils
from rally.task import validation
class NeutronSecurityGroup(utils.NeutronScenario):
"""Benchmark scenarios for Neutron Security Groups."""
@validation.required_services(consts.Service.NEUTRON)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["neutron"]})
def create_and_list_security_groups(self, security_group_create_args=None):
"""Create and list Neutron security-groups.
Measure the "neutron security-group-create" and "neutron
security-group-list" command performance.
:param security_group_create_args: dict, POST /v2.0/security-groups
request options
"""
security_group_create_args = security_group_create_args or {}
self._create_security_group(**security_group_create_args)
self._list_security_groups()
@validation.required_services(consts.Service.NEUTRON)
@validation.required_openstack(users=True)
@scenario.configure()
def create_and_delete_security_groups(self,
security_group_create_args=None):
"""Create and delete Neutron security-groups.
Measure the "neutron security-group-create" and "neutron
security-group-delete" command performance.
:param security_group_create_args: dict, POST /v2.0/security-groups
request options
"""
security_group_create_args = security_group_create_args or {}
security_group = self._create_security_group(
**security_group_create_args)
self._delete_security_group(security_group)
@validation.required_services(consts.Service.NEUTRON)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["neutron"]})
def create_and_update_security_groups(self,
security_group_create_args=None,
security_group_update_args=None):
"""Create and update Neutron security-groups.
Measure the "neutron security-group-create" and "neutron
security-group-update" command performance.
:param security_group_create_args: dict, POST /v2.0/security-groups
request options
:param security_group_update_args: dict, POST /v2.0/security-groups
update options
"""
security_group_create_args = security_group_create_args or {}
security_group_update_args = security_group_update_args or {}
security_group = self._create_security_group(
**security_group_create_args)
self._update_security_group(security_group,
**security_group_update_args)
| 45.474359
| 79
| 0.673809
|
bd9e4a2d7f32035cf02a2267bfca2e54e4216f8a
| 3,013
|
py
|
Python
|
utils/utils.py
|
kashimmirza/TextBoxGan
|
ba7aeef92a0cce2c5a33d259beda038825a4bac4
|
[
"MIT"
] | 1
|
2021-03-14T11:27:33.000Z
|
2021-03-14T11:27:33.000Z
|
utils/utils.py
|
kashimmirza/TextBoxGan
|
ba7aeef92a0cce2c5a33d259beda038825a4bac4
|
[
"MIT"
] | null | null | null |
utils/utils.py
|
kashimmirza/TextBoxGan
|
ba7aeef92a0cce2c5a33d259beda038825a4bac4
|
[
"MIT"
] | null | null | null |
from typing import List
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from config import cfg
def mask_text_box(fake_images: tf.float32, input_words: tf.int32, char_width: int):
"""
Masks the text boxes outputted by the generator, in the cases where the length of the word is less than
cfg.max_char_number. Since each character is supposed to take 1/cfg.max_char_number of the width of the text box,
this function masks the extra width.
Parameters
----------
fake_images: Text boxes generated with our model.
input_words: Integer sequences obtained from the input words (initially strings) using the MAIN_CHAR_VECTOR.
char_width: Width of a single character.
Returns
-------
Masked fake_images
"""
mask = tf.tile(
tf.expand_dims(
tf.expand_dims(
tf.repeat(
tf.where(input_words == 0, 0.0, 1.0),
repeats=tf.tile([char_width], [tf.shape(input_words)[1]]),
axis=1,
),
1,
),
1,
),
[1, fake_images.shape[1], fake_images.shape[2], 1],
)
return fake_images * mask
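# Shape sketch (illustrative, not part of the original file): for a batch of B
# words of max length L and character width w, input_words is (B, L); where()
# flags real characters with 1.0, repeat() expands each flag to w columns,
# giving (B, L * w), and the two expand_dims plus tile broadcast the mask to
# the NCHW image shape (B, C, H, L * w) before the elementwise multiply.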
def generator_output_to_uint8(fake_images: tf.float32):
"""
Converts the output of the generator to uint8 RGB images.
Parameters
----------
fake_images: Text boxes generated with our model.
Returns
-------
Generated text boxes in a uint8 RGB format.
"""
fake_images = (tf.clip_by_value(fake_images, -1.0, 1.0) + 1.0) * 127.5
fake_images = tf.transpose(fake_images, perm=[0, 2, 3, 1])
return tf.cast(fake_images, tf.uint8)
def string_to_main_int_sequence(words_list: List[str]):
"""
    Converts input strings to integer sequences using the main character vector, and pads them if their length is less
    than cfg.max_char_number.
Parameters
----------
words_list: List of words to generate
Returns
-------
Integer sequences obtained from the input words (initially strings) using the MAIN_CHAR_VECTOR.
"""
int_sequence = cfg.char_tokenizer.main.texts_to_sequences(words_list)
# First element is 1 so remove 1 to each element to match embedding shape
return (
pad_sequences(int_sequence, maxlen=cfg.max_char_number, value=1, padding="post")
- 1
)
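# Illustrative example (assumed tokenizer mapping): if cfg.char_tokenizer.main
# maps "a" -> 2 and "b" -> 3 and cfg.max_char_number is 5, ["ab"] becomes
# [[2, 3, 1, 1, 1]] after padding with 1, then [[1, 2, 0, 0, 0]] after the
# -1 shift, so padded positions index row 0 of the character embedding table.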
def string_to_aster_int_sequence(words_list: List[str]):
"""
    Converts input strings to integer sequences using aster's character vector, and pads them if their length is less
    than cfg.max_char_number.
Parameters
----------
words_list: List of words to generate
Returns
-------
Integer sequences obtained from the input words (initially strings) using the ASTER_CHAR_VECTOR.
"""
int_sequence = cfg.char_tokenizer.aster.texts_to_sequences(words_list)
return pad_sequences(
int_sequence, maxlen=cfg.max_char_number, value=1, padding="post"
)
| 29.539216
| 118
| 0.658148
|
47cb4a0a213c6dee725b3019e7e588ac1037c26e
| 2,013
|
py
|
Python
|
Shark_Training/pyimagesearch/nn/conv/simplenet.py
|
crpurcell/MQ_DPI_Release
|
97444513e8b8d48ec91ff8a43b9dfaed0da029f9
|
[
"MIT"
] | null | null | null |
Shark_Training/pyimagesearch/nn/conv/simplenet.py
|
crpurcell/MQ_DPI_Release
|
97444513e8b8d48ec91ff8a43b9dfaed0da029f9
|
[
"MIT"
] | null | null | null |
Shark_Training/pyimagesearch/nn/conv/simplenet.py
|
crpurcell/MQ_DPI_Release
|
97444513e8b8d48ec91ff8a43b9dfaed0da029f9
|
[
"MIT"
] | null | null | null |
#=============================================================================#
# #
# MODIFIED: 05-Sep-2018 by C. Purcell #
# #
#=============================================================================#
from keras.models import Sequential
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Flatten
from keras.layers.core import Dropout
from keras.layers.core import Dense
from keras import backend as K

#-----------------------------------------------------------------------------#
class SimpleNet:
@staticmethod
def build(width, height, depth, classes):
# Initialize the model and channel order
model = Sequential()
inputShape = (height, width, depth)
chanDim = -1
if K.image_data_format() == "channels_first":
inputShape = (depth, height, width)
chanDim = 1
# First CONV(32) => RELU => POOL(2) layer set
model.add(Conv2D(32, (3, 3), input_shape=inputShape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# Second CONV(32) => RELU => POOL(2) layer set
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# Third CONV(64) => RELU => POOL(2) layer set
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# FC(64) => RELU => DROP(0.5)
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
# Softmax classifier
model.add(Dense(classes))
model.add(Activation('softmax'))
return model
| 36.6
| 79
| 0.486836
|
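A minimal sketch of building the network above; the input size, class count, and string-named optimizer are placeholder choices, not settings from the original training script.

model = SimpleNet.build(width=64, height=64, depth=3, classes=2)
model.compile(loss="categorical_crossentropy", optimizer="adam",
              metrics=["accuracy"])
model.summary()  # three CONV => RELU => POOL blocks, an FC(64) head, softmax output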
002bf02b27d5bfd97d1a7445458c50b7fe992004
| 8,360
|
py
|
Python
|
src/ebay_rest/api/buy_marketing/configuration.py
|
gbm001/ebay_rest
|
077d3478423ccd80ff35e0361821d6a11180bc54
|
[
"MIT"
] | 3
|
2021-12-12T04:28:03.000Z
|
2022-03-10T03:29:18.000Z
|
src/ebay_rest/api/buy_marketing/configuration.py
|
jdavv/ebay_rest
|
20fc88c6aefdae9ab90f9c1330e79abddcd750cd
|
[
"MIT"
] | 33
|
2021-06-16T20:44:36.000Z
|
2022-03-30T14:55:06.000Z
|
src/ebay_rest/api/buy_marketing/configuration.py
|
jdavv/ebay_rest
|
20fc88c6aefdae9ab90f9c1330e79abddcd750cd
|
[
"MIT"
] | 7
|
2021-06-03T09:30:23.000Z
|
2022-03-08T19:51:33.000Z
|
# coding: utf-8
"""
Buy Marketing API
The Marketing API retrieves eBay products based on a metric, such as Best Selling, as well as products that were also bought and also viewed. # noqa: E501
OpenAPI spec version: v1_beta.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import copy
import logging
import multiprocessing
import sys
import urllib3
import six
from six.moves import http_client as httplib


class TypeWithDefault(type):
def __init__(cls, name, bases, dct):
super(TypeWithDefault, cls).__init__(name, bases, dct)
cls._default = None
def __call__(cls):
if cls._default is None:
cls._default = type.__call__(cls)
return copy.copy(cls._default)
def set_default(cls, default):
        cls._default = copy.copy(default)


class Configuration(six.with_metaclass(TypeWithDefault, object)):
"""NOTE: This class is auto generated by the swagger code generator program.
Ref: https://github.com/swagger-api/swagger-codegen
Do not edit the class manually.
"""
def __init__(self):
"""Constructor"""
# Default Base url
self.host = "https://api.ebay.com{basePath}"
# Temp file folder for downloading files
self.temp_folder_path = None
# Authentication Settings
# dict to store API key(s)
self.api_key = {}
# dict to store API prefix (e.g. Bearer)
self.api_key_prefix = {}
# function to refresh API key if expired
self.refresh_api_key_hook = None
# Username for HTTP basic authentication
self.username = ""
# Password for HTTP basic authentication
self.password = ""
# access token for OAuth
self.access_token = ""
# Logging Settings
self.logger = {}
self.logger["package_logger"] = logging.getLogger("buy_marketing")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
# Log format
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
# Log stream handler
self.logger_stream_handler = None
# Log file handler
self.logger_file_handler = None
# Debug file location
self.logger_file = None
# Debug switch
self.debug = False
# SSL/TLS verification
# Set this to false to skip verifying SSL certificate when calling API
# from https server.
self.verify_ssl = True
# Set this to customize the certificate file to verify the peer.
self.ssl_ca_cert = None
# client certificate file
self.cert_file = None
# client key file
self.key_file = None
# Set this to True/False to enable/disable SSL hostname verification.
self.assert_hostname = None
# urllib3 connection pool's maximum number of connections saved
# per pool. urllib3 uses 1 connection as default value, but this is
# not the best value when you are making a lot of possibly parallel
# requests to the same host, which is often the case here.
# cpu_count * 5 is used as default value to increase performance.
self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
# Proxy URL
self.proxy = None
# Safe chars for path_param
self.safe_chars_for_path_param = ''
@property
def logger_file(self):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_file_handler)
if self.logger_stream_handler:
logger.removeHandler(self.logger_stream_handler)
else:
# If not set logging file,
# then add stream handler and remove file handler.
self.logger_stream_handler = logging.StreamHandler()
self.logger_stream_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_stream_handler)
if self.logger_file_handler:
logger.removeHandler(self.logger_file_handler)
@property
def debug(self):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
return self.__debug
@debug.setter
def debug(self, value):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.DEBUG)
# turn on httplib debug
httplib.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.WARNING)
# turn off httplib debug
httplib.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
"""The logger format.
        The logger_formatter will be updated when logger_format is set.
:param value: The format string.
:type: str
"""
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
"""The logger format.
        The logger_formatter will be updated when logger_format is set.
:param value: The format string.
:type: str
"""
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier):
"""Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:return: The token for api key authentication.
"""
if self.refresh_api_key_hook:
self.refresh_api_key_hook(self)
key = self.api_key.get(identifier)
if key:
prefix = self.api_key_prefix.get(identifier)
if prefix:
return "%s %s" % (prefix, key)
else:
return key
def get_basic_auth_token(self):
"""Gets HTTP basic authentication header (string).
:return: The token for basic HTTP authentication.
"""
return urllib3.util.make_headers(
basic_auth=self.username + ':' + self.password
).get('authorization')
def auth_settings(self):
"""Gets Auth Settings dict for api client.
:return: The Auth Settings information dict.
"""
return {
'api_auth':
{
'type': 'oauth2',
'in': 'header',
'key': 'Authorization',
'value': 'Bearer ' + self.access_token
},
}
def to_debug_report(self):
"""Gets the essential information for debugging.
:return: The report for debugging.
"""
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: v1_beta.1.0\n"\
"SDK Package Version: 1.0.0".\
format(env=sys.platform, pyversion=sys.version)
| 32.913386
| 159
| 0.608732
|
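A minimal sketch of driving the generated Configuration; the token and log path are placeholders. Because TypeWithDefault hands out a copy of a shared default instance, changes below affect only this copy unless Configuration.set_default(...) is called.

config = Configuration()
config.access_token = "<oauth-token>"      # consumed by auth_settings() as a Bearer header
config.debug = True                        # setter switches all package loggers to DEBUG
config.logger_file = "buy_marketing.log"   # setter swaps the stream handler for a file handler

print(config.auth_settings()["api_auth"]["value"])  # Bearer <oauth-token>
print(config.to_debug_report())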
54ce8f3f5566f3f8cb283172a78856bf280b04ad
| 1,224
|
py
|
Python
|
ml-agents/mlagents/trainers/tests/test_models.py
|
williamthegrey/ml-agents
|
c75eacbdc40f9afffaedfd79d861b83a88cf3b8d
|
[
"Apache-2.0"
] | 1
|
2020-07-06T14:15:33.000Z
|
2020-07-06T14:15:33.000Z
|
ml-agents/mlagents/trainers/tests/test_models.py
|
williamthegrey/ml-agents
|
c75eacbdc40f9afffaedfd79d861b83a88cf3b8d
|
[
"Apache-2.0"
] | null | null | null |
ml-agents/mlagents/trainers/tests/test_models.py
|
williamthegrey/ml-agents
|
c75eacbdc40f9afffaedfd79d861b83a88cf3b8d
|
[
"Apache-2.0"
] | 1
|
2020-12-10T09:53:30.000Z
|
2020-12-10T09:53:30.000Z
|
import pytest
from mlagents.trainers.models import ModelUtils
from mlagents.tf_utils import tf
from mlagents_envs.base_env import BehaviorSpec, ActionType


def create_behavior_spec(num_visual, num_vector, vector_size):
behavior_spec = BehaviorSpec(
[(84, 84, 3)] * int(num_visual) + [(vector_size,)] * int(num_vector),
ActionType.DISCRETE,
(1,),
)
    return behavior_spec


@pytest.mark.parametrize("num_visual", [1, 2, 4])
@pytest.mark.parametrize("num_vector", [1, 2, 4])
def test_create_input_placeholders(num_vector, num_visual):
vec_size = 8
name_prefix = "test123"
bspec = create_behavior_spec(num_visual, num_vector, vec_size)
vec_in, vis_in = ModelUtils.create_input_placeholders(
bspec.observation_shapes, name_prefix=name_prefix
)
assert isinstance(vis_in, list)
assert len(vis_in) == num_visual
assert isinstance(vec_in, tf.Tensor)
    assert vec_in.get_shape().as_list()[1] == num_vector * vec_size
# Check names contain prefix and vis shapes are correct
for _vis in vis_in:
assert _vis.get_shape().as_list() == [None, 84, 84, 3]
assert _vis.name.startswith(name_prefix)
assert vec_in.name.startswith(name_prefix)
| 33.081081
| 77
| 0.714869
|
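A minimal sketch of calling the same helpers outside pytest, assuming only the signatures exercised by the test above; the observation counts are arbitrary.

spec = create_behavior_spec(num_visual=2, num_vector=1, vector_size=8)
vec_in, vis_in = ModelUtils.create_input_placeholders(
    spec.observation_shapes, name_prefix="demo"
)
print(vec_in.shape)               # (None, 8): vector observations are concatenated
print([v.shape for v in vis_in])  # two (None, 84, 84, 3) visual placeholders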
ef2beccca2b40e133e227b3f37b4302cba51a0b9
| 1,467
|
py
|
Python
|
DynamicProgramming/CommonPatternsContinued/paint_fence.py
|
mamoudmatook/Leetcode
|
59fb1612ee648a9b99ff7cc779ada5656c01ecd2
|
[
"MIT"
] | null | null | null |
DynamicProgramming/CommonPatternsContinued/paint_fence.py
|
mamoudmatook/Leetcode
|
59fb1612ee648a9b99ff7cc779ada5656c01ecd2
|
[
"MIT"
] | null | null | null |
DynamicProgramming/CommonPatternsContinued/paint_fence.py
|
mamoudmatook/Leetcode
|
59fb1612ee648a9b99ff7cc779ada5656c01ecd2
|
[
"MIT"
] | null | null | null |
#
# Created on Sun Dec 26 2021
#
# The MIT License (MIT)
# Copyright (c) 2021 Maatuq
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from functools import lru_cache


class Solution:
def numWays(self, n: int, k: int) -> int:
@lru_cache(maxsize=None)
def dp(i):
if i == 1:
return k
if i == 2:
return k * k
return (k - 1) * (dp(i - 1) + dp(i - 2))
return dp(n)
| 41.914286
| 122
| 0.697342
|
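The recurrence deserves one line of justification: the i-th post either differs from post i - 1, giving (k - 1) * dp(i - 1) options, or matches it, which forces post i - 2 to differ and gives (k - 1) * dp(i - 2) options. A brute-force cross-check of that recurrence (illustration only, not part of the original file):

from itertools import product

def brute_force(n: int, k: int) -> int:
    # Count colorings with no three consecutive posts sharing a color.
    return sum(
        all(not (a == b == c) for a, b, c in zip(seq, seq[1:], seq[2:]))
        for seq in product(range(k), repeat=n)
    )

solver = Solution()
for n, k in [(1, 3), (2, 3), (3, 2), (4, 3), (5, 2)]:
    assert solver.numWays(n, k) == brute_force(n, k)
print("recurrence matches brute force")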