Dataset columns:

| Column | Type | Range / notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
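The last three columns are statistics computed from `content`. As a rough illustration only (a sketch of one plausible computation, assuming a row is available as a plain dict keyed by the column names above), they relate to `content` like this:

def derived_stats(content):
    """Recompute per-row content statistics (one plausible definition)."""
    lines = content.splitlines()
    return {
        "avg_line_length": sum(len(line) for line in lines) / max(len(lines), 1),
        "max_line_length": max((len(line) for line in lines), default=0),
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / max(len(content), 1),
    }

# usage: derived_stats(row["content"]) should roughly match the stored columns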
Row 1
hexsha e62035eb55279994b4c05dd9b254069dd128fe17 | size 2,583 | ext py | lang Python
max_stars: rolling/hash.py @ ajcr/rolling (head 7bbac93809a0ad1cd6e128cfd64b70a99d26ea8c), licenses ["MIT"], count 189, events 2018-03-12T00:31:19.000Z to 2022-03-26T00:17:38.000Z
max_issues: rolling/hash.py @ ajcr/rolling (head 7bbac93809a0ad1cd6e128cfd64b70a99d26ea8c), licenses ["MIT"], count 23, events 2017-12-31T17:50:48.000Z to 2021-11-27T15:31:54.000Z
max_forks: rolling/hash.py @ ajcr/rolling (head 7bbac93809a0ad1cd6e128cfd64b70a99d26ea8c), licenses ["MIT"], count 7, events 2019-01-28T02:53:49.000Z to 2021-11-11T18:34:45.000Z
content:
from collections import Counter, deque
from itertools import islice
from .base import RollingObject
DEF_BASE = 719
DEF_MOD = 2 ** 61 - 1
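# Added note: DEF_MOD is the Mersenne prime 2**61 - 1, a common choice of large
# prime modulus for polynomial hashing; DEF_BASE = 719 is simply a fixed prime base.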
def polynomial_hash_sequence(seq, base=DEF_BASE, mod=DEF_MOD):
"""
Compute the polynomial hash of a sequence.
"""
return sum(hash(c) * pow(base, k, mod) for k, c in enumerate(reversed(seq))) % mod
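# Added note: Python salts str hashes per process (see PYTHONHASHSEED), so the values
# produced by hash(c) are stable within one run but can differ across runs unless the
# hash seed is fixed.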
class PolynomialHash(RollingObject):
"""
Rolling polynomial hash.
Computes a hash of a window of size k as:
hash(w_0) * base ** (k - 1)
+ hash(w_1) * base ** (k - 2)
+ ...
+ hash(w_(k-2)) * base
+ hash(w_(k-1))
% mod
where `base` and `mod` are constant integers.
To minimise collisions, it is recommended that you
adjust these values to meet your specific use case.
See: wikipedia.org/wiki/Rolling_hash#Polynomial_rolling_hash
Parameters
----------
iterable : iterable of hashable objects
window_size : integer, the size of the rolling
window moving over the iterable
window_type : str, "fixed" or "variable"
base : integer, polynomial base
mod : integer, all hashes are modulus this value
Complexity
----------
Update time: O(1)
Memory usage: O(k)
where k is the size of the rolling window
Examples
--------
>>> import rolling
>>> r_hash = rolling.PolynomialHash("abcxyabc", window_size=3,
... base=31, mod=9967)
>>> list(r_hash)
[4984, 900, 5072, 771, 8757, 4984]
"""
def __init__(
self, iterable, window_size, window_type="fixed", base=DEF_BASE, mod=DEF_MOD
):
self._hash = 0
self._base = base
self._mod = mod
super().__init__(iterable, window_size, window_type)
def _init_fixed(self, *args, **kwargs):
self._buffer = deque([0])
for val in islice(self._iterator, self.window_size - 1):
self._add_new(val)
def _init_variable(self, *args, **kwargs):
self._buffer = deque()
def _add_new(self, new):
self._hash *= self._base
self._hash += hash(new)
self._hash %= self._mod
self._buffer.append(new)
def _remove_old(self):
old = self._buffer.popleft()
self._hash -= hash(old) * pow(self._base, self._obs, self._mod)
self._hash %= self._mod
def _update_window(self, new):
self._remove_old()
self._add_new(new)
@property
def current_value(self):
return self._hash
@property
def _obs(self):
return len(self._buffer)
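A quick consistency check, added here as a sketch rather than part of the original file; it assumes the package is importable as `rolling` with this module exposed at `rolling.hash`, as the repository path above suggests.

from rolling.hash import PolynomialHash, polynomial_hash_sequence

text, k = "abcxyabc", 3
rolled = list(PolynomialHash(text, window_size=k, base=31, mod=9967))
direct = [polynomial_hash_sequence(text[i:i + k], base=31, mod=9967)
          for i in range(len(text) - k + 1)]
assert rolled == direct  # O(1) rolling updates agree with recomputing each window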
avg_line_length 24.6 | max_line_length 86 | alphanum_fraction 0.603175

Row 2
hexsha 8f69c335bd08d12466ea7908b7bcec825f79053c | size 4,012 | ext py | lang Python
max_stars: mydataset.py @ dreamakeo/catsvsdogs (head 4113e60f348ca882aebd63315526116df2e550c4), licenses ["MIT"], count null, events null to null
max_issues: mydataset.py @ dreamakeo/catsvsdogs (head 4113e60f348ca882aebd63315526116df2e550c4), licenses ["MIT"], count null, events null to null
max_forks: mydataset.py @ dreamakeo/catsvsdogs (head 4113e60f348ca882aebd63315526116df2e550c4), licenses ["MIT"], count null, events null to null
content:
# Split the raw data and generate the label files
import os
import shutil
import collections
import math
import csv
import random
root_dir = '/home/ding/DATA/dataset/'
valid_ratio = 0.1
batch_size = 4
data_train_dir = os.path.join(root_dir,'train')
# Shuffle a dictionary (return a copy with randomized key order)
def random_dic(dicts):
dict_key_ls = list(dicts.keys())
random.shuffle(dict_key_ls)
new_dic = {}
for key in dict_key_ls:
new_dic[key] = dicts.get(key)
return new_dic
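# Added note: this relies on dict preserving insertion order (Python 3.7+), so the
# shuffled key order survives in new_dic. A shorter equivalent would be:
#   items = list(dicts.items()); random.shuffle(items); return dict(items)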
# Create the CSV label file
def create_csv_labels(data_train_dir,csv_dir = os.path.join(data_train_dir,'../trainLabels.csv')):
for root,dir,files in os.walk(data_train_dir):
tokens = [name.rstrip().split('.') for name in files]
data_label_list = [label for label,id,suffix in tokens]
data_id_list = [(label+'.'+ id) for label,id,suffix in tokens]
csv_dict = dict(zip(data_id_list,data_label_list))
csv_dict = random_dic(csv_dict)
#print(csv_dict.items())
with open(csv_dir,'w') as f:
csv_write = csv.writer(f)
csv_head = ["id","label"]
csv_write.writerow(csv_head)
for id,label in csv_dict.items():
#print(id,label)
csv_write.writerow([id,label])
# for name in files:
# print(name.rstrip().split('.',1))
labels_map = {
0: "cat",
1: "dog",
}
# Create the CSV label file, v2 (the id keeps the file suffix)
def create_csv_labels_v2(data_train_dir,csv_dir = os.path.join(data_train_dir,'../trainLabels1.csv')):
for root,dir,files in os.walk(data_train_dir):
tokens = [name.rstrip().split('.') for name in files]
data_label_list = [label for label,id,suffix in tokens]
data_id_list = [(label+'.'+ id+'.'+suffix) for label,id,suffix in tokens]
csv_dict = dict(zip(data_id_list,data_label_list))
csv_dict = random_dic(csv_dict)
#print(csv_dict.items())
with open(csv_dir,'w') as f:
csv_write = csv.writer(f)
csv_head = ["id","label"]
csv_write.writerow(csv_head)
for id,label in csv_dict.items():
#print(id,label)
csv_write.writerow([id,label])
# for name in files:
# print(name.rstrip().split('.',1))
# Read the label dictionary from the CSV file
def read_csv_labels(fname):
with open(fname, 'r') as f:
lines = f.readlines()[1:]
tokens = [l.rstrip().split(',') for l in lines]
return dict(((name, label) for name, label in tokens))
def copyfile(filename,target_dir):
os.makedirs(target_dir,exist_ok = True)
shutil.copy(filename,target_dir)
# Split into training and validation sets
def reorg_train_valid(data_dir, labels, valid_ratio):
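# Added note: n is the example count of the rarest label; n_valid_per_label files of
# every label are copied into the validation split so the classes stay balanced.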
n = collections.Counter(labels.values()).most_common()[-1][1]
n_valid_per_label = max(1, math.floor(n * valid_ratio))
label_count = {}
for train_file in os.listdir(os.path.join(data_dir, 'train')):
l = train_file.split('.')
label = labels[l[0]+'.'+l[1]]
fname = os.path.join(data_dir, 'train', train_file)
copyfile(
fname,
os.path.join(data_dir, 'train_valid_test', 'train_valid', label))
if label not in label_count or label_count[label] < n_valid_per_label:
copyfile(
fname,
os.path.join(data_dir, 'train_valid_test', 'valid', label))
label_count[label] = label_count.get(label, 0) + 1
else:
copyfile(
fname,
os.path.join(data_dir, 'train_valid_test', 'train', label))
return n_valid_per_label
# Organize the test set
def reorg_test(data_dir):
for test_file in os.listdir(os.path.join(data_dir, 'test')):
copyfile(
os.path.join(data_dir, 'test', test_file),
os.path.join(data_dir, 'train_valid_test', 'test', 'unknown'))
def reorg_catsanddog_data(data_dir, valid_ratio):
labels = read_csv_labels(os.path.join(data_dir, 'trainLabels.csv'))
reorg_train_valid(data_dir, labels, valid_ratio)
reorg_test(data_dir)
valid_ratio = 0.1
#create_csv_labels_v2(data_train_dir)
#reorg_catsanddog_data(root_dir,valid_ratio)
avg_line_length 33.714286 | max_line_length 102 | alphanum_fraction 0.629611

Row 3
hexsha 88d7e0baffc611d60cbeb788fae0f16b5ee4937f | size 6,960 | ext py | lang Python
max_stars: tests/django_init.py @ tbaschak/peeringdb (head 20d89d53d8e1d807383fa84d74601e37ba4dc9d4), licenses ["BSD-2-Clause"], count null, events null to null
max_issues: tests/django_init.py @ tbaschak/peeringdb (head 20d89d53d8e1d807383fa84d74601e37ba4dc9d4), licenses ["BSD-2-Clause"], count null, events null to null
max_forks: tests/django_init.py @ tbaschak/peeringdb (head 20d89d53d8e1d807383fa84d74601e37ba4dc9d4), licenses ["BSD-2-Clause"], count null, events null to null
content:
import os
from django.conf import settings
# lazy init for translations
_ = lambda s: s
# from django.utils.translation import ugettext_lazy as _
settings.configure(
PACKAGE_VERSION="dev",
RELEASE_ENV="dev",
MIGRATION_MODULES={"django_peeringdb": None},
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django_otp",
"django_otp.plugins.otp_static",
"django_otp.plugins.otp_totp",
"django_otp.plugins.otp_email",
"two_factor",
"grappelli",
"django.contrib.admin",
"django.contrib.sessions",
"django.contrib.sites",
"django_inet",
"django_peeringdb",
"django_namespace_perms",
"django_countries",
"oauth2_provider",
"peeringdb_server",
"allauth",
"allauth.account",
"reversion",
"rest_framework",
"dal",
"dal_select2",
"corsheaders",
"captcha",
],
CACHES={
"default": {
"BACKEND": "django.core.cache.backends.db.DatabaseCache",
"LOCATION": "django_cache",
}
},
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
"DIRS": (
os.path.join(
os.path.dirname(__file__), "..", "peeringdb_server", "templates"
),
),
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
],
},
}
],
LANGUAGE_CODE="en-us",
LANGUAGES=[("en", _("English")), ("pt", _("Portuguese")),],
USE_L10N=True,
USE_I18N=True,
MEDIA_URL="/m/",
STATIC_URL="/s/",
MIDDLEWARE=(
"corsheaders.middleware.CorsMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"peeringdb_server.maintenance.Middleware",
),
SOUTH_TESTS_MIGRATE=False,
SOUTH_SKIP_TESTS=True,
AUTH_USER_MODEL="peeringdb_server.User",
TABLE_PREFIX="peeringdb_",
PEERINGDB_ABSTRACT_ONLY=True,
COUNTRIES_OVERRIDE={"XK": _("Kosovo")},
CLIENT_COMPAT={
"client": {"min": (0, 6), "max": (0, 6, 5)},
"backends": {"django_peeringdb": {"min": (0, 6), "max": (0, 6, 5)}},
},
DATABASE_ENGINE="django.db.backends.sqlite3",
DATABASES={
"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:",},
# TODO - this is supposed to work to mimic replication
# during tests, but doesn't. So instead we use the
# peeringdb_server.db_router.TestRouter class,
# which always uses the default db for reads and writes
#'read' : {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': ':memory:',
# 'TEST' : { 'MIRROR' : 'default' }
# }
},
# TODO - change to peeringdb_server.db_router.DatabaseRouter
# if replication mimicking (see above) gets fixed
DATABASE_ROUTERS=["peeringdb_server.db_router.TestRouter"],
DEBUG=False,
GUEST_GROUP_ID=1,
USER_GROUP_ID=2,
TEMPLATE_DEBUG=False,
BASE_URL="https://localhost",
PASSWORD_RESET_URL="localhost",
API_CACHE_ROOT="tests/api-cache",
API_CACHE_ENABLED=False,
SUGGEST_ENTITY_ORG=1234,
API_URL="localhost",
REST_FRAMEWORK={
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.BasicAuthentication",
"rest_framework.authentication.SessionAuthentication",
),
"DEFAULT_MODEL_SERIALIZER_CLASS": "rest_framework.serializers.HyperlinkedModelSerializer",
"DEFAULT_PERMISSION_CLASSES": [
"rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly",
"django_namespace_perms.rest.BasePermission",
],
"DEFAULT_RENDERER_CLASSES": ("peeringdb_server.renderers.MetaJSONRenderer",),
"DEFAULT_SCHEMA_CLASS": "rest_framework.schemas.coreapi.AutoSchema",
},
NSP_MODE="crud",
NSP_GUEST_GROUP="guest",
DEBUG_EMAIL=True,
TIME_ZONE="UTC",
USE_TZ=True,
POC_DELETION_PERIOD=30,
PROTECTED_OBJECT_NOTIFICATION_PERIOD=1,
AUTHENTICATION_BACKENDS=("django_namespace_perms.auth.backends.NSPBackend",),
ROOT_URLCONF="mainsite.urls",
LOGGING={
"version": 1,
"disable_existing_loggers": True,
"handlers": {"stderr": {"level": "DEBUG", "class": "logging.StreamHandler",},},
"loggers": {
"": {"handlers": ["stderr"], "level": "DEBUG", "propagate": False},
},
},
API_DOC_STR={
"retrieve": "retrieve object",
"list": "list objects",
"update": "update object",
"delete": "delete object",
"create": "create object",
},
LOGIN_URL="/account/login",
LOGIN_REDIRECT_URL="/",
OAUTH_ENABLED=False,
RECAPTCHA_PUBLIC_KEY="",
EMAIL_SUBJECT_PREFIX="[test]",
CORS_ORIGIN_WHITELIST=[],
CORS_ALLOW_METHODS=["GET", "OPTIONS"],
CORS_ALLOW_CREDENTIALS=False,
DATA_QUALITY_MAX_PREFIX_V4_LIMIT=500000,
DATA_QUALITY_MAX_PREFIX_V6_LIMIT=500000,
DATA_QUALITY_MIN_PREFIXLEN_V4=18,
DATA_QUALITY_MAX_PREFIXLEN_V4=28,
DATA_QUALITY_MIN_PREFIXLEN_V6=64,
DATA_QUALITY_MAX_PREFIXLEN_V6=116,
DATA_QUALITY_MAX_IRR_DEPTH=3,
TUTORIAL_MODE=False,
CAPTCHA_TEST_MODE=True,
SITE_ID=1,
IXF_POSTMORTEM_LIMIT=250,
IXF_NOTIFY_IX_ON_CONFLICT=True,
IXF_NOTIFY_NET_ON_CONFLICT=True,
IXF_TICKET_ON_CONFLICT=True,
ABSTRACT_ONLY=True,
GOOGLE_GEOLOC_API_KEY="AIzatest",
RATELIMITS={
"view_affiliate_to_org_POST": "100/m",
"resend_confirmation_mail": "2/m",
"view_request_ownership_GET": "3/m",
"view_username_retrieve_initiate": "2/m",
"view_request_ownership_POST": "3/m",
"request_login_POST": "10/m",
"view_verify_POST": "2/m",
"request_translation": "10/m",
"view_import_ixlan_ixf_preview": "1/m",
"view_import_net_ixf_postmortem": "1/m",
},
MAX_USER_AFFILIATION_REQUESTS=10,
MAIL_DEBUG=True,
IXF_PARSE_ERROR_NOTIFICATION_PERIOD=36,
)
avg_line_length 35.151515 | max_line_length 98 | alphanum_fraction 0.624569

Row 4
hexsha 6b3d480afd6a58ab60f5551cbcee669d966fc73b | size 1,807 | ext py | lang Python
max_stars: OpdrachtC2.py @ kalkoen/informatica-olympiade-2019 (head 0b6e810d906b105ad7a1a1a6800e5710517c3527), licenses ["MIT"], count null, events null to null
max_issues: OpdrachtC2.py @ kalkoen/informatica-olympiade-2019 (head 0b6e810d906b105ad7a1a1a6800e5710517c3527), licenses ["MIT"], count null, events null to null
max_forks: OpdrachtC2.py @ kalkoen/informatica-olympiade-2019 (head 0b6e810d906b105ad7a1a1a6800e5710517c3527), licenses ["MIT"], count null, events null to null
content:
grid_size = 8
start = "@"
jewel = "+"
wall = "#"
bomb = "*"
stop = "O"
free = "-"
directions = [(0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1)]
class Tile:
def __init__(self, x, y, value):
self.value = value
self.x = x
self.y = y
self.paths = []
class Path:
def __init__(self):
self.passing_tiles = []  # fixed: assign to the instance rather than a throwaway local
def print_grid():
print()
for y in range(grid_size):
for x in range(grid_size):
print(grid[x][y].value, end="")
print()
print()
#Initialize and fill grid
#grid= [[]] * 8 only creates one list instance. Good to know
grid = [[] for i in range(grid_size)]
for y in range(grid_size):
line = input()
for x in range(len(line)):
grid[x].append(Tile(x, y, line[x]))
#Define possible paths for each tile
def path(start_tile, direction):
path = []
v = directions[direction]
condition = 1
while condition:
next_tile = tile(start_tile.x + v[0], start_tile.y + v[1])
print("Test")
if not next_tile:
break
condition = next_tile.value != wall
if next_tile.value == bomb:
return
path.append(next_tile)
start_tile = next_tile
return path
def tile(x, y):
if x < 0 or x >= grid_size or y < 0 or y >= grid_size:
return 0
return grid[x][y]
def add_paths():
print()
for y in range(grid_size):
for x in range(grid_size):
for d in range(8):
grid[x][y].paths.append(path(grid[x][y], d))
print(x, y, "has", len(grid[x][y].paths), "paths")
print()
print()
x = 1
y = 6
for d in range(8):
grid[x][y].paths.append(path(grid[x][y], d))
print(x, y, "has", len(grid[x][y].paths), "paths")
print_grid()
avg_line_length 19.021053 | max_line_length 83 | alphanum_fraction 0.53016

Row 5
hexsha 136886429ff0d9fe219b886015a04ca006a5a494 | size 39,295 | ext py | lang Python
max_stars: frappe/__init__.py @ olderp/frappe (head 92700852a8d182e3ec6c987123428c323eaa8844), licenses ["MIT"], count null, events null to null
max_issues: frappe/__init__.py @ olderp/frappe (head 92700852a8d182e3ec6c987123428c323eaa8844), licenses ["MIT"], count null, events null to null
max_forks: frappe/__init__.py @ olderp/frappe (head 92700852a8d182e3ec6c987123428c323eaa8844), licenses ["MIT"], count 5, events 2016-06-20T08:48:11.000Z to 2018-12-12T09:42:31.000Z
content:
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
"""
globals attached to frappe module
+ some utility functions that should probably be moved
"""
from __future__ import unicode_literals
from werkzeug.local import Local, release_local
import os, sys, importlib, inspect, json
# public
from .exceptions import *
from .utils.jinja import get_jenv, get_template, render_template
__version__ = "7.0.30"
local = Local()
class _dict(dict):
"""dict like object that exposes keys as attributes"""
def __getattr__(self, key):
ret = self.get(key)
if not ret and key.startswith("__"):
raise AttributeError()
return ret
def __setattr__(self, key, value):
self[key] = value
def __getstate__(self):
return self
def __setstate__(self, d):
self.update(d)
def update(self, d):
"""update and return self -- the missing dict feature in python"""
super(_dict, self).update(d)
return self
def copy(self):
return _dict(dict(self).copy())
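# Added example: d = _dict({"a": 1}) gives d.a == 1, while d.missing returns None
# (only unset dunder attributes raise AttributeError). This is why the code below can
# probe optional flags such as flags.rollback_on_exception without getattr guards.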
def _(msg, lang=None):
"""Returns translated string in current lang, if exists."""
from frappe.translate import get_full_dict
from frappe.utils import cstr
if not lang:
lang = local.lang
# msg should always be unicode
msg = cstr(msg)
return get_full_dict(local.lang).get(msg) or msg
def get_lang_dict(fortype, name=None):
"""Returns the translated language dict for the given type and name.
:param fortype: must be one of `doctype`, `page`, `report`, `include`, `jsfile`, `boot`
:param name: name of the document for which assets are to be returned."""
if local.lang=="en":
return {}
from frappe.translate import get_dict
return get_dict(fortype, name)
def set_user_lang(user, user_language=None):
"""Guess and set user language for the session. `frappe.local.lang`"""
from frappe.translate import get_user_lang
local.lang = get_user_lang(user)
# local-globals
db = local("db")
conf = local("conf")
form = form_dict = local("form_dict")
request = local("request")
response = local("response")
session = local("session")
user = local("user")
flags = local("flags")
error_log = local("error_log")
debug_log = local("debug_log")
message_log = local("message_log")
lang = local("lang")
def init(site, sites_path=None, new_site=False):
"""Initialize frappe for the current site. Reset thread locals `frappe.local`"""
if getattr(local, "initialised", None):
return
if not sites_path:
sites_path = '.'
local.error_log = []
local.message_log = []
local.debug_log = []
local.realtime_log = []
local.flags = _dict({
"ran_schedulers": [],
"currently_saving": [],
"redirect_location": "",
"in_install_db": False,
"in_install_app": False,
"in_import": False,
"in_test": False,
"mute_messages": False,
"ignore_links": False,
"mute_emails": False,
"has_dataurl": False,
"new_site": new_site
})
local.rollback_observers = []
local.test_objects = {}
local.site = site
local.sites_path = sites_path
local.site_path = os.path.join(sites_path, site)
local.request_ip = None
local.response = _dict({"docs":[]})
local.task_id = None
local.conf = _dict(get_site_config())
local.lang = local.conf.lang or "en"
local.lang_full_dict = None
local.module_app = None
local.app_modules = None
local.system_settings = None
local.user = None
local.user_perms = None
local.session = None
local.role_permissions = {}
local.valid_columns = {}
local.new_doc_templates = {}
local.jenv = None
local.jloader =None
local.cache = {}
setup_module_map()
local.initialised = True
def connect(site=None, db_name=None):
"""Connect to site database instance.
:param site: If site is given, calls `frappe.init`.
:param db_name: Optional. Will use from `site_config.json`."""
from database import Database
if site:
init(site)
local.db = Database(user=db_name or local.conf.db_name)
local.form_dict = _dict()
local.session = _dict()
set_user("Administrator")
def get_site_config(sites_path=None, site_path=None):
"""Returns `site_config.json` combined with `sites/common_site_config.json`.
`site_config` is a set of site wide settings like database name, password, email etc."""
config = {}
sites_path = sites_path or getattr(local, "sites_path", None)
site_path = site_path or getattr(local, "site_path", None)
if sites_path:
common_site_config = os.path.join(sites_path, "common_site_config.json")
if os.path.exists(common_site_config):
config.update(get_file_json(common_site_config))
if site_path:
site_config = os.path.join(site_path, "site_config.json")
if os.path.exists(site_config):
config.update(get_file_json(site_config))
elif local.site and not local.flags.new_site:
raise IncorrectSitePath, "{0} does not exist".format(site_config)
return _dict(config)
def get_conf(site=None):
if hasattr(local, 'conf'):
return local.conf
else:
# if no site, get from common_site_config.json
with init_site(site):
return local.conf
class init_site:
def __init__(self, site=None):
'''If site==None, initialize it for empty site ('') to load common_site_config.json'''
self.site = site or ''
def __enter__(self):
init(self.site)
return local
def __exit__(self, type, value, traceback):
destroy()
def destroy():
"""Closes connection and releases werkzeug local."""
if db:
db.close()
release_local(local)
# memcache
redis_server = None
def cache():
"""Returns memcache connection."""
global redis_server
if not redis_server:
from frappe.utils.redis_wrapper import RedisWrapper
redis_server = RedisWrapper.from_url(conf.get('redis_cache')
or "redis://localhost:11311")
return redis_server
def get_traceback():
"""Returns error traceback."""
import utils
return utils.get_traceback()
def errprint(msg):
"""Log error. This is sent back as `exc` in response.
:param msg: Message."""
from utils import cstr
if not request or (not "cmd" in local.form_dict):
print cstr(msg)
error_log.append(cstr(msg))
def log(msg):
"""Add to `debug_log`.
:param msg: Message."""
if not request:
if conf.get("logging") or False:
print repr(msg)
from utils import cstr
debug_log.append(cstr(msg))
def msgprint(msg, title=None, raise_exception=0, as_table=False, indicator=None, alert=False):
"""Print a message to the user (via HTTP response).
Messages are sent in the `__server_messages` property in the
response JSON and shown in a pop-up / modal.
:param msg: Message.
:param title: [optional] Message title.
:param raise_exception: [optional] Raise given exception and show message.
:param as_table: [optional] If `msg` is a list of lists, render as HTML table.
"""
from utils import encode
out = _dict(message=msg)
def _raise_exception():
if raise_exception:
if flags.rollback_on_exception:
db.rollback()
import inspect
if inspect.isclass(raise_exception) and issubclass(raise_exception, Exception):
raise raise_exception, encode(msg)
else:
raise ValidationError, encode(msg)
if flags.mute_messages:
_raise_exception()
return
if as_table and type(msg) in (list, tuple):
out.msg = '<table border="1px" style="border-collapse: collapse" cellpadding="2px">' + ''.join(['<tr>'+''.join(['<td>%s</td>' % c for c in r])+'</tr>' for r in msg]) + '</table>'
if flags.print_messages:
print "Message: " + repr(out.msg).encode("utf-8")
if title:
out.title = title
if not indicator and raise_exception:
indicator = 'red'
if indicator:
out.indicator = indicator
if alert:
out.alert = 1
message_log.append(json.dumps(out))
_raise_exception()
def throw(msg, exc=ValidationError, title=None):
"""Throw execption and show message (`msgprint`).
:param msg: Message.
:param exc: Exception class. Default `frappe.ValidationError`"""
msgprint(msg, raise_exception=exc, title=title, indicator='red')
def emit_js(js, user=False, **kwargs):
from frappe.async import publish_realtime
if user == False:
user = session.user
publish_realtime('eval_js', js, user=user, **kwargs)
def create_folder(path, with_init=False):
"""Create a folder in the given path and add an `__init__.py` file (optional).
:param path: Folder path.
:param with_init: Create `__init__.py` in the new folder."""
from frappe.utils import touch_file
if not os.path.exists(path):
os.makedirs(path)
if with_init:
touch_file(os.path.join(path, "__init__.py"))
def set_user(username):
"""Set current user.
:param username: **User** name to set as current user."""
local.session.user = username
local.session.sid = username
local.cache = {}
local.form_dict = _dict()
local.jenv = None
local.session.data = _dict()
local.role_permissions = {}
local.new_doc_templates = {}
local.user_perms = None
def get_user():
from frappe.utils.user import UserPermissions
if not local.user_perms:
local.user_perms = UserPermissions(local.session.user)
return local.user_perms
def get_roles(username=None):
"""Returns roles of current user."""
if not local.session:
return ["Guest"]
if username:
import frappe.utils.user
return frappe.utils.user.get_roles(username)
else:
return get_user().get_roles()
def get_request_header(key, default=None):
"""Return HTTP request header.
:param key: HTTP header key.
:param default: Default value."""
return request.headers.get(key, default)
def sendmail(recipients=(), sender="", subject="No Subject", message="No Message",
as_markdown=False, delayed=True, reference_doctype=None, reference_name=None,
unsubscribe_method=None, unsubscribe_params=None, unsubscribe_message=None,
attachments=None, content=None, doctype=None, name=None, reply_to=None,
cc=(), show_as_cc=(), message_id=None, in_reply_to=None, send_after=None, expose_recipients=False,
send_priority=1, communication=None):
"""Send email using user's default **Email Account** or global default **Email Account**.
:param recipients: List of recipients.
:param sender: Email sender. Default is current user.
:param subject: Email Subject.
:param message: (or `content`) Email Content.
:param as_markdown: Convert content markdown to HTML.
:param delayed: Send via scheduled email sender **Email Queue**. Don't send immediately. Default is true
:param send_priority: Priority for Email Queue, default 1.
:param reference_doctype: (or `doctype`) Append as communication to this DocType.
:param reference_name: (or `name`) Append as communication to this document name.
:param unsubscribe_method: Unsubscribe url with options email, doctype, name. e.g. `/api/method/unsubscribe`
:param unsubscribe_params: Unsubscribe parameters to be loaded on the unsubscribe_method [optional] (dict).
:param attachments: List of attachments.
:param reply_to: Reply-To email id.
:param message_id: Used for threading. If a reply is received to this email, Message-Id is sent back as In-Reply-To in received email.
:param in_reply_to: Used to send the Message-Id of a received email back as In-Reply-To.
:param send_after: Send after the given datetime.
:param expose_recipients: Display all recipients in the footer message - "This email was sent to"
:param communication: Communication link to be set in Email Queue record
"""
if delayed:
import frappe.email.queue
frappe.email.queue.send(recipients=recipients, sender=sender,
subject=subject, message=content or message,
reference_doctype = doctype or reference_doctype, reference_name = name or reference_name,
unsubscribe_method=unsubscribe_method, unsubscribe_params=unsubscribe_params, unsubscribe_message=unsubscribe_message,
attachments=attachments, reply_to=reply_to, cc=cc, show_as_cc=show_as_cc, message_id=message_id, in_reply_to=in_reply_to,
send_after=send_after, expose_recipients=expose_recipients, send_priority=send_priority, communication=communication)
else:
import frappe.email
if as_markdown:
frappe.email.sendmail_md(recipients, sender=sender,
subject=subject, msg=content or message, attachments=attachments, reply_to=reply_to,
cc=cc, message_id=message_id, in_reply_to=in_reply_to)
else:
frappe.email.sendmail(recipients, sender=sender,
subject=subject, msg=content or message, attachments=attachments, reply_to=reply_to,
cc=cc, message_id=message_id, in_reply_to=in_reply_to)
whitelisted = []
guest_methods = []
xss_safe_methods = []
def whitelist(allow_guest=False, xss_safe=False):
"""
Decorator for whitelisting a function and making it accessible via HTTP.
Standard request will be `/api/method/[path.to.method]`
:param allow_guest: Allow non logged-in user to access this method.
Use as:
@frappe.whitelist()
def myfunc(param1, param2):
pass
"""
def innerfn(fn):
global whitelisted, guest_methods, xss_safe_methods
whitelisted.append(fn)
if allow_guest:
guest_methods.append(fn)
if xss_safe:
xss_safe_methods.append(fn)
return fn
return innerfn
def only_for(roles):
"""Raise `frappe.PermissionError` if the user does not have any of the given **Roles**.
:param roles: List of roles to check."""
if not isinstance(roles, (tuple, list)):
roles = (roles,)
roles = set(roles)
myroles = set(get_roles())
if not roles.intersection(myroles):
raise PermissionError
def clear_cache(user=None, doctype=None):
"""Clear **User**, **DocType** or global cache.
:param user: If user is given, only user cache is cleared.
:param doctype: If doctype is given, only DocType cache is cleared."""
import frappe.sessions
if doctype:
import frappe.model.meta
frappe.model.meta.clear_cache(doctype)
reset_metadata_version()
elif user:
frappe.sessions.clear_cache(user)
else: # everything
import translate
frappe.sessions.clear_cache()
translate.clear_cache()
reset_metadata_version()
local.cache = {}
local.new_doc_templates = {}
for fn in get_hooks("clear_cache"):
get_attr(fn)()
local.role_permissions = {}
def has_permission(doctype=None, ptype="read", doc=None, user=None, verbose=False, throw=False):
"""Raises `frappe.PermissionError` if not permitted.
:param doctype: DocType for which permission is to be check.
:param ptype: Permission type (`read`, `write`, `create`, `submit`, `cancel`, `amend`). Default: `read`.
:param doc: [optional] Checks User permissions for given doc.
:param user: [optional] Check for given user. Default: current user."""
if not doctype and doc:
doctype = doc.doctype
import frappe.permissions
out = frappe.permissions.has_permission(doctype, ptype, doc=doc, verbose=verbose, user=user)
if throw and not out:
if doc:
frappe.throw(_("No permission for {0}").format(doc.doctype + " " + doc.name))
else:
frappe.throw(_("No permission for {0}").format(doctype))
return out
def has_website_permission(doctype, ptype="read", doc=None, user=None, verbose=False):
"""Raises `frappe.PermissionError` if not permitted.
:param doctype: DocType for which permission is to be check.
:param ptype: Permission type (`read`, `write`, `create`, `submit`, `cancel`, `amend`). Default: `read`.
:param doc: Checks User permissions for given doc.
:param user: [optional] Check for given user. Default: current user."""
if not user:
user = session.user
hooks = (get_hooks("has_website_permission") or {}).get(doctype, [])
if hooks:
if isinstance(doc, basestring):
doc = get_doc(doctype, doc)
for method in hooks:
result = call(method, doc=doc, ptype=ptype, user=user, verbose=verbose)
# if even a single permission check is Falsy
if not result:
return False
# else it is Truthy
return True
else:
return False
def is_table(doctype):
"""Returns True if `istable` property (indicating child Table) is set for given DocType."""
def get_tables():
return db.sql_list("select name from tabDocType where istable=1")
tables = cache().get_value("is_table", get_tables)
return doctype in tables
def get_precision(doctype, fieldname, currency=None, doc=None):
"""Get precision for a given field"""
from frappe.model.meta import get_field_precision
return get_field_precision(get_meta(doctype).get_field(fieldname), doc, currency)
def generate_hash(txt=None, length=None):
"""Generates random hash for given text + current timestamp + random string."""
import hashlib, time
from .utils import random_string
digest = hashlib.sha224((txt or "") + repr(time.time()) + repr(random_string(8))).hexdigest()
if length:
digest = digest[:length]
return digest
def reset_metadata_version():
"""Reset `metadata_version` (Client (Javascript) build ID) hash."""
v = generate_hash()
cache().set_value("metadata_version", v)
return v
def new_doc(doctype, parent_doc=None, parentfield=None, as_dict=False):
"""Returns a new document of the given DocType with defaults set.
:param doctype: DocType of the new document.
:param parent_doc: [optional] add to parent document.
:param parentfield: [optional] add against this `parentfield`."""
from frappe.model.create_new import get_new_doc
return get_new_doc(doctype, parent_doc, parentfield, as_dict=as_dict)
def set_value(doctype, docname, fieldname, value):
"""Set document value. Calls `frappe.client.set_value`"""
import frappe.client
return frappe.client.set_value(doctype, docname, fieldname, value)
def get_doc(arg1, arg2=None):
"""Return a `frappe.model.document.Document` object of the given type and name.
:param arg1: DocType name as string **or** document JSON.
:param arg2: [optional] Document name as string.
Examples:
# insert a new document
todo = frappe.get_doc({"doctype":"ToDo", "description": "test"})
todo.insert()
# open an existing document
todo = frappe.get_doc("ToDo", "TD0001")
"""
import frappe.model.document
return frappe.model.document.get_doc(arg1, arg2)
def get_last_doc(doctype):
"""Get last created document of this type."""
d = get_all(doctype, ["name"], order_by="creation desc", limit_page_length=1)
if d:
return get_doc(doctype, d[0].name)
else:
raise DoesNotExistError
def get_single(doctype):
"""Return a `frappe.model.document.Document` object of the given Single doctype."""
return get_doc(doctype, doctype)
def get_meta(doctype, cached=True):
"""Get `frappe.model.meta.Meta` instance of given doctype name."""
import frappe.model.meta
return frappe.model.meta.get_meta(doctype, cached=cached)
def get_meta_module(doctype):
import frappe.modules
return frappe.modules.load_doctype_module(doctype)
def delete_doc(doctype=None, name=None, force=0, ignore_doctypes=None, for_reload=False,
ignore_permissions=False, flags=None):
"""Delete a document. Calls `frappe.model.delete_doc.delete_doc`.
:param doctype: DocType of document to be delete.
:param name: Name of document to be delete.
:param force: Allow even if document is linked. Warning: This may lead to data integrity errors.
:param ignore_doctypes: Ignore if child table is one of these.
:param for_reload: Call `before_reload` trigger before deleting.
:param ignore_permissions: Ignore user permissions."""
import frappe.model.delete_doc
frappe.model.delete_doc.delete_doc(doctype, name, force, ignore_doctypes, for_reload,
ignore_permissions, flags)
def delete_doc_if_exists(doctype, name, force=0):
"""Delete document if exists."""
if db.exists(doctype, name):
delete_doc(doctype, name, force=force)
def reload_doctype(doctype, force=False):
"""Reload DocType from model (`[module]/[doctype]/[name]/[name].json`) files."""
reload_doc(scrub(db.get_value("DocType", doctype, "module")), "doctype", scrub(doctype), force=force)
def reload_doc(module, dt=None, dn=None, force=False):
"""Reload Document from model (`[module]/[doctype]/[name]/[name].json`) files.
:param module: Module name.
:param dt: DocType name.
:param dn: Document name.
:param force: Reload even if `modified` timestamp matches.
"""
import frappe.modules
return frappe.modules.reload_doc(module, dt, dn, force=force)
def rename_doc(doctype, old, new, debug=0, force=False, merge=False, ignore_permissions=False):
"""Rename a document. Calls `frappe.model.rename_doc.rename_doc`"""
from frappe.model.rename_doc import rename_doc
return rename_doc(doctype, old, new, force=force, merge=merge, ignore_permissions=ignore_permissions)
def get_module(modulename):
"""Returns a module object for given Python module name using `importlib.import_module`."""
return importlib.import_module(modulename)
def scrub(txt):
"""Returns sluggified string. e.g. `Sales Order` becomes `sales_order`."""
return txt.replace(' ','_').replace('-', '_').lower()
def unscrub(txt):
"""Returns titlified string. e.g. `sales_order` becomes `Sales Order`."""
return txt.replace('_',' ').replace('-', ' ').title()
def get_module_path(module, *joins):
"""Get the path of the given module name.
:param module: Module name.
:param *joins: Join additional path elements using `os.path.join`."""
module = scrub(module)
return get_pymodule_path(local.module_app[module] + "." + module, *joins)
def get_app_path(app_name, *joins):
"""Return path of given app.
:param app: App name.
:param *joins: Join additional path elements using `os.path.join`."""
return get_pymodule_path(app_name, *joins)
def get_site_path(*joins):
"""Return path of current site.
:param *joins: Join additional path elements using `os.path.join`."""
return os.path.join(local.site_path, *joins)
def get_pymodule_path(modulename, *joins):
"""Return path of given Python module name.
:param modulename: Python module name.
:param *joins: Join additional path elements using `os.path.join`."""
if not "public" in joins:
joins = [scrub(part) for part in joins]
return os.path.join(os.path.dirname(get_module(scrub(modulename)).__file__), *joins)
def get_module_list(app_name):
"""Get list of modules for given all via `app/modules.txt`."""
return get_file_items(os.path.join(os.path.dirname(get_module(app_name).__file__), "modules.txt"))
def get_all_apps(with_internal_apps=True, sites_path=None):
"""Get list of all apps via `sites/apps.txt`."""
if not sites_path:
sites_path = local.sites_path
apps = get_file_items(os.path.join(sites_path, "apps.txt"), raise_not_found=True)
if with_internal_apps:
for app in get_file_items(os.path.join(local.site_path, "apps.txt")):
if app not in apps:
apps.append(app)
if "frappe" in apps:
apps.remove("frappe")
apps.insert(0, 'frappe')
return apps
def get_installed_apps(sort=False, frappe_last=False):
"""Get list of installed apps in current site."""
if getattr(flags, "in_install_db", True):
return []
if not db:
connect()
installed = json.loads(db.get_global("installed_apps") or "[]")
if sort:
installed = [app for app in get_all_apps(True) if app in installed]
if frappe_last:
if 'frappe' in installed:
installed.remove('frappe')
installed.append('frappe')
return installed
def get_doc_hooks():
'''Returns hooked methods for given doc. It will expand the dict tuple if required.'''
if not hasattr(local, 'doc_events_hooks'):
hooks = get_hooks('doc_events', {})
out = {}
for key, value in hooks.iteritems():
if isinstance(key, tuple):
for doctype in key:
append_hook(out, doctype, value)
else:
append_hook(out, key, value)
local.doc_events_hooks = out
return local.doc_events_hooks
def get_hooks(hook=None, default=None, app_name=None):
"""Get hooks via `app/hooks.py`
:param hook: Name of the hook. Will gather all hooks for this name and return as a list.
:param default: Default if no hook found.
:param app_name: Filter by app."""
def load_app_hooks(app_name=None):
hooks = {}
for app in [app_name] if app_name else get_installed_apps(sort=True):
app = "frappe" if app=="webnotes" else app
try:
app_hooks = get_module(app + ".hooks")
except ImportError:
if local.flags.in_install_app:
# if app is not installed while restoring
# ignore it
pass
print 'Could not find app "{0}"'.format(app_name)
if not request:
sys.exit(1)
raise
for key in dir(app_hooks):
if not key.startswith("_"):
append_hook(hooks, key, getattr(app_hooks, key))
return hooks
if app_name:
hooks = _dict(load_app_hooks(app_name))
else:
hooks = _dict(cache().get_value("app_hooks", load_app_hooks))
if hook:
return hooks.get(hook) or (default if default is not None else [])
else:
return hooks
def append_hook(target, key, value):
'''Appends a hook to the target dict.
If the hook key does not exist, it is created.
If the hook value is a dict, like doc_events, it will
listify the values against the key.
'''
if isinstance(value, dict):
# dict? make a list of values against each key
target.setdefault(key, {})
for inkey in value:
append_hook(target[key], inkey, value[inkey])
else:
# make a list
target.setdefault(key, [])
if not isinstance(value, list):
value = [value]
target[key].extend(value)
def setup_module_map():
"""Rebuild map of all modules (internal)."""
_cache = cache()
if conf.db_name:
local.app_modules = _cache.get_value("app_modules")
local.module_app = _cache.get_value("module_app")
if not (local.app_modules and local.module_app):
local.module_app, local.app_modules = {}, {}
for app in get_all_apps(True):
if app=="webnotes": app="frappe"
local.app_modules.setdefault(app, [])
for module in get_module_list(app):
module = scrub(module)
local.module_app[module] = app
local.app_modules[app].append(module)
if conf.db_name:
_cache.set_value("app_modules", local.app_modules)
_cache.set_value("module_app", local.module_app)
def get_file_items(path, raise_not_found=False, ignore_empty_lines=True):
"""Returns items from text file as a list. Ignores empty lines."""
import frappe.utils
content = read_file(path, raise_not_found=raise_not_found)
if content:
content = frappe.utils.strip(content)
return [p.strip() for p in content.splitlines() if (not ignore_empty_lines) or (p.strip() and not p.startswith("#"))]
else:
return []
def get_file_json(path):
"""Read a file and return parsed JSON object."""
with open(path, 'r') as f:
return json.load(f)
def read_file(path, raise_not_found=False):
"""Open a file and return its content as Unicode."""
from frappe.utils import cstr
if isinstance(path, unicode):
path = path.encode("utf-8")
if os.path.exists(path):
with open(path, "r") as f:
return cstr(f.read())
elif raise_not_found:
raise IOError("{} Not Found".format(path))
else:
return None
def get_attr(method_string):
"""Get python method object from its name."""
app_name = method_string.split(".")[0]
if not local.flags.in_install and app_name not in get_installed_apps():
throw(_("App {0} is not installed").format(app_name), AppNotInstalledError)
modulename = '.'.join(method_string.split('.')[:-1])
methodname = method_string.split('.')[-1]
return getattr(get_module(modulename), methodname)
def call(fn, *args, **kwargs):
"""Call a function and match arguments."""
if isinstance(fn, basestring):
fn = get_attr(fn)
if hasattr(fn, 'fnargs'):
fnargs = fn.fnargs
else:
fnargs, varargs, varkw, defaults = inspect.getargspec(fn)
newargs = {}
for a in kwargs:
if (a in fnargs) or varkw:
newargs[a] = kwargs.get(a)
if "flags" in newargs:
del newargs["flags"]
return fn(*args, **newargs)
def make_property_setter(args, ignore_validate=False, validate_fields_for_doctype=True):
"""Create a new **Property Setter** (for overriding DocType and DocField properties).
If doctype is not specified, it will create a property setter for all fields with the
given fieldname"""
args = _dict(args)
if not args.doctype_or_field:
args.doctype_or_field = 'DocField'
if not args.property_type:
args.property_type = db.get_value('DocField',
{'parent': 'DocField', 'fieldname': args.property}, 'fieldtype') or 'Data'
if not args.doctype:
doctype_list = db.sql_list('select distinct parent from tabDocField where fieldname=%s', args.fieldname)
else:
doctype_list = [args.doctype]
for doctype in doctype_list:
if not args.property_type:
args.property_type = db.get_value('DocField',
{'parent': doctype, 'fieldname': args.fieldname}, 'fieldtype') or 'Data'
ps = get_doc({
'doctype': "Property Setter",
'doctype_or_field': args.doctype_or_field,
'doc_type': doctype,
'field_name': args.fieldname,
'property': args.property,
'value': args.value,
'property_type': args.property_type or "Data",
'__islocal': 1
})
ps.flags.ignore_validate = ignore_validate
ps.flags.validate_fields_for_doctype = validate_fields_for_doctype
ps.insert()
def import_doc(path, ignore_links=False, ignore_insert=False, insert=False):
"""Import a file using Data Import Tool."""
from frappe.core.page.data_import_tool import data_import_tool
data_import_tool.import_doc(path, ignore_links=ignore_links, ignore_insert=ignore_insert, insert=insert)
def copy_doc(doc, ignore_no_copy=True):
""" No_copy fields also get copied."""
import copy
def remove_no_copy_fields(d):
for df in d.meta.get("fields", {"no_copy": 1}):
if hasattr(d, df.fieldname):
d.set(df.fieldname, None)
fields_to_clear = ['name', 'owner', 'creation', 'modified', 'modified_by']
if not local.flags.in_test:
fields_to_clear.append("docstatus")
if not isinstance(doc, dict):
d = doc.as_dict()
else:
d = doc
newdoc = get_doc(copy.deepcopy(d))
newdoc.set("__islocal", 1)
for fieldname in (fields_to_clear + ['amended_from', 'amendment_date']):
newdoc.set(fieldname, None)
if not ignore_no_copy:
remove_no_copy_fields(newdoc)
for i, d in enumerate(newdoc.get_all_children()):
d.set("__islocal", 1)
for fieldname in fields_to_clear:
d.set(fieldname, None)
if not ignore_no_copy:
remove_no_copy_fields(d)
return newdoc
def compare(val1, condition, val2):
"""Compare two values using `frappe.utils.compare`
`condition` could be:
- "^"
- "in"
- "not in"
- "="
- "!="
- ">"
- "<"
- ">="
- "<="
- "not None"
- "None"
"""
import frappe.utils
return frappe.utils.compare(val1, condition, val2)
def respond_as_web_page(title, html, success=None, http_status_code=None, context=None):
"""Send response as a web page with a message rather than JSON. Used to show permission errors etc.
:param title: Page title and heading.
:param message: Message to be shown.
:param success: Alert message.
:param http_status_code: HTTP status code."""
local.message_title = title
local.message = html
local.message_success = success
local.response['type'] = 'page'
local.response['route'] = 'message'
if http_status_code:
local.response['http_status_code'] = http_status_code
if context:
local.response['context'] = context
def redirect_to_message(title, html, http_status_code=None, context=None):
"""Redirects to /message?id=random
Similar to respond_as_web_page, but used to 'redirect' and show message pages like success, failure, etc. with a detailed message
:param title: Page title and heading.
:param message: Message to be shown.
:param http_status_code: HTTP status code.
Example Usage:
frappe.redirect_to_message(_('Thank you'), "<div><p>You will receive an email at test@example.com</p></div>")
"""
message_id = generate_hash(length=8)
message = {
'context': context or {},
'http_status_code': http_status_code or 200
}
message['context'].update({
'header': title,
'title': title,
'message': html
})
cache().set_value("message_id:{0}".format(message_id), message, expires_in_sec=60)
location = '/message?id={0}'.format(message_id)
if not getattr(local, 'is_ajax', False):
local.response["type"] = "redirect"
local.response["location"] = location
else:
return location
def build_match_conditions(doctype, as_condition=True):
"""Return match (User permissions) for given doctype as list or SQL."""
import frappe.desk.reportview
return frappe.desk.reportview.build_match_conditions(doctype, as_condition)
def get_list(doctype, *args, **kwargs):
"""List database query via `frappe.model.db_query`. Will also check for permissions.
:param doctype: DocType on which query is to be made.
:param fields: List of fields or `*`.
:param filters: List of filters (see example).
:param order_by: Order By e.g. `modified desc`.
:param limit_page_start: Start results at record #. Default 0.
:param limit_page_length: No of records in the page. Default 20.
Example usage:
# simple dict filter
frappe.get_list("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"})
# filter as a list of lists
frappe.get_list("ToDo", fields="*", filters = [["modified", ">", "2014-01-01"]])
# filter as a list of dicts
frappe.get_list("ToDo", fields="*", filters = {"description": ("like", "test%")})
"""
import frappe.model.db_query
return frappe.model.db_query.DatabaseQuery(doctype).execute(None, *args, **kwargs)
def get_all(doctype, *args, **kwargs):
"""List database query via `frappe.model.db_query`. Will **not** check for conditions.
Parameters are same as `frappe.get_list`
:param doctype: DocType on which query is to be made.
:param fields: List of fields or `*`. Default is: `["name"]`.
:param filters: List of filters (see example).
:param order_by: Order By e.g. `modified desc`.
:param limit_page_start: Start results at record #. Default 0.
:param limit_page_length: No of records in the page. Default 20.
Example usage:
# simple dict filter
frappe.get_all("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"})
# filter as a list of lists
frappe.get_all("ToDo", fields=["*"], filters = [["modified", ">", "2014-01-01"]])
# filter as a list of dicts
frappe.get_all("ToDo", fields=["*"], filters = {"description": ("like", "test%")})
"""
kwargs["ignore_permissions"] = True
if not "limit_page_length" in kwargs:
kwargs["limit_page_length"] = 0
return get_list(doctype, *args, **kwargs)
def get_value(*args, **kwargs):
"""Returns a document property or list of properties.
Alias for `frappe.db.get_value`
:param doctype: DocType name.
:param filters: Filters like `{"x":"y"}` or name of the document. `None` if Single DocType.
:param fieldname: Column name.
:param ignore: Don't raise exception if table, column is missing.
:param as_dict: Return values as dict.
:param debug: Print query in error log.
"""
return db.get_value(*args, **kwargs)
def add_version(doc):
"""Insert a new **Version** of the given document.
A **Version** is a JSON dump of the current document state."""
get_doc({
"doctype": "Version",
"ref_doctype": doc.doctype,
"docname": doc.name,
"doclist_json": as_json(doc.as_dict())
}).insert(ignore_permissions=True)
def as_json(obj, indent=1):
from frappe.utils.response import json_handler
return json.dumps(obj, indent=indent, sort_keys=True, default=json_handler)
def are_emails_muted():
return flags.mute_emails or conf.get("mute_emails") or False
def get_test_records(doctype):
"""Returns list of objects from `test_records.json` in the given doctype's folder."""
from frappe.modules import get_doctype_module, get_module_path
path = os.path.join(get_module_path(get_doctype_module(doctype)), "doctype", scrub(doctype), "test_records.json")
if os.path.exists(path):
with open(path, "r") as f:
return json.loads(f.read())
else:
return []
def format_value(value, df, doc=None, currency=None):
"""Format value with given field properties.
:param value: Value to be formatted.
:param df: DocField object with properties `fieldtype`, `options` etc."""
import frappe.utils.formatters
return frappe.utils.formatters.format_value(value, df, doc, currency=currency)
def get_print(doctype=None, name=None, print_format=None, style=None, html=None, as_pdf=False, doc=None):
"""Get Print Format for given document.
:param doctype: DocType of document.
:param name: Name of document.
:param print_format: Print Format name. Default 'Standard',
:param style: Print Format style.
:param as_pdf: Return as PDF. Default False."""
from frappe.website.render import build_page
from frappe.utils.pdf import get_pdf
local.form_dict.doctype = doctype
local.form_dict.name = name
local.form_dict.format = print_format
local.form_dict.style = style
local.form_dict.doc = doc
if not html:
html = build_page("print")
if as_pdf:
return get_pdf(html)
else:
return html
def attach_print(doctype, name, file_name=None, print_format=None, style=None, html=None, doc=None):
from frappe.utils import scrub_urls
if not file_name: file_name = name
file_name = file_name.replace(' ','').replace('/','-')
print_settings = db.get_singles_dict("Print Settings")
local.flags.ignore_print_permissions = True
if int(print_settings.send_print_as_pdf or 0):
out = {
"fname": file_name + ".pdf",
"fcontent": get_print(doctype, name, print_format=print_format, style=style, html=html, as_pdf=True, doc=doc)
}
else:
out = {
"fname": file_name + ".html",
"fcontent": scrub_urls(get_print(doctype, name, print_format=print_format, style=style, html=html, doc=doc)).encode("utf-8")
}
local.flags.ignore_print_permissions = False
return out
def publish_progress(*args, **kwargs):
"""Show the user progress for a long request
:param percent: Percent progress
:param title: Title
:param doctype: Optional, for DocType
:param name: Optional, for Document name
"""
import frappe.async
return frappe.async.publish_progress(*args, **kwargs)
def publish_realtime(*args, **kwargs):
"""Publish real-time updates
:param event: Event name, like `task_progress` etc.
:param message: JSON message object. For async must contain `task_id`
:param room: Room in which to publish update (default entire site)
:param user: Transmit to user
:param doctype: Transmit to doctype, docname
:param docname: Transmit to doctype, docname
:param after_commit: (default False) will emit after current transaction is committed
"""
import frappe.async
return frappe.async.publish_realtime(*args, **kwargs)
def local_cache(namespace, key, generator, regenerate_if_none=False):
"""A key value store for caching within a request
:param namespace: frappe.local.cache[namespace]
:param key: frappe.local.cache[namespace][key] used to retrieve value
:param generator: method to generate a value if not found in store
"""
if namespace not in local.cache:
local.cache[namespace] = {}
if key not in local.cache[namespace]:
local.cache[namespace][key] = generator()
elif local.cache[namespace][key]==None and regenerate_if_none:
# if key exists but the previous result was None
local.cache[namespace][key] = generator()
return local.cache[namespace][key]
def get_doctype_app(doctype):
def _get_doctype_app():
doctype_module = local.db.get_value("DocType", doctype, "module")
return local.module_app[scrub(doctype_module)]
return local_cache("doctype_app", doctype, generator=_get_doctype_app)
loggers = {}
log_level = None
def logger(module=None, with_more_info=True):
'''Returns a python logger that uses StreamHandler'''
from frappe.utils.logger import get_logger
return get_logger(module or __name__, with_more_info=with_more_info)
def get_desk_link(doctype, name):
return '<a href="#Form/{0}/{1}" style="font-weight: bold;">{2} {1}</a>'.format(doctype, name, _(doctype))
def bold(text):
return '<b>{0}</b>'.format(text)
avg_line_length 31.410871 | max_line_length 180 | alphanum_fraction 0.728897

Row 6
hexsha 00fef0f8f05c031dacdaf59d4ececb299cc35aa7 | size 4,890 | ext py | lang Python
max_stars: src/v5.3/resources/swagger_client/models/ed_fi_survey_response_survey_level.py @ xmarcosx/edfi-notebook (head 0564ebdf1d0f45a9d25056e7e61369f0a837534d), licenses ["Apache-2.0"], count 2, events 2021-04-27T17:18:17.000Z to 2021-04-27T19:14:39.000Z
max_issues: src/v5.1/resources/swagger_client/models/ed_fi_survey_response_survey_level.py @ xmarcosx/edfi-notebook (head 0564ebdf1d0f45a9d25056e7e61369f0a837534d), licenses ["Apache-2.0"], count null, events null to null
max_forks: src/v5.1/resources/swagger_client/models/ed_fi_survey_response_survey_level.py @ xmarcosx/edfi-notebook (head 0564ebdf1d0f45a9d25056e7e61369f0a837534d), licenses ["Apache-2.0"], count 1, events 2022-01-06T09:43:11.000Z to 2022-01-06T09:43:11.000Z
content:
# coding: utf-8
"""
Ed-Fi Operational Data Store API
The Ed-Fi ODS / API enables applications to read and write education data stored in an Ed-Fi ODS through a secure REST interface. *** > *Note: Consumers of ODS / API information should sanitize all data for display and storage. The ODS / API provides reasonable safeguards against cross-site scripting attacks and other malicious content, but the platform does not and cannot guarantee that the data it contains is free of all potentially harmful content.* *** # noqa: E501
OpenAPI spec version: 3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.configuration import Configuration
class EdFiSurveyResponseSurveyLevel(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'survey_level_descriptor': 'str'
}
attribute_map = {
'survey_level_descriptor': 'surveyLevelDescriptor'
}
def __init__(self, survey_level_descriptor=None, _configuration=None): # noqa: E501
"""EdFiSurveyResponseSurveyLevel - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._survey_level_descriptor = None
self.discriminator = None
self.survey_level_descriptor = survey_level_descriptor
@property
def survey_level_descriptor(self):
"""Gets the survey_level_descriptor of this EdFiSurveyResponseSurveyLevel. # noqa: E501
Provides information about the respondents of a survey and how they can be grouped together. # noqa: E501
:return: The survey_level_descriptor of this EdFiSurveyResponseSurveyLevel. # noqa: E501
:rtype: str
"""
return self._survey_level_descriptor
@survey_level_descriptor.setter
def survey_level_descriptor(self, survey_level_descriptor):
"""Sets the survey_level_descriptor of this EdFiSurveyResponseSurveyLevel.
Provides information about the respondents of a survey and how they can be grouped together. # noqa: E501
:param survey_level_descriptor: The survey_level_descriptor of this EdFiSurveyResponseSurveyLevel. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and survey_level_descriptor is None:
raise ValueError("Invalid value for `survey_level_descriptor`, must not be `None`") # noqa: E501
if (self._configuration.client_side_validation and
survey_level_descriptor is not None and len(survey_level_descriptor) > 306):
raise ValueError("Invalid value for `survey_level_descriptor`, length must be less than or equal to `306`") # noqa: E501
self._survey_level_descriptor = survey_level_descriptor
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(EdFiSurveyResponseSurveyLevel, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EdFiSurveyResponseSurveyLevel):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, EdFiSurveyResponseSurveyLevel):
return True
return self.to_dict() != other.to_dict()
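# A minimal usage sketch (added for illustration; the descriptor URI below is a
# hypothetical value, and swagger_client is assumed to be importable):
#
#     level = EdFiSurveyResponseSurveyLevel(
#         survey_level_descriptor="uri://ed-fi.org/SurveyLevelDescriptor#School")
#     level.to_dict()   # -> {'survey_level_descriptor': 'uri://...#School'}
#     print(level)      # pprint-formatted output via to_str()/__repr__()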
| 37.615385
| 482
| 0.646012
|
8bad285ffd8c126e1ff2dea69bb3c438f06b28f0
| 6,454
|
py
|
Python
|
airflow/contrib/operators/ecs_operator.py
|
dossett/incubator-airflow
|
60583a3c6d1c4b5bbecaad6cd195301107530de9
|
[
"Apache-2.0"
] | 1
|
2019-06-04T06:09:55.000Z
|
2019-06-04T06:09:55.000Z
|
airflow/contrib/operators/ecs_operator.py
|
berniechiu/incubator-airflow
|
b85210d8596c1e05fbf3da2941aaca8170ae871e
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 4
|
2018-01-31T19:28:31.000Z
|
2019-03-07T15:56:45.000Z
|
airflow/contrib/operators/ecs_operator.py
|
berniechiu/incubator-airflow
|
b85210d8596c1e05fbf3da2941aaca8170ae871e
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2
|
2021-05-14T09:19:52.000Z
|
2022-01-12T19:08:31.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.contrib.hooks.aws_hook import AwsHook
class ECSOperator(BaseOperator):
"""
Execute a task on AWS EC2 Container Service
:param task_definition: the task definition name on EC2 Container Service
:type task_definition: str
:param cluster: the cluster name on EC2 Container Service
:type cluster: str
:param overrides: the same parameter that boto3 will receive (templated):
http://boto3.readthedocs.org/en/latest/reference/services/ecs.html#ECS.Client.run_task
:type overrides: dict
:param aws_conn_id: connection id of AWS credentials / region name. If None,
credential boto3 strategy will be used
(http://boto3.readthedocs.io/en/latest/guide/configuration.html).
:type aws_conn_id: str
:param region_name: region name to use in AWS Hook.
Override the region_name in connection (if provided)
:type region_name: str
:param launch_type: the launch type on which to run your task ('EC2' or 'FARGATE')
:type launch_type: str
:param group: the name of the task group associated with the task
:type group: str
:param placement_constraints: an array of placement constraint objects to use for
the task
:type placement_constraints: list
:param platform_version: the platform version on which your task is running
:type platform_version: str
:param network_configuration: the network configuration for the task
:type network_configuration: dict
"""
ui_color = '#f0ede4'
client = None
arn = None
template_fields = ('overrides',)
@apply_defaults
def __init__(self, task_definition, cluster, overrides,
aws_conn_id=None, region_name=None, launch_type='EC2',
group=None, placement_constraints=None, platform_version='LATEST',
network_configuration=None, **kwargs):
super(ECSOperator, self).__init__(**kwargs)
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.task_definition = task_definition
self.cluster = cluster
self.overrides = overrides
self.launch_type = launch_type
self.group = group
self.placement_constraints = placement_constraints
self.platform_version = platform_version
self.network_configuration = network_configuration
self.hook = self.get_hook()
def execute(self, context):
self.log.info(
'Running ECS Task - Task definition: %s - on cluster %s',
self.task_definition, self.cluster
)
self.log.info('ECSOperator overrides: %s', self.overrides)
self.client = self.hook.get_client_type(
'ecs',
region_name=self.region_name
)
run_opts = {
'cluster': self.cluster,
'taskDefinition': self.task_definition,
'overrides': self.overrides,
'startedBy': self.owner,
'launchType': self.launch_type,
'platformVersion': self.platform_version,
}
if self.group is not None:
run_opts['group'] = self.group
if self.placement_constraints is not None:
run_opts['placementConstraints'] = self.placement_constraints
if self.network_configuration is not None:
run_opts['networkConfiguration'] = self.network_configuration
response = self.client.run_task(**run_opts)
failures = response['failures']
if len(failures) > 0:
raise AirflowException(response)
self.log.info('ECS Task started: %s', response)
self.arn = response['tasks'][0]['taskArn']
self._wait_for_task_ended()
self._check_success_task()
self.log.info('ECS Task has been successfully executed: %s', response)
def _wait_for_task_ended(self):
waiter = self.client.get_waiter('tasks_stopped')
waiter.config.max_attempts = sys.maxsize # timeout is managed by airflow
waiter.wait(
cluster=self.cluster,
tasks=[self.arn]
)
def _check_success_task(self):
response = self.client.describe_tasks(
cluster=self.cluster,
tasks=[self.arn]
)
self.log.info('ECS Task stopped, check status: %s', response)
if len(response.get('failures', [])) > 0:
raise AirflowException(response)
for task in response['tasks']:
containers = task['containers']
for container in containers:
if container.get('lastStatus') == 'STOPPED' and \
container['exitCode'] != 0:
raise AirflowException(
'This task is not in success state {}'.format(task))
elif container.get('lastStatus') == 'PENDING':
raise AirflowException('This task is still pending {}'.format(task))
elif 'error' in container.get('reason', '').lower():
raise AirflowException(
                        'This container encountered an error during launching: {}'.
format(container.get('reason', '').lower()))
def get_hook(self):
return AwsHook(
aws_conn_id=self.aws_conn_id
)
def on_kill(self):
response = self.client.stop_task(
cluster=self.cluster,
task=self.arn,
reason='Task killed by the user')
self.log.info(response)
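# A usage sketch (added for illustration; the task definition, cluster and
# connection values below are hypothetical, and an Airflow DAG object `dag`
# is assumed to exist):
#
#     run_task = ECSOperator(
#         task_id='run_my_task',
#         task_definition='my-task-def',
#         cluster='my-cluster',
#         overrides={'containerOverrides': []},
#         aws_conn_id='aws_default',
#         region_name='us-east-1',
#         launch_type='FARGATE',
#         dag=dag,
#     )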
| 38.879518
| 94
| 0.650449
|
f3eb3e6ca23b840bd6bf1b2944473cf9520b9b96
| 2,691
|
py
|
Python
|
binarySearch/binarySearchSimulation.py
|
trthanhquang/wayback-data-collector
|
304172bd16135321d997dc13970feb7c8047c3be
|
[
"Apache-2.0"
] | 1
|
2015-11-20T17:22:57.000Z
|
2015-11-20T17:22:57.000Z
|
binarySearch/binarySearchSimulation.py
|
trthanhquang/wayback-data-collector
|
304172bd16135321d997dc13970feb7c8047c3be
|
[
"Apache-2.0"
] | null | null | null |
binarySearch/binarySearchSimulation.py
|
trthanhquang/wayback-data-collector
|
304172bd16135321d997dc13970feb7c8047c3be
|
[
"Apache-2.0"
] | null | null | null |
from bisect import bisect_left
from Queue import *
import random
randomSeed = 123456789
# randomSeed = random.random()*123456789
noVersion = 10
listSize = 200
itemList = None #url list
crawledlist = None #downloaded page
downloadCount = None
def init():
global itemList,crawledlist, downloadCount
newList = []
crawledlist = []
downloadCount = 0
random.seed(randomSeed)
for i in range(listSize):
newItem = int(random.random()*noVersion)
newList.append(newItem)
crawledlist.append(0)
itemList = sorted(newList)
def download(dlist):
global downloadCount
download = False
for i in dlist:
if(crawledlist[i]==0):
crawledlist[i]=1
download = True
if download:
downloadCount = downloadCount+1
def getDivideIndex(lo,hi,level):
indexList = []
if level == 0:
return indexList
mid = lo + (hi-lo)/2
indexList.append(mid)
indexList.extend(getDivideIndex(lo,mid,level-1))
indexList.extend(getDivideIndex(mid,hi,level-1))
return set(indexList)
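# Worked example (added for illustration): getDivideIndex(0, 8, 2) first takes
# mid = 4, then recurses on (0, 4) and (4, 8) with level 1, producing mids 2
# and 6, so the call returns set([2, 4, 6]).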
def binarySearchDiff(lo, hi):
digLevel = 6
download(getDivideIndex(lo,hi,digLevel))
searchItem = itemList[lo]
if(itemList[hi]==searchItem):
return -1
while(lo<=hi):
if(digLevel == 0):
print 'additional dig at (%s,%s)'%(lo,hi)
digLevel = 6
download(getDivideIndex(lo,hi,digLevel))
mid = lo+(hi-lo)/2
# print 'lo= %s, hi=%s, mid=%s'%(lo,hi,mid)
if(itemList[mid]!=searchItem):
hi = mid -1
else:
lo = mid +1
digLevel = digLevel -1
return lo
def refineSearchRange(lo,hi):
searchItem = itemList[lo]
for i in range(lo,hi,1):
if(crawledlist[i]==1):
if(itemList[i]==searchItem):
lo = i
else:
hi = i
break
# print 'update range from %s to %s'%(lo,hi)
return (lo,hi)
def normalSearch():
init()
# print itemList
lo = 0
endIndex = len(itemList)-1
while(lo!=-1):
hi = endIndex
# (lo,hi)= refineSearchRange(lo,hi)
lo = binarySearchDiff(lo,hi)
# print 'index=%s, item=%s'%(lo,itemList[lo])
visited = sum(crawledlist)
percentage = 100.0*visited/len(itemList)
print 'normalSearch: download {2} times ({0} snapshots downloaded, {1:.3g}%)'.format(visited,percentage,downloadCount)
def advanceSearch():
init()
# print itemList
lo = 0
endIndex = len(itemList)-1
while(lo!=-1):
hi = endIndex
(lo,hi)= refineSearchRange(lo,hi)
lo= binarySearchDiff(lo,hi)
print 'index=%s, item=%s'%(lo,itemList[lo])
visited = sum(crawledlist)
percentage = 100.0*visited/len(itemList)
print 'advanceSearch: download {2} times ({0} snapshots downloaded, {1:.3g}%)'.format(visited,percentage,downloadCount)
def main():
global noVersion,listSize
noVersion = 20
listSize = 2000
# normalSearch()
advanceSearch()
if __name__ == '__main__':
main()
| 20.860465
| 120
| 0.687477
|
99340da8591505b6addfc4e3e26d99de41be9b27
| 786
|
py
|
Python
|
renku/service/views/v1_0/__init__.py
|
lokijuhy/renku-python
|
0bfceafa4e6b4750439ab0ed20c61b0a6ba03a1f
|
[
"Apache-2.0"
] | null | null | null |
renku/service/views/v1_0/__init__.py
|
lokijuhy/renku-python
|
0bfceafa4e6b4750439ab0ed20c61b0a6ba03a1f
|
[
"Apache-2.0"
] | null | null | null |
renku/service/views/v1_0/__init__.py
|
lokijuhy/renku-python
|
0bfceafa4e6b4750439ab0ed20c61b0a6ba03a1f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2020 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Renku service v1.0 views."""
| 41.368421
| 75
| 0.751908
|
bd81db15d34fbfd3ba18a575531c439585e5236b
| 1,251
|
py
|
Python
|
app/models/place.py
|
hashtrip/api
|
1a968f47dbd5c9d24fb45f3a6360505009f6c4d8
|
[
"MIT"
] | 1
|
2021-01-30T08:38:15.000Z
|
2021-01-30T08:38:15.000Z
|
app/models/place.py
|
hashtrip/backend
|
1a968f47dbd5c9d24fb45f3a6360505009f6c4d8
|
[
"MIT"
] | null | null | null |
app/models/place.py
|
hashtrip/backend
|
1a968f47dbd5c9d24fb45f3a6360505009f6c4d8
|
[
"MIT"
] | null | null | null |
from typing import List, Optional
from pydantic import Field
from .dbmodel import DateTimeModelMixin, DBModelMixin
from .util import GeoJson, Time
from .profile import Profile
from .rwmodel import RWModel
class PlaceFilterParams(RWModel):
tag: str = ""
author: str = ""
favorited: str = ""
limit: int = 20
offset: int = 0
class PlaceBase(RWModel):
title: str
description: str
location: GeoJson
body: str
capacity: int
time_start: Optional[Time] = Field(None, alias="timeStart")
time_end: Optional[Time] = Field(None, alias="timeEnd")
tag_list: List[str] = Field([], alias="tagList")
class Place(DateTimeModelMixin, PlaceBase):
slug: str
author: Profile
favorited: bool
favorites_count: int = Field(..., alias="favoritesCount")
class PlaceInDB(DBModelMixin, Place):
pass
class PlaceInResponse(RWModel):
place: Place
class ManyPlacesInResponse(RWModel):
places: List[Place]
places_count: int = Field(..., alias="placesCount")
class PlaceInCreate(PlaceBase):
pass
class PlaceInUpdate(RWModel):
title: Optional[str] = None
description: Optional[str] = None
body: Optional[str] = None
tag_list: List[str] = Field([], alias="tagList")
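# A minimal usage sketch (added for illustration; assumes RWModel behaves like
# a standard pydantic BaseModel, so fields with an alias are populated by that
# alias):
#
#     params = PlaceFilterParams(tag="beach", limit=10)
#     update = PlaceInUpdate(title="New title", tagList=["sea", "sand"])
#     update.dict(by_alias=True)
#     # -> {'title': 'New title', 'description': None, 'body': None,
#     #     'tagList': ['sea', 'sand']}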
| 21.20339
| 63
| 0.688249
|
c8e0607aaf509219bea9178014e95b68c1fe7da3
| 298
|
py
|
Python
|
generate/common.py
|
xx-li/CustomTemplates
|
aa0a1ebc6788fe58babd0324190bae7ca00d2351
|
[
"MIT"
] | 1
|
2021-07-15T01:08:28.000Z
|
2021-07-15T01:08:28.000Z
|
generate/common.py
|
xx-li/CustomTemplates
|
aa0a1ebc6788fe58babd0324190bae7ca00d2351
|
[
"MIT"
] | null | null | null |
generate/common.py
|
xx-li/CustomTemplates
|
aa0a1ebc6788fe58babd0324190bae7ca00d2351
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from enum import Enum
class TemplateType(Enum):
Blank = "BlankCode"
BlankXib = "BlankXib"
List = "ListCode"
ListXib = "ListXib"
ListEdit = "ListEditCode"
ListEditXib = "ListEditXib"
    MultiPage = "MultiPageCode"
    MultiPageXib = "MultiPageXib"
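# A small usage sketch (added for illustration): enum members can be read by
# name or looked up by value, e.g.
#
#     TemplateType.List.value          # -> "ListCode"
#     TemplateType("ListXib")          # -> TemplateType.ListXib
#     [t.value for t in TemplateType]  # all template identifiers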
| 18.625
| 32
| 0.647651
|
32e31857ffdcd390a4920b6946df4f889464e411
| 12,367
|
py
|
Python
|
pytorch_tutorials/cifar10_tutorial.py
|
adam-dziedzic/time-series-ml
|
81aaa27f1dd9ea3d7d62b661dac40cac6c1ef77a
|
[
"Apache-2.0"
] | 1
|
2018-03-25T13:19:46.000Z
|
2018-03-25T13:19:46.000Z
|
pytorch_tutorials/cifar10_tutorial.py
|
adam-dziedzic/time-series-ml
|
81aaa27f1dd9ea3d7d62b661dac40cac6c1ef77a
|
[
"Apache-2.0"
] | null | null | null |
pytorch_tutorials/cifar10_tutorial.py
|
adam-dziedzic/time-series-ml
|
81aaa27f1dd9ea3d7d62b661dac40cac6c1ef77a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Training a classifier
=====================
This is it. You have seen how to define neural networks, compute loss and make
updates to the weights of the network.
Now you might be thinking,
What about data?
----------------
Generally, when you have to deal with image, text, audio or video data,
you can use standard python packages that load data into a numpy array.
Then you can convert this array into a ``torch.*Tensor``.
- For images, packages such as Pillow, OpenCV are useful
- For audio, packages such as scipy and librosa
- For text, either raw Python or Cython based loading, or NLTK and
SpaCy are useful
Specifically for vision, we have created a package called
``torchvision``, that has data loaders for common datasets such as
Imagenet, CIFAR10, MNIST, etc. and data transformers for images, viz.,
``torchvision.datasets`` and ``torch.utils.data.DataLoader``.
This provides a huge convenience and avoids writing boilerplate code.
For this tutorial, we will use the CIFAR10 dataset.
It has the classes: ‘airplane’, ‘automobile’, ‘bird’, ‘cat’, ‘deer’,
‘dog’, ‘frog’, ‘horse’, ‘ship’, ‘truck’. The images in CIFAR-10 are of
size 3x32x32, i.e. 3-channel color images of 32x32 pixels in size.
.. figure:: /_static/img/cifar10.png
:alt: cifar10
cifar10
Training an image classifier
----------------------------
We will do the following steps in order:
1. Load and normalize the CIFAR10 training and test datasets using
``torchvision``
2. Define a Convolution Neural Network
3. Define a loss function
4. Train the network on the training data
5. Test the network on the test data
1. Loading and normalizing CIFAR10
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Using ``torchvision``, it’s extremely easy to load CIFAR10.
"""
import torch
import torchvision
import torchvision.transforms as transforms
########################################################################
# The outputs of torchvision datasets are PILImage images of range [0, 1].
# We transform them to Tensors of normalized range [-1, 1].
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
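# (Added note, not in the original tutorial.) Normalize with mean 0.5 and
# std 0.5 computes, per channel,
#     x_norm = (x - 0.5) / 0.5      # e.g. 0.0 -> -1.0, 0.5 -> 0.0, 1.0 -> +1.0
# which is the "normalized range [-1, 1]" mentioned above; the imshow helper
# defined below reverses it with img / 2 + 0.5.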
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
########################################################################
# Let us show some of the training images, for fun.
import matplotlib.pyplot as plt
import numpy as np
# functions to show an image
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
# get some random training images
dataiter = iter(trainloader)
images, labels = dataiter.next()
# show images
imshow(torchvision.utils.make_grid(images))
# print labels
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))
########################################################################
# 2. Define a Convolution Neural Network
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Copy the neural network from the Neural Networks section before and modify it to
# take 3-channel images (instead of 1-channel images as it was defined).
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
net = Net()
########################################################################
# 3. Define a Loss function and optimizer
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Let's use a Classification Cross-Entropy loss and SGD with momentum.
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
########################################################################
# 4. Train the network
# ^^^^^^^^^^^^^^^^^^^^
#
# This is when things start to get interesting.
# We simply have to loop over our data iterator, and feed the inputs to the
# network and optimize.
for epoch in range(2): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs
inputs, labels = data
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 2000))
running_loss = 0.0
print('Finished Training')
########################################################################
# 5. Test the network on the test data
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# We have trained the network for 2 passes over the training dataset.
# But we need to check if the network has learnt anything at all.
#
# We will check this by predicting the class label that the neural network
# outputs, and checking it against the ground-truth. If the prediction is
# correct, we add the sample to the list of correct predictions.
#
# Okay, first step. Let us display an image from the test set to get familiar.
dataiter = iter(testloader)
images, labels = dataiter.next()
# print images
imshow(torchvision.utils.make_grid(images))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))
########################################################################
# Okay, now let us see what the neural network thinks these examples above are:
outputs = net(images)
########################################################################
# The outputs are energies for the 10 classes.
# Higher the energy for a class, the more the network
# thinks that the image is of the particular class.
# So, let's get the index of the highest energy:
_, predicted = torch.max(outputs, 1)
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]]
for j in range(4)))
########################################################################
# The results seem pretty good.
#
# Let us look at how the network performs on the whole dataset.
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
images, labels = data
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))
########################################################################
# That looks waaay better than chance, which is 10% accuracy (randomly picking
# a class out of 10 classes).
# Seems like the network learnt something.
#
# Hmmm, what are the classes that performed well, and the classes that did
# not perform well:
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
for data in testloader:
images, labels = data
outputs = net(images)
_, predicted = torch.max(outputs, 1)
c = (predicted == labels).squeeze()
for i in range(4):
label = labels[i]
class_correct[label] += c[i].item()
class_total[label] += 1
for i in range(10):
print('Accuracy of %5s : %2d %%' % (
classes[i], 100 * class_correct[i] / class_total[i]))
########################################################################
# Okay, so what next?
#
# How do we run these neural networks on the GPU?
#
# Training on GPU
# ----------------
# Just like how you transfer a Tensor on to the GPU, you transfer the neural
# net onto the GPU.
#
# Let's first define our device as the first visible cuda device if we have
# CUDA available:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Assume that we are on a CUDA machine, then this should print a CUDA device:
print(device)
########################################################################
# The rest of this section assumes that `device` is a CUDA device.
#
# Then these methods will recursively go over all modules and convert their
# parameters and buffers to CUDA tensors:
#
# .. code:: python
#
# net.to(device)
#
#
# Remember that you will have to send the inputs and targets at every step
# to the GPU too:
#
# .. code:: python
#
# inputs, labels = inputs.to(device), labels.to(device)
#
# Why don't I notice a MASSIVE speedup compared to CPU? Because your network
# is realllly small.
#
# **Exercise:** Try increasing the width of your network (argument 2 of
# the first ``nn.Conv2d``, and argument 1 of the second ``nn.Conv2d`` –
# they need to be the same number), see what kind of speedup you get.
#
# **Goals achieved**:
#
# - Understanding PyTorch's Tensor library and neural networks at a high level.
# - Train a small neural network to classify images
#
# Training on multiple GPUs
# -------------------------
# If you want to see even more MASSIVE speedup using all of your GPUs,
# please check out :doc:`data_parallel_tutorial`.
#
# Where do I go next?
# -------------------
#
# - :doc:`Train neural nets to play video games </intermediate/reinforcement_q_learning>`
# - `Train a state-of-the-art ResNet network on imagenet`_
# - `Train a face generator using Generative Adversarial Networks`_
# - `Train a word-level language model using Recurrent LSTM networks`_
# - `More examples`_
# - `More tutorials`_
# - `Discuss PyTorch on the Forums`_
# - `Chat with other users on Slack`_
#
# .. _Train a state-of-the-art ResNet network on imagenet: https://github.com/pytorch/examples/tree/master/imagenet
# .. _Train a face generator using Generative Adversarial Networks: https://github.com/pytorch/examples/tree/master/dcgan
# .. _Train a word-level language model using Recurrent LSTM networks: https://github.com/pytorch/examples/tree/master/word_language_model
# .. _More examples: https://github.com/pytorch/examples
# .. _More tutorials: https://github.com/pytorch/tutorials
# .. _Discuss PyTorch on the Forums: https://discuss.pytorch.org/
# .. _Chat with other users on Slack: http://pytorch.slack.com/messages/beginner/
"""
Full output:
/home/adam/anaconda3/bin/python /home/adam/code/time-series-ml/cnns/pytorch_tutorials/cifar10_tutorial.py
Downloading http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to ./data/cifar-10-python.tar.gz
Files already downloaded and verified
cat dog frog plane
[1, 2000] loss: 2.208
[1, 4000] loss: 1.882
[1, 6000] loss: 1.706
[1, 8000] loss: 1.600
[1, 10000] loss: 1.529
[1, 12000] loss: 1.486
[2, 2000] loss: 1.416
[2, 4000] loss: 1.368
[2, 6000] loss: 1.341
[2, 8000] loss: 1.328
[2, 10000] loss: 1.297
[2, 12000] loss: 1.277
Finished Training
GroundTruth: cat ship ship plane
Predicted: dog car ship ship
Accuracy of the network on the 10000 test images: 53 %
Accuracy of plane : 53 %
Accuracy of car : 83 %
Accuracy of bird : 48 %
Accuracy of cat : 10 %
Accuracy of deer : 31 %
Accuracy of dog : 58 %
Accuracy of frog : 71 %
Accuracy of horse : 66 %
Accuracy of ship : 66 %
Accuracy of truck : 47 %
cpu
Process finished with exit code 0
"""
| 33.334232
| 138
| 0.61656
|
b917540d8e25a1a24b074791de2b51eff10e98c9
| 8,726
|
py
|
Python
|
tests/test_domain_py.py
|
balabit-deps/balabit-os-7-sphinx
|
4e18ca37f4ddddf346c0b30835a544db20887259
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_domain_py.py
|
balabit-deps/balabit-os-7-sphinx
|
4e18ca37f4ddddf346c0b30835a544db20887259
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_domain_py.py
|
balabit-deps/balabit-os-7-sphinx
|
4e18ca37f4ddddf346c0b30835a544db20887259
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
test_domain_py
~~~~~~~~~~~~~~
Tests the Python Domain
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import pytest
from mock import Mock
from six import text_type
from docutils import nodes
from sphinx import addnodes
from sphinx.domains.python import py_sig_re, _pseudo_parse_arglist, PythonDomain
from sphinx.testing.util import assert_node
def parse(sig):
m = py_sig_re.match(sig)
if m is None:
raise ValueError
name_prefix, name, arglist, retann = m.groups()
signode = addnodes.desc_signature(sig, '')
_pseudo_parse_arglist(signode, arglist)
return signode.astext()
def test_function_signatures():
rv = parse('func(a=1) -> int object')
assert text_type(rv) == u'a=1'
rv = parse('func(a=1, [b=None])')
assert text_type(rv) == u'a=1, [b=None]'
rv = parse('func(a=1[, b=None])')
assert text_type(rv) == u'a=1, [b=None]'
rv = parse("compile(source : string, filename, symbol='file')")
assert text_type(rv) == u"source : string, filename, symbol='file'"
rv = parse('func(a=[], [b=None])')
assert text_type(rv) == u'a=[], [b=None]'
rv = parse('func(a=[][, b=None])')
assert text_type(rv) == u'a=[], [b=None]'
@pytest.mark.sphinx('dummy', testroot='domain-py')
def test_domain_py_xrefs(app, status, warning):
"""Domain objects have correct prefixes when looking up xrefs"""
app.builder.build_all()
def assert_refnode(node, module_name, class_name, target, reftype=None,
domain='py'):
attributes = {
'refdomain': domain,
'reftarget': target,
}
if reftype is not None:
attributes['reftype'] = reftype
if module_name is not False:
attributes['py:module'] = module_name
if class_name is not False:
attributes['py:class'] = class_name
assert_node(node, **attributes)
doctree = app.env.get_doctree('roles')
refnodes = list(doctree.traverse(addnodes.pending_xref))
assert_refnode(refnodes[0], None, None, u'TopLevel', u'class')
assert_refnode(refnodes[1], None, None, u'top_level', u'meth')
assert_refnode(refnodes[2], None, u'NestedParentA', u'child_1', u'meth')
assert_refnode(refnodes[3], None, u'NestedParentA',
u'NestedChildA.subchild_2', u'meth')
assert_refnode(refnodes[4], None, u'NestedParentA', u'child_2', u'meth')
assert_refnode(refnodes[5], False, u'NestedParentA', u'any_child', domain='')
assert_refnode(refnodes[6], None, u'NestedParentA', u'NestedChildA',
u'class')
assert_refnode(refnodes[7], None, u'NestedParentA.NestedChildA',
u'subchild_2', u'meth')
assert_refnode(refnodes[8], None, u'NestedParentA.NestedChildA',
u'NestedParentA.child_1', u'meth')
assert_refnode(refnodes[9], None, u'NestedParentA',
u'NestedChildA.subchild_1', u'meth')
assert_refnode(refnodes[10], None, u'NestedParentB', u'child_1', u'meth')
assert_refnode(refnodes[11], None, u'NestedParentB', u'NestedParentB',
u'class')
assert_refnode(refnodes[12], None, None, u'NestedParentA.NestedChildA',
u'class')
assert len(refnodes) == 13
doctree = app.env.get_doctree('module')
refnodes = list(doctree.traverse(addnodes.pending_xref))
assert_refnode(refnodes[0], 'module_a.submodule', None,
'ModTopLevel', 'class')
assert_refnode(refnodes[1], 'module_a.submodule', 'ModTopLevel',
'mod_child_1', 'meth')
assert_refnode(refnodes[2], 'module_a.submodule', 'ModTopLevel',
'ModTopLevel.mod_child_1', 'meth')
assert_refnode(refnodes[3], 'module_a.submodule', 'ModTopLevel',
'mod_child_2', 'meth')
assert_refnode(refnodes[4], 'module_a.submodule', 'ModTopLevel',
'module_a.submodule.ModTopLevel.mod_child_1', 'meth')
assert_refnode(refnodes[5], 'module_b.submodule', None,
'ModTopLevel', 'class')
assert_refnode(refnodes[6], 'module_b.submodule', 'ModTopLevel',
'ModNoModule', 'class')
assert_refnode(refnodes[7], False, False, 'int', 'obj')
assert_refnode(refnodes[8], False, False, 'tuple', 'obj')
assert_refnode(refnodes[9], False, False, 'str', 'obj')
assert_refnode(refnodes[10], False, False, 'float', 'obj')
assert len(refnodes) == 11
doctree = app.env.get_doctree('module_option')
refnodes = list(doctree.traverse(addnodes.pending_xref))
print(refnodes)
print(refnodes[0])
print(refnodes[1])
assert_refnode(refnodes[0], 'test.extra', 'B', 'foo', 'meth')
assert_refnode(refnodes[1], 'test.extra', 'B', 'foo', 'meth')
assert len(refnodes) == 2
@pytest.mark.sphinx('dummy', testroot='domain-py')
def test_domain_py_objects(app, status, warning):
app.builder.build_all()
modules = app.env.domains['py'].data['modules']
objects = app.env.domains['py'].data['objects']
assert 'module_a.submodule' in modules
assert 'module_a.submodule' in objects
assert 'module_b.submodule' in modules
assert 'module_b.submodule' in objects
assert objects['module_a.submodule.ModTopLevel'] == ('module', 'class')
assert objects['module_a.submodule.ModTopLevel.mod_child_1'] == ('module', 'method')
assert objects['module_a.submodule.ModTopLevel.mod_child_2'] == ('module', 'method')
assert 'ModTopLevel.ModNoModule' not in objects
assert objects['ModNoModule'] == ('module', 'class')
assert objects['module_b.submodule.ModTopLevel'] == ('module', 'class')
assert objects['TopLevel'] == ('roles', 'class')
assert objects['top_level'] == ('roles', 'method')
assert objects['NestedParentA'] == ('roles', 'class')
assert objects['NestedParentA.child_1'] == ('roles', 'method')
assert objects['NestedParentA.any_child'] == ('roles', 'method')
assert objects['NestedParentA.NestedChildA'] == ('roles', 'class')
assert objects['NestedParentA.NestedChildA.subchild_1'] == ('roles', 'method')
assert objects['NestedParentA.NestedChildA.subchild_2'] == ('roles', 'method')
assert objects['NestedParentA.child_2'] == ('roles', 'method')
assert objects['NestedParentB'] == ('roles', 'class')
assert objects['NestedParentB.child_1'] == ('roles', 'method')
@pytest.mark.sphinx('dummy', testroot='domain-py')
def test_domain_py_find_obj(app, status, warning):
def find_obj(modname, prefix, obj_name, obj_type, searchmode=0):
return app.env.domains['py'].find_obj(
app.env, modname, prefix, obj_name, obj_type, searchmode)
app.builder.build_all()
assert (find_obj(None, None, u'NONEXISTANT', u'class') ==
[])
assert (find_obj(None, None, u'NestedParentA', u'class') ==
[(u'NestedParentA', (u'roles', u'class'))])
assert (find_obj(None, None, u'NestedParentA.NestedChildA', u'class') ==
[(u'NestedParentA.NestedChildA', (u'roles', u'class'))])
assert (find_obj(None, 'NestedParentA', u'NestedChildA', u'class') ==
[(u'NestedParentA.NestedChildA', (u'roles', u'class'))])
assert (find_obj(None, None, u'NestedParentA.NestedChildA.subchild_1', u'meth') ==
[(u'NestedParentA.NestedChildA.subchild_1', (u'roles', u'method'))])
assert (find_obj(None, u'NestedParentA', u'NestedChildA.subchild_1', u'meth') ==
[(u'NestedParentA.NestedChildA.subchild_1', (u'roles', u'method'))])
assert (find_obj(None, u'NestedParentA.NestedChildA', u'subchild_1', u'meth') ==
[(u'NestedParentA.NestedChildA.subchild_1', (u'roles', u'method'))])
def test_get_full_qualified_name():
env = Mock(domaindata={})
domain = PythonDomain(env)
# non-python references
node = nodes.reference()
assert domain.get_full_qualified_name(node) is None
# simple reference
node = nodes.reference(reftarget='func')
assert domain.get_full_qualified_name(node) == 'func'
# with py:module context
kwargs = {'py:module': 'module1'}
node = nodes.reference(reftarget='func', **kwargs)
assert domain.get_full_qualified_name(node) == 'module1.func'
# with py:class context
kwargs = {'py:class': 'Class'}
node = nodes.reference(reftarget='func', **kwargs)
assert domain.get_full_qualified_name(node) == 'Class.func'
# with both py:module and py:class context
kwargs = {'py:module': 'module1', 'py:class': 'Class'}
node = nodes.reference(reftarget='func', **kwargs)
assert domain.get_full_qualified_name(node) == 'module1.Class.func'
| 41.35545
| 88
| 0.649897
|
ffdb43a1df2671f5ace274edff96d15196e2dfaf
| 1,071
|
py
|
Python
|
msmart/const.py
|
rokam/midea-msmart
|
41f957e330b9d679cbdedb4972efa88184e8c63f
|
[
"MIT"
] | 1
|
2021-11-26T09:19:05.000Z
|
2021-11-26T09:19:05.000Z
|
msmart/const.py
|
rokam/midea-msmart
|
41f957e330b9d679cbdedb4972efa88184e8c63f
|
[
"MIT"
] | null | null | null |
msmart/const.py
|
rokam/midea-msmart
|
41f957e330b9d679cbdedb4972efa88184e8c63f
|
[
"MIT"
] | null | null | null |
VERSION = '0.1.33'
BROADCAST_MSG = bytearray([
0x5a, 0x5a, 0x01, 0x11, 0x48, 0x00, 0x92, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x7f, 0x75, 0xbd, 0x6b, 0x3e, 0x4f, 0x8b, 0x76,
0x2e, 0x84, 0x9c, 0x6e, 0x57, 0x8d, 0x65, 0x90,
0x03, 0x6e, 0x9d, 0x43, 0x42, 0xa5, 0x0f, 0x1f,
0x56, 0x9e, 0xb8, 0xec, 0x91, 0x8e, 0x92, 0xe5
])
DEVICE_INFO_MSG = bytearray([
0x5a, 0x5a, 0x15, 0x00, 0x00, 0x38, 0x00, 0x04,
0x00, 0x00, 0x00, 0x00, 0x00, 0x27, 0x33, 0x05,
0x13, 0x06, 0x14, 0x14, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x03, 0xe8, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xca, 0x8d, 0x9b, 0xf9, 0xa0, 0x30, 0x1a, 0xe3,
0xb7, 0xe4, 0x2d, 0x53, 0x49, 0x47, 0x62, 0xbe
])
MSGTYPE_HANDSHAKE_REQUEST = 0x0
MSGTYPE_HANDSHAKE_RESPONSE = 0x1
MSGTYPE_ENCRYPTED_RESPONSE = 0x3
MSGTYPE_ENCRYPTED_REQUEST = 0x6
MSGTYPE_TRANSPARENT = 0xf
| 36.931034
| 51
| 0.658263
|
1e9581e034bf7359333bd0ed25edff6997802ad5
| 2,182
|
py
|
Python
|
challange_accepted.py
|
tborisova/hackfmi4
|
5e9fd056c09d4e36e24a04a3eb70d480aa6a664d
|
[
"MIT"
] | 1
|
2017-01-29T21:39:50.000Z
|
2017-01-29T21:39:50.000Z
|
challange_accepted.py
|
tborisova/hackfmi4
|
5e9fd056c09d4e36e24a04a3eb70d480aa6a664d
|
[
"MIT"
] | null | null | null |
challange_accepted.py
|
tborisova/hackfmi4
|
5e9fd056c09d4e36e24a04a3eb70d480aa6a664d
|
[
"MIT"
] | null | null | null |
import socket
import sys
import pygame
pygame.init()
from hardcoded import Layout
import images
from client import *
from parent_server import GameServer
FPS = 60
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
def check_for_internet_conection():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(('8.8.8.8', 0))
return s.getsockname()[0]
except OSError:
return None
def init_graphics():
logo = pygame.transform.scale(images.images["logo"], (32, 32))
pygame.display.set_icon(logo)
pygame.display.set_caption("Challenge Accepted", "Challenge Accepted")
# if __name__ == "__main__":
#
# s = GameServer(localaddr=('10.0.201.111', 22022))
# s.current_game = kick_ball.Game(5)
# s.Launch()
# host = '10.0.201.111'
# port = 22022
# c = Client(host, int(port))
ingame = False
LAYOUTS = Layout.load_layouts()
current_left_layout = "start"
current_right_layout = None
def start_conntrol():
if LAYOUTS[current_left_layout].connect_to_server_button.clicked:
globals()["current_right_layout"] = "connect_to_server"
if LAYOUTS[current_left_layout].connect_to_server_button.clicked:
globals()["current_right_layout"] = "create_server"
if LAYOUTS[current_left_layout].connect_to_server_button.clicked:
sys.exit()
def start_server_controll():
if LAYOUTS[current_left_layout].connect_to_server_button.clicked:
globals()["current_left_layout"] = "game"
globals()["current_right_layout"] = None
if __name__ == "__main__":
init_graphics()
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
while True:
events = pygame.event.get()
if ingame:
continue
else:
screen.fill((55, 155, 255))
if current_left_layout is not None:
LAYOUTS[current_left_layout].update_elements(events)
LAYOUTS[current_left_layout].draw(screen)
if current_right_layout is not None:
LAYOUTS[current_right_layout].update_elements(events)
                LAYOUTS[current_right_layout].draw(screen)
pygame.display.update()
# c.Loop()
# sleep(0.01)
| 28.337662
| 74
| 0.682401
|
92adb85a4eb3efaf7dd6076d356a071ec17c7036
| 26,816
|
py
|
Python
|
deepmars/models/train_model_sys.py
|
utplanets/deepmars
|
ba306aa9b25b654636b61cf952af2791b7ed0e56
|
[
"MIT"
] | 2
|
2021-08-08T03:06:58.000Z
|
2021-11-25T04:06:00.000Z
|
deepmars/models/train_model_sys.py
|
utplanets/deepmars
|
ba306aa9b25b654636b61cf952af2791b7ed0e56
|
[
"MIT"
] | null | null | null |
deepmars/models/train_model_sys.py
|
utplanets/deepmars
|
ba306aa9b25b654636b61cf952af2791b7ed0e56
|
[
"MIT"
] | 2
|
2020-11-23T09:38:26.000Z
|
2021-02-26T01:14:28.000Z
|
#!/usr/bin/env python
"""Convolutional Neural Network Training Functions
Functions for building and training a (UNET) Convolutional Neural Network on
images of Mars and binary ring targets.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
import h5py
from keras.models import Model
from keras.layers.core import Dropout, Reshape
from keras.regularizers import l2
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
from keras import backend as K
import deepmars.features.template_match_target as tmt
import deepmars.utils.processing as proc
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
import os
from joblib import Parallel, delayed
from tqdm import tqdm, trange
# Check Keras version - code will switch API if needed.
from keras import __version__ as keras_version
K.set_image_dim_ordering('tf')
k2 = True if keras_version[0] == '2' else False
# If Keras is v2.x.x, create Keras 1-syntax wrappers.
if not k2:
from keras.models import load_model
from keras.layers import merge, Input
from keras.layers.convolutional import (Convolution2D, MaxPooling2D,
UpSampling2D)
else:
from keras.models import load_model
from keras.layers import Concatenate, Input
from keras.layers.convolutional import (Conv2D, MaxPooling2D,
UpSampling2D)
def merge(layers, mode=None, concat_axis=None):
"""Wrapper for Keras 2's Concatenate class (`mode` is discarded)."""
return Concatenate(axis=concat_axis)(list(layers))
def Convolution2D(n_filters, FL, FLredundant, activation=None,
init=None, W_regularizer=None, border_mode=None):
"""Wrapper for Keras 2's Conv2D class."""
return Conv2D(n_filters, FL, activation=activation,
kernel_initializer=init,
kernel_regularizer=W_regularizer,
padding=border_mode)
minrad_ = 5
maxrad_ = 40
longlat_thresh2_ = 1.8
rad_thresh_ = 1.0
template_thresh_ = 0.5
target_thresh_ = 0.1
@click.group()
def dl():
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# not used in this stub but often useful for finding various files
project_dir = Path(__file__).resolve().parents[2]
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
import sys
sys.path.append(os.getenv("DM_ROOTDIR"))
pass
########################
def get_param_i(param, i):
"""Gets correct parameter for iteration i.
Parameters
----------
param : list
List of model hyperparameters to be iterated over.
i : integer
Hyperparameter iteration.
Returns
-------
Correct hyperparameter for iteration i.
"""
if len(param) > i:
return param[i]
else:
return param[0]
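# Examples (added for illustration):
#   get_param_i([3], 2)        -> 3   (a single value is reused for every run)
#   get_param_i([3, 5, 7], 1)  -> 5   (per-run value when enough are supplied)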
########################
def custom_image_generator(data, target, batch_size=32):
"""Custom image generator that manipulates image/target pairs to prevent
overfitting in the Convolutional Neural Network.
Parameters
----------
data : array
Input images.
target : array
Target images.
batch_size : int, optional
Batch size for image manipulation.
Yields
------
Manipulated images and targets.
"""
D, L, W = data.shape[0], data[0].shape[0], data[0].shape[1]
while True:
shuffle_index = np.arange(D)
# only shuffle once each loop through the data
np.random.shuffle(shuffle_index)
for i in np.arange(0, len(data), batch_size):
index = shuffle_index[i:i + batch_size]
d, t = data[index].copy(), target[index].copy()
# Random color inversion
# for j in np.where(np.random.randint(0, 2, batch_size) == 1)[0]:
# d[j][d[j] > 0.] = 1. - d[j][d[j] > 0.]
# Horizontal/vertical flips
for j in np.where(np.random.randint(0, 2, batch_size) == 1)[0]:
d[j], t[j] = np.fliplr(d[j]), np.fliplr(t[j]) # left/right
for j in np.where(np.random.randint(0, 2, batch_size) == 1)[0]:
d[j], t[j] = np.flipud(d[j]), np.flipud(t[j]) # up/down
# Random up/down & left/right pixel shifts, 90 degree rotations
npix = 15
# Horizontal shift
h = np.random.randint(-npix, npix + 1, batch_size)
# Vertical shift
v = np.random.randint(-npix, npix + 1, batch_size)
# 90 degree rotations
r = np.random.randint(0, 4, batch_size)
for j in range(batch_size):
d[j] = np.pad(d[j], ((npix, npix), (npix, npix), (0, 0)),
mode='constant')[npix + h[j]:L + h[j] + npix,
npix + v[j]:W + v[j] + npix, :]
sh, sv = slice(npix + h[j], L + h[j] + npix),\
slice(npix + v[j], W + v[j] + npix)
t[j] = np.pad(t[j], (npix,), mode='constant')[sh, sv]
d[j], t[j] = np.rot90(d[j], r[j]), np.rot90(t[j], r[j])
yield (d, t)
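# Usage sketch (added for illustration; assumes `Data` has already been loaded
# and preprocessed as in get_models below):
#
#     gen = custom_image_generator(Data['train'][0], Data['train'][1],
#                                  batch_size=32)
#     d_batch, t_batch = next(gen)   # one augmented batch of images/targets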
def t2c(pred, csv, i,
minrad=minrad_,
maxrad=maxrad_,
longlat_thresh2=longlat_thresh2_,
rad_thresh=rad_thresh_,
template_thresh=template_thresh_,
target_thresh=target_thresh_):
return np.hstack([i,
tmt.template_match_t2c(pred, csv,
minrad=minrad,
                                             maxrad=maxrad,
longlat_thresh2=longlat_thresh2,
rad_thresh=rad_thresh,
template_thresh=template_thresh,
target_thresh=target_thresh)])
def diagnostic(res, beta):
"""Calculate the metrics from the predictions compared to the CSV.
Parameters
------------
res: list of results containing:
image number, number of matched, number of existing craters, number of
detected craters, maximum radius detected, mean error in longitude,
mean error in latitude, mean error in radius, fraction of duplicates
in detections.
beta : int
Beta value when calculating F-beta score.
Returns
-------
dictionary : metrics stored in a dictionary
"""
counter, N_match, N_csv, N_detect,\
mrad, err_lo, err_la, err_r, frac_duplicates = np.array(res).T
    # keep only images where at least one crater was matched
    w = np.where(N_match > 0)
counter, N_match, N_csv, N_detect,\
        mrad, err_lo, err_la, err_r, frac_dupes =\
counter[w], N_match[w], N_csv[w], N_detect[w],\
mrad[w], err_lo[w], err_la[w], err_r[w], frac_duplicates[w]
precision = N_match / (N_match + (N_detect - N_match))
recall = N_match / N_csv
fscore = (1 + beta**2) * (recall * precision) / \
(precision * beta**2 + recall)
diff = N_detect - N_match
frac_new = diff / (N_detect + diff)
frac_new2 = diff / (N_csv + diff)
frac_duplicates = frac_dupes
return dict(precision=precision,
recall=recall,
fscore=fscore,
frac_new=frac_new,
frac_new2=frac_new2,
err_lo=err_lo,
err_la=err_la,
err_r=err_r,
frac_duplicates=frac_duplicates,
maxrad=mrad,
counter=counter, N_match=N_match, N_csv=N_csv)
def get_metrics(data, craters_images, dim, model, name, beta=1, offset=0,
minrad=minrad_, maxrad=maxrad_,
longlat_thresh2=longlat_thresh2_,
rad_thresh=rad_thresh_, template_thresh=template_thresh_,
target_thresh=target_thresh_, rmv_oor_csvs=0):
"""Function that prints pertinent metrics at the end of each epoch.
Parameters
----------
data : hdf5
Input images.
craters : hdf5
Pandas arrays of human-counted crater data.
dim : int
Dimension of input images (assumes square).
model : keras model object
Keras model
beta : int, optional
Beta value when calculating F-beta score. Defaults to 1.
"""
X, Y = data[0], data[1]
craters, images = craters_images
# Get csvs of human-counted craters
csvs = []
# minrad, maxrad = 3, 50
cutrad, n_csvs = 0.8, len(X)
diam = 'Diameter (pix)'
for i in range(len(X)):
imname = images[i] # name = "img_{0:05d}".format(i)
found = False
for crat in craters:
if imname in crat:
csv = crat[imname]
found = True
if not found:
csvs.append([-2])
continue
# remove small/large/half craters
csv = csv[(csv[diam] < 2 * maxrad) & (csv[diam] > 2 * minrad)]
csv = csv[(csv['x'] + cutrad * csv[diam] / 2 <= dim)]
csv = csv[(csv['y'] + cutrad * csv[diam] / 2 <= dim)]
csv = csv[(csv['x'] - cutrad * csv[diam] / 2 > 0)]
csv = csv[(csv['y'] - cutrad * csv[diam] / 2 > 0)]
if len(csv) < 3: # Exclude csvs with few craters
csvs.append([-1])
else:
csv_coords = np.asarray((csv['x'], csv['y'], csv[diam] / 2)).T
csvs.append(csv_coords)
# Calculate custom metrics
print("csvs: {}".format(len(csvs)))
print("")
print("*********Custom Loss*********")
recall, precision, fscore = [], [], []
frac_new, frac_new2, mrad = [], [], []
err_lo, err_la, err_r = [], [], []
frac_duplicates = []
if isinstance(model, Model):
preds = None
# print(X[6].min(),X[6].max(),X.dtype,np.percentile(X[6],99))
preds = model.predict(X, verbose=1)
# save
h5f = h5py.File("predictions.hdf5", 'w')
h5f.create_dataset(name, data=preds)
print("Successfully generated and saved model predictions.")
else:
preds = model
# print(csvs)
countme = [i for i in range(n_csvs) if len(csvs[i]) >= 3]
print("Processing {} fields".format(len(countme)))
# preds contains a large number of predictions,
# so we run the template code in parallel.
res = Parallel(n_jobs=24,
verbose=5)(delayed(t2c)(preds[i], csvs[i], i,
minrad=minrad,
maxrad=maxrad,
longlat_thresh2=longlat_thresh2,
rad_thresh=rad_thresh,
template_thresh=template_thresh,
target_thresh=target_thresh)
for i in range(n_csvs) if len(csvs[i]) >= 3)
if len(res) == 0:
print("No valid results: ", res)
return None
# At this point we've processed the predictions with the template matching
# algorithm, now calculate the metrics from the data.
diag = diagnostic(res, beta)
print(len(diag["recall"]))
# print("binary XE score = %f" % model.evaluate(X, Y))
if len(diag["recall"]) > 3:
metric_data = [("N_match/N_csv (recall)", diag["recall"]),
("N_match/(N_match + (N_detect-N_match)) (precision)",
diag["precision"]),
("F_{} score".format(beta), diag["fscore"]),
("(N_detect - N_match)/N_detect" +
"(fraction of craters that are new)",
diag["frac_new"]),
("(N_detect - N_match)/N_csv (fraction" +
"of craters that are new, 2)", diag["frac_new2"])]
for fname, data in metric_data:
print("mean and std of %s = %f, %f" %
(fname, np.mean(data), np.std(data)))
for fname, data in [("fractional longitude diff", diag["err_lo"]),
("fractional latitude diff", diag["err_la"]),
("fractional radius diff", diag["err_r"]),
]:
print("median and IQR %s = %f, 25:%f, 75:%f" %
(fname,
np.median(data),
np.percentile(data, 25),
np.percentile(data, 75)))
print("""mean and std of maximum detected pixel radius in an image =
%f, %f""" % (np.mean(diag["maxrad"]), np.std(diag["maxrad"])))
print("""absolute maximum detected pixel radius over all images =
%f""" % np.max(diag["maxrad"]))
print("")
return diag
########################
def build_model(dim, learn_rate, lmbda, drop, FL, init, n_filters):
"""Function that builds the (UNET) convolutional neural network.
Parameters
----------
dim : int
Dimension of input images (assumes square).
learn_rate : float
Learning rate.
lmbda : float
Convolution2D regularization parameter.
drop : float
Dropout fraction.
FL : int
Filter length.
init : string
Weight initialization type.
n_filters : int
Number of filters in each layer.
Returns
-------
model : keras model object
Constructed Keras model.
"""
print('Making UNET model...')
img_input = Input(batch_shape=(None, dim, dim, 1))
a1 = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(img_input)
a1 = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a1)
a1P = MaxPooling2D((2, 2), strides=(2, 2))(a1)
a2 = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a1P)
a2 = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a2)
a2P = MaxPooling2D((2, 2), strides=(2, 2))(a2)
a3 = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a2P)
a3 = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a3)
a3P = MaxPooling2D((2, 2), strides=(2, 2),)(a3)
u = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(a3P)
u = Convolution2D(n_filters * 4, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = UpSampling2D((2, 2))(u)
u = merge((a3, u), mode='concat', concat_axis=3)
u = Dropout(drop)(u)
u = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Convolution2D(n_filters * 2, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = UpSampling2D((2, 2))(u)
u = merge((a2, u), mode='concat', concat_axis=3)
u = Dropout(drop)(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = UpSampling2D((2, 2))(u)
u = merge((a1, u), mode='concat', concat_axis=3)
u = Dropout(drop)(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Convolution2D(n_filters, FL, FL, activation='relu', init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
# Final output
final_activation = 'sigmoid'
u = Convolution2D(1, 1, 1, activation=final_activation, init=init,
W_regularizer=l2(lmbda), border_mode='same')(u)
u = Reshape((dim, dim))(u)
if k2:
model = Model(inputs=img_input, outputs=u)
else:
model = Model(input=img_input, output=u)
optimizer = Adam(lr=learn_rate)
model.compile(loss='binary_crossentropy', optimizer=optimizer)
print(model.summary())
return model
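# Example call (added for illustration; the hyperparameter values below are the
# ones listed in the runs.csv comment in train_model, not mandatory defaults):
#
#     model = build_model(dim=256, learn_rate=0.0001, lmbda=1e-6, drop=0.15,
#                         FL=3, init='he_normal', n_filters=112)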
########################
def test_model(Data, Craters, MP, i_MP):
# Static params
dim, nb_epoch, bs = MP['dim'], MP['epochs'], MP['bs']
# Iterating params
FL = get_param_i(MP['filter_length'], i_MP)
learn_rate = get_param_i(MP['lr'], i_MP)
n_filters = get_param_i(MP['n_filters'], i_MP)
init = get_param_i(MP['init'], i_MP)
lmbda = get_param_i(MP['lambda'], i_MP)
drop = get_param_i(MP['dropout'], i_MP)
model = load_model(MP["model"])
get_metrics(Data[MP["test_dataset"]],
Craters[MP["test_dataset"]], dim, model, MP["test_dataset"])
def train_and_test_model(Data, Craters, MP, i_MP):
"""Function that trains, tests and saves the model, printing out metrics
after each model.
Parameters
----------
Data : dict
Inputs and Target Moon data.
Craters : dict
Human-counted crater data.
MP : dict
Contains all relevant parameters.
i_MP : int
Iteration number (when iterating over hypers).
"""
# Static params
dim, nb_epoch, bs = MP['dim'], MP['epochs'], MP['bs']
# Iterating params
FL = get_param_i(MP['filter_length'], i_MP)
learn_rate = get_param_i(MP['lr'], i_MP)
n_filters = get_param_i(MP['n_filters'], i_MP)
init = get_param_i(MP['init'], i_MP)
lmbda = get_param_i(MP['lambda'], i_MP)
drop = get_param_i(MP['dropout'], i_MP)
# Build model
if MP["model"] is not None:
model = load_model(MP["model"])
else:
model = build_model(dim, learn_rate, lmbda, drop, FL, init, n_filters)
# Main loop
n_samples = MP['n_train']
for nb in range(nb_epoch):
if k2:
model.fit_generator(
custom_image_generator(Data['train'][0], Data['train'][1],
batch_size=bs),
steps_per_epoch=n_samples / bs, epochs=1, verbose=1,
# validation_data=(Data['dev'][0],Data['dev'][1]), #no gen
validation_data=custom_image_generator(Data['dev'][0],
Data['dev'][1],
batch_size=bs),
validation_steps=MP['n_dev'] / bs,
callbacks=[
EarlyStopping(monitor='val_loss', patience=3, verbose=0)])
else:
model.fit_generator(
custom_image_generator(Data['train'][0], Data['train'][1],
batch_size=bs),
samples_per_epoch=n_samples, nb_epoch=1, verbose=1,
# validation_data=(Data['dev'][0],Data['dev'][1]), #no gen
validation_data=custom_image_generator(Data['dev'][0],
Data['dev'][1],
batch_size=bs),
nb_val_samples=n_samples,
callbacks=[
EarlyStopping(monitor='val_loss', patience=3, verbose=0)])
suffix = "{}_{}_{}_{}_{}_{}_{}.hdf5".format(learn_rate,
n_filters,
init,
lmbda,
drop,
nb,
nb_epoch)
model_save_name = os.path.join(MP["save_dir"],
"model_".format(suffix))
if MP['save_models']:
model.save(model_save_name)
if MP["calculate_custom_loss"]:
get_metrics(Data['dev'], Craters['dev'], dim, model, "dev")
if MP["save_models"] == 1:
model.save(os.path.join(MP["save_dir"], MP["final_save_name"]))
print("###################################")
print("##########END_OF_RUN_INFO##########")
print("""learning_rate=%e, batch_size=%d, filter_length=%e, n_epoch=%d
n_train=%d, img_dimensions=%d, init=%s, n_filters=%d, lambda=%e
dropout=%f""" % (learn_rate, bs, FL, nb_epoch, MP['n_train'],
MP['dim'], init, n_filters, lmbda, drop))
if MP["calculate_custom_loss"]:
get_metrics(Data['test'], Craters['test'], dim, model, "test")
print("###################################")
print("###################################")
########################
def get_models(MP):
"""Top-level function that loads data files and calls train_and_test_model.
Parameters
----------
MP : dict
Model Parameters.
"""
dir = MP['dir']
n_train, n_dev, n_test = MP['n_train'], MP['n_dev'], MP['n_test']
# Load data
def load_files(numbers, test, this_dataset):
res0 = []
res1 = []
files = []
craters = []
images = []
npic = 0
if not test or (test and this_dataset):
for n in tqdm(numbers):
files.append(h5py.File(os.path.join(
dir, "sys_images_{0:05d}.hdf5".format(n)), 'r'))
images.extend(["img_{0:05d}".format(a)
for a in np.arange(n, n + 1000)])
res0.append(files[-1]["input_images"][:].astype('float32'))
npic = npic + len(res0[-1])
res1.append(files[-1]["target_masks"][:].astype('float32'))
files[-1].close()
craters.append(pd.HDFStore(os.path.join(
dir, "sys_craters_{0:05d}.hdf5".format(n)), 'r'))
res0 = np.vstack(res0)
res1 = np.vstack(res1)
return files, res0, res1, npic, craters, images
train_files,\
train0,\
train1,\
Ntrain,\
train_craters,\
train_images = load_files(MP["train_indices"],
MP["test"],
MP["test_dataset"] == "train")
print(Ntrain, n_train)
dev_files,\
dev0,\
dev1,\
Ndev,\
dev_craters,\
dev_images = load_files(MP["dev_indices"],
MP["test"],
MP["test_dataset"] == "dev")
print(Ndev, n_dev)
test_files,\
test0,\
test1,\
Ntest,\
test_craters,\
test_images = load_files(MP["test_indices"],
MP["test"],
MP["test_dataset"] == "test")
print(Ntest, n_test)
Data = {
"train": [train0, train1],
"dev": [dev0, dev1],
"test": [test0[:n_test], test1[:n_test]]
}
# Rescale, normalize, add extra dim
proc.preprocess(Data)
# Load ground-truth craters
Craters = {
'train': [train_craters, train_images],
'dev': [dev_craters, dev_images],
'test': [test_craters, test_images]
}
# Iterate over parameters
if MP["test"]:
test_model(Data, Craters, MP, 0)
return
else:
for i in range(MP['N_runs']):
train_and_test_model(Data, Craters, MP, i)
@dl.command()
@click.option("--test", is_flag=True, default=False)
@click.option("--test_dataset", default="dev")
@click.option("--model", default=None)
def train_model(test, test_dataset, model):
"""Run Convolutional Neural Network Training
Execute the training of a (UNET) Convolutional Neural Network on
images of the Moon and binary ring targets.
"""
# Model Parameters
MP = {}
# Directory of train/dev/test image and crater hdf5 files.
MP['dir'] = os.path.join(os.getenv("DM_ROOTDIR"), 'data/processed/')
# Image width/height, assuming square images.
MP['dim'] = 256
# Batch size: smaller values = less memory, less accurate gradient estimate
MP['bs'] = 10
# Number of training epochs.
MP['epochs'] = 30
# Number of train/valid/test samples, needs to be a multiple of batch size.
# Sample every even-numbered image file for training, half of the
# odd-numbered files for testing, and the other half for validation.
MP['train_indices'] = list(np.arange(162000, 208000, 2000))
MP['dev_indices'] = list(np.arange(161000, 206000, 4000))
MP['test_indices'] = list(np.arange(163000, 206000, 4000))
# MP['test_indices'] = 90000#list(np.arange(10000,184000,8000))
MP['n_train'] = len(MP["train_indices"]) * 1000
MP['n_dev'] = len(MP["dev_indices"]) * 1000
MP['n_test'] = len(MP["test_indices"]) * 1000
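# Worked example of the counts above: np.arange(162000, 208000, 2000) yields
# 23 even-numbered file indices, so n_train = 23 * 1000 = 23000 images; the
# dev and test ranges step by 4000 over the odd-numbered files, giving 12 and
# 11 files (12000 and 11000 images) respectively.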
print(MP["n_train"], MP["n_dev"], MP["n_test"])
# Save model (binary flag) and directory.
MP['save_models'] = 1
MP["calculate_custom_loss"] = False
MP['save_dir'] = 'models'
MP['final_save_name'] = 'model.h5'
# initial model
MP["model"] = model
# testing only
MP["test"] = test
MP["test_dataset"] = test_dataset
# Model Parameters (to potentially iterate over, keep in lists).
# runs.csv looks like
# filter_length,lr,n_filters,init,lambda,dropout
# 3,0.0001,112,he_normal,1e-6,0.15
#
# each line is a new run.
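# For illustration (the second row below is hypothetical), a runs.csv with
#   3,0.0001,112,he_normal,1e-6,0.15
#   3,0.0003,112,he_normal,1e-5,0.25
# produces MP['lr'] == [0.0001, 0.0003], MP['lambda'] == [1e-6, 1e-5], and so
# on, with N_runs == 2 so that train_and_test_model runs once per row.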
df = pd.read_csv("runs.csv")
for na, ty in [("filter_length", int),
("lr", float),
("n_filters", int),
("init", str),
("lambda", float),
("dropout", float)]:
MP[na] = df[na].astype(ty).values
MP['N_runs'] = len(MP['lambda']) # Number of runs
MP['filter_length'] = [3] # Filter length
# MP['lr'] = [0.0001] # Learning rate
# MP['n_filters'] = [112] # Number of filters
# MP['init'] = ['he_normal'] # Weight initialization
# MP['lambda'] = [1e-6] # Weight regularization
# MP['dropout'] = [0.15] # Dropout fraction
# Iterating over parameters example.
# MP['N_runs'] = 2
# MP['lambda']=[1e-4,1e-4]
print(MP)
get_models(MP)
if __name__ == '__main__':
dl()
| 36.237838
| 79
| 0.545085
|
43a210fbe85ce85f3fa711f9696ca943b58d01dd
| 3,667
|
py
|
Python
|
dendropy/test/support/dendropytest.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
dendropy/test/support/dendropytest.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
dendropy/test/support/dendropytest.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
#! /usr/bin/env python
##############################################################################
## DendroPy Phylogenetic Computing Library.
##
## Copyright 2010-2015 Jeet Sukumaran and Mark T. Holder.
## All rights reserved.
##
## See "LICENSE.rst" for terms and conditions of usage.
##
## If you use this work or any portion thereof in published work,
## please cite it as:
##
## Sukumaran, J. and M. T. Holder. 2010. DendroPy: a Python library
## for phylogenetic computing. Bioinformatics 26: 1569-1571.
##
##############################################################################
"""
Extension to the basic unittest TestCase.
"""
import collections
import sys
import re
import os
import unittest
from distutils.util import strtobool
from dendropy.utility import metavar
from dendropy.utility import messaging
# Defining this here means that unittest will exclude all lines from this
# module in the traceback report when an assertion fails, allowing
# for the starting point of the traceback to be the point where the assertion
# was made, rather than the point where an exception was raised because
# the assertion was false.
__unittest = True
def discover_test_module_paths(filter_patterns=None):
"""
Discovers test modules. If ``filter_patterns`` is |None|, then
all files in the *immediate* directory that begin with 'test' will
be added to the set returned. If ``filter_patterns`` is not |None|, then it
should be a list of regular expression patterns, and only files that match
at least one of the patterns will be returned.
"""
test_module_pattern = re.compile(r"^test.*\.py$", re.IGNORECASE)
if filter_patterns:
filter_pattern = re.compile("(" + r"\|".join(filter_patterns) + ")")
else:
filter_pattern = None
path = os.path.dirname(os.path.dirname(__file__))
filenames = os.listdir(path)
test_modules = []
for filename in filenames:
if test_module_pattern.match(filename):
if filter_pattern is None or filter_pattern.match(filename):
# test_modules.append("" + os.path.splitext(filename)[0])
test_modules.append("dendropy.test." + os.path.splitext(filename)[0])
return test_modules
def get_test_suite(test_names=None):
"""
If ``test_names`` is not |None|, creates a test suite out of those
modules. Otherwise, creates a test suite from all of the modules in
``dendropy.test`` using the discovery.
"""
if test_names is None:
test_names = discover_test_module_paths()
tests = unittest.defaultTestLoader.loadTestsFromNames(test_names)
return unittest.TestSuite(tests)
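# A minimal usage sketch (the module name below is hypothetical): build a
# suite from explicit test modules and run it with the standard text runner.
#
#     suite = get_test_suite(["dendropy.test.test_some_feature"])
#     unittest.TextTestRunner().run(suite)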
class ExtendedTestCase(unittest.TestCase):
"""
Extends unittest.TestCase with various new assertion tests.
"""
def _get_logger(self):
if not hasattr(self, "_logger") or self._logger is None:
self._logger = messaging.get_logger(self.__class__.__name__)
return self._logger
def _set_logger(self, logger):
self._logger = logger
logger = property(_get_logger, _set_logger)
def assertCountEqual(self, *args, **kwargs):
if sys.hexversion >= 0x03020000:
super(ExtendedTestCase, self).assertCountEqual(*args, **kwargs)
else:
self.assertEqual(collections.Counter(args[0]), collections.Counter(args[1]))
def fail_incomplete_tests(self):
return bool(strtobool(os.environ.get(metavar.FAIL_INCOMPLETE_TESTS_ENVAR, "0")))
def assertEqualUnorderedSequences(self, x1, x2):
c1 = collections.Counter(x1)
c2 = collections.Counter(x2)
return self.assertEqual(c1, c2)
| 37.040404
| 88
| 0.669757
|
9c2d102be62aa973cdae90e3136547104dd7fd91
| 2,427
|
py
|
Python
|
setup.py
|
virtuald/header2whatever
|
a838cbf3e232b41ea387274730251778a84d831f
|
[
"Apache-2.0"
] | 3
|
2018-07-04T12:36:56.000Z
|
2021-04-14T17:42:43.000Z
|
setup.py
|
virtuald/header2whatever
|
a838cbf3e232b41ea387274730251778a84d831f
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
virtuald/header2whatever
|
a838cbf3e232b41ea387274730251778a84d831f
|
[
"Apache-2.0"
] | 1
|
2021-12-31T07:23:18.000Z
|
2021-12-31T07:23:18.000Z
|
from __future__ import print_function
from os.path import dirname, exists, join
import sys, subprocess
from setuptools import setup
setup_dir = dirname(__file__)
git_dir = join(setup_dir, '.git')
base_package = 'header2whatever'
version_file = join(setup_dir, base_package, 'version.py')
# Automatically generate a version.py based on the git version
if exists(git_dir):
p = subprocess.Popen(["git", "describe", "--tags", "--long", "--dirty=-dirty"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
# Make sure the git version has at least one tag
if err:
print("Error: You need to create a tag for this repo to use the builder")
sys.exit(1)
# Convert git version to PEP440 compliant version
# - Older versions of pip choke on local identifiers, so we can't include the git commit
v, commits, local = out.decode('utf-8').rstrip().split('-', 2)
if commits != '0' or '-dirty' in local:
v = '%s.post0.dev%s' % (v, commits)
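# Worked example of the conversion above (hypothetical tag): a `git describe`
# output of "1.2.3-4-gdeadbeef" splits into v="1.2.3", commits="4",
# local="gdeadbeef", and because commits != '0' the published version
# becomes "1.2.3.post0.dev4".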
# Create the version.py file
with open(version_file, 'w') as fp:
fp.write("# Autogenerated by setup.py\n__version__ = '{0}'".format(v))
with open(version_file, 'r') as fp:
exec(fp.read(), globals())
with open(join(setup_dir, 'README.rst'), 'r') as readme_file:
long_description = readme_file.read()
setup(
name='header2whatever',
version=__version__,
description='Generate files from C/C++ headers using jinja2 templates',
long_description=long_description,
author='Dustin Spicuzza',
author_email='dustin@virtualroadside.com',
url='https://github.com/virtualroadside/header2whatever',
keywords='c++ cpp codegen generator header jinja2 template',
packages=[
base_package,
base_package + "/_pcpp",
base_package + "/_pcpp/ply/ply",
],
install_requires=[
'robotpy-cppheaderparser>=5.0.0',
'jinja2',
'pyyaml',
'schematics>=2.1.1',
],
license='Apache 2.0',
classifiers=[
'Operating System :: OS Independent',
'Environment :: Console',
'Programming Language :: Python',
'License :: OSI Approved :: Apache Software License'
],
entry_points = {
'console_scripts': [
'h2w = header2whatever.parse:main',
'h2w-batch = header2whatever.parse:batch'
]
}
)
| 32.797297
| 92
| 0.634116
|
cf6a4a88ff55f9e5dbd64e7ad1a40288f4ec8eb7
| 1,672
|
py
|
Python
|
config/wsgi.py
|
devnelmar/cornershop
|
f3af70577f28577fbd91e762df7388ff9aba54ee
|
[
"MIT"
] | null | null | null |
config/wsgi.py
|
devnelmar/cornershop
|
f3af70577f28577fbd91e762df7388ff9aba54ee
|
[
"MIT"
] | null | null | null |
config/wsgi.py
|
devnelmar/cornershop
|
f3af70577f28577fbd91e762df7388ff9aba54ee
|
[
"MIT"
] | null | null | null |
"""
WSGI config for cornershoop project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from pathlib import Path
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# cornershoop directory.
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent
sys.path.append(str(ROOT_DIR / "cornershoop"))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 42.871795
| 79
| 0.803828
|
35d67388ca65cdac5160d1f38b1d5565881975b1
| 2,774
|
py
|
Python
|
python_tutorial/settings.py
|
sunayaakula/outlookapp
|
84ad1c4293cae1d557ed79b105399221ab7a372c
|
[
"MIT"
] | null | null | null |
python_tutorial/settings.py
|
sunayaakula/outlookapp
|
84ad1c4293cae1d557ed79b105399221ab7a372c
|
[
"MIT"
] | 4
|
2020-06-06T01:27:26.000Z
|
2021-06-10T22:38:58.000Z
|
python_tutorial/settings.py
|
sunayaakula/outlookapp
|
84ad1c4293cae1d557ed79b105399221ab7a372c
|
[
"MIT"
] | null | null | null |
"""
Django settings for python_tutorial project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import django_heroku
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'v@*)_*6xx)#t@4np043msmvg%^ez2p46ke#2*dtsf_bnvgxjuj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'tutorial',
)
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'python_tutorial.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'python_tutorial.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/assets/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'assets')
# Activate Django-Heroku.
django_heroku.settings(locals())
| 25.218182
| 72
| 0.702596
|
de192dd4b0ba4fcd927703f57e392efa24796a81
| 572
|
py
|
Python
|
tests/rpc/python/exn_test.py
|
omnisci3nce/ocaml-rpc
|
982f950ee7b6d792bd4ae097393d9bed545bef90
|
[
"ISC"
] | 76
|
2015-01-22T09:40:14.000Z
|
2022-03-30T19:44:53.000Z
|
tests/rpc/python/exn_test.py
|
omnisci3nce/ocaml-rpc
|
982f950ee7b6d792bd4ae097393d9bed545bef90
|
[
"ISC"
] | 106
|
2015-05-15T14:20:17.000Z
|
2022-03-09T11:36:53.000Z
|
tests/rpc/python/exn_test.py
|
omnisci3nce/ocaml-rpc
|
982f950ee7b6d792bd4ae097393d9bed545bef90
|
[
"ISC"
] | 26
|
2015-06-09T15:55:29.000Z
|
2022-02-04T12:29:32.000Z
|
"""
Tests that the exceptions are correctly generated.
"""
import bindings
if __name__ == "__main__":
try:
raise bindings.Error1("test")
except bindings.Error1 as exn:
pass
try:
raise bindings.Error2((4, True))
except bindings.Error2 as exn:
pass
try:
raise bindings.Error3((4, True, "error"))
except bindings.Error3 as exn:
pass
try:
raise bindings.Error2(("4", "True"))
except bindings.TypeError:
pass
except Exception:
raise Exception("Should have raised TypeError")
| 22
| 55
| 0.606643
|
b18680e12c75c6e6712b2b03cc985614e8283b20
| 10,814
|
py
|
Python
|
python/ray/tune/suggest/suggestion.py
|
noahshpak/ray
|
edd783bc327760a4892ab89222ee551e42df15b9
|
[
"Apache-2.0"
] | 1
|
2019-06-21T10:06:41.000Z
|
2019-06-21T10:06:41.000Z
|
python/ray/tune/suggest/suggestion.py
|
noahshpak/ray
|
edd783bc327760a4892ab89222ee551e42df15b9
|
[
"Apache-2.0"
] | null | null | null |
python/ray/tune/suggest/suggestion.py
|
noahshpak/ray
|
edd783bc327760a4892ab89222ee551e42df15b9
|
[
"Apache-2.0"
] | null | null | null |
import copy
import glob
import logging
import os
from ray.util.debug import log_once
logger = logging.getLogger(__name__)
class Searcher:
"""Abstract class for wrapping suggesting algorithms.
Custom algorithms can extend this class easily by overriding the
`suggest` method to provide generated parameters for the trials.
Any subclass that implements ``__init__`` must also call the
constructor of this class: ``super(Subclass, self).__init__(...)``.
To track suggestions and their corresponding evaluations, the method
`suggest` will be passed a trial_id, which will be used in
subsequent notifications.
Args:
metric (str): The training result objective value attribute.
mode (str): One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute.
.. code-block:: python
class ExampleSearch(Searcher):
def __init__(self, metric="mean_loss", mode="min", **kwargs):
super(ExampleSearch, self).__init__(
metric=metric, mode=mode, **kwargs)
self.optimizer = Optimizer()
self.configurations = {}
def suggest(self, trial_id):
configuration = self.optimizer.query()
self.configurations[trial_id] = configuration
def on_trial_complete(self, trial_id, result, **kwargs):
configuration = self.configurations[trial_id]
if result and self.metric in result:
self.optimizer.update(configuration, result[self.metric])
tune.run(trainable_function, search_alg=ExampleSearch())
"""
FINISHED = "FINISHED"
CKPT_FILE_TMPL = "searcher-state-{}.pkl"
def __init__(self,
metric="episode_reward_mean",
mode="max",
max_concurrent=None,
use_early_stopped_trials=None):
if use_early_stopped_trials is False:
raise DeprecationWarning(
"Early stopped trials are now always used. If this is a "
"problem, file an issue: https://github.com/ray-project/ray.")
if max_concurrent is not None:
logger.warning(
"DeprecationWarning: `max_concurrent` is deprecated for this "
"search algorithm. Use tune.suggest.ConcurrencyLimiter() "
"instead. This will raise an error in future versions of Ray.")
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"
self._metric = metric
self._mode = mode
def on_trial_result(self, trial_id, result):
"""Optional notification for result during training.
Note that by default, the result dict may include NaNs or
may not include the optimization metric. It is up to the
subclass implementation to preprocess the result to
avoid breaking the optimization process.
Args:
trial_id (str): A unique string ID for the trial.
result (dict): Dictionary of metrics for current training progress.
Note that the result dict may include NaNs or
may not include the optimization metric. It is up to the
subclass implementation to preprocess the result to
avoid breaking the optimization process.
"""
pass
def on_trial_complete(self, trial_id, result=None, error=False):
"""Notification for the completion of trial.
Typically, this method is used for notifying the underlying
optimizer of the result.
Args:
trial_id (str): A unique string ID for the trial.
result (dict): Dictionary of metrics for current training progress.
Note that the result dict may include NaNs or
may not include the optimization metric. It is up to the
subclass implementation to preprocess the result to
avoid breaking the optimization process. Upon errors, this
may also be None.
error (bool): True if the training process raised an error.
"""
raise NotImplementedError
def suggest(self, trial_id):
"""Queries the algorithm to retrieve the next set of parameters.
Arguments:
trial_id (str): Trial ID used for subsequent notifications.
Returns:
dict | FINISHED | None: Configuration for a trial, if possible.
If FINISHED is returned, Tune will be notified that
no more suggestions/configurations will be provided.
If None is returned, Tune will skip the querying of the
searcher for this step.
"""
raise NotImplementedError
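# A minimal sketch of the suggest() contract for subclass authors; the
# sampling logic below is hypothetical and not part of Tune itself:
#
#     def suggest(self, trial_id):
#         if self._space_exhausted:
#             return Searcher.FINISHED  # no more configurations will follow
#         if self._waiting_on_results:
#             return None               # skip this querying step
#         return {"lr": 0.001}          # otherwise, a config dict for the trial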
def save(self, checkpoint_path):
"""Save state to path for this search algorithm.
Args:
checkpoint_path (str): File where the search algorithm
state is saved. This path should be used later when
restoring from file.
Example:
.. code-block:: python
search_alg = Searcher(...)
analysis = tune.run(
cost,
num_samples=5,
search_alg=search_alg,
name=self.experiment_name,
local_dir=self.tmpdir)
search_alg.save("./my_favorite_path.pkl")
.. versionchanged:: 0.8.7
Save is automatically called by `tune.run`. You can use
`restore_from_dir` to restore from an experiment directory
such as `~/ray_results/trainable`.
"""
raise NotImplementedError
def restore(self, checkpoint_path):
"""Restore state for this search algorithm
Args:
checkpoint_path (str): File where the search algorithm
state is saved. This path should be the same
as the one provided to "save".
Example:
.. code-block:: python
search_alg.save("./my_favorite_path.pkl")
search_alg2 = Searcher(...)
search_alg2 = ConcurrencyLimiter(search_alg2, 1)
search_alg2.restore(checkpoint_path)
tune.run(cost, num_samples=5, search_alg=search_alg2)
"""
raise NotImplementedError
def get_state(self):
raise NotImplementedError
def set_state(self, state):
raise NotImplementedError
def save_to_dir(self, checkpoint_dir, session_str="default"):
"""Automatically saves the given searcher to the checkpoint_dir.
This is automatically used by tune.run during a Tune job.
Args:
checkpoint_dir (str): Filepath to experiment dir.
session_str (str): Unique identifier of the current run
session.
"""
tmp_search_ckpt_path = os.path.join(checkpoint_dir,
".tmp_searcher_ckpt")
success = True
try:
self.save(tmp_search_ckpt_path)
except NotImplementedError:
if log_once("suggest:save_to_dir"):
logger.warning(
"save not implemented for Searcher. Skipping save.")
success = False
if success and os.path.exists(tmp_search_ckpt_path):
os.rename(
tmp_search_ckpt_path,
os.path.join(checkpoint_dir,
self.CKPT_FILE_TMPL.format(session_str)))
def restore_from_dir(self, checkpoint_dir):
"""Restores the state of a searcher from a given checkpoint_dir.
Typically, you should use this function to restore from an
experiment directory such as `~/ray_results/trainable`.
.. code-block:: python
experiment_1 = tune.run(
cost,
num_samples=5,
search_alg=search_alg,
verbose=0,
name=self.experiment_name,
local_dir="~/my_results")
search_alg2 = Searcher()
search_alg2.restore_from_dir(
os.path.join("~/my_results", self.experiment_name)
"""
pattern = self.CKPT_FILE_TMPL.format("*")
full_paths = glob.glob(os.path.join(checkpoint_dir, pattern))
if not full_paths:
raise RuntimeError(
"Searcher unable to find checkpoint in {}".format(
checkpoint_dir)) # TODO
most_recent_checkpoint = max(full_paths)
self.restore(most_recent_checkpoint)
@property
def metric(self):
"""The training result objective value attribute."""
return self._metric
@property
def mode(self):
"""Specifies if minimizing or maximizing the metric."""
return self._mode
class ConcurrencyLimiter(Searcher):
"""A wrapper algorithm for limiting the number of concurrent trials.
Args:
searcher (Searcher): Searcher object that the
ConcurrencyLimiter will manage.
Example:
.. code-block:: python
from ray.tune.suggest import ConcurrencyLimiter
search_alg = HyperOptSearch(metric="accuracy")
search_alg = ConcurrencyLimiter(search_alg, max_concurrent=2)
tune.run(trainable, search_alg=search_alg)
"""
def __init__(self, searcher, max_concurrent):
assert type(max_concurrent) is int and max_concurrent > 0
self.searcher = searcher
self.max_concurrent = max_concurrent
self.live_trials = set()
super(ConcurrencyLimiter, self).__init__(
metric=self.searcher.metric, mode=self.searcher.mode)
def suggest(self, trial_id):
assert trial_id not in self.live_trials, (
f"Trial ID {trial_id} must be unique: already found in set.")
if len(self.live_trials) >= self.max_concurrent:
logger.debug(
f"Not providing a suggestion for {trial_id} due to "
"concurrency limit: %s/%s.", len(self.live_trials),
self.max_concurrent)
return
suggestion = self.searcher.suggest(trial_id)
if suggestion not in (None, Searcher.FINISHED):
self.live_trials.add(trial_id)
return suggestion
def on_trial_complete(self, trial_id, result=None, error=False):
if trial_id not in self.live_trials:
return
else:
self.searcher.on_trial_complete(
trial_id, result=result, error=error)
self.live_trials.remove(trial_id)
def get_state(self):
state = self.__dict__.copy()
del state["searcher"]
return copy.deepcopy(state)
def set_state(self, state):
self.__dict__.update(state)
| 35.224756
| 79
| 0.610875
|
b143b5a013fb3c60db822057b92655667079a542
| 1,591
|
py
|
Python
|
mopidy/http/__init__.py
|
stekern/mopidy
|
94509cf70f2bd35ebb13e746dde135bfbe35ce0e
|
[
"Apache-2.0"
] | 2
|
2019-02-13T15:16:55.000Z
|
2019-02-18T08:47:29.000Z
|
mopidy/http/__init__.py
|
stekern/mopidy
|
94509cf70f2bd35ebb13e746dde135bfbe35ce0e
|
[
"Apache-2.0"
] | 40
|
2019-02-13T09:33:00.000Z
|
2019-02-19T13:21:12.000Z
|
mopidy/http/__init__.py
|
stekern/mopidy
|
94509cf70f2bd35ebb13e746dde135bfbe35ce0e
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, unicode_literals
import logging
import os
import mopidy
from mopidy import config as config_lib, exceptions, ext
logger = logging.getLogger(__name__)
class Extension(ext.Extension):
dist_name = 'Mopidy-HTTP'
ext_name = 'http'
version = mopidy.__version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config_lib.read(conf_file)
def get_config_schema(self):
schema = super(Extension, self).get_config_schema()
schema['hostname'] = config_lib.Hostname()
schema['port'] = config_lib.Port()
schema['static_dir'] = config_lib.Deprecated()
schema['zeroconf'] = config_lib.String(optional=True)
schema['allowed_origins'] = config_lib.List(optional=True)
schema['csrf_protection'] = config_lib.Boolean(optional=True)
return schema
def validate_environment(self):
try:
import tornado.web # noqa
except ImportError as e:
raise exceptions.ExtensionError('tornado library not found', e)
def setup(self, registry):
from .actor import HttpFrontend
from .handlers import make_mopidy_app_factory
HttpFrontend.apps = registry['http:app']
HttpFrontend.statics = registry['http:static']
registry.add('frontend', HttpFrontend)
registry.add('http:app', {
'name': 'mopidy',
'factory': make_mopidy_app_factory(
registry['http:app'], registry['http:static']),
})
| 31.196078
| 75
| 0.660591
|
68ee01bc4c247867a691d991d3173ff429fac1d0
| 583
|
py
|
Python
|
lightly/data/__init__.py
|
umami-ware/lightly
|
5d70b34df7f784af249f9e9a6bfd6256756a877f
|
[
"MIT"
] | 1,515
|
2020-10-05T13:04:17.000Z
|
2022-03-31T16:14:55.000Z
|
lightly/data/__init__.py
|
umami-ware/lightly
|
5d70b34df7f784af249f9e9a6bfd6256756a877f
|
[
"MIT"
] | 628
|
2020-10-14T11:38:51.000Z
|
2022-03-31T14:40:54.000Z
|
lightly/data/__init__.py
|
umami-ware/lightly
|
5d70b34df7f784af249f9e9a6bfd6256756a877f
|
[
"MIT"
] | 108
|
2020-10-17T08:31:06.000Z
|
2022-03-20T16:44:22.000Z
|
"""The lightly.data module provides a dataset wrapper and collate functions. """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
from lightly.data.dataset import LightlyDataset
from lightly.data.collate import BaseCollateFunction
from lightly.data.collate import ImageCollateFunction
from lightly.data.collate import SimCLRCollateFunction
from lightly.data.collate import MoCoCollateFunction
from lightly.data.collate import MultiCropCollateFunction
from lightly.data.collate import SwaVCollateFunction
from lightly.data.collate import imagenet_normalize
| 44.846154
| 80
| 0.850772
|
6ba65583c641ff148200ec76f98807a15f261116
| 7,140
|
py
|
Python
|
cryptoapis/model/add_tokens_to_existing_from_address_response_item_token_data_ethereum_erc20_token.py
|
xan187/Crypto_APIs_2.0_SDK_Python
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
[
"MIT"
] | null | null | null |
cryptoapis/model/add_tokens_to_existing_from_address_response_item_token_data_ethereum_erc20_token.py
|
xan187/Crypto_APIs_2.0_SDK_Python
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
[
"MIT"
] | null | null | null |
cryptoapis/model/add_tokens_to_existing_from_address_response_item_token_data_ethereum_erc20_token.py
|
xan187/Crypto_APIs_2.0_SDK_Python
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
[
"MIT"
] | 1
|
2021-07-21T03:35:18.000Z
|
2021-07-21T03:35:18.000Z
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class AddTokensToExistingFromAddressResponseItemTokenDataEthereumErc20Token(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'contract_address': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'contract_address': 'contractAddress', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, contract_address, *args, **kwargs): # noqa: E501
"""AddTokensToExistingFromAddressResponseItemTokenDataEthereumErc20Token - a model defined in OpenAPI
Args:
contract_address (str): Token contract address to be transferred
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.contract_address = contract_address
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| 41.754386
| 484
| 0.608543
|
1ecd80d34344c3419b94e557c344f05202b12dc0
| 586
|
py
|
Python
|
agent/src/agent/pipeline/config/stages/jdbc_offset.py
|
eacherkan-aternity/daria
|
7c77a2f52c09c852017b16949a848fa51f0fb579
|
[
"Apache-2.0"
] | null | null | null |
agent/src/agent/pipeline/config/stages/jdbc_offset.py
|
eacherkan-aternity/daria
|
7c77a2f52c09c852017b16949a848fa51f0fb579
|
[
"Apache-2.0"
] | null | null | null |
agent/src/agent/pipeline/config/stages/jdbc_offset.py
|
eacherkan-aternity/daria
|
7c77a2f52c09c852017b16949a848fa51f0fb579
|
[
"Apache-2.0"
] | null | null | null |
from .base import Stage
class JDBCScript(Stage):
JYTHON_SCRIPT = 'jdbc.py'
def _get_config(self) -> dict:
with open(self.get_jython_file_path()) as f:
return {
'scriptConf.params': [
{'key': 'INITIAL_OFFSET', 'value': self.get_initial_timestamp().strftime('%d/%m/%Y %H:%M')},
{'key': 'INTERVAL_IN_SECONDS', 'value': str(self.pipeline.interval)},
{'key': 'DELAY_IN_SECONDS', 'value': str(self.pipeline.delay)},
],
'script': f.read(),
}
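# For illustration: a pipeline with interval=300 and delay=60 would yield
# INTERVAL_IN_SECONDS='300' and DELAY_IN_SECONDS='60' (passed as strings),
# alongside INITIAL_OFFSET rendered in 'dd/mm/YYYY HH:MM' format.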
| 34.470588
| 112
| 0.515358
|
11c44c7c4b71277486e40cd1790d44b511563ff3
| 4,403
|
py
|
Python
|
pkg/Python27/Lib/ctypes/test/test_arrays.py
|
jkolokotronis/ds_mod_tools
|
d9fd4def34f6adfd0e2b176d0a9bf2a3dfd43f93
|
[
"MIT"
] | 1
|
2021-05-19T16:14:23.000Z
|
2021-05-19T16:14:23.000Z
|
pkg/Python27/Lib/ctypes/test/test_arrays.py
|
jkolokotronis/ds_mod_tools
|
d9fd4def34f6adfd0e2b176d0a9bf2a3dfd43f93
|
[
"MIT"
] | null | null | null |
pkg/Python27/Lib/ctypes/test/test_arrays.py
|
jkolokotronis/ds_mod_tools
|
d9fd4def34f6adfd0e2b176d0a9bf2a3dfd43f93
|
[
"MIT"
] | 2
|
2020-04-30T17:34:06.000Z
|
2020-09-03T23:54:39.000Z
|
import unittest
from ctypes import *
formats = "bBhHiIlLqQfd"
formats = c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint, \
c_long, c_ulonglong, c_float, c_double, c_longdouble
class ArrayTestCase(unittest.TestCase):
def test_simple(self):
# create classes holding simple numeric types, and check
# various properties.
init = range(15, 25)
for fmt in formats:
alen = len(init)
int_array = ARRAY(fmt, alen)
ia = int_array(*init)
# length of instance ok?
self.assertEqual(len(ia), alen)
# slot values ok?
values = [ia[i] for i in range(len(init))]
self.assertEqual(values, init)
# change the items
from operator import setitem
new_values = range(42, 42+alen)
[setitem(ia, n, new_values[n]) for n in range(alen)]
values = [ia[i] for i in range(len(init))]
self.assertEqual(values, new_values)
# are the items initialized to 0?
ia = int_array()
values = [ia[i] for i in range(len(init))]
self.assertEqual(values, [0] * len(init))
# Too many initializers should be caught
self.assertRaises(IndexError, int_array, *range(alen*2))
CharArray = ARRAY(c_char, 3)
ca = CharArray("a", "b", "c")
# Should this work? It doesn't:
# CharArray("abc")
self.assertRaises(TypeError, CharArray, "abc")
self.assertEqual(ca[0], "a")
self.assertEqual(ca[1], "b")
self.assertEqual(ca[2], "c")
self.assertEqual(ca[-3], "a")
self.assertEqual(ca[-2], "b")
self.assertEqual(ca[-1], "c")
self.assertEqual(len(ca), 3)
# slicing is now supported, but not extended slicing (3-argument)!
from operator import getslice, delitem
self.assertRaises(TypeError, getslice, ca, 0, 1, -1)
# cannot delete items
self.assertRaises(TypeError, delitem, ca, 0)
def test_numeric_arrays(self):
alen = 5
numarray = ARRAY(c_int, alen)
na = numarray()
values = [na[i] for i in range(alen)]
self.assertEqual(values, [0] * alen)
na = numarray(*[c_int()] * alen)
values = [na[i] for i in range(alen)]
self.assertEqual(values, [0]*alen)
na = numarray(1, 2, 3, 4, 5)
values = [i for i in na]
self.assertEqual(values, [1, 2, 3, 4, 5])
na = numarray(*map(c_int, (1, 2, 3, 4, 5)))
values = [i for i in na]
self.assertEqual(values, [1, 2, 3, 4, 5])
def test_classcache(self):
self.assertTrue(not ARRAY(c_int, 3) is ARRAY(c_int, 4))
self.assertTrue(ARRAY(c_int, 3) is ARRAY(c_int, 3))
def test_from_address(self):
# Failed with 0.9.8, reported by JUrner
p = create_string_buffer("foo")
sz = (c_char * 3).from_address(addressof(p))
self.assertEqual(sz[:], "foo")
self.assertEqual(sz[::], "foo")
self.assertEqual(sz[::-1], "oof")
self.assertEqual(sz[::3], "f")
self.assertEqual(sz[1:4:2], "o")
self.assertEqual(sz.value, "foo")
try:
create_unicode_buffer
except NameError:
pass
else:
def test_from_addressW(self):
p = create_unicode_buffer("foo")
sz = (c_wchar * 3).from_address(addressof(p))
self.assertEqual(sz[:], "foo")
self.assertEqual(sz[::], "foo")
self.assertEqual(sz[::-1], "oof")
self.assertEqual(sz[::3], "f")
self.assertEqual(sz[1:4:2], "o")
self.assertEqual(sz.value, "foo")
def test_cache(self):
# Array types are cached internally in the _ctypes extension,
# in a WeakValueDictionary. Make sure the array type is
# removed from the cache when the itemtype goes away. This
# test will not fail, but will show a leak in the testsuite.
# Create a new type:
class my_int(c_int):
pass
# Create a new array type based on it:
t1 = my_int * 1
t2 = my_int * 1
self.assertTrue(t1 is t2)
if __name__ == '__main__':
unittest.main()
| 32.614815
| 75
| 0.545083
|
9358c4fd07111f7adfbf60241727215f978b2a36
| 10,428
|
py
|
Python
|
vendor/github.com/tensorflow/tensorflow/tensorflow/python/eager/function_argument_naming_test.py
|
owennewo/kfserving
|
89f73c87525b8e06ea799f69f2979c4ad272fcb3
|
[
"Apache-2.0"
] | 5
|
2019-01-13T16:15:25.000Z
|
2019-07-07T16:17:32.000Z
|
vendor/github.com/tensorflow/tensorflow/tensorflow/python/eager/function_argument_naming_test.py
|
owennewo/kfserving
|
89f73c87525b8e06ea799f69f2979c4ad272fcb3
|
[
"Apache-2.0"
] | 13
|
2020-11-13T18:53:29.000Z
|
2022-03-12T00:33:00.000Z
|
vendor/github.com/tensorflow/tensorflow/tensorflow/python/eager/function_argument_naming_test.py
|
owennewo/kfserving
|
89f73c87525b8e06ea799f69f2979c4ad272fcb3
|
[
"Apache-2.0"
] | 2
|
2019-12-17T09:27:07.000Z
|
2020-05-24T13:09:49.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@parameterized.named_parameters(
dict(testcase_name='Defun', function_decorator=function.defun),
dict(testcase_name='DefFunction', function_decorator=def_function.function))
class ArgumentNamingTests(test.TestCase, parameterized.TestCase):
"""Tests for recognizable export signatures from concrete functions."""
def testBasic(self, function_decorator):
@function_decorator
def fn(a, b):
return a + b, a * b
# Call the function to make def_function happy
fn(array_ops.ones([]), array_ops.ones([]))
fn_op = fn.get_concrete_function(
tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32))
self.assertEqual(
['a', 'b'],
[inp.op.name for inp in fn_op.inputs])
self.assertEqual(
[b'a', b'b'],
[inp.op.get_attr('_user_specified_name') for inp in fn_op.inputs])
self.assertEqual(2, len(fn_op.graph.structured_outputs))
self.assertAllClose(
[3., 2.],
fn_op(constant_op.constant(1.), constant_op.constant(2.)))
self.assertAllClose(
[3., 2.],
fn_op(a=constant_op.constant(1.), b=constant_op.constant(2.)))
def testVariable(self, function_decorator):
@function_decorator
def fn(a, b):
return a + b, a * b
# Call the function to make def_function happy
fn(array_ops.ones([]), array_ops.ones([]))
fn_op = fn.get_concrete_function(
tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.float32),
variables.Variable(1.))
self.assertEqual(
['a', 'b'],
[inp.op.name for inp in fn_op.inputs])
self.assertEqual(
[b'a', b'b'],
[inp.op.get_attr('_user_specified_name') for inp in fn_op.inputs])
self.assertEqual(2, len(fn_op.graph.structured_outputs))
def testDictReturned(self, function_decorator):
@function_decorator
def fn(x, z=(1., 2.), y=3.):
z1, z2 = z
return {'alpha': x + y + z1, 'beta': x * y + z2}
# Call the function to make def_function happy
fn(array_ops.ones([]))
fn_op = fn.get_concrete_function(
x=tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.float32),
y=tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32))
self.assertEqual(
['x', 'y'],
[inp.op.name for inp in fn_op.inputs])
self.assertEqual(
[b'x', b'y'],
[inp.op.get_attr('_user_specified_name') for inp in fn_op.inputs])
self.assertEqual({'alpha', 'beta'},
set(fn_op.graph.structured_outputs.keys()))
with self.assertRaisesRegexp(ValueError, "two arguments named 'z'"):
fn.get_concrete_function(
z=(tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32)),
y=tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32,
name='custom'),
x=4.)
fn_op2 = fn.get_concrete_function(
z=(tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.float32,
name='z_first'),
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32,
name='z_second')),
y=tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='custom'),
x=4.)
self.assertEqual(
['z_first', 'z_second', 'custom'],
[inp.op.name for inp in fn_op2.inputs])
self.assertEqual(
[b'z_first', b'z_second', b'custom'],
[inp.op.get_attr('_user_specified_name') for inp in fn_op2.inputs])
fn_op3 = fn.get_concrete_function(
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='custom'),
z=(tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.float32,
name='z1'),
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='z2')),
y=tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32))
self.assertEqual(
['custom', 'z1', 'z2', 'y'],
[inp.op.name for inp in fn_op3.inputs])
self.assertEqual(
[b'custom', b'z1', b'z2', b'y'],
[inp.op.get_attr('_user_specified_name') for inp in fn_op3.inputs])
def testMethod(self, function_decorator):
class HasMethod(object):
@function_decorator
def method(self, x):
return x
has_method = HasMethod()
# Call the function to make def_function happy
HasMethod.method(has_method, array_ops.ones([]))
class_op = HasMethod.method.get_concrete_function(
has_method, tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32))
self.assertEqual(
['x'],
[inp.op.name for inp in class_op.inputs])
self.assertEqual(
[b'x'],
[inp.op.get_attr('_user_specified_name') for inp in class_op.inputs])
# Call the function to make def_function happy
has_method.method(array_ops.ones([]))
method_op = has_method.method.get_concrete_function(
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32))
self.assertEqual(
['x'],
[inp.op.name for inp in method_op.inputs])
self.assertEqual(
[b'x'],
[inp.op.get_attr('_user_specified_name') for inp in method_op.inputs])
# TODO(allenl): It should be possible to override names when exporting. Do
# TensorSpec names need to go in cache keys? Or maybe get_concrete_function
# should always retrace?
self.skipTest('Not working')
method_op = has_method.method.get_concrete_function(
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='y'))
self.assertEqual(
['y'],
[inp.op.name for inp in method_op.inputs])
self.assertEqual(
[b'y'],
[inp.op.get_attr('_user_specified_name') for inp in method_op.inputs])
def testMethodSignature(self, function_decorator):
class HasMethod(object):
@function_decorator(
input_signature=(tensor_spec.TensorSpec(
shape=None, dtype=dtypes.float64, name='y'),))
def method(self, x):
hash(self) # No weak proxies passed as `self`
return x
has_method = HasMethod()
# Call the function to make def_function happy
has_method.method(array_ops.ones([], dtype=dtypes.float64))
method_op = has_method.method.get_concrete_function()
self.assertEqual(
['y'],
[inp.op.name for inp in method_op.inputs])
self.assertEqual(
[b'y'],
[inp.op.get_attr('_user_specified_name') for inp in method_op.inputs])
method_op2 = has_method.method.get_concrete_function()
self.assertEqual(
['y'],
[inp.op.name for inp in method_op2.inputs])
self.assertEqual(
[b'y'],
[inp.op.get_attr('_user_specified_name') for inp in method_op2.inputs])
def testVariadic(self, function_decorator):
@function_decorator
def variadic_fn(x, *args, **kwargs):
return x + math_ops.add_n(list(args) + list(kwargs.values()))
# Call the function to make def_function happy
variadic_fn(array_ops.ones([]), array_ops.ones([]))
variadic_op = variadic_fn.get_concrete_function(
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32, name='y'),
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32,
name='second_variadic'),
z=tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
zz=tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='cust'))
self.assertEqual(
['x', 'y', 'args', 'second_variadic', 'z', 'cust'],
[inp.op.name for inp in variadic_op.inputs])
self.assertEqual(
[b'x', b'y', b'args', b'second_variadic', b'z', b'cust'],
[inp.op.get_attr('_user_specified_name')
for inp in variadic_op.inputs])
def testVariadicInputSignature(self, function_decorator):
@function_decorator(
input_signature=(
tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32, name='y'),
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='z'),
))
def variadic_fn(x, *args):
return x + math_ops.add_n(list(args))
# Call the function to make def_function happy
variadic_fn(array_ops.ones([]), array_ops.ones([]),
array_ops.ones([]), array_ops.ones([]))
variadic_op = variadic_fn.get_concrete_function()
self.assertIn(b'variadic_fn', variadic_op.name)
self.assertEqual(
['x', 'y', 'args', 'z'],
[inp.op.name for inp in variadic_op.inputs])
self.assertEqual(
[b'x', b'y', b'args', b'z'],
[inp.op.get_attr('_user_specified_name')
for inp in variadic_op.inputs])
if __name__ == '__main__':
ops.enable_eager_execution(
config=config_pb2.ConfigProto(device_count={'CPU': 4}))
test.main()
| 40.262548
| 80
| 0.654776
|
e7c08a5b8148b4852ce28accb4d7f6cea0b47e2d
| 1,212
|
py
|
Python
|
sample/1.number.py
|
Hyper-Devil/learn-python3
|
c097158c48b9f6804c8c46aebf00025249089bf6
|
[
"MIT"
] | null | null | null |
sample/1.number.py
|
Hyper-Devil/learn-python3
|
c097158c48b9f6804c8c46aebf00025249089bf6
|
[
"MIT"
] | null | null | null |
sample/1.number.py
|
Hyper-Devil/learn-python3
|
c097158c48b9f6804c8c46aebf00025249089bf6
|
[
"MIT"
] | null | null | null |
a = 1
b = 1.0
c = 10000
# In Python 3, long integers no longer need the trailing L suffix
print('你好世界')
# In Python 3, print is a function: print()
print('hello world!')
print(a)
print(b)
print(c)
str = 'hello world'
# Strings
print('字符串')
print(str)
print(str[0])
# Slicing; indices start at 0
print(str[2:5])
# Slices are half-open: start inclusive, end exclusive
print(str[2:6])
# Note that this output ends with a space
print(str[2:])
# Prints the substring starting from the 3rd character
print(str * 2)
# Prints the string twice
print(str * 2)
print(str + 'TEST')
# Prints the two strings concatenated
list_a = ["str", 1, ["a", "b", "c"], 4]
'''
Lists
An ordered, mutable container. Values of different types can be stored in the
same list; there is no single-type restriction. Defined with [] square brackets.
'''
list_b = ["hello"]
print('列表')
print(list_a[0])
print(list_a[1:3])
# Prints a list containing the 2nd and 3rd elements
print(list_a[1:])
# Prints a list of the elements from the 2nd onward
print(list_b * 2)
# Prints a list containing list_b's elements twice
print(list_a + list_b)
# Prints the concatenation of list_a and list_b
tuple_a = ("str", 1, ["a", "b", "c"], 4)
# Tuples: like lists but immutable once assigned; defined with () parentheses
tuple_b = ("hello", )
# A tuple with a single element needs a trailing comma to avoid ambiguity
print('元组')
print(tuple_a[0])
print(tuple_a[1:3])
print(tuple_a[1:])
print(tuple_b * 2)
print(tuple_a + tuple_b)
dict_a = {"name": "Alan", "age": 24, 1: "level_1"}
print('字典')
print(dict_a["name"])
# Lookup
print(dict_a["age"])
print(dict_a[1])
print("name" in dict_a)
# Check whether a key exists
print("xxx" in dict_a)
print(dict_a.keys())
print(dict_a.values())
print(dict_a.items())
dict_a[2] = 'level_2'
# Insert new data; existing keys can be overwritten
print(dict_a[2])
| 16.16
| 50
| 0.660891
|
1b6abae3e9d18ab3b79a4d7e0f830ee28e21e24f
| 67,227
|
py
|
Python
|
src/hapPyTango/CosNotifyChannelAdmin_skel/__init__.py
|
mguijarr/hapPyTango
|
2506c8e83d93fbd2c0a0115983489d59c74caa2f
|
[
"MIT"
] | 1
|
2020-10-28T16:57:36.000Z
|
2020-10-28T16:57:36.000Z
|
src/hapPyTango/CosNotifyChannelAdmin_skel/__init__.py
|
mguijarr/hapPyTango
|
2506c8e83d93fbd2c0a0115983489d59c74caa2f
|
[
"MIT"
] | null | null | null |
src/hapPyTango/CosNotifyChannelAdmin_skel/__init__.py
|
mguijarr/hapPyTango
|
2506c8e83d93fbd2c0a0115983489d59c74caa2f
|
[
"MIT"
] | null | null | null |
""" Module: IDL:omg.org/CosNotifyChannelAdmin:1.0
Automagically generated by:-
The ORB called Fnorb v1.1.Return.of.Fnorb
"""
_FNORB_ID = "IDL:omg.org/CosNotifyChannelAdmin:1.0"
# Fnorb modules.
import Fnorb.orb.CORBA
import Fnorb.orb.TypeManager
import Fnorb.orb.Util
# Import base interface packages.
import CosNotification_skel
import CosNotifyFilter_skel
class ProxyConsumer_skel(Fnorb.orb.CORBA.Object_skel, CosNotification_skel.QoSAdmin_skel, CosNotifyFilter_skel.FilterAdmin_skel):
""" Interface: IDL:omg.org/CosNotifyChannelAdmin/ProxyConsumer:1.0 """
_FNORB_ID = "IDL:omg.org/CosNotifyChannelAdmin/ProxyConsumer:1.0"
def _skel__get_MyType(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/ProxyConsumer/MyType:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ProxyType:1.0"))
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_MyType()
# Create the reply.
server_request.results(results)
return
def _skel__get_MyAdmin(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/ProxyConsumer/MyAdmin:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_Object)
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_MyAdmin()
# Create the reply.
server_request.results(results)
return
def _skel_obtain_subscription_types(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/ProxyConsumer/obtain_subscription_types:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ObtainInfoMode:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotification/EventTypeSeq:1.0"))
# Typecodes for user exceptions.
exceptions = []
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.obtain_subscription_types, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_validate_event_qos(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/ProxyConsumer/validate_event_qos:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotification/QoSProperties:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotification/NamedPropertyRangeSeq:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotification/UnsupportedQoS:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.validate_event_qos, arguments)
# Create the reply.
server_request.results(results)
return
# Import base interface packages.
import CosNotification_skel
import CosNotifyFilter_skel
class ProxySupplier_skel(Fnorb.orb.CORBA.Object_skel, CosNotification_skel.QoSAdmin_skel, CosNotifyFilter_skel.FilterAdmin_skel):
""" Interface: IDL:omg.org/CosNotifyChannelAdmin/ProxySupplier:1.0 """
_FNORB_ID = "IDL:omg.org/CosNotifyChannelAdmin/ProxySupplier:1.0"
def _skel__get_MyType(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/ProxySupplier/MyType:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ProxyType:1.0"))
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_MyType()
# Create the reply.
server_request.results(results)
return
def _skel__get_MyAdmin(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/ProxySupplier/MyAdmin:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_Object)
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_MyAdmin()
# Create the reply.
server_request.results(results)
return
def _skel__get_priority_filter(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/ProxySupplier/priority_filter:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_Object)
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_priority_filter()
# Create the reply.
server_request.results(results)
return
def _skel__set_priority_filter(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/ProxySupplier/priority_filter:1.0 """
# Typecode for the attribute value.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_Object)
# Initialise the server request object.
server_request.initialise(inputs, [], [])
# Unmarshal the attribute value.
value = server_request.arguments()[0]
# Invoke the implementation.
results = self._set_priority_filter(value)
# Create the reply.
server_request.results(results)
return
def _skel__get_lifetime_filter(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/ProxySupplier/lifetime_filter:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_Object)
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_lifetime_filter()
# Create the reply.
server_request.results(results)
return
def _skel__set_lifetime_filter(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/ProxySupplier/lifetime_filter:1.0 """
# Typecode for the attribute value.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_Object)
# Initialise the server request object.
server_request.initialise(inputs, [], [])
# Unmarshal the attribute value.
value = server_request.arguments()[0]
# Invoke the implementation.
results = self._set_lifetime_filter(value)
# Create the reply.
server_request.results(results)
return
def _skel_obtain_offered_types(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/ProxySupplier/obtain_offered_types:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ObtainInfoMode:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotification/EventTypeSeq:1.0"))
# Typecodes for user exceptions.
exceptions = []
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.obtain_offered_types, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_validate_event_qos(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/ProxySupplier/validate_event_qos:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotification/QoSProperties:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotification/NamedPropertyRangeSeq:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotification/UnsupportedQoS:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.validate_event_qos, arguments)
# Create the reply.
server_request.results(results)
return
# Import base interface packages.
import CosNotifyComm_skel
class ProxyPushConsumer_skel(Fnorb.orb.CORBA.Object_skel, ProxyConsumer_skel, CosNotifyComm_skel.PushConsumer_skel):
""" Interface: IDL:omg.org/CosNotifyChannelAdmin/ProxyPushConsumer:1.0 """
_FNORB_ID = "IDL:omg.org/CosNotifyChannelAdmin/ProxyPushConsumer:1.0"
def _skel_connect_any_push_supplier(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/ProxyPushConsumer/connect_any_push_supplier:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_Object)
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventChannelAdmin/AlreadyConnected:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.connect_any_push_supplier, arguments)
# Create the reply.
server_request.results(results)
return
# Import base interface packages.
import CosNotifyComm_skel
class StructuredProxyPushConsumer_skel(Fnorb.orb.CORBA.Object_skel, ProxyConsumer_skel, CosNotifyComm_skel.StructuredPushConsumer_skel):
""" Interface: IDL:omg.org/CosNotifyChannelAdmin/StructuredProxyPushConsumer:1.0 """
_FNORB_ID = "IDL:omg.org/CosNotifyChannelAdmin/StructuredProxyPushConsumer:1.0"
def _skel_connect_structured_push_supplier(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/StructuredProxyPushConsumer/connect_structured_push_supplier:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_Object)
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventChannelAdmin/AlreadyConnected:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.connect_structured_push_supplier, arguments)
# Create the reply.
server_request.results(results)
return
# Import base interface packages.
import CosNotifyComm_skel
class SequenceProxyPushConsumer_skel(Fnorb.orb.CORBA.Object_skel, ProxyConsumer_skel, CosNotifyComm_skel.SequencePushConsumer_skel):
""" Interface: IDL:omg.org/CosNotifyChannelAdmin/SequenceProxyPushConsumer:1.0 """
_FNORB_ID = "IDL:omg.org/CosNotifyChannelAdmin/SequenceProxyPushConsumer:1.0"
def _skel_connect_sequence_push_supplier(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/SequenceProxyPushConsumer/connect_sequence_push_supplier:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_Object)
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventChannelAdmin/AlreadyConnected:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.connect_sequence_push_supplier, arguments)
# Create the reply.
server_request.results(results)
return
# Import base interface packages.
import CosNotifyComm_skel
class ProxyPullSupplier_skel(Fnorb.orb.CORBA.Object_skel, ProxySupplier_skel, CosNotifyComm_skel.PullSupplier_skel):
""" Interface: IDL:omg.org/CosNotifyChannelAdmin/ProxyPullSupplier:1.0 """
_FNORB_ID = "IDL:omg.org/CosNotifyChannelAdmin/ProxyPullSupplier:1.0"
def _skel_connect_any_pull_consumer(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/ProxyPullSupplier/connect_any_pull_consumer:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_Object)
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventChannelAdmin/AlreadyConnected:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.connect_any_pull_consumer, arguments)
# Create the reply.
server_request.results(results)
return
# Import base interface packages.
import CosNotifyComm_skel
class StructuredProxyPullSupplier_skel(Fnorb.orb.CORBA.Object_skel, ProxySupplier_skel, CosNotifyComm_skel.StructuredPullSupplier_skel):
""" Interface: IDL:omg.org/CosNotifyChannelAdmin/StructuredProxyPullSupplier:1.0 """
_FNORB_ID = "IDL:omg.org/CosNotifyChannelAdmin/StructuredProxyPullSupplier:1.0"
def _skel_connect_structured_pull_consumer(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/StructuredProxyPullSupplier/connect_structured_pull_consumer:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_Object)
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventChannelAdmin/AlreadyConnected:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.connect_structured_pull_consumer, arguments)
# Create the reply.
server_request.results(results)
return
# Import base interface packages.
import CosNotifyComm_skel
class SequenceProxyPullSupplier_skel(Fnorb.orb.CORBA.Object_skel, ProxySupplier_skel, CosNotifyComm_skel.SequencePullSupplier_skel):
""" Interface: IDL:omg.org/CosNotifyChannelAdmin/SequenceProxyPullSupplier:1.0 """
_FNORB_ID = "IDL:omg.org/CosNotifyChannelAdmin/SequenceProxyPullSupplier:1.0"
def _skel_connect_sequence_pull_consumer(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/SequenceProxyPullSupplier/connect_sequence_pull_consumer:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_Object)
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventChannelAdmin/AlreadyConnected:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.connect_sequence_pull_consumer, arguments)
# Create the reply.
server_request.results(results)
return
# Import base interface packages.
import CosNotifyComm_skel
class ProxyPullConsumer_skel(Fnorb.orb.CORBA.Object_skel, ProxyConsumer_skel, CosNotifyComm_skel.PullConsumer_skel):
""" Interface: IDL:omg.org/CosNotifyChannelAdmin/ProxyPullConsumer:1.0 """
_FNORB_ID = "IDL:omg.org/CosNotifyChannelAdmin/ProxyPullConsumer:1.0"
def _skel_connect_any_pull_supplier(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/ProxyPullConsumer/connect_any_pull_supplier:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_Object)
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventChannelAdmin/AlreadyConnected:1.0"))
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventChannelAdmin/TypeError:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.connect_any_pull_supplier, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_suspend_connection(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/ProxyPullConsumer/suspend_connection:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ConnectionAlreadyInactive:1.0"))
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/NotConnected:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# This operation has no arguments.
arguments = ()
# Invoke the implementation.
results = apply(self.suspend_connection, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_resume_connection(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/ProxyPullConsumer/resume_connection:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ConnectionAlreadyActive:1.0"))
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/NotConnected:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# This operation has no arguments.
arguments = ()
# Invoke the implementation.
results = apply(self.resume_connection, arguments)
# Create the reply.
server_request.results(results)
return
# Import base interface packages.
import CosNotifyComm_skel
class StructuredProxyPullConsumer_skel(Fnorb.orb.CORBA.Object_skel, ProxyConsumer_skel, CosNotifyComm_skel.StructuredPullConsumer_skel):
""" Interface: IDL:omg.org/CosNotifyChannelAdmin/StructuredProxyPullConsumer:1.0 """
_FNORB_ID = "IDL:omg.org/CosNotifyChannelAdmin/StructuredProxyPullConsumer:1.0"
def _skel_connect_structured_pull_supplier(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/StructuredProxyPullConsumer/connect_structured_pull_supplier:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_Object)
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventChannelAdmin/AlreadyConnected:1.0"))
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventChannelAdmin/TypeError:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.connect_structured_pull_supplier, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_suspend_connection(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/StructuredProxyPullConsumer/suspend_connection:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ConnectionAlreadyInactive:1.0"))
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/NotConnected:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# This operation has no arguments.
arguments = ()
# Invoke the implementation.
results = apply(self.suspend_connection, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_resume_connection(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/StructuredProxyPullConsumer/resume_connection:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ConnectionAlreadyActive:1.0"))
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/NotConnected:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# This operation has no arguments.
arguments = ()
# Invoke the implementation.
results = apply(self.resume_connection, arguments)
# Create the reply.
server_request.results(results)
return
# Import base interface packages.
import CosNotifyComm_skel
class SequenceProxyPullConsumer_skel(Fnorb.orb.CORBA.Object_skel, ProxyConsumer_skel, CosNotifyComm_skel.SequencePullConsumer_skel):
""" Interface: IDL:omg.org/CosNotifyChannelAdmin/SequenceProxyPullConsumer:1.0 """
_FNORB_ID = "IDL:omg.org/CosNotifyChannelAdmin/SequenceProxyPullConsumer:1.0"
def _skel_connect_sequence_pull_supplier(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/SequenceProxyPullConsumer/connect_sequence_pull_supplier:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_Object)
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventChannelAdmin/AlreadyConnected:1.0"))
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventChannelAdmin/TypeError:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.connect_sequence_pull_supplier, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_suspend_connection(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/SequenceProxyPullConsumer/suspend_connection:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ConnectionAlreadyInactive:1.0"))
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/NotConnected:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# This operation has no arguments.
arguments = ()
# Invoke the implementation.
results = apply(self.suspend_connection, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_resume_connection(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/SequenceProxyPullConsumer/resume_connection:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ConnectionAlreadyActive:1.0"))
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/NotConnected:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# This operation has no arguments.
arguments = ()
# Invoke the implementation.
results = apply(self.resume_connection, arguments)
# Create the reply.
server_request.results(results)
return
# Import base interface packages.
import CosNotifyComm_skel
class ProxyPushSupplier_skel(Fnorb.orb.CORBA.Object_skel, ProxySupplier_skel, CosNotifyComm_skel.PushSupplier_skel):
""" Interface: IDL:omg.org/CosNotifyChannelAdmin/ProxyPushSupplier:1.0 """
_FNORB_ID = "IDL:omg.org/CosNotifyChannelAdmin/ProxyPushSupplier:1.0"
def _skel_connect_any_push_consumer(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/ProxyPushSupplier/connect_any_push_consumer:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_Object)
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventChannelAdmin/AlreadyConnected:1.0"))
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventChannelAdmin/TypeError:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.connect_any_push_consumer, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_suspend_connection(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/ProxyPushSupplier/suspend_connection:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ConnectionAlreadyInactive:1.0"))
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/NotConnected:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# This operation has no arguments.
arguments = ()
# Invoke the implementation.
results = apply(self.suspend_connection, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_resume_connection(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/ProxyPushSupplier/resume_connection:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ConnectionAlreadyActive:1.0"))
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/NotConnected:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# This operation has no arguments.
arguments = ()
# Invoke the implementation.
results = apply(self.resume_connection, arguments)
# Create the reply.
server_request.results(results)
return
# Import base interface packages.
import CosNotifyComm_skel
class StructuredProxyPushSupplier_skel(Fnorb.orb.CORBA.Object_skel, ProxySupplier_skel, CosNotifyComm_skel.StructuredPushSupplier_skel):
""" Interface: IDL:omg.org/CosNotifyChannelAdmin/StructuredProxyPushSupplier:1.0 """
_FNORB_ID = "IDL:omg.org/CosNotifyChannelAdmin/StructuredProxyPushSupplier:1.0"
def _skel_connect_structured_push_consumer(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/StructuredProxyPushSupplier/connect_structured_push_consumer:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_Object)
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventChannelAdmin/AlreadyConnected:1.0"))
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventChannelAdmin/TypeError:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.connect_structured_push_consumer, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_suspend_connection(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/StructuredProxyPushSupplier/suspend_connection:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ConnectionAlreadyInactive:1.0"))
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/NotConnected:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# This operation has no arguments.
arguments = ()
# Invoke the implementation.
results = apply(self.suspend_connection, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_resume_connection(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/StructuredProxyPushSupplier/resume_connection:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ConnectionAlreadyActive:1.0"))
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/NotConnected:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# This operation has no arguments.
arguments = ()
# Invoke the implementation.
results = apply(self.resume_connection, arguments)
# Create the reply.
server_request.results(results)
return
# Import base interface packages.
import CosNotifyComm_skel
class SequenceProxyPushSupplier_skel(Fnorb.orb.CORBA.Object_skel, ProxySupplier_skel, CosNotifyComm_skel.SequencePushSupplier_skel):
""" Interface: IDL:omg.org/CosNotifyChannelAdmin/SequenceProxyPushSupplier:1.0 """
_FNORB_ID = "IDL:omg.org/CosNotifyChannelAdmin/SequenceProxyPushSupplier:1.0"
def _skel_connect_sequence_push_consumer(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/SequenceProxyPushSupplier/connect_sequence_push_consumer:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_Object)
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventChannelAdmin/AlreadyConnected:1.0"))
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosEventChannelAdmin/TypeError:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.connect_sequence_push_consumer, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_suspend_connection(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/SequenceProxyPushSupplier/suspend_connection:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ConnectionAlreadyInactive:1.0"))
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/NotConnected:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# This operation has no arguments.
arguments = ()
# Invoke the implementation.
results = apply(self.suspend_connection, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_resume_connection(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/SequenceProxyPushSupplier/resume_connection:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ConnectionAlreadyActive:1.0"))
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/NotConnected:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# This operation has no arguments.
arguments = ()
# Invoke the implementation.
results = apply(self.resume_connection, arguments)
# Create the reply.
server_request.results(results)
return
# Import base interface packages.
import CosNotification_skel
import CosNotifyComm_skel
import CosNotifyFilter_skel
import CosEventChannelAdmin_skel
class ConsumerAdmin_skel(Fnorb.orb.CORBA.Object_skel, CosNotification_skel.QoSAdmin_skel, CosNotifyComm_skel.NotifySubscribe_skel, CosNotifyFilter_skel.FilterAdmin_skel, CosEventChannelAdmin_skel.ConsumerAdmin_skel):
""" Interface: IDL:omg.org/CosNotifyChannelAdmin/ConsumerAdmin:1.0 """
_FNORB_ID = "IDL:omg.org/CosNotifyChannelAdmin/ConsumerAdmin:1.0"
def _skel__get_MyID(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/ConsumerAdmin/MyID:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/AdminID:1.0"))
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_MyID()
# Create the reply.
server_request.results(results)
return
def _skel__get_MyChannel(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/ConsumerAdmin/MyChannel:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_Object)
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_MyChannel()
# Create the reply.
server_request.results(results)
return
def _skel__get_MyOperator(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/ConsumerAdmin/MyOperator:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/InterFilterGroupOperator:1.0"))
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_MyOperator()
# Create the reply.
server_request.results(results)
return
def _skel__get_priority_filter(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/ConsumerAdmin/priority_filter:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_Object)
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_priority_filter()
# Create the reply.
server_request.results(results)
return
def _skel__set_priority_filter(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/ConsumerAdmin/priority_filter:1.0 """
# Typecode for the attribute value.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_Object)
# Initialise the server request object.
server_request.initialise(inputs, [], [])
# Unmarshal the attribute value.
value = server_request.arguments()[0]
# Invoke the implementation.
results = self._set_priority_filter(value)
# Create the reply.
server_request.results(results)
return
def _skel__get_lifetime_filter(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/ConsumerAdmin/lifetime_filter:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_Object)
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_lifetime_filter()
# Create the reply.
server_request.results(results)
return
def _skel__set_lifetime_filter(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/ConsumerAdmin/lifetime_filter:1.0 """
# Typecode for the attribute value.
inputs = []
inputs.append(Fnorb.orb.CORBA.TC_Object)
# Initialise the server request object.
server_request.initialise(inputs, [], [])
# Unmarshal the attribute value.
value = server_request.arguments()[0]
# Invoke the implementation.
results = self._set_lifetime_filter(value)
# Create the reply.
server_request.results(results)
return
def _skel__get_pull_suppliers(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/ConsumerAdmin/pull_suppliers:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ProxyIDSeq:1.0"))
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_pull_suppliers()
# Create the reply.
server_request.results(results)
return
def _skel__get_push_suppliers(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/ConsumerAdmin/push_suppliers:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ProxyIDSeq:1.0"))
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_push_suppliers()
# Create the reply.
server_request.results(results)
return
def _skel_get_proxy_supplier(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/ConsumerAdmin/get_proxy_supplier:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ProxyID:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_Object)
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ProxyNotFound:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.get_proxy_supplier, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_obtain_notification_pull_supplier(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/ConsumerAdmin/obtain_notification_pull_supplier:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ClientType:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_Object)
outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ProxyID:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/AdminLimitExceeded:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.obtain_notification_pull_supplier, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_obtain_notification_push_supplier(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/ConsumerAdmin/obtain_notification_push_supplier:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ClientType:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_Object)
outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ProxyID:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/AdminLimitExceeded:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.obtain_notification_push_supplier, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_destroy(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/ConsumerAdmin/destroy:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# This operation has no arguments.
arguments = ()
# Invoke the implementation.
results = apply(self.destroy, arguments)
# Create the reply.
server_request.results(results)
return
# Import base interface packages.
import CosNotification_skel
import CosNotifyComm_skel
import CosNotifyFilter_skel
import CosEventChannelAdmin_skel
class SupplierAdmin_skel(Fnorb.orb.CORBA.Object_skel, CosNotification_skel.QoSAdmin_skel, CosNotifyComm_skel.NotifyPublish_skel, CosNotifyFilter_skel.FilterAdmin_skel, CosEventChannelAdmin_skel.SupplierAdmin_skel):
""" Interface: IDL:omg.org/CosNotifyChannelAdmin/SupplierAdmin:1.0 """
_FNORB_ID = "IDL:omg.org/CosNotifyChannelAdmin/SupplierAdmin:1.0"
def _skel__get_MyID(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/SupplierAdmin/MyID:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/AdminID:1.0"))
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_MyID()
# Create the reply.
server_request.results(results)
return
def _skel__get_MyChannel(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/SupplierAdmin/MyChannel:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_Object)
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_MyChannel()
# Create the reply.
server_request.results(results)
return
def _skel__get_MyOperator(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/SupplierAdmin/MyOperator:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/InterFilterGroupOperator:1.0"))
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_MyOperator()
# Create the reply.
server_request.results(results)
return
def _skel__get_pull_consumers(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/SupplierAdmin/pull_consumers:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ProxyIDSeq:1.0"))
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_pull_consumers()
# Create the reply.
server_request.results(results)
return
def _skel__get_push_consumers(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/SupplierAdmin/push_consumers:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ProxyIDSeq:1.0"))
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_push_consumers()
# Create the reply.
server_request.results(results)
return
def _skel_get_proxy_consumer(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/SupplierAdmin/get_proxy_consumer:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ProxyID:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_Object)
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ProxyNotFound:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.get_proxy_consumer, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_obtain_notification_pull_consumer(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/SupplierAdmin/obtain_notification_pull_consumer:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ClientType:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_Object)
outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ProxyID:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/AdminLimitExceeded:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.obtain_notification_pull_consumer, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_obtain_notification_push_consumer(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/SupplierAdmin/obtain_notification_push_consumer:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ClientType:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_Object)
outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ProxyID:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/AdminLimitExceeded:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.obtain_notification_push_consumer, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_destroy(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/SupplierAdmin/destroy:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
# Typecodes for user exceptions.
exceptions = []
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# This operation has no arguments.
arguments = ()
# Invoke the implementation.
results = apply(self.destroy, arguments)
# Create the reply.
server_request.results(results)
return
# Import base interface packages.
import CosNotification_skel
import CosNotification_skel
import CosEventChannelAdmin_skel
class EventChannel_skel(Fnorb.orb.CORBA.Object_skel, CosNotification_skel.QoSAdmin_skel, CosNotification_skel.AdminPropertiesAdmin_skel, CosEventChannelAdmin_skel.EventChannel_skel):
""" Interface: IDL:omg.org/CosNotifyChannelAdmin/EventChannel:1.0 """
_FNORB_ID = "IDL:omg.org/CosNotifyChannelAdmin/EventChannel:1.0"
def _skel__get_MyFactory(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/EventChannel/MyFactory:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_Object)
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_MyFactory()
# Create the reply.
server_request.results(results)
return
def _skel__get_default_consumer_admin(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/EventChannel/default_consumer_admin:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_Object)
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_default_consumer_admin()
# Create the reply.
server_request.results(results)
return
def _skel__get_default_supplier_admin(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/EventChannel/default_supplier_admin:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_Object)
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_default_supplier_admin()
# Create the reply.
server_request.results(results)
return
def _skel__get_default_filter_factory(self, server_request):
""" Attribute: IDL:omg.org/CosNotifyChannelAdmin/EventChannel/default_filter_factory:1.0 """
# Typecode for the attribute value.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_Object)
# Initialise the server request object.
server_request.initialise([], outputs, [])
# Invoke the implementation.
results = self._get_default_filter_factory()
# Create the reply.
server_request.results(results)
return
def _skel_new_for_consumers(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/EventChannel/new_for_consumers:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/InterFilterGroupOperator:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_Object)
outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/AdminID:1.0"))
# Typecodes for user exceptions.
exceptions = []
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.new_for_consumers, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_new_for_suppliers(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/EventChannel/new_for_suppliers:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/InterFilterGroupOperator:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_Object)
outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/AdminID:1.0"))
# Typecodes for user exceptions.
exceptions = []
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.new_for_suppliers, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_get_consumeradmin(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/EventChannel/get_consumeradmin:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/AdminID:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_Object)
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/AdminNotFound:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.get_consumeradmin, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_get_supplieradmin(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/EventChannel/get_supplieradmin:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/AdminID:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_Object)
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/AdminNotFound:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.get_supplieradmin, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_get_all_consumeradmins(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/EventChannel/get_all_consumeradmins:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/AdminIDSeq:1.0"))
# Typecodes for user exceptions.
exceptions = []
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# This operation has no arguments.
arguments = ()
# Invoke the implementation.
results = apply(self.get_all_consumeradmins, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_get_all_supplieradmins(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/EventChannel/get_all_supplieradmins:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/AdminIDSeq:1.0"))
# Typecodes for user exceptions.
exceptions = []
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# This operation has no arguments.
arguments = ()
# Invoke the implementation.
results = apply(self.get_all_supplieradmins, arguments)
# Create the reply.
server_request.results(results)
return
class EventChannelFactory_skel(Fnorb.orb.CORBA.Object_skel):
""" Interface: IDL:omg.org/CosNotifyChannelAdmin/EventChannelFactory:1.0 """
_FNORB_ID = "IDL:omg.org/CosNotifyChannelAdmin/EventChannelFactory:1.0"
def _skel_create_channel(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/EventChannelFactory/create_channel:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotification/QoSProperties:1.0"))
inputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotification/AdminProperties:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_Object)
outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ChannelID:1.0"))
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotification/UnsupportedQoS:1.0"))
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotification/UnsupportedAdmin:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.create_channel, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_get_all_channels(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/EventChannelFactory/get_all_channels:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ChannelIDSeq:1.0"))
# Typecodes for user exceptions.
exceptions = []
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# This operation has no arguments.
arguments = ()
# Invoke the implementation.
results = apply(self.get_all_channels, arguments)
# Create the reply.
server_request.results(results)
return
def _skel_get_event_channel(self, server_request):
""" Operation: IDL:omg.org/CosNotifyChannelAdmin/EventChannelFactory/get_event_channel:1.0 """
# Typecodes for 'in' and 'inout' parameters.
inputs = []
inputs.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ChannelID:1.0"))
# Typecodes for the result, 'inout' and 'out' parameters.
outputs = []
outputs.append(Fnorb.orb.CORBA.TC_Object)
# Typecodes for user exceptions.
exceptions = []
exceptions.append(Fnorb.orb.CORBA.typecode("IDL:omg.org/CosNotifyChannelAdmin/ChannelNotFound:1.0"))
# Initialise the server request object.
server_request.initialise(inputs, outputs, exceptions)
# Unmarshal the arguments to the request.
arguments = server_request.arguments()
# Invoke the implementation.
results = apply(self.get_event_channel, arguments)
# Create the reply.
server_request.results(results)
return
#############################################################################
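# Illustrative sketch (hypothetical, not part of the generated file above):
# a servant only needs to subclass one of these skeletons and supply plain
# Python methods with the names that the _skel_* dispatchers invoke.  The
# class below is a minimal example factory servant; the channel bookkeeping
# is a placeholder, no real EventChannel objects are created, and any
# skeleton base-class initialisation requirements are omitted.
class ExampleEventChannelFactory(EventChannelFactory_skel):
    def __init__(self):
        self._channels = {}     # placeholder map: ChannelID -> channel reference
        self._next_id = 0

    def create_channel(self, initial_qos, initial_admin):
        # A real servant would create and activate an EventChannel servant
        # here; this sketch only records a placeholder reference and returns
        # the (channel, ChannelID) pair the skeleton marshals back.
        self._next_id = self._next_id + 1
        channel = None
        self._channels[self._next_id] = channel
        return (channel, self._next_id)

    def get_all_channels(self):
        # Corresponds to the ChannelIDSeq result declared above.
        return self._channels.keys()

    def get_event_channel(self, channel_id):
        if channel_id not in self._channels:
            # A real servant would raise CosNotifyChannelAdmin.ChannelNotFound.
            raise KeyError(channel_id)
        return self._channels[channel_id]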
avg_line_length: 34.868776 | max_line_length: 216 | alphanum_fraction: 0.681839

hexsha: 2ff55050e42ae030c060c932064218690c231573 | size: 931 | ext: py | lang: Python
repo_path: database/invitrodb/tox21_antag_summary.py | repo_name: mshobair/invitro_cheminformatics
repo_head_hexsha: 17201496c73453accd440646a1ee81726119a59c | licenses: ["MIT"]
(stars, issues and forks column groups repeat the same repo fields; their counts and event datetimes are null)
content:
import datetime
from database.database_schemas import Schemas
from sqlalchemy import Column, ForeignKey, Integer, String, DateTime
from sqlalchemy.dialects.mysql import BIGINT, SMALLINT, DOUBLE, TIMESTAMP, TINYINT
from database.base import Base
class Tox21AntagSummary(Base):
"""Maps to tox21_antag_summary table in invitrodb databases."""
__tablename__ = 'tox21_antag_summary'
__table_args__ = {'schema': Schemas.invitrodb_schema}
Assay_Name = Column(String)
Protocol_Name = Column(String)
Name_of_control_compound = Column(String)
Concentration_used_in_antagonist_mode = Column(String)
Hill_Coef = Column(String)
Calculating_ECXX = Column(String)
Online_Validation_EC50 = Column(String)
Online_Screening_EC50 = Column(String)
Validation_sample_size = Column(String)
Screening_sample_size = Column(String)
aid = Column(BIGINT, primary_key=True)  # assumed primary key: SQLAlchemy's declarative mapper requires one; adjust if the real table keys differently
Antag_assay_name = Column(String)
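# Illustrative usage sketch (not part of the original module): with an engine
# pointing at an invitrodb instance, the mapped class above can be queried
# like any other declarative model.  The connection URL below is a
# placeholder, not a real endpoint.
if __name__ == "__main__":
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine("mysql+pymysql://user:password@localhost/invitrodb")  # placeholder credentials
    Session = sessionmaker(bind=engine)
    session = Session()

    # Print a few rows of the antagonist-mode summary table.
    for row in session.query(Tox21AntagSummary).limit(5):
        print(row.aid, row.Assay_Name, row.Protocol_Name)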
avg_line_length: 30.032258 | max_line_length: 82 | alphanum_fraction: 0.769066

hexsha: f4005934a82afad03466fcf5b747c48c61e5f8a2 | size: 22702 | ext: py | lang: Python
repo_path: test/test_cmake_parser.py | repo_name: eurogroep/catkin_lint
repo_head_hexsha: 987d8e1378b4963e4d36032b9410e13c7bbab8f3 | licenses: ["BSD-3-Clause"]
(stars, issues and forks column groups repeat the same repo fields; their counts and event datetimes are null)
content:
# coding=utf-8
#
# catkin_lint
# Copyright (c) 2013-2020 Fraunhofer FKIE
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Fraunhofer organization nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import catkin_lint.cmake as cmake
class CMakeParserTest(unittest.TestCase):
def parse_all(self, s, var=None, env_var=None, location=None):
result = []
ctxt = cmake.ParserContext()
for cmd, args, arg_tokens, (fname, line, column) in ctxt.parse(s, var=var, env_var=env_var):
if cmd == "#catkin_lint" and args and args[0] == "skip":
ctxt.skip_block()
continue
if location is None:
result.append((cmd, args))
elif location == 1:
result.append((cmd, args, line))
elif location == 2:
result.append((cmd, args, line, column))
return result
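# Note (illustrative, not one of the tests): parse_all() above drives
# cmake.ParserContext().parse(), which yields one
# (command, args, arg_tokens, (fname, line, column)) tuple per parsed
# command.  Outside the test harness the parser can be used directly,
# for example:
#
#   ctxt = cmake.ParserContext()
#   for cmd, args, _tokens, (fname, line, column) in ctxt.parse("project(demo)"):
#       print(line, cmd, args)
#
# "project(demo)" here is arbitrary example CMake code.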
def test_empty(self):
"""Test CMake parser with empty file"""
self.assertEqual(
self.parse_all(""),
[]
)
def test_generator_expressions(self):
"""Test CMake parser generator expressions"""
self.assertEqual(
self.parse_all("command($<0:ignore_me>)"),
[("command", [])]
)
def test_command(self):
"""Test CMake parser command parsing"""
self.assertEqual(
self.parse_all("command()"),
[("command", [])]
)
self.assertEqual(
self.parse_all("MiXeDCaSe()"),
[("MiXeDCaSe", [])]
)
self.assertRaises(cmake.CMakeSyntaxError, self.parse_all, "unbalanced(")
self.assertRaises(cmake.CMakeSyntaxError, self.parse_all, "invalid%=characters$()")
self.assertRaises(cmake.CMakeSyntaxError, self.parse_all, "()")
self.assertRaises(cmake.CMakeSyntaxError, self.parse_all, "missing_braces")
self.assertRaises(cmake.CMakeSyntaxError, self.parse_all, "cmd();")
self.assertRaises(cmake.CMakeSyntaxError, self.parse_all, "cmd cmd()")
def test_string(self):
"""Test CMake parser string parsing"""
self.assertEqual(
self.parse_all('cmd("simple string")'),
[("cmd", ["simple string"])]
)
self.assertEqual(
self.parse_all('cmd("string with \\"quote\\"")'),
[("cmd", ['string with "quote"'])]
)
self.assertEqual(
self.parse_all('cmd("string that spans\nmultiple lines")'),
[("cmd", ['string that spans\nmultiple lines'])]
)
self.assertEqual(
self.parse_all('cmd("\\\\"\\")'),
[("cmd", ['\\', '"'])]
)
def test_macro(self):
"""Test CMake parser macro expansion"""
self.assertEqual(
self.parse_all("macro(test) cmd() endmacro() test()"),
[("macro", ["test"]), ("endmacro", []), ("cmd", [])]
)
self.assertEqual(
self.parse_all("macro(test) cmd() test() endmacro() test()"),
[("macro", ["test"]), ("endmacro", []), ("cmd", [])]
)
self.assertEqual(
self.parse_all("macro(test) cmd(${global}) test() endmacro() test()", {"global": "value"}),
[("macro", ["test"]), ("endmacro", []), ("cmd", ["value"])]
)
self.assertEqual(
self.parse_all("macro(test arg) cmd(${arg}) endmacro() test(fun)"),
[("macro", ["test", "arg"]), ("endmacro", []), ("cmd", ["fun"])]
)
self.assertEqual(
self.parse_all("macro(test arg) cmd(${arg}) endmacro() test(local) cmd(${arg})", {"arg": "global"}),
[("macro", ["test", "arg"]), ("endmacro", []), ("cmd", ["local"]), ("cmd", ["global"])]
)
self.assertEqual(
self.parse_all('macro(test arg) cmd(${arg}) endmacro() test("one;two;three")'),
[("macro", ["test", "arg"]), ("endmacro", []), ("cmd", ["one", "two", "three"])]
)
self.assertEqual(
self.parse_all('macro(test arg) cmd(${arg}) cmd(${ARGN}) endmacro() test(one;two;three)'),
[("macro", ["test", "arg"]), ("endmacro", []), ("cmd", ["one"]), ("cmd", ["two", "three"])]
)
self.assertEqual(
self.parse_all('macro(test arg1 arg2) cmd("${arg2}") cmd(${ARGN}) endmacro() test(one)'),
[("macro", ["test", "arg1", "arg2"]), ("endmacro", []), ("cmd", [""]), ("cmd", [])]
)
self.assertEqual(
self.parse_all('macro(test arg) cmd("${arg}") endmacro() test("one;two;three")'),
[("macro", ["test", "arg"]), ("endmacro", []), ("cmd", ["one;two;three"])]
)
self.assertEqual(
self.parse_all('macro(test arg) cmd(${arg} ${ARGN}) endmacro() test(arg extra stuff)'),
[("macro", ["test", "arg"]), ("endmacro", []), ("cmd", ["arg", "extra", "stuff"])]
)
self.assertEqual(
self.parse_all('macro(TEST arg) cmd(${arg}) endmacro() test(value)'),
[("macro", ["TEST", "arg"]), ("endmacro", []), ("cmd", ["value"])]
)
self.assertRaises(cmake.CMakeSyntaxError, self.parse_all, "macro() endmacro()")
self.assertRaises(cmake.CMakeSyntaxError, self.parse_all, "macro(fun)")
def test_function(self):
"""Test CMake parser function definitions"""
self.assertEqual(
self.parse_all("function(test) cmd() endfunction() test()"),
[("function", ["test"]), ("endfunction", []), ("test", [])]
)
self.assertRaises(cmake.CMakeSyntaxError, self.parse_all, "function() endfunction()")
self.assertRaises(cmake.CMakeSyntaxError, self.parse_all, "function(fun)")
def test_foreach(self):
"""Test CMake parser foreach() loop handling"""
self.assertEqual(
self.parse_all('foreach(arg RANGE 2) cmd(${arg}) endforeach()'),
[("foreach", ["arg", "RANGE", "2"]), ("cmd", ["0"]), ("cmd", ["1"]), ("cmd", ["2"]), ("endforeach", [])]
)
self.assertEqual(
self.parse_all('foreach(arg RANGE 1 3) cmd(${arg}) endforeach()'),
[("foreach", ["arg", "RANGE", "1", "3"]), ("cmd", ["1"]), ("cmd", ["2"]), ("cmd", ["3"]), ("endforeach", [])]
)
self.assertEqual(
self.parse_all('foreach(arg RANGE 1 5 2) cmd(${arg}) endforeach()'),
[("foreach", ["arg", "RANGE", "1", "5", "2"]), ("cmd", ["1"]), ("cmd", ["3"]), ("cmd", ["5"]), ("endforeach", [])]
)
self.assertEqual(
self.parse_all('foreach(arg 1 2 3 4 5) endforeach()'),
[("foreach", ["arg", "1", "2", "3", "4", "5"]), ("endforeach", [])]
)
self.assertEqual(
self.parse_all('foreach(arg one) cmd(${global}) endforeach()', {"global": "value"}),
[("foreach", ["arg", "one"]), ("cmd", ["value"]), ("endforeach", [])]
)
self.assertEqual(
self.parse_all('foreach(arg IN LISTS dummy) cmd(${arg}) endforeach()', {"dummy": "one;two;three"}),
[("foreach", ["arg", "IN", "LISTS", "dummy"]), ("cmd", ["one"]), ("cmd", ["two"]), ("cmd", ["three"]), ("endforeach", [])]
)
self.assertEqual(
self.parse_all('foreach(arg IN ITEMS ${dummy}) cmd(${arg}) endforeach()', {"dummy": "one;two;three"}),
[("foreach", ["arg", "IN", "ITEMS", "one", "two", "three"]), ("cmd", ["one"]), ("cmd", ["two"]), ("cmd", ["three"]), ("endforeach", [])]
)
self.assertEqual(
self.parse_all('foreach(arg ${dummy}) cmd(${arg}) endforeach()', {"dummy": "one;two;three"}),
[("foreach", ["arg", "one", "two", "three"]), ("cmd", ["one"]), ("cmd", ["two"]), ("cmd", ["three"]), ("endforeach", [])]
)
self.assertEqual(
self.parse_all('foreach(arg) cmd(${arg}) endforeach()'),
[("foreach", ["arg"]), ("endforeach", [])]
)
self.assertEqual(
self.parse_all('foreach(a 1 2) foreach(b 3 4) cmd(${a} ${b}) endforeach() endforeach()'),
[("foreach", ["a", "1", "2"]),
("foreach", ["b", "3", "4"]),
("cmd", ["1", "3"]),
("cmd", ["1", "4"]),
("endforeach", []),
("foreach", ["b", "3", "4"]),
("cmd", ["2", "3"]),
("cmd", ["2", "4"]),
("endforeach", []),
("endforeach", [])]
)
self.assertEqual(
self.parse_all('FOREACH(a 1 2) FOREACH(b 3 4) cmd(${a} ${b}) ENDFOREACH() ENDFOREACH()'),
[("FOREACH", ["a", "1", "2"]),
("FOREACH", ["b", "3", "4"]),
("cmd", ["1", "3"]),
("cmd", ["1", "4"]),
("ENDFOREACH", []),
("FOREACH", ["b", "3", "4"]),
("cmd", ["2", "3"]),
("cmd", ["2", "4"]),
("ENDFOREACH", []),
("ENDFOREACH", [])]
)
self.assertRaises(cmake.CMakeSyntaxError, self.parse_all, "foreach(arg)")
self.assertRaises(cmake.CMakeSyntaxError, self.parse_all, "foreach(arg RANGE bla) endforeach()")
self.assertRaises(cmake.CMakeSyntaxError, self.parse_all, "foreach(arg RANGE 1 5 2 0) endforeach()")
self.assertRaises(cmake.CMakeSyntaxError, self.parse_all, "foreach() endforeach()")
def test_arguments(self):
"""Test CMake parser argument parsing"""
self.assertEqual(
self.parse_all("cmd(one two three)"),
[("cmd", ["one", "two", "three"])]
)
self.assertEqual(
self.parse_all("cmd(one two;three)"),
[("cmd", ["one", "two", "three"])]
)
self.assertEqual(
self.parse_all("cmd(one;two;three)"),
[("cmd", ["one", "two", "three"])]
)
self.assertEqual(
self.parse_all("cmd(one;two three)"),
[("cmd", ["one", "two", "three"])]
)
self.assertEqual(
self.parse_all('cmd("one;two" three)'),
[("cmd", ["one;two", "three"])]
)
self.assertEqual(
self.parse_all('cmd("one;two";three)'),
[("cmd", ["one;two", "three"])]
)
self.assertEqual(
self.parse_all('cmd(one;"two;three")'),
[("cmd", ["one", "two;three"])]
)
self.assertEqual(
self.parse_all('if(NOT (A OR B)) endif()'),
[("if", ["NOT", "(", "A", "OR", "B", ")"]), ("endif", [])]
)
self.assertEqual(
self.parse_all('cmd("(")'),
[("cmd", ["("])]
)
self.assertEqual(
self.parse_all('cmd(")")'),
[("cmd", [")"])]
)
self.assertEqual(
self.parse_all('cmd("\\"")'),
[("cmd", ['"'])]
)
self.assertEqual(
self.parse_all('cmd(\\")'),
[("cmd", ['"'])]
)
self.assertEqual(
self.parse_all('cmd(a\\ b)'),
[("cmd", ['a b'])]
)
self.assertEqual(
self.parse_all("cmd(ENV{PATH})"),
[("cmd", ["ENV{PATH}"])]
)
self.assertRaises(cmake.CMakeSyntaxError, self.parse_all, 'cmd("unclosed string)')
def test_substitution(self):
"""Test CMake parser variable substitution semantics"""
self.assertEqual(
self.parse_all("cmd(${args})", var={"args": "one;two;three"}),
[("cmd", ["one", "two", "three"])]
)
self.assertEqual(
self.parse_all("cmd(${missing})"),
[("cmd", [])]
)
self.assertEqual(
self.parse_all('cmd("${missing}")'),
[("cmd", [""])]
)
self.assertEqual(
self.parse_all("${fun}()", var={"fun": "cmd"}),
[("cmd", [])]
)
self.assertEqual(
self.parse_all("cmd(${args})", var={"args": "one two three"}),
[("cmd", ["one two three"])]
)
self.assertEqual(
self.parse_all('cmd("${args}")', var={"args": "one;two;three"}),
[("cmd", ["one;two;three"])]
)
self.assertEqual(
self.parse_all('cmd("\\${args}")', var={"args": "fail"}),
[("cmd", ["${args}"])]
)
self.assertEqual(
self.parse_all('cmd(\\${args})', var={"args": "fail"}),
[("cmd", ["${args}"])]
)
self.assertEqual(
self.parse_all('cmd(${args})', var={"args": "\\\\"}),
[("cmd", ["\\\\"])]
)
self.assertEqual(
self.parse_all('cmd(${args})', var={"args": "${looks_like_a_variable}"}),
[("cmd", ["${looks_like_a_variable}"])]
)
self.assertEqual(
self.parse_all('cmd(${args})', var={"args": ")"}),
[("cmd", [")"])]
)
self.assertEqual(
self.parse_all('cmd(fun ${args})', var={"args": "stuff"}),
[("cmd", ["fun", "stuff"])]
)
self.assertEqual(
self.parse_all("cmd($ENV{PATH})"),
[("cmd", ["$ENV{PATH}"])]
)
self.assertEqual(
self.parse_all("cmd($env{test})", env_var={"test": "foo"}),
[("cmd", ["$env{test}"])]
)
self.assertEqual(
self.parse_all("cmd($ENV{test})", env_var={"test": "foo"}),
[("cmd", ["foo"])]
)
self.assertEqual(
self.parse_all("cmd($ENV{Test})", env_var={"test": "foo"}),
[("cmd", ["$ENV{Test}"])]
)
def test_pragma(self):
"""Test CMake parser catkin_lint pragmas"""
self.assertEqual(
self.parse_all("# catkin_lint: extra space\n#catkin_lint:\n#catkin_lint: \n#catkin_lint: one two three \n#catkin_lint :\n"),
[("#catkin_lint", []), ("#catkin_lint", []), ("#catkin_lint", ["one", "two", "three"])]
)
def test_skip_block(self):
"""Test CMaker parser skip block"""
self.assertEqual(
self.parse_all("""\
#catkin_lint: skip
cmd()
"""),
[("cmd", [])]
)
self.assertEqual(
self.parse_all("""\
if()
endif()
#catkin_lint: skip
cmd()
"""),
[("if", []), ("endif", []), ("cmd", [])]
)
self.assertEqual(
self.parse_all("""\
#catkin_lint: skip
if(test)
endif()
"""),
[("if", ["test"]), ("endif", [])]
)
self.assertEqual(
self.parse_all("""\
if(condition) #catkin_lint: skip
cmd()
endif()
"""),
[("if", ["condition"]), ("endif", [])]
)
self.assertEqual(
self.parse_all("""\
if(condition) #catkin_lint: skip
cmd1()
else()
cmd2()
endif()
"""),
[("if", ["condition"]), ("else", []), ("cmd2", []), ("endif", [])]
)
self.assertEqual(
self.parse_all("""\
if(condition)
cmd1()
else() #catkin_lint: skip
cmd2()
endif()
"""),
[("if", ["condition"]), ("cmd1", []), ("else", []), ("endif", [])]
)
self.assertEqual(
self.parse_all("""\
foreach(arg 1 2)
cmd(${arg})
#catkin_lint: skip
do_not_parse_this()
endforeach()
"""),
[("foreach", ["arg", "1", "2"]), ("cmd", ["1"]), ("cmd", ["2"]), ("endforeach", [])]
)
self.assertEqual(
self.parse_all("""\
macro(test)
cmd()
#catkin_lint: skip
do_not_parse_this()
endmacro()
test()
"""),
[("macro", ["test"]), ("endmacro", []), ("cmd", [])]
)
self.assertEqual(
self.parse_all("""\
if(first) #catkin_lint: skip
if(second)
endif(second)
do_not_parse_this()
endif()
"""),
[("if", ["first"]), ("endif", [])]
)
self.assertEqual(
self.parse_all("""\
if(first) #catkin_lint: skip
foreach(second 1 2)
endforeach()
do_not_parse_this()
endif()
"""),
[("if", ["first"]), ("endif", [])]
)
self.assertEqual(
self.parse_all("""\
macro(test)
do_not_parse_this()
endmacro()
if(first) #catkin_lint: skip
test()
do_not_parse_this()
endif()
"""),
[("macro", ["test"]), ("endmacro", []), ("if", ["first"]), ("endif", [])]
)
def test_comments(self):
"""Test CMake parser comment handling"""
self.assertEqual(
self.parse_all("""\
# initial comment
cmd(one # first argument comment
two # second argument comment
three# third argument comment without space
)## closing comment
# commented-out command
# cmd()
"""),
[("cmd", ["one", "two", "three"])]
)
def test_line_numbering(self):
"""Test CMake parser line numbering"""
self.assertEqual(
self.parse_all("""\
cmd1()
cmd2(
)
# Comment
cmd3()
""", location=1),
[("cmd1", [], 1), ("cmd2", [], 2), ("cmd3", [], 5)]
)
self.assertEqual(
self.parse_all("cmd1()\rcmd2()\rcmd3()\r", location=1),
[("cmd1", [], 1), ("cmd2", [], 2), ("cmd3", [], 3)]
)
self.assertEqual(
self.parse_all("cmd1()\ncmd2()\ncmd3()\n", location=1),
[("cmd1", [], 1), ("cmd2", [], 2), ("cmd3", [], 3)]
)
self.assertEqual(
self.parse_all("cmd1()\r\ncmd2()\r\ncmd3()\r\n", location=1),
[("cmd1", [], 1), ("cmd2", [], 2), ("cmd3", [], 3)]
)
def test_line_columns(self):
"""Test CMake parser column numbering"""
self.assertEqual(
self.parse_all("cmd1()\n cmd2()\n cmd3()\n", location=2),
[("cmd1", [], 1, 1), ("cmd2", [], 2, 2), ("cmd3", [], 3, 3)]
)
def test_argparse(self):
"""Test CMake parser argparse utility function"""
self.assertRaises(RuntimeError, cmake.argparse, [], {"TEST": "xxx"})
opts, args = cmake.argparse([], {})
self.assertEqual({}, opts)
self.assertEqual([], args)
opts, args = cmake.argparse([], {"TEST": "-"})
self.assertEqual({"TEST": False}, opts)
self.assertEqual([], args)
opts, args = cmake.argparse([], {"TEST": "?"})
self.assertEqual({"TEST": None}, opts)
self.assertEqual([], args)
self.assertRaises(cmake.CMakeSyntaxError, cmake.argparse, [], {"TEST": "!"})
opts, args = cmake.argparse([], {"TEST": "*"})
self.assertEqual({"TEST": []}, opts)
self.assertEqual([], args)
self.assertRaises(cmake.CMakeSyntaxError, cmake.argparse, [], {"TEST": "+"})
opts, args = cmake.argparse([], {"TEST": "p"})
self.assertEqual({"TEST": {}}, opts)
self.assertEqual([], args)
opts, args = cmake.argparse(["argument", "BOOL"], {"BOOL": "-"})
self.assertEqual({"BOOL": True}, opts)
self.assertEqual(["argument"], args)
opts, args = cmake.argparse(["argument", "KEY", "value"], {"KEY": "?"})
self.assertEqual({"KEY": "value"}, opts)
self.assertEqual(["argument"], args)
opts, args = cmake.argparse(["argument", "KEY", "value"], {"KEY": "!"})
self.assertEqual({"KEY": "value"}, opts)
self.assertEqual(["argument"], args)
opts, args = cmake.argparse(["argument", "LIST", "value1", "value2"], {"LIST": "*"})
self.assertEqual({"LIST": ["value1", "value2"]}, opts)
self.assertEqual(["argument"], args)
opts, args = cmake.argparse(["argument", "LIST", "value1", "value2"], {"LIST": "+"})
self.assertEqual({"LIST": ["value1", "value2"]}, opts)
self.assertEqual(["argument"], args)
opts, args = cmake.argparse(["argument", "PROPERTIES", "key1", "value1", "key2", "value2"], {"PROPERTIES": "p"})
self.assertEqual({"PROPERTIES": {"key1": "value1", "key2": "value2"}}, opts)
self.assertEqual(["argument"], args)
opts, args = cmake.argparse(["PROPERTIES", "key1", "value1", "key2"], {"PROPERTIES": "p"})
self.assertEqual({"PROPERTIES": {"key1": "value1", "key2": ""}}, opts)
opts, args = cmake.argparse(["DOUBLE", "DOUBLE", "ARGUMENT", "ARGUMENT"], {"DOUBLE ARGUMENT": "?"})
self.assertEqual({"DOUBLE ARGUMENT": "ARGUMENT"}, opts)
self.assertEqual(["DOUBLE"], args)
opts, args = cmake.argparse(["BOOL", "argument"], {"BOOL": "-"})
self.assertEqual({"BOOL": True}, opts)
self.assertEqual(["argument"], args)
| 39.07401
| 148
| 0.481632
|
362991897db1fd56e898613756c2f16c0f64c89d
| 9,134
|
py
|
Python
|
scripts/igvSnapshots.py
|
skchronicles/ChIPSeqPeakCalling
|
24a8ad6736c7c9e3a29b20da1a092693f9c1c926
|
[
"MIT"
] | 3
|
2019-05-24T05:58:19.000Z
|
2021-12-20T11:52:29.000Z
|
scripts/igvSnapshots.py
|
skchronicles/PeakCalling
|
24a8ad6736c7c9e3a29b20da1a092693f9c1c926
|
[
"MIT"
] | null | null | null |
scripts/igvSnapshots.py
|
skchronicles/PeakCalling
|
24a8ad6736c7c9e3a29b20da1a092693f9c1c926
|
[
"MIT"
] | 4
|
2021-07-14T15:13:29.000Z
|
2021-12-20T11:52:33.000Z
|
##########################################################################################################
# ChIP-Seq Pipeline: PeakCaller -> IGV
# Author: Skyler Kuhn (NIH/NCI) [C]
# CCR Collaborative Bioinformatics Resource
# Version 1.0.3
# See readme.txt for more information
# USAGE:
# python igvSnapshots.py
# --n=5
# --narrowPeak_file=CHIP_Thpok_Biotin_vs_Input_Thpok_peaks.narrowPeak
# --treatmentBW_file=CHIP_Thpok_Biotin.R1.trim.not_blacklist_plus.sorted.mapq_gt_3.normalized.bw
# --inputBW_file=Input_Thpok.R1.trim.not_blacklist_plus.sorted.mapq_gt_3.normalized.bw
# --output_folder=SNAPTEST
# --genome=mm10
##########################################################################################################
# Imports
from __future__ import print_function, division
import subprocess as sp
import sys
import time
import os
def check_args(all_args):
"""
    :param all_args: # (this is a list of all provided command-line arguments)
    :return: arg_dict # an Exception is raised if the wrong number of arguments is provided
    TL;DR: This function checks the provided command-line arguments and then validates them;
    an Exception is raised if they are not valid!
"""
def parse_args(args): # maybe look into using a decorator here, the more python-ic way
"""
        Generator that yields (switch, argument) pairs from the argument list.
:param args: (the list of args provided by sys.argv)
:return:
"""
for i in range(1, len(args)-1, 2): # the zero-th index is the program name
j = i + 1
yield args[i], args[j]
def invalid_usage_message():
"""
:return: docstring error message
        TL;DR: An error message is returned if the wrong number of command-line args is provided
"""
return """Failed to Provide Required Input Arguments:
--n
--narrowPeak_file
--treatmentBW_file
--inputBW_file
--output_folder
--genome\n* Invalid Input Arguments provided *
\nUsage:\npython igvSnapshots.py --n=5 --narrowPeak_file=narrow.narrowPeak --treatmentBW_file=treatment.bw --inputBW_file=inputfile.bw --output_folder=FolderName --genome=mm10
"""
if len(all_args) != 7 and len(all_args) != 13: # maybe use an assert statement here
raise Exception(invalid_usage_message())
    arg_dict = {}  # k:switch (ex. --n), v:argument (ex. 5)
    if len(all_args) == 13:  # space-separated form, e.g. --n 5
arg_dict = {switch.lstrip("-"): argument for switch, argument in parse_args(args=all_args)}
else: # 6 args formatted like this: python ChiPSeqhelper.py --n=1 --narrowfile=narrowFH.txt ...(etc.)
for arg in all_args[1:]:
stripped_args_list = arg.lstrip("-").split("=")
arg_dict[stripped_args_list[0]] = stripped_args_list[1]
return arg_dict
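# Hypothetical illustration (not called by the pipeline): check_args accepts the
# '--switch=value' form shown below (7 items including the script name) as well as
# the space-separated '--switch value' form (13 items). All file names here are
# placeholders.
def _example_check_args():
    argv = ["igvSnapshots.py", "--n=5",
            "--narrowPeak_file=peaks.narrowPeak",
            "--treatmentBW_file=treatment.bw",
            "--inputBW_file=input.bw",
            "--output_folder=SNAPS",
            "--genome=mm10"]
    # Returns {'n': '5', 'narrowPeak_file': 'peaks.narrowPeak', ...}
    return check_args(all_args=argv)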
def benchmarker(any_function):
"""
:param any_function: (function to be timed)
:return: String giving timing info for a given function
    TL;DR: Decorator that takes a function as a parameter and outputs its timing info (benchmarking)
"""
def timing_wrapper(*args, **kwargs):
t1 = time.time()
any_function(*args, **kwargs)
t2 = time.time()
return "Time it took to run {} function:{}\n".format(any_function.__name__, str(t2 - t1))
return timing_wrapper
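# Minimal usage sketch for the benchmarker decorator above (hypothetical; this
# function is not part of the original pipeline and is never called). Note that
# the decorator replaces the wrapped function's return value with a timing string,
# so it suits ad-hoc profiling rather than production calls.
@benchmarker
def _example_timed_step():
    """Hypothetical stand-in for a pipeline step whose runtime we want to measure."""
    time.sleep(0.25)
# Calling _example_timed_step() would return a string such as
# "Time it took to run _example_timed_step function:0.25..."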
class ChipSeqPipeline(object):
"""
    Takes a dictionary of validated arguments from check_args, sorts & selects the MACS CLT output, and then calls the IGV CLT
"""
def __init__(self, args_dict):
# Attributes
self.args_dict = args_dict
self.n = args_dict['n']
self.genome = args_dict['genome']
self.narrow_peaks = args_dict['narrowPeak_file']
self.sortedNnarrowpeaks = self.narrow_peaks.split(".")[0] + "_SORTEDqValue_TOP50.narrowPeaks" # Output Name
self.treatment_bw = args_dict['treatmentBW_file']
self.input_bw = args_dict['inputBW_file']
self.output_folder = args_dict['output_folder']
# Methods
#self.validate() # insert check args into this section
self.run()
def validate(self): # insert check_args code into here
"""
takes a list of __init__ attributes
:return: boolean
"""
pass
def createIGVscript(self, inputBWfile, treatBWfile, sortedNarrowPeaksfile, genome, maxPanelheight=500,
padding=500, snapdirectory="snapstest"):
"""
:param inputBWfile: (ex. input.bw)
:param treatBWfile: (ex. treatment.bw)
:param genome: (ex. hg19, mm10, etc.)
:param sortedNarrowPeaksfile: # Used to grab chr# and start/stop positions
:param maxPanelheight: (default: 500px) # maximum screen shot panel size
:param padding: (default: -/+ 500bp) # left and right padding adjustments added to start/stop positions
:param snapdirectory: (default:IGVsnaps) # Name of directory where the files will be stored
------------------------------------
        Example IGV script:
new
snapshotDirectory IGV_Snapshots
load test_alignments.bam
genome hg19
maxPanelHeight 500
goto chr1:713167-714758
snapshot chr1_713167_714758_h500.png
goto chr1:713500-714900
snapshot chr1_713500_714900_h500.png
exit
:return: (igv_batch_script.txt)
"""
def get_workingDir():
"""
Used for finding absolute paths
:return: returns the current working directory
"""
return os.getcwd()
working_directory = get_workingDir()
narrowFH = open(sortedNarrowPeaksfile, "r")
outFH = open("igv_batch_script.txt", "w")
# Writing out to the batch script file
outFH.write("new\n")
outFH.write("snapshotDirectory {}\n".format(snapdirectory))
outFH.write("load {}\n".format(os.path.join(working_directory, treatBWfile)))
outFH.write("load {}\n".format(os.path.join(working_directory, inputBWfile)))
outFH.write("genome {}\n".format(genome))
outFH.write("maxPanelHeight {}\n".format(maxPanelheight))
for line in narrowFH:
linelist = line.split()
chr = linelist[0] # chromosome number
            start = int(linelist[1]) - int(padding)  # start_position minus some left padding
            stop = int(linelist[2]) + int(padding)  # stop_position plus some right padding
outFH.write("goto {}:{}-{}\n".format(chr, start, stop))
outFH.write("snapshot {}_{}_h{}.png\n".format(chr, start, stop, maxPanelheight))
outFH.write("exit")
outFH.close()
narrowFH.close()
def __run(self, command_list, pipe):
"""
Private method used to run commands in shell
When running pipe="yes", it runs:
sort -k9nr,9 CHIP_Thpok_Biotin_vs_Input_Thpok_peaks.narrowPeak | head -50 > outputfile.narrowfile
:param command_list:
        :param pipe: specify whether you want to pipe commands ("yes" or "no")
:return:
"""
if pipe == "yes":
p1 = sp.Popen(command_list, stdout=sp.PIPE)
p2 = sp.Popen("head -{}".format(self.n).split(), stdin=p1.stdout, stdout=sp.PIPE)
p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits.
output = p2.communicate()[0]
fh = open(self.sortedNnarrowpeaks, "w")
fh.write(output)
fh.close()
elif pipe == "no":
sp.Popen(command_list).wait()
def run(self):
"""
        Sorts the output of MACS (by decreasing q-value) & selects the top 'n' results,
        calls the IGV CLT, loops through results -> saves screenshots to an output folder
        :return: Output folder with IGV n-peaks results
"""
self.validate()
self.__run("mkdir --p {}".format(self.output_folder).split(), "no")
self.__run("sort -k9nr,9 {}".format(self.narrow_peaks).split(), "yes")
self.__run("echo module load igv".split(), "no")
self.createIGVscript(self.input_bw, self.treatment_bw, self.sortedNnarrowpeaks, genome=self.genome,
maxPanelheight=500, padding=500, snapdirectory=self.output_folder)
self.__run("igv -m 40g -b igv_batch_script.txt".split(), "no")
#self.__run("echo Clean up the directory as needed-- rm any un-needed files!".split())
def __str__(self):
return "Parameters: {}".format(self.args_dict)
def main():
"""
Pseudo-main method
:return:
"""
    # Checking the arguments passed through the command line
arg_list = sys.argv
print(arg_list)
args_dict = check_args(all_args=arg_list)
print(args_dict)
# Start working with interfacing into the Pipeline
#for command in run_shell_commands(parsed_args=args_dict):
# sp.Popen("echo {}".format(command).split()).wait() # Popen takes a list as parameter
useChIPSeq = ChipSeqPipeline(args_dict)
print(useChIPSeq)
if __name__ == "__main__":
main()
| 39.541126
| 187
| 0.614408
|
0bbf603a743155dd872aaec0166eee3b125a5c33
| 2,443
|
py
|
Python
|
test/net/integration/tcp/test.py
|
anna328p/IncludeOS
|
b9858b8e6842d6c0cc227a34a015e7e8de9438ab
|
[
"Apache-2.0"
] | null | null | null |
test/net/integration/tcp/test.py
|
anna328p/IncludeOS
|
b9858b8e6842d6c0cc227a34a015e7e8de9438ab
|
[
"Apache-2.0"
] | null | null | null |
test/net/integration/tcp/test.py
|
anna328p/IncludeOS
|
b9858b8e6842d6c0cc227a34a015e7e8de9438ab
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import socket
import sys
import os
includeos_src = os.environ.get('INCLUDEOS_SRC',
os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))).split('/test')[0])
sys.path.insert(0,includeos_src + "/test")
import vmrunner
# Usage: python test.py $GUEST_IP $HOST_IP
GUEST = '10.0.0.44' if (len(sys.argv) < 2) else sys.argv[1]
HOST = '10.0.0.1' if (len(sys.argv) < 3) else sys.argv[2]
def TCP_test():
def connect(port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (GUEST, port)
print >>sys.stderr, 'connecting to %s port %s' % server_address
sock.connect(server_address)
try:
while True:
data = sock.recv(1024)
#print >>sys.stderr, '%s' % data
if data:
                    sock.sendall(data)
else:
break
finally:
print >>sys.stderr, 'closing socket'
sock.close()
return
connect(8081)
connect(8082)
connect(8083)
connect(8084)
def listen(port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_address = (HOST, port)
print >>sys.stderr, 'starting up on %s port %s' % server_address
sock.bind(server_address)
sock.listen(1)
while True:
connection, client_address = sock.accept()
try:
print >>sys.stderr, 'connection from', client_address
while True:
data = connection.recv(1024)
if data:
print >>sys.stderr, 'received data, sending data back to the client'
connection.sendall(data)
print >>sys.stderr, 'close connection to client'
connection.close()
else:
print >>sys.stderr, 'no more data from', client_address
break
finally:
connection.close()
break
sock.close()
return
listen(8085)
# Get an auto-created VM from the vmrunner
vm = vmrunner.vms[0]
# Add custom event-handler
vm.on_output("IncludeOS TCP test", TCP_test)
# Boot the VM, taking a timeout as parameter
vm.make().boot(80)
| 30.160494
| 120
| 0.546459
|
2924f8c5c2bfffa9cf86f04239c65439bb167693
| 1,080
|
py
|
Python
|
vapour/links/connections/affine_transform.py
|
speedcell4/vapour
|
c00b9b8fffddf0b134bec3ebb26d961e0468194a
|
[
"MIT"
] | null | null | null |
vapour/links/connections/affine_transform.py
|
speedcell4/vapour
|
c00b9b8fffddf0b134bec3ebb26d961e0468194a
|
[
"MIT"
] | null | null | null |
vapour/links/connections/affine_transform.py
|
speedcell4/vapour
|
c00b9b8fffddf0b134bec3ebb26d961e0468194a
|
[
"MIT"
] | null | null | null |
import chainer.functions as F
import chainer.links as L
from chainer import Variable, Chain
import numpy as np
__all__ = [
'AffineTransform',
]
class AffineTransform(Chain):
def __init__(self, *in_sizes: int, out_size: int,
nonlinear=F.tanh, nobias: bool = False, initialW=None, initial_bias=None) -> None:
super(AffineTransform, self).__init__()
self.in_size = sum(in_sizes)
self.out_size = out_size
self.nonlinear = nonlinear
self.nobias = nobias
self.fc = L.Linear(self.in_size, self.out_size, nobias, initialW, initial_bias)
def __call__(self, *xs: Variable, axis: int = 1) -> Variable:
return self.fc(F.concat(xs, axis=axis))
if __name__ == '__main__':
affine_transform = AffineTransform(3, 4, 5, out_size=6)
x1 = Variable(np.random.random((1, 3)).astype(np.float32))
x2 = Variable(np.random.random((1, 4)).astype(np.float32))
x3 = Variable(np.random.random((1, 5)).astype(np.float32))
y = affine_transform(x1, x2, x3)
print(f'y :: {y.shape} => {y.data}')
| 30
| 99
| 0.652778
|
3e349b44a16def7bcb16178ba5ee1e96ba60152b
| 920
|
py
|
Python
|
pid_contoller/pid_controller/main.py
|
matthaeusheer/playground
|
407086c8070cf71280b426db61fbe03034283760
|
[
"MIT"
] | null | null | null |
pid_contoller/pid_controller/main.py
|
matthaeusheer/playground
|
407086c8070cf71280b426db61fbe03034283760
|
[
"MIT"
] | 1
|
2020-11-14T09:42:28.000Z
|
2020-11-14T09:42:28.000Z
|
pid_contoller/pid_controller/main.py
|
matthaeusheer/playground
|
407086c8070cf71280b426db61fbe03034283760
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
from pid_controller.controller import Sensor, MassSystem, PidController, Gain
from pid_controller.loop import closed_loop
from pid_controller.visualization import plot_control_loop_output
def run_pid_control(init_state, init_velocity, desired_position, system_noise_std, sensor_noise_std, delta_time,
p_gain, d_gain, i_gain, mass, eps, max_steps, max_time, gravity):
system = MassSystem(init_state, init_velocity, system_noise_std,
mass=mass, delta_time=delta_time, gravity=gravity)
sensor = Sensor(noise_std=sensor_noise_std)
gains = Gain(p_gain, d_gain, i_gain)
pid_controller = PidController(gains)
output_generator = closed_loop(system, pid_controller, sensor, desired_position,
eps, delta_time, max_time, max_steps)
plot_control_loop_output(output_generator)
plt.show()
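# Minimal usage sketch (not part of the original script): the numeric values below
# are illustrative placeholders, not tuned controller gains or physical constants
# taken from the project.
if __name__ == '__main__':
    run_pid_control(init_state=0.0, init_velocity=0.0, desired_position=1.0,
                    system_noise_std=0.01, sensor_noise_std=0.05, delta_time=0.01,
                    p_gain=2.0, d_gain=0.5, i_gain=0.1, mass=1.0, eps=1e-3,
                    max_steps=10000, max_time=30.0, gravity=9.81)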
| 41.818182
| 112
| 0.740217
|
c26fef29ffec3f3054e3da8b6ac97df82591154b
| 7,359
|
py
|
Python
|
scripts/automation/trex_control_plane/client_utils/trex_yaml_gen.py
|
timgates42/trex-core
|
efe94752fcb2d0734c83d4877afe92a3dbf8eccd
|
[
"Apache-2.0"
] | 956
|
2015-06-24T15:04:55.000Z
|
2022-03-30T06:25:04.000Z
|
scripts/automation/trex_control_plane/client_utils/trex_yaml_gen.py
|
angelyouyou/trex-core
|
fddf78584cae285d9298ef23f9f5c8725e16911e
|
[
"Apache-2.0"
] | 782
|
2015-09-20T15:19:00.000Z
|
2022-03-31T23:52:05.000Z
|
scripts/automation/trex_control_plane/client_utils/trex_yaml_gen.py
|
angelyouyou/trex-core
|
fddf78584cae285d9298ef23f9f5c8725e16911e
|
[
"Apache-2.0"
] | 429
|
2015-06-27T19:34:21.000Z
|
2022-03-23T11:02:51.000Z
|
#!/router/bin/python
import pprint
import yaml
import os
# import bisect
class CTRexYaml(object):
"""
This class functions as a YAML generator according to TRex YAML format.
CTRexYaml is compatible with both Python 2 and Python 3.
"""
YAML_TEMPLATE = [{'cap_info': [],
'duration': 10.0,
'generator': {'clients_end': '16.0.1.255',
'clients_per_gb': 201,
'clients_start': '16.0.0.1',
'distribution': 'seq',
'dual_port_mask': '1.0.0.0',
'min_clients': 101,
'servers_end': '48.0.0.255',
'servers_start': '48.0.0.1',
'tcp_aging': 1,
'udp_aging': 1},
'mac' : [0x00,0x00,0x00,0x01,0x00,0x00]}]
PCAP_TEMPLATE = {'cps': 1.0,
'ipg': 10000,
'name': '',
'rtt': 10000,
'w': 1}
def __init__ (self, trex_files_path):
"""
The initialization of this class creates a CTRexYaml object with **empty** 'cap-info', and with default client-server configuration.
Use class methods to add and assign pcap files and export the data to a YAML file.
:parameters:
trex_files_path : str
a path (on TRex server side) for the pcap files using which TRex can access it.
"""
self.yaml_obj = list(CTRexYaml.YAML_TEMPLATE)
self.empty_cap = True
self.file_list = []
self.yaml_dumped = False
self.trex_files_path = trex_files_path
def add_pcap_file (self, local_pcap_path):
"""
Adds a .pcap file with recorded traffic to the yaml object by linking the file with 'cap-info' template key fields.
:parameters:
local_pcap_path : str
a path (on client side) for the pcap file to be added.
:return:
+ The index of the inserted item (as int) if item added successfully
+ -1 if pcap file already exists in 'cap_info'.
"""
new_pcap = dict(CTRexYaml.PCAP_TEMPLATE)
new_pcap['name'] = self.trex_files_path + os.path.basename(local_pcap_path)
if self.get_pcap_idx(new_pcap['name']) != -1:
# pcap already exists in 'cap_info'
return -1
else:
self.yaml_obj[0]['cap_info'].append(new_pcap)
if self.empty_cap:
self.empty_cap = False
self.file_list.append(local_pcap_path)
return ( len(self.yaml_obj[0]['cap_info']) - 1)
def get_pcap_idx (self, pcap_name):
"""
Checks if a certain .pcap file has been added into the yaml object.
:parameters:
pcap_name : str
the name of the pcap file to be searched
:return:
+ The index of the pcap file (as int) if exists
+ -1 if not exists.
"""
comp_pcap = pcap_name if pcap_name.startswith(self.trex_files_path) else (self.trex_files_path + pcap_name)
for idx, pcap in enumerate(self.yaml_obj[0]['cap_info']):
print (pcap['name'] == comp_pcap)
if pcap['name'] == comp_pcap:
return idx
# pcap file wasn't found
return -1
def dump_as_python_obj (self):
"""
dumps with nice indentation the pythonic format (dictionaries and lists) of the currently built yaml object.
:parameters:
None
:return:
None
"""
pprint.pprint(self.yaml_obj)
def dump(self):
"""
dumps with nice indentation the YAML format of the currently built yaml object.
:parameters:
None
:return:
None
"""
print (yaml.safe_dump(self.yaml_obj, default_flow_style = False))
def to_yaml(self, filename):
"""
        Exports the built configuration into an actual YAML file.
:parameters:
filename : str
a path (on client side, including filename) to store the generated yaml file.
:return:
None
:raises:
            + :exc:`ValueError`, in case no pcap file has been added to the object.
+ :exc:`EnvironmentError`, in case of any IO error of writing to the files or OSError when trying to open it for writing.
"""
if self.empty_cap:
raise ValueError("No .pcap file has been assigned to yaml object. Must add at least one")
else:
try:
with open(filename, 'w') as yaml_file:
yaml_file.write( yaml.safe_dump(self.yaml_obj, default_flow_style = False) )
self.yaml_dumped = True
self.file_list.append(filename)
except EnvironmentError as inst:
raise
def set_cap_info_param (self, param, value, seq):
"""
Set cap-info parameters' value of a specific pcap file.
:parameters:
param : str
the name of the parameters to be set.
value : int/float
the desired value to be set to `param` key.
seq : int
an index to the relevant caps array to be changed (index supplied when adding new pcap file, see :func:`add_pcap_file`).
:return:
**True** on success
:raises:
:exc:`IndexError`, in case an out-of range index was given.
"""
try:
self.yaml_obj[0]['cap_info'][seq][param] = value
return True
except IndexError:
return False
def set_generator_param (self, param, value):
"""
Set generator parameters' value of the yaml object.
:parameters:
param : str
the name of the parameters to be set.
value : int/float/str
the desired value to be set to `param` key.
:return:
None
"""
self.yaml_obj[0]['generator'][param] = value
def get_file_list(self):
"""
Returns a list of all files related to the YAML object, including the YAML filename itself.
.. tip:: This method is especially useful for listing all the files that should be pushed to TRex server as part of the same yaml selection.
:parameters:
None
:return:
a list of filepaths, each is a local client-machine file path.
"""
if not self.yaml_dumped:
print ("WARNING: .yaml file wasn't dumped yet. Files list contains only .pcap files")
return self.file_list
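# Hypothetical usage sketch (not part of the original module); the server-side
# directory and pcap filename below are placeholders. The function is defined for
# illustration only and never called.
def _example_build_yaml(trex_files_path='/tmp/trex_files/', local_pcap='example.pcap'):
    yaml_obj = CTRexYaml(trex_files_path)
    idx = yaml_obj.add_pcap_file(local_pcap)       # link the pcap into 'cap_info'
    yaml_obj.set_cap_info_param('cps', 2.0, idx)   # adjust connections-per-second for that cap
    yaml_obj.set_generator_param('tcp_aging', 0)   # tweak a generator-level setting
    yaml_obj.dump()                                # print the resulting YAML to stdout
    return yaml_obj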
if __name__ == "__main__":
pass
| 34.549296
| 149
| 0.511754
|
9e81f01caca9aa03d47bf9ebc5ae8a483191a874
| 11,951
|
py
|
Python
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/network/fortios/fortios_system_vxlan.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 17
|
2017-06-07T23:15:01.000Z
|
2021-08-30T14:32:36.000Z
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/network/fortios/fortios_system_vxlan.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 32
|
2018-10-09T04:13:42.000Z
|
2020-05-11T07:20:28.000Z
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/network/fortios/fortios_system_vxlan.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 11
|
2018-10-09T00:14:53.000Z
|
2021-11-03T10:54:09.000Z
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_vxlan
short_description: Configure VXLAN devices in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system feature and vxlan category.
      Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
system_vxlan:
description:
- Configure VXLAN devices.
default: null
type: dict
suboptions:
dstport:
description:
- VXLAN destination port (1 - 65535).
type: int
interface:
description:
- Outgoing interface for VXLAN encapsulated traffic. Source system.interface.name.
type: str
ip_version:
description:
- IP version to use for the VXLAN interface and so for communication over the VXLAN. IPv4 or IPv6 unicast or multicast.
type: str
choices:
- ipv4-unicast
- ipv6-unicast
- ipv4-multicast
- ipv6-multicast
multicast_ttl:
description:
- VXLAN multicast TTL (1-255).
type: int
name:
description:
- VXLAN device or interface name. Must be a unique interface name.
required: true
type: str
remote_ip:
description:
- IPv4 address of the VXLAN interface on the device at the remote end of the VXLAN.
type: list
suboptions:
ip:
description:
- IPv4 address.
required: true
type: str
remote_ip6:
description:
- IPv6 IP address of the VXLAN interface on the device at the remote end of the VXLAN.
type: list
suboptions:
ip6:
description:
- IPv6 address.
required: true
type: str
vni:
description:
- VXLAN network ID.
type: int
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure VXLAN devices.
fortios_system_vxlan:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
system_vxlan:
dstport: "3"
interface: "<your_own_value> (source system.interface.name)"
ip_version: "ipv4-unicast"
multicast_ttl: "6"
name: "default_name_7"
remote_ip:
-
ip: "<your_own_value>"
remote_ip6:
-
ip6: "<your_own_value>"
vni: "12"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_system_vxlan_data(json):
option_list = ['dstport', 'interface', 'ip_version',
'multicast_ttl', 'name', 'remote_ip',
'remote_ip6', 'vni']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for elem in data:
elem = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
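# Illustrative sketch (not used by the module): underscore_to_hyphen converts
# Ansible-style keys such as 'ip_version' into the hyphenated keys expected by the
# FortiOS API. The sample payload below is hypothetical and the helper is never called.
def _underscore_to_hyphen_example():
    sample = {'ip_version': 'ipv4-unicast', 'multicast_ttl': 8}
    converted = underscore_to_hyphen(sample)
    assert converted == {'ip-version': 'ipv4-unicast', 'multicast-ttl': 8}
    return converted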
def system_vxlan(data, fos):
vdom = data['vdom']
state = data['state']
system_vxlan_data = data['system_vxlan']
filtered_data = underscore_to_hyphen(filter_system_vxlan_data(system_vxlan_data))
if state == "present":
return fos.set('system',
'vxlan',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('system',
'vxlan',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system(data, fos):
if data['system_vxlan']:
resp = system_vxlan(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"system_vxlan": {
"required": False, "type": "dict", "default": None,
"options": {
"dstport": {"required": False, "type": "int"},
"interface": {"required": False, "type": "str"},
"ip_version": {"required": False, "type": "str",
"choices": ["ipv4-unicast", "ipv6-unicast", "ipv4-multicast",
"ipv6-multicast"]},
"multicast_ttl": {"required": False, "type": "int"},
"name": {"required": True, "type": "str"},
"remote_ip": {"required": False, "type": "list",
"options": {
"ip": {"required": True, "type": "str"}
}},
"remote_ip6": {"required": False, "type": "list",
"options": {
"ip6": {"required": True, "type": "str"}
}},
"vni": {"required": False, "type": "int"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_system(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_system(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| 31.203655
| 139
| 0.566396
|
8f0c87ee7393b990da82c9073fa68798eec89d3e
| 1,042
|
py
|
Python
|
test/integration/test_customers.py
|
wolever/netsuite-sdk-py
|
1b1c21e2a8a532fdbf54915e7e9d30b8b5fc2d08
|
[
"MIT"
] | 47
|
2019-08-15T21:36:36.000Z
|
2022-03-18T23:44:59.000Z
|
test/integration/test_customers.py
|
wolever/netsuite-sdk-py
|
1b1c21e2a8a532fdbf54915e7e9d30b8b5fc2d08
|
[
"MIT"
] | 52
|
2019-06-17T09:43:04.000Z
|
2022-03-22T05:00:53.000Z
|
test/integration/test_customers.py
|
wolever/netsuite-sdk-py
|
1b1c21e2a8a532fdbf54915e7e9d30b8b5fc2d08
|
[
"MIT"
] | 55
|
2019-06-02T22:18:01.000Z
|
2022-03-29T07:20:31.000Z
|
import logging
import json
logger = logging.getLogger(__name__)
def test_get(nc):
data = nc.customers.get_all()
logger.debug('data = %s', data)
    assert data, 'get all did not work'
internal_id = data[0]['internalId']
data = nc.customers.get(internalId=internal_id)
logger.debug('data = %s', data)
assert data, f'No object with internalId {internal_id}'
def test_post(nc):
with open('./test/integration/data/customers/customer.json') as oj:
s = oj.read()
expr1 = json.loads(s)
logger.debug('expr1 = %s', expr1)
res = nc.customers.post(expr1)
logger.debug('res = %s', res)
assert res['externalId'] == expr1['externalId'], 'External ID does not match'
assert res['type'] == 'customer', 'Type does not match'
expr2 = nc.customers.get(externalId=res['externalId'])
logger.debug('expr2 = %s', expr2)
assert expr2['externalId'] == expr1['externalId'], 'External ID does not match'
assert expr2['companyName'] == expr1['companyName'], 'companyName does not match'
| 34.733333
| 85
| 0.661228
|
350321ddde694f65058007c51cbc866eb1154634
| 2,051
|
py
|
Python
|
get_meaning.py
|
NamitS27/WordsAPI
|
e075882202a8b865ed30c47d5524ec785f7aebf0
|
[
"MIT"
] | null | null | null |
get_meaning.py
|
NamitS27/WordsAPI
|
e075882202a8b865ed30c47d5524ec785f7aebf0
|
[
"MIT"
] | null | null | null |
get_meaning.py
|
NamitS27/WordsAPI
|
e075882202a8b865ed30c47d5524ec785f7aebf0
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
import time
import json
def get_meaning(searchterm):
"""
Fetches the meaning of the word specified
:param searchterm: the word for which you want to fetch the meaning
:return: json object of the meaning
"""
# finds the input field by id in the webpage
sbox = driver.find_element_by_id('word')
sbox.clear() # clears the input field
sbox.send_keys(searchterm) # enters the word specified in the input field
# find the 'CALL THE API' button
submit = driver.find_element_by_id("getWord")
submit.click() # invoking the click event on the button
# waiting for the results to come
time.sleep(1)
# find the code tag in the webpage where the meaning of the word (result) is present
code = driver.find_element_by_tag_name("code")
# condition if the meaning is not found
if code.text == "No results for that word.":
return {
'word':searchterm
}
# converting the meaning of the word from string to json
meaning = json.loads(code.text)
    # returning the meaning of the word in json format
return meaning
if __name__=="__main__":
# URL for the Words API webpage containing a free (try it) option where one can search meaning of the provided word
webpage = r"https://www.wordsapi.com/"
'''
    Specify the chromedriver version that matches the version of Chrome installed on your system.
    You can download Chrome from here > https://www.google.com/chrome/
    You can download the chromedriver for your specific version of Chrome from here > https://chromedriver.chromium.org/downloads
'''
options = webdriver.ChromeOptions()
chrome_driver_binary = "chromedriver.exe" # change the path of the chrome driver (ignore if in the same folder)
driver = webdriver.Chrome(chrome_driver_binary)
# opening the url in chrome
driver.get(webpage)
# Example
word = "example" # changes this as per the requirements
print(get_meaning(word))
| 35.362069
| 135
| 0.703072
|
a9fb7a75f3873918d6bc58d6f2bd98e24555c079
| 3,836
|
py
|
Python
|
USB/python/test-usb-dio96H.py
|
org-arl/MCC_Linux_Drivers
|
337ec41121bbff836eb32741dadd90e044edcfc7
|
[
"JasPer-2.0"
] | null | null | null |
USB/python/test-usb-dio96H.py
|
org-arl/MCC_Linux_Drivers
|
337ec41121bbff836eb32741dadd90e044edcfc7
|
[
"JasPer-2.0"
] | null | null | null |
USB/python/test-usb-dio96H.py
|
org-arl/MCC_Linux_Drivers
|
337ec41121bbff836eb32741dadd90e044edcfc7
|
[
"JasPer-2.0"
] | null | null | null |
#! /usr/bin/python3
#
# Copyright (c) 2018 Warren J. Jasper <wjasper@ncsu.edu>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from usb_dioSS import * # Solid State Relay class
import sys
import time
def toContinue():
answer = input('Continue [yY]? ')
if (answer == 'y' or answer == 'Y'):
return True
else:
return False
def main():
# dev = usb_dio96H() # USB-DIO96H
# dev = usb_dio96H_50() # USB-DIO96H-50
dev = usb_1096HFS() # USB-1096HFS
while True :
print("\nUSB-DIO96H/USB_DIO96H-50/USB-1096HFS Testing")
print("----------------")
print("Hit 'b' to blink LED")
print("Hit 'c' to test counter")
print("Hit 'A' to read all inputs")
print("Hit 's' to get status")
print("Hit 'j' for information")
print("Hit 'n' to get serial number")
print("Hit 'd' to test digital I/O ")
print("Hit 'r' to reset the device.")
print("Hit 't' to test digital bit I/O")
print("Hit 'e' to exit")
ch = input('\n')
if ch == 'b':
dev.Blink()
elif ch == 'e':
dev.h.close()
exit(0)
elif ch == 'A':
value = dev.GetAll()
for i in range(4):
print('Port',i,'A =',hex(value[4*i+0]))
print('Port',i,'B =',hex(value[4*i+1]))
print('Port',i,'C Low =',hex(value[4*i+2]))
print('Port',i,'C High =',hex(value[4*i+3]))
elif ch == 'c':
print('Test the Counter. Connect pin P2C0 <--> CTR')
dev.CInit()
for i in range(20):
dev.DOut(dev.DIO_PORT1C_LOW, 1)
time.sleep(0.05)
dev.DOut(dev.DIO_PORT1C_LOW, 0)
time.sleep(0.05)
print('Counter =',dev.CIn())
elif ch == 'd':
print('Testing Digital I/O')
print('Connect pins P1A0 through P1A7 <==> P1B0 through P1B7')
print('Connect pins P1C0 through P1C3 <==> P1C4 through P1C7')
while True:
value = int(input('Enter a byte number [0-0xff]: '),16)
dev.DOut(dev.DIO_PORT0A, value)
value2 = dev.DIn(dev.DIO_PORT0B)
print('The number you entered =',hex(value2))
value = int(input('Enter a nibble [0-0xf]: '),16)
dev.DOut(dev.DIO_PORT0C_LOW, value)
value2 = dev.DIn(dev.DIO_PORT0C_HI)
print('The number you entered =',hex(value2))
if (toContinue() != True):
break
elif ch == 't':
print('Testing Digital Bit I/O')
print('Connect pins P1A0 through P1A7 <==> P1B0 through P1B7')
while True:
pin = int(input('Enter a pin [0-7]: '))
bit_value = int(input('Enter a bit value [0|1]: '))
dev.DBitOut(dev.DIO_PORT0A, pin, bit_value)
value = dev.DBitIn(dev.DIO_PORT0B, pin)
print('The value you read is',value)
if toContinue() != True:
break
elif ch == 'n':
print("Serial No: %s" % dev.h.get_serial_number_string())
elif ch == 'j':
print("Manufacturer: %s" % dev.h.get_manufacturer_string())
print("Product: %s" % dev.h.get_product_string())
print("Serial No: %s" % dev.h.get_serial_number_string())
elif ch == 's':
status = dev.Status()
print('Status =',hex(status))
if __name__ == "__main__":
main()
| 34.872727
| 78
| 0.606361
|
7e8fcf5f3d1a2302a3b9bb9cfeaab77f22e8480e
| 19,921
|
py
|
Python
|
scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py
|
opoplawski/scipy
|
582d59caabb4a2a6fcdd06b512dcd14daf7ca6b2
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py
|
opoplawski/scipy
|
582d59caabb4a2a6fcdd06b512dcd14daf7ca6b2
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py
|
opoplawski/scipy
|
582d59caabb4a2a6fcdd06b512dcd14daf7ca6b2
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import division, print_function, absolute_import
__usage__ = """
To run tests locally:
python tests/test_arpack.py [-l<int>] [-v<int>]
"""
import warnings
import numpy as np
from numpy.testing import assert_allclose, \
assert_array_almost_equal_nulp, TestCase, run_module_suite, dec, \
assert_raises, verbose, assert_equal
from numpy import array, finfo, argsort, dot, round, conj, random
from scipy.linalg import eig, eigh
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix, isspmatrix
from scipy.sparse.linalg import LinearOperator, aslinearoperator
from scipy.sparse.linalg.eigen.arpack import eigs, eigsh, svds, \
ArpackNoConvergence
from scipy.linalg import svd, hilbert
# eigs() and eigsh() are called many times, so apply a filter for the warnings
# they generate here.
_eigs_warn_msg = "Single-precision types in `eigs` and `eighs`"
def setup_module():
warnings.filterwarnings("ignore", message=_eigs_warn_msg)
def teardown_module():
warnings.filterwarnings("default", message=_eigs_warn_msg)
# precision for tests
_ndigits = {'f': 3, 'd': 11, 'F': 3, 'D': 11}
def _get_test_tolerance(type_char, mattype=None):
"""
Return tolerance values suitable for a given test:
Parameters
----------
type_char : {'f', 'd', 'F', 'D'}
Data type in ARPACK eigenvalue problem
mattype : {csr_matrix, aslinearoperator, asarray}, optional
Linear operator type
Returns
-------
tol
Tolerance to pass to the ARPACK routine
rtol
Relative tolerance for outputs
atol
Absolute tolerance for outputs
"""
rtol = {'f': 3000 * np.finfo(np.float32).eps,
'F': 3000 * np.finfo(np.float32).eps,
'd': 2000 * np.finfo(np.float64).eps,
'D': 2000 * np.finfo(np.float64).eps}[type_char]
atol = rtol
tol = 0
if mattype is aslinearoperator and type_char in ('f', 'F'):
# iterative methods in single precision: worse errors
# also: bump ARPACK tolerance so that the iterative method converges
tol = 30 * np.finfo(np.float32).eps
rtol *= 5
if mattype is csr_matrix and type_char in ('f', 'F'):
# sparse in single precision: worse errors
rtol *= 5
return tol, rtol, atol
def generate_matrix(N, complex=False, hermitian=False,
pos_definite=False, sparse=False):
M = np.random.random((N,N))
if complex:
M = M + 1j * np.random.random((N,N))
if hermitian:
if pos_definite:
if sparse:
i = np.arange(N)
j = np.random.randint(N, size=N-2)
i, j = np.meshgrid(i, j)
M[i,j] = 0
M = np.dot(M.conj(), M.T)
else:
M = np.dot(M.conj(), M.T)
if sparse:
i = np.random.randint(N, size=N * N // 4)
j = np.random.randint(N, size=N * N // 4)
ind = np.where(i == j)
j[ind] = (j[ind] + 1) % N
M[i,j] = 0
M[j,i] = 0
else:
if sparse:
i = np.random.randint(N, size=N * N // 2)
j = np.random.randint(N, size=N * N // 2)
M[i,j] = 0
return M
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
def assert_allclose_cc(actual, desired, **kw):
"""Almost equal or complex conjugates almost equal"""
try:
assert_allclose(actual, desired, **kw)
except:
assert_allclose(actual, conj(desired), **kw)
def argsort_which(eval, typ, k, which,
sigma=None, OPpart=None, mode=None):
"""Return sorted indices of eigenvalues using the "which" keyword
from eigs and eigsh"""
if sigma is None:
reval = np.round(eval, decimals=_ndigits[typ])
else:
if mode is None or mode == 'normal':
if OPpart is None:
reval = 1. / (eval - sigma)
elif OPpart == 'r':
reval = 0.5 * (1. / (eval - sigma)
+ 1. / (eval - np.conj(sigma)))
elif OPpart == 'i':
reval = -0.5j * (1. / (eval - sigma)
- 1. / (eval - np.conj(sigma)))
elif mode == 'cayley':
reval = (eval + sigma) / (eval - sigma)
elif mode == 'buckling':
reval = eval / (eval - sigma)
else:
raise ValueError("mode='%s' not recognized" % mode)
reval = np.round(reval, decimals=_ndigits[typ])
if which in ['LM', 'SM']:
ind = np.argsort(abs(reval))
elif which in ['LR', 'SR', 'LA', 'SA', 'BE']:
ind = np.argsort(np.real(reval))
elif which in ['LI', 'SI']:
# for LI,SI ARPACK returns largest,smallest abs(imaginary) why?
if typ.islower():
ind = np.argsort(abs(np.imag(reval)))
else:
ind = np.argsort(np.imag(reval))
else:
raise ValueError("which='%s' is unrecognized" % which)
if which in ['LM', 'LA', 'LR', 'LI']:
return ind[-k:]
elif which in ['SM', 'SA', 'SR', 'SI']:
return ind[:k]
elif which == 'BE':
return np.concatenate((ind[:k//2], ind[k//2-k:]))
def eval_evec(symmetric, d, typ, k, which, v0=None, sigma=None,
mattype=np.asarray, OPpart=None, mode='normal'):
general = ('bmat' in d)
if symmetric:
eigs_func = eigsh
else:
eigs_func = eigs
if general:
err = ("error for %s:general, typ=%s, which=%s, sigma=%s, "
"mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__,
typ, which, sigma,
mattype.__name__,
OPpart, mode))
else:
err = ("error for %s:standard, typ=%s, which=%s, sigma=%s, "
"mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__,
typ, which, sigma,
mattype.__name__,
OPpart, mode))
a = d['mat'].astype(typ)
ac = mattype(a)
if general:
b = d['bmat'].astype(typ.lower())
bc = mattype(b)
# get exact eigenvalues
exact_eval = d['eval'].astype(typ.upper())
ind = argsort_which(exact_eval, typ, k, which,
sigma, OPpart, mode)
exact_eval_a = exact_eval
exact_eval = exact_eval[ind]
# compute arpack eigenvalues
kwargs = dict(which=which, v0=v0, sigma=sigma)
if eigs_func is eigsh:
kwargs['mode'] = mode
else:
kwargs['OPpart'] = OPpart
# compute suitable tolerances
kwargs['tol'], rtol, atol = _get_test_tolerance(typ, mattype)
# solve
if general:
try:
eval, evec = eigs_func(ac, k, bc, **kwargs)
except ArpackNoConvergence:
kwargs['maxiter'] = 20*a.shape[0]
eval, evec = eigs_func(ac, k, bc, **kwargs)
else:
try:
eval, evec = eigs_func(ac, k, **kwargs)
except ArpackNoConvergence:
kwargs['maxiter'] = 20*a.shape[0]
eval, evec = eigs_func(ac, k, **kwargs)
ind = argsort_which(eval, typ, k, which,
sigma, OPpart, mode)
eval_a = eval
eval = eval[ind]
evec = evec[:,ind]
# check eigenvalues
assert_allclose_cc(eval, exact_eval, rtol=rtol, atol=atol, err_msg=err)
# check eigenvectors
LHS = np.dot(a, evec)
if general:
RHS = eval * np.dot(b, evec)
else:
RHS = eval * evec
assert_allclose(LHS, RHS, rtol=rtol, atol=atol, err_msg=err)
class DictWithRepr(dict):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<%s>" % self.name
class SymmetricParams:
def __init__(self):
self.eigs = eigsh
self.which = ['LM', 'SM', 'LA', 'SA', 'BE']
self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
self.sigmas_modes = {None: ['normal'],
0.5: ['normal', 'buckling', 'cayley']}
# generate matrices
# these should all be float32 so that the eigenvalues
# are the same in float32 and float64
N = 6
np.random.seed(2300)
Ar = generate_matrix(N, hermitian=True,
pos_definite=True).astype('f').astype('d')
M = generate_matrix(N, hermitian=True,
pos_definite=True).astype('f').astype('d')
Ac = generate_matrix(N, hermitian=True, pos_definite=True,
complex=True).astype('F').astype('D')
v0 = np.random.random(N)
# standard symmetric problem
SS = DictWithRepr("std-symmetric")
SS['mat'] = Ar
SS['v0'] = v0
SS['eval'] = eigh(SS['mat'], eigvals_only=True)
# general symmetric problem
GS = DictWithRepr("gen-symmetric")
GS['mat'] = Ar
GS['bmat'] = M
GS['v0'] = v0
GS['eval'] = eigh(GS['mat'], GS['bmat'], eigvals_only=True)
# standard hermitian problem
SH = DictWithRepr("std-hermitian")
SH['mat'] = Ac
SH['v0'] = v0
SH['eval'] = eigh(SH['mat'], eigvals_only=True)
# general hermitian problem
GH = DictWithRepr("gen-hermitian")
GH['mat'] = Ac
GH['bmat'] = M
GH['v0'] = v0
GH['eval'] = eigh(GH['mat'], GH['bmat'], eigvals_only=True)
self.real_test_cases = [SS, GS]
self.complex_test_cases = [SH, GH]
class NonSymmetricParams:
def __init__(self):
self.eigs = eigs
self.which = ['LM', 'LR', 'LI'] # , 'SM', 'LR', 'SR', 'LI', 'SI']
self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
self.sigmas_OPparts = {None: [None],
0.1: ['r'],
0.1 + 0.1j: ['r', 'i']}
# generate matrices
# these should all be float32 so that the eigenvalues
# are the same in float32 and float64
N = 6
np.random.seed(2300)
Ar = generate_matrix(N).astype('f').astype('d')
M = generate_matrix(N, hermitian=True,
pos_definite=True).astype('f').astype('d')
Ac = generate_matrix(N, complex=True).astype('F').astype('D')
v0 = np.random.random(N)
# standard real nonsymmetric problem
SNR = DictWithRepr("std-real-nonsym")
SNR['mat'] = Ar
SNR['v0'] = v0
SNR['eval'] = eig(SNR['mat'], left=False, right=False)
# general real nonsymmetric problem
GNR = DictWithRepr("gen-real-nonsym")
GNR['mat'] = Ar
GNR['bmat'] = M
GNR['v0'] = v0
GNR['eval'] = eig(GNR['mat'], GNR['bmat'], left=False, right=False)
# standard complex nonsymmetric problem
SNC = DictWithRepr("std-cmplx-nonsym")
SNC['mat'] = Ac
SNC['v0'] = v0
SNC['eval'] = eig(SNC['mat'], left=False, right=False)
# general complex nonsymmetric problem
GNC = DictWithRepr("gen-cmplx-nonsym")
GNC['mat'] = Ac
GNC['bmat'] = M
GNC['v0'] = v0
GNC['eval'] = eig(GNC['mat'], GNC['bmat'], left=False, right=False)
self.real_test_cases = [SNR, GNR]
self.complex_test_cases = [SNC, GNC]
def test_symmetric_modes():
params = SymmetricParams()
k = 2
symmetric = True
for D in params.real_test_cases:
for typ in 'fd':
for which in params.which:
for mattype in params.mattypes:
for (sigma, modes) in params.sigmas_modes.items():
for mode in modes:
yield (eval_evec, symmetric, D, typ, k, which,
None, sigma, mattype, None, mode)
def test_hermitian_modes():
params = SymmetricParams()
k = 2
symmetric = True
for D in params.complex_test_cases:
for typ in 'FD':
for which in params.which:
if which == 'BE':
continue # BE invalid for complex
for mattype in params.mattypes:
for sigma in params.sigmas_modes:
yield (eval_evec, symmetric, D, typ, k, which,
None, sigma, mattype)
def test_symmetric_starting_vector():
params = SymmetricParams()
symmetric = True
for k in [1, 2, 3, 4, 5]:
for D in params.real_test_cases:
for typ in 'fd':
v0 = random.rand(len(D['v0'])).astype(typ)
yield (eval_evec, symmetric, D, typ, k, 'LM', v0)
def test_symmetric_no_convergence():
np.random.seed(1234)
m = generate_matrix(30, hermitian=True, pos_definite=True)
tol, rtol, atol = _get_test_tolerance('d')
try:
w, v = eigsh(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol)
raise AssertionError("Spurious no-error exit")
except ArpackNoConvergence as err:
k = len(err.eigenvalues)
if k <= 0:
raise AssertionError("Spurious no-eigenvalues-found case")
w, v = err.eigenvalues, err.eigenvectors
assert_allclose(dot(m, v), w * v, rtol=rtol, atol=atol)
def test_real_nonsymmetric_modes():
params = NonSymmetricParams()
k = 2
symmetric = False
for D in params.real_test_cases:
for typ in 'fd':
for which in params.which:
for mattype in params.mattypes:
for sigma, OPparts in params.sigmas_OPparts.items():
for OPpart in OPparts:
yield (eval_evec, symmetric, D, typ, k, which,
None, sigma, mattype, OPpart)
def test_complex_nonsymmetric_modes():
params = NonSymmetricParams()
k = 2
symmetric = False
for D in params.complex_test_cases:
for typ in 'DF':
for which in params.which:
for mattype in params.mattypes:
for sigma in params.sigmas_OPparts:
yield (eval_evec, symmetric, D, typ, k, which,
None, sigma, mattype)
def test_standard_nonsymmetric_starting_vector():
params = NonSymmetricParams()
sigma = None
symmetric = False
for k in [1, 2, 3, 4]:
for d in params.complex_test_cases:
for typ in 'FD':
A = d['mat']
n = A.shape[0]
v0 = random.rand(n).astype(typ)
yield (eval_evec, symmetric, d, typ, k, "LM", v0, sigma)
def test_general_nonsymmetric_starting_vector():
params = NonSymmetricParams()
sigma = None
symmetric = False
for k in [1, 2, 3, 4]:
for d in params.complex_test_cases:
for typ in 'FD':
A = d['mat']
n = A.shape[0]
v0 = random.rand(n).astype(typ)
yield (eval_evec, symmetric, d, typ, k, "LM", v0, sigma)
def test_standard_nonsymmetric_no_convergence():
np.random.seed(1234)
m = generate_matrix(30, complex=True)
tol, rtol, atol = _get_test_tolerance('d')
try:
w, v = eigs(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol)
raise AssertionError("Spurious no-error exit")
except ArpackNoConvergence as err:
k = len(err.eigenvalues)
if k <= 0:
raise AssertionError("Spurious no-eigenvalues-found case")
w, v = err.eigenvalues, err.eigenvectors
for ww, vv in zip(w, v.T):
assert_allclose(dot(m, vv), ww * vv, rtol=rtol, atol=atol)
def test_eigen_bad_shapes():
# A is not square.
A = csc_matrix(np.zeros((2, 3)))
assert_raises(ValueError, eigs, A)
def test_eigen_bad_kwargs():
# Test eigen on wrong keyword argument
A = csc_matrix(np.zeros((2, 2)))
assert_raises(ValueError, eigs, A, which='XX')
def test_ticket_1459_arpack_crash():
for dtype in [np.float32, np.float64]:
# XXX: this test does not seem to catch the issue for float32,
# but we made the same fix there, just to be sure
N = 6
k = 2
np.random.seed(2301)
A = np.random.random((N, N)).astype(dtype)
v0 = np.array([-0.71063568258907849895, -0.83185111795729227424,
-0.34365925382227402451, 0.46122533684552280420,
-0.58001341115969040629, -0.78844877570084292984e-01],
dtype=dtype)
# Should not crash:
evals, evecs = eigs(A, k, v0=v0)
#----------------------------------------------------------------------
# sparse SVD tests
def sorted_svd(m, k, which='LM'):
# Compute svd of a dense matrix m, and return singular vectors/values
# sorted.
if isspmatrix(m):
m = m.todense()
u, s, vh = svd(m)
if which == 'LM':
ii = np.argsort(s)[-k:]
elif which == 'SM':
ii = np.argsort(s)[:k]
else:
raise ValueError("unknown which=%r" % (which,))
return u[:, ii], s[ii], vh[ii]
def svd_estimate(u, s, vh):
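    # Reconstruct the (rank-k) matrix approximation u @ diag(s) @ vh from the
    # SVD factors, so dense and sparse results can be compared directly.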
return np.dot(u, np.dot(np.diag(s), vh))
def test_svd_simple_real():
x = np.array([[1, 2, 3],
[3, 4, 3],
[1, 0, 2],
[0, 0, 1]], np.float)
y = np.array([[1, 2, 3, 8],
[3, 4, 3, 5],
[1, 0, 2, 3],
[0, 0, 1, 0]], np.float)
z = csc_matrix(x)
for m in [x.T, x, y, z, z.T]:
for k in range(1, min(m.shape)):
u, s, vh = sorted_svd(m, k)
su, ss, svh = svds(m, k)
m_hat = svd_estimate(u, s, vh)
sm_hat = svd_estimate(su, ss, svh)
assert_array_almost_equal_nulp(m_hat, sm_hat, nulp=1000)
def test_svd_simple_complex():
x = np.array([[1, 2, 3],
[3, 4, 3],
[1 + 1j, 0, 2],
[0, 0, 1]], np.complex)
y = np.array([[1, 2, 3, 8 + 5j],
[3 - 2j, 4, 3, 5],
[1, 0, 2, 3],
[0, 0, 1, 0]], np.complex)
z = csc_matrix(x)
for m in [x, x.T.conjugate(), x.T, y, y.conjugate(), z, z.T]:
for k in range(1, min(m.shape) - 1):
u, s, vh = sorted_svd(m, k)
su, ss, svh = svds(m, k)
m_hat = svd_estimate(u, s, vh)
sm_hat = svd_estimate(su, ss, svh)
assert_array_almost_equal_nulp(m_hat, sm_hat, nulp=1000)
def test_svd_maxiter():
# check that maxiter works as expected
x = hilbert(6)
# ARPACK shouldn't converge on such an ill-conditioned matrix with just
# one iteration
assert_raises(ArpackNoConvergence, svds, x, 1, maxiter=1)
# but 100 iterations should be more than enough
u, s, vt = svds(x, 1, maxiter=100)
assert_allclose(s, [1.7], atol=0.5)
def test_svd_return():
# check that the return_singular_vectors parameter works as expected
x = hilbert(6)
_, s, _ = sorted_svd(x, 2)
ss = svds(x, 2, return_singular_vectors=False)
assert_allclose(s, ss)
def test_svd_which():
# check that the which parameter works as expected
x = hilbert(6)
for which in ['LM', 'SM']:
_, s, _ = sorted_svd(x, 2, which=which)
ss = svds(x, 2, which=which, return_singular_vectors=False)
ss.sort()
assert_allclose(s, ss, atol=np.sqrt(1e-15))
def test_svd_v0():
# check that the v0 parameter works as expected
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], float)
u, s, vh = svds(x, 1)
u2, s2, vh2 = svds(x, 1, v0=u[:,0])
assert_allclose(s, s2, atol=np.sqrt(1e-15))
if __name__ == "__main__":
run_module_suite()
| 31.670906
| 78
| 0.538778
|
a930f5b0e874f959fb9ddcd784a17f560fcb192c
| 1,140
|
py
|
Python
|
aliyun-python-sdk-afs/aliyunsdkafs/request/v20180112/DescribeConfigNameRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 1,001
|
2015-07-24T01:32:41.000Z
|
2022-03-25T01:28:18.000Z
|
aliyun-python-sdk-afs/aliyunsdkafs/request/v20180112/DescribeConfigNameRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 363
|
2015-10-20T03:15:00.000Z
|
2022-03-08T12:26:19.000Z
|
aliyun-python-sdk-afs/aliyunsdkafs/request/v20180112/DescribeConfigNameRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 682
|
2015-09-22T07:19:02.000Z
|
2022-03-22T09:51:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeConfigNameRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'afs', '2018-01-12', 'DescribeConfigName','afs')
def get_SourceIp(self):
return self.get_query_params().get('SourceIp')
def set_SourceIp(self,SourceIp):
self.add_query_param('SourceIp',SourceIp)
| 38
| 77
| 0.766667
|
313fdc83513b30f45dc5efdeae2e9d25d8fc40d3
| 30,822
|
py
|
Python
|
tests/regressiontests/backends/tests.py
|
kezabelle/django
|
138de533ff677b470a1e7b4b6ff084a5b7a7444b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/regressiontests/backends/tests.py
|
kezabelle/django
|
138de533ff677b470a1e7b4b6ff084a5b7a7444b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/regressiontests/backends/tests.py
|
kezabelle/django
|
138de533ff677b470a1e7b4b6ff084a5b7a7444b
|
[
"BSD-3-Clause"
] | 1
|
2021-09-14T06:09:11.000Z
|
2021-09-14T06:09:11.000Z
|
# -*- coding: utf-8 -*-
# Unit and doctests for specific database backends.
from __future__ import absolute_import, unicode_literals
import datetime
import threading
from django.conf import settings
from django.core.management.color import no_style
from django.core.exceptions import ImproperlyConfigured
from django.db import (backend, connection, connections, DEFAULT_DB_ALIAS,
IntegrityError, transaction)
from django.db.backends.signals import connection_created
from django.db.backends.postgresql_psycopg2 import version as pg_version
from django.db.models import fields, Sum, Avg, Variance, StdDev
from django.db.utils import ConnectionHandler, DatabaseError, load_backend
from django.test import (TestCase, skipUnlessDBFeature, skipIfDBFeature,
TransactionTestCase)
from django.test.utils import override_settings, str_prefix
from django.utils import six
from django.utils.six.moves import xrange
from django.utils import unittest
from . import models
class DummyBackendTest(TestCase):
def test_no_databases(self):
"""
Test that empty DATABASES setting default to the dummy backend.
"""
DATABASES = {}
conns = ConnectionHandler(DATABASES)
self.assertEqual(conns[DEFAULT_DB_ALIAS].settings_dict['ENGINE'],
'django.db.backends.dummy')
class OracleChecks(unittest.TestCase):
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle quote_name semantics")
def test_quote_name(self):
# Check that '%' chars are escaped for query execution.
name = '"SOME%NAME"'
quoted_name = connection.ops.quote_name(name)
self.assertEqual(quoted_name % (), name)
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle cursor semantics")
def test_dbms_session(self):
# If the backend is Oracle, test that we can call a standard
# stored procedure through our cursor wrapper.
convert_unicode = backend.convert_unicode
cursor = connection.cursor()
cursor.callproc(convert_unicode('DBMS_SESSION.SET_IDENTIFIER'),
[convert_unicode('_django_testing!'),])
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle cursor semantics")
def test_cursor_var(self):
# If the backend is Oracle, test that we can pass cursor variables
# as query parameters.
cursor = connection.cursor()
var = cursor.var(backend.Database.STRING)
cursor.execute("BEGIN %s := 'X'; END; ", [var])
self.assertEqual(var.getvalue(), 'X')
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle cursor semantics")
def test_long_string(self):
# If the backend is Oracle, test that we can save a text longer
# than 4000 chars and read it properly
c = connection.cursor()
c.execute('CREATE TABLE ltext ("TEXT" NCLOB)')
long_str = ''.join([six.text_type(x) for x in xrange(4000)])
c.execute('INSERT INTO ltext VALUES (%s)',[long_str])
c.execute('SELECT text FROM ltext')
row = c.fetchone()
self.assertEqual(long_str, row[0].read())
c.execute('DROP TABLE ltext')
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle connection semantics")
def test_client_encoding(self):
# If the backend is Oracle, test that the client encoding is set
# correctly. This was broken under Cygwin prior to r14781.
connection.cursor() # Ensure the connection is initialized.
self.assertEqual(connection.connection.encoding, "UTF-8")
self.assertEqual(connection.connection.nencoding, "UTF-8")
@unittest.skipUnless(connection.vendor == 'oracle',
"No need to check Oracle connection semantics")
def test_order_of_nls_parameters(self):
# an 'almost right' datetime should work with configured
# NLS parameters as per #18465.
c = connection.cursor()
query = "select 1 from dual where '1936-12-29 00:00' < sysdate"
# Test that the query succeeds without errors - pre #18465 this
# wasn't the case.
c.execute(query)
self.assertEqual(c.fetchone()[0], 1)
class MySQLTests(TestCase):
@unittest.skipUnless(connection.vendor == 'mysql',
"Test valid only for MySQL")
def test_autoincrement(self):
"""
Check that auto_increment fields are reset correctly by sql_flush().
Before MySQL version 5.0.13 TRUNCATE did not do auto_increment reset.
Refs #16961.
"""
statements = connection.ops.sql_flush(no_style(),
tables=['test'],
sequences=[{
'table': 'test',
'col': 'somecol',
}])
found_reset = False
for sql in statements:
found_reset = found_reset or 'ALTER TABLE' in sql
if connection.mysql_version < (5,0,13):
self.assertTrue(found_reset)
else:
self.assertFalse(found_reset)
@unittest.skipUnless(connection.vendor == 'mysql',
"Test valid only for MySQL")
def test_server_version_connections(self):
connection.close()
connection.mysql_version
self.assertTrue(connection.connection is None)
class DateQuotingTest(TestCase):
def test_django_date_trunc(self):
"""
Test the custom ``django_date_trunc method``, in particular against
fields which clash with strings passed to it (e.g. 'year') - see
#12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
years = models.SchoolClass.objects.dates('last_updated', 'year')
self.assertEqual(list(years), [datetime.datetime(2010, 1, 1, 0, 0)])
def test_django_extract(self):
"""
Test the custom ``django_extract method``, in particular against fields
which clash with strings passed to it (e.g. 'day') - see #12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
classes = models.SchoolClass.objects.filter(last_updated__day=20)
self.assertEqual(len(classes), 1)
@override_settings(DEBUG=True)
class LastExecutedQueryTest(TestCase):
def test_debug_sql(self):
list(models.Tag.objects.filter(name="test"))
sql = connection.queries[-1]['sql'].lower()
self.assertIn("select", sql)
self.assertIn(models.Tag._meta.db_table, sql)
def test_query_encoding(self):
"""
Test that last_executed_query() returns a Unicode string
"""
tags = models.Tag.objects.extra(select={'föö': 1})
sql, params = tags.query.sql_with_params()
cursor = tags.query.get_compiler('default').execute_sql(None)
last_sql = cursor.db.ops.last_executed_query(cursor, sql, params)
self.assertTrue(isinstance(last_sql, six.text_type))
@unittest.skipUnless(connection.vendor == 'sqlite',
"This test is specific to SQLite.")
def test_no_interpolation_on_sqlite(self):
# Regression for #17158
# This shouldn't raise an exception
query = "SELECT strftime('%Y', 'now');"
connection.cursor().execute(query)
self.assertEqual(connection.queries[-1]['sql'],
str_prefix("QUERY = %(_)s\"SELECT strftime('%%Y', 'now');\" - PARAMS = ()"))
class ParameterHandlingTest(TestCase):
def test_bad_parameter_count(self):
"An executemany call with too many/not enough parameters will raise an exception (Refs #12612)"
cursor = connection.cursor()
query = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (
connection.introspection.table_name_converter('backends_square'),
connection.ops.quote_name('root'),
connection.ops.quote_name('square')
))
self.assertRaises(Exception, cursor.executemany, query, [(1,2,3),])
self.assertRaises(Exception, cursor.executemany, query, [(1,),])
# Unfortunately, the following tests would be good ones to run on all
# backends, but they break MySQL hard. Until #13711 is fixed, they can't be run
# everywhere (although they would be an effective test of #13711).
class LongNameTest(TestCase):
"""Long primary keys and model names can result in a sequence name
that exceeds the database limits, which will result in truncation
on certain databases (e.g., Postgres). The backend needs to use
the correct sequence name in last_insert_id and other places, so
check it is. Refs #8901.
"""
@skipUnlessDBFeature('supports_long_model_names')
def test_sequence_name_length_limits_create(self):
"""Test creation of model with long name and long pk name doesn't error. Ref #8901"""
models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
@skipUnlessDBFeature('supports_long_model_names')
def test_sequence_name_length_limits_m2m(self):
"""Test an m2m save of a model with a long name and a long m2m field name doesn't error as on Django >=1.2 this now uses object saves. Ref #8901"""
obj = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
rel_obj = models.Person.objects.create(first_name='Django', last_name='Reinhardt')
obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)
@skipUnlessDBFeature('supports_long_model_names')
def test_sequence_name_length_limits_flush(self):
"""Test that sequence resetting as part of a flush with model with long name and long pk name doesn't error. Ref #8901"""
# A full flush is too expensive for this test, so we dig into the
# internals to generate the likely offending SQL and run it manually
# Some convenience aliases
VLM = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
tables = [
VLM._meta.db_table,
VLM_m2m._meta.db_table,
]
sequences = [
{
'column': VLM._meta.pk.column,
'table': VLM._meta.db_table
},
]
cursor = connection.cursor()
for statement in connection.ops.sql_flush(no_style(), tables, sequences):
cursor.execute(statement)
class SequenceResetTest(TestCase):
def test_generic_relation(self):
"Sequence names are correct when resetting generic relations (Ref #13941)"
# Create an object with a manually specified PK
models.Post.objects.create(id=10, name='1st post', text='hello world')
# Reset the sequences for the database
cursor = connection.cursor()
commands = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [models.Post])
for sql in commands:
cursor.execute(sql)
# If we create a new object now, it should have a PK greater
# than the PK we specified manually.
obj = models.Post.objects.create(name='New post', text='goodbye world')
self.assertTrue(obj.pk > 10)
class PostgresVersionTest(TestCase):
def assert_parses(self, version_string, version):
self.assertEqual(pg_version._parse_version(version_string), version)
def test_parsing(self):
"""Test PostgreSQL version parsing from `SELECT version()` output"""
self.assert_parses("PostgreSQL 8.3 beta4", 80300)
self.assert_parses("PostgreSQL 8.3", 80300)
self.assert_parses("EnterpriseDB 8.3", 80300)
self.assert_parses("PostgreSQL 8.3.6", 80306)
self.assert_parses("PostgreSQL 8.4beta1", 80400)
self.assert_parses("PostgreSQL 8.3.1 on i386-apple-darwin9.2.2, compiled by GCC i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 (Apple Inc. build 5478)", 80301)
def test_version_detection(self):
"""Test PostgreSQL version detection"""
# Helper mocks
class CursorMock(object):
"Very simple mock of DB-API cursor"
def execute(self, arg):
pass
def fetchone(self):
return ["PostgreSQL 8.3"]
class OlderConnectionMock(object):
"Mock of psycopg2 (< 2.0.12) connection"
def cursor(self):
return CursorMock()
# psycopg2 < 2.0.12 code path
conn = OlderConnectionMock()
self.assertEqual(pg_version.get_version(conn), 80300)
class PostgresNewConnectionTest(TestCase):
"""
#17062: PostgreSQL shouldn't roll back SET TIME ZONE, even if the first
transaction is rolled back.
"""
@unittest.skipUnless(
connection.vendor == 'postgresql' and connection.isolation_level > 0,
"This test applies only to PostgreSQL without autocommit")
def test_connect_and_rollback(self):
new_connections = ConnectionHandler(settings.DATABASES)
new_connection = new_connections[DEFAULT_DB_ALIAS]
try:
# Ensure the database default time zone is different than
# the time zone in new_connection.settings_dict. We can
# get the default time zone by reset & show.
cursor = new_connection.cursor()
cursor.execute("RESET TIMEZONE")
cursor.execute("SHOW TIMEZONE")
db_default_tz = cursor.fetchone()[0]
new_tz = 'Europe/Paris' if db_default_tz == 'UTC' else 'UTC'
new_connection.close()
# Fetch a new connection with the new_tz as default
# time zone, run a query and rollback.
new_connection.settings_dict['TIME_ZONE'] = new_tz
new_connection.enter_transaction_management()
cursor = new_connection.cursor()
new_connection.rollback()
# Now let's see if the rollback rolled back the SET TIME ZONE.
cursor.execute("SHOW TIMEZONE")
tz = cursor.fetchone()[0]
self.assertEqual(new_tz, tz)
finally:
try:
new_connection.close()
except DatabaseError:
pass
# Unfortunately with sqlite3 the in-memory test database cannot be
# closed, and so it cannot be re-opened during testing, and so we
# sadly disable this test for now.
class ConnectionCreatedSignalTest(TestCase):
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_signal(self):
data = {}
def receiver(sender, connection, **kwargs):
data["connection"] = connection
connection_created.connect(receiver)
connection.close()
cursor = connection.cursor()
self.assertTrue(data["connection"].connection is connection.connection)
connection_created.disconnect(receiver)
data.clear()
cursor = connection.cursor()
self.assertTrue(data == {})
class EscapingChecks(TestCase):
@unittest.skipUnless(connection.vendor == 'sqlite',
"This is a sqlite-specific issue")
def test_parameter_escaping(self):
#13648: '%s' escaping support for sqlite3
cursor = connection.cursor()
response = cursor.execute(
"select strftime('%%s', date('now'))").fetchall()[0][0]
self.assertNotEqual(response, None)
# response should be a non-zero integer
self.assertTrue(int(response))
class SqlliteAggregationTests(TestCase):
"""
#19360: Raise NotImplementedError when aggregating on date/time fields.
"""
@unittest.skipUnless(connection.vendor == 'sqlite',
"No need to check SQLite aggregation semantics")
def test_aggregation(self):
for aggregate in (Sum, Avg, Variance, StdDev):
self.assertRaises(NotImplementedError,
models.Item.objects.all().aggregate, aggregate('time'))
self.assertRaises(NotImplementedError,
models.Item.objects.all().aggregate, aggregate('date'))
self.assertRaises(NotImplementedError,
models.Item.objects.all().aggregate, aggregate('last_modified'))
class BackendTestCase(TestCase):
def create_squares_with_executemany(self, args):
cursor = connection.cursor()
opts = models.Square._meta
tbl = connection.introspection.table_name_converter(opts.db_table)
f1 = connection.ops.quote_name(opts.get_field('root').column)
f2 = connection.ops.quote_name(opts.get_field('square').column)
query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, f1, f2)
cursor.executemany(query, args)
def test_cursor_executemany(self):
#4896: Test cursor.executemany
args = [(i, i**2) for i in range(-5, 6)]
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i**2)
def test_cursor_executemany_with_empty_params_list(self):
#4765: executemany with params=[] does nothing
args = []
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 0)
def test_cursor_executemany_with_iterator(self):
#10320: executemany accepts iterators
args = iter((i, i**2) for i in range(-3, 2))
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 5)
args = iter((i, i**2) for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 9)
def test_unicode_fetches(self):
#6254: fetchone, fetchmany, fetchall return strings as unicode objects
qn = connection.ops.quote_name
models.Person(first_name="John", last_name="Doe").save()
models.Person(first_name="Jane", last_name="Doe").save()
models.Person(first_name="Mary", last_name="Agnelline").save()
models.Person(first_name="Peter", last_name="Parker").save()
models.Person(first_name="Clark", last_name="Kent").save()
opts2 = models.Person._meta
f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
query2 = ('SELECT %s, %s FROM %s ORDER BY %s'
% (qn(f3.column), qn(f4.column), connection.introspection.table_name_converter(opts2.db_table),
qn(f3.column)))
cursor = connection.cursor()
cursor.execute(query2)
self.assertEqual(cursor.fetchone(), ('Clark', 'Kent'))
self.assertEqual(list(cursor.fetchmany(2)), [('Jane', 'Doe'), ('John', 'Doe')])
self.assertEqual(list(cursor.fetchall()), [('Mary', 'Agnelline'), ('Peter', 'Parker')])
def test_unicode_password(self):
old_password = connection.settings_dict['PASSWORD']
connection.settings_dict['PASSWORD'] = "françois"
try:
cursor = connection.cursor()
except backend.Database.DatabaseError:
# As password is probably wrong, a database exception is expected
pass
except Exception as e:
self.fail("Unexpected error raised with unicode password: %s" % e)
finally:
connection.settings_dict['PASSWORD'] = old_password
def test_database_operations_helper_class(self):
# Ticket #13630
self.assertTrue(hasattr(connection, 'ops'))
self.assertTrue(hasattr(connection.ops, 'connection'))
self.assertEqual(connection, connection.ops.connection)
def test_cached_db_features(self):
self.assertIn(connection.features.supports_transactions, (True, False))
self.assertIn(connection.features.supports_stddev, (True, False))
self.assertIn(connection.features.can_introspect_foreign_keys, (True, False))
def test_duplicate_table_error(self):
""" Test that creating an existing table returns a DatabaseError """
cursor = connection.cursor()
query = 'CREATE TABLE %s (id INTEGER);' % models.Article._meta.db_table
with self.assertRaises(DatabaseError):
cursor.execute(query)
# We don't make these tests conditional because that means we would need to
# check and differentiate between:
# * MySQL+InnoDB, MySQL+MYISAM (something we currently can't do).
# * if sqlite3 (if/once we get #14204 fixed) has referential integrity turned
# on or not, something that would be controlled by runtime support and user
# preference.
# verify if its type is django.database.db.IntegrityError.
class FkConstraintsTests(TransactionTestCase):
def setUp(self):
# Create a Reporter.
self.r = models.Reporter.objects.create(first_name='John', last_name='Smith')
def test_integrity_checks_on_creation(self):
"""
Try to create a model instance that violates a FK constraint. If it
fails it should fail with IntegrityError.
"""
a = models.Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
try:
a.save()
except IntegrityError:
return
self.skipTest("This backend does not support integrity checks.")
def test_integrity_checks_on_update(self):
"""
Try to update a model instance introducing a FK constraint violation.
If it fails it should fail with IntegrityError.
"""
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
a.save()
except IntegrityError:
return
self.skipTest("This backend does not support integrity checks.")
def test_disable_constraint_checks_manually(self):
"""
When constraint checks are disabled, it should be possible to write bad data without IntegrityErrors.
"""
with transaction.commit_manually():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
connection.disable_constraint_checking()
a.save()
connection.enable_constraint_checking()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
finally:
transaction.rollback()
def test_disable_constraint_checks_context_manager(self):
"""
When constraint checks are disabled (using the context manager), it should be possible to write bad data without IntegrityErrors.
"""
with transaction.commit_manually():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
with connection.constraint_checks_disabled():
a.save()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
finally:
transaction.rollback()
def test_check_constraints(self):
"""
Constraint checks should raise an IntegrityError when bad data is in the DB.
"""
with transaction.commit_manually():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
with connection.constraint_checks_disabled():
a.save()
with self.assertRaises(IntegrityError):
connection.check_constraints()
finally:
transaction.rollback()
class ThreadTests(TestCase):
def test_default_connection_thread_local(self):
"""
Ensure that the default connection (i.e. django.db.connection) is
different for each thread.
Refs #17258.
"""
connections_set = set()
connection.cursor()
connections_set.add(connection)
def runner():
# Passing django.db.connection between threads doesn't work while
# connections[DEFAULT_DB_ALIAS] does.
from django.db import connections
connection = connections[DEFAULT_DB_ALIAS]
# Allow thread sharing so the connection can be closed by the
# main thread.
connection.allow_thread_sharing = True
connection.cursor()
connections_set.add(connection)
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
# Check that each created connection got different inner connection.
self.assertEqual(
len(set([conn.connection for conn in connections_set])),
3)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_set:
if conn is not connection:
conn.close()
def test_connections_thread_local(self):
"""
Ensure that the connections are different for each thread.
Refs #17258.
"""
connections_set = set()
for conn in connections.all():
connections_set.add(conn)
def runner():
from django.db import connections
for conn in connections.all():
# Allow thread sharing so the connection can be closed by the
# main thread.
conn.allow_thread_sharing = True
connections_set.add(conn)
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertEqual(len(connections_set), 6)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_set:
if conn is not connection:
conn.close()
def test_pass_connection_between_threads(self):
"""
Ensure that a connection can be passed from one thread to the other.
Refs #17258.
"""
models.Person.objects.create(first_name="John", last_name="Doe")
def do_thread():
def runner(main_thread_connection):
from django.db import connections
connections['default'] = main_thread_connection
try:
models.Person.objects.get(first_name="John", last_name="Doe")
except Exception as e:
exceptions.append(e)
t = threading.Thread(target=runner, args=[connections['default']])
t.start()
t.join()
# Without touching allow_thread_sharing, which should be False by default.
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to False
connections['default'].allow_thread_sharing = False
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to True
connections['default'].allow_thread_sharing = True
exceptions = []
do_thread()
# All good
self.assertEqual(exceptions, [])
def test_closing_non_shared_connections(self):
"""
Ensure that a connection that is not explicitly shareable cannot be
closed by another thread.
Refs #17258.
"""
# First, without explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# The exception was raised
self.assertEqual(len(exceptions), 1)
# Then, with explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
# Enable thread sharing
connections['default'].allow_thread_sharing = True
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# No exception was raised
self.assertEqual(len(exceptions), 0)
class MySQLPKZeroTests(TestCase):
"""
Zero as the id for an AutoField should raise an exception in MySQL, because
MySQL does not allow zero as an automatic primary key.
"""
@skipIfDBFeature('allows_primary_key_0')
def test_zero_as_autoval(self):
with self.assertRaises(ValueError):
models.Square.objects.create(id=0, root=0, square=1)
| 42.049113
| 161
| 0.636364
|
30a591f29ebc30dfb2ed05f9bb735a7332d4c6fd
| 8,617
|
py
|
Python
|
ocrd_wrap/skimage_denoise.py
|
kba/ocrd_wrap
|
6d92c02e968a8622b1918fc71e4469c6413a8f9b
|
[
"MIT"
] | null | null | null |
ocrd_wrap/skimage_denoise.py
|
kba/ocrd_wrap
|
6d92c02e968a8622b1918fc71e4469c6413a8f9b
|
[
"MIT"
] | null | null | null |
ocrd_wrap/skimage_denoise.py
|
kba/ocrd_wrap
|
6d92c02e968a8622b1918fc71e4469c6413a8f9b
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import os.path
from PIL import Image
import numpy as np
from skimage.morphology import (
remove_small_objects,
remove_small_holes
)
from ocrd import Processor
from ocrd_utils import (
getLogger,
make_file_id,
assert_file_grp_cardinality,
MIMETYPE_PAGE
)
from ocrd_modelfactory import page_from_file
from ocrd_models.ocrd_page import (
LabelType, LabelsType,
MetadataItemType,
AlternativeImageType,
to_xml
)
from .config import OCRD_TOOL
TOOL = 'ocrd-skimage-denoise'
LOG = getLogger('processor.SkimageDenoise')
class SkimageDenoise(Processor):
def __init__(self, *args, **kwargs):
kwargs['ocrd_tool'] = OCRD_TOOL['tools'][TOOL]
kwargs['version'] = OCRD_TOOL['version']
super(SkimageDenoise, self).__init__(*args, **kwargs)
def process(self):
"""Performs binary denoising of segment or page images with scikit-image on the workspace.
Open and deserialize PAGE input files and their respective images,
then iterate over the element hierarchy down to the requested
``level-of-operation``.
For each segment element, retrieve a segment image according to
the layout annotation (from an existing AlternativeImage, or by
cropping via coordinates into the higher-level image, and -
when applicable - deskewing), in binarized form.
Next, denoise the image by removing connected components that are too
small, using skimage (a minimal standalone sketch of this step is appended
at the end of this module).
Then write the new image to the workspace along with the output fileGrp,
and using a file ID with suffix ``.IMG-DEN`` with further identification
of the input element.
Produce a new PAGE output file by serialising the resulting hierarchy.
"""
oplevel = self.parameter['level-of-operation']
assert_file_grp_cardinality(self.input_file_grp, 1)
assert_file_grp_cardinality(self.output_file_grp, 1)
for (n, input_file) in enumerate(self.input_files):
file_id = make_file_id(input_file, self.output_file_grp)
page_id = input_file.pageId or input_file.ID
LOG.info("INPUT FILE %i / %s", n, page_id)
pcgts = page_from_file(self.workspace.download_file(input_file))
page = pcgts.get_Page()
metadata = pcgts.get_Metadata() # ensured by from_file()
metadata.add_MetadataItem(
MetadataItemType(type_="processingStep",
name=self.ocrd_tool['steps'][0],
value=TOOL,
Labels=[LabelsType(
externalModel="ocrd-tool",
externalId="parameters",
Label=[LabelType(type_=name,
value=self.parameter[name])
for name in self.parameter.keys()])]))
for page in [page]:
page_image, page_coords, page_image_info = self.workspace.image_from_page(
page, page_id, feature_selector='binarized')
if self.parameter['dpi'] > 0:
dpi = self.parameter['dpi']
LOG.info("Page '%s' images will use %d DPI from parameter override", page_id, dpi)
elif page_image_info.resolution != 1:
dpi = page_image_info.resolution
if page_image_info.resolutionUnit == 'cm':
dpi = round(dpi * 2.54)
LOG.info("Page '%s' images will use %d DPI from image meta-data", page_id, dpi)
else:
dpi = 300
LOG.info("Page '%s' images will use 300 DPI from fall-back", page_id)
maxsize = self.parameter['maxsize'] # in pt
maxsize *= dpi/72 # in px
#maxsize **= 2 # area
if oplevel == 'page':
self._process_segment(page, page_image, page_coords, maxsize,
"page '%s'" % page_id, input_file.pageId,
file_id + '.IMG-DEN')
continue
regions = page.get_AllRegions(classes=['Text'])
if not regions:
LOG.warning("Page '%s' contains no text regions", page_id)
for region in regions:
region_image, region_coords = self.workspace.image_from_segment(
region, page_image, page_coords, feature_selector='binarized')
if oplevel == 'region':
self._process_segment(region, region_image, region_coords, maxsize,
"region '%s'" % region.id, None,
file_id + '.IMG-DEN_' + region.id)
continue
lines = region.get_TextLine()
if not lines:
LOG.warning("Region '%s' contains no text lines", region.id)
for line in lines:
line_image, line_coords = self.workspace.image_from_segment(
line, region_image, region_coords, feature_selector='binarized')
if oplevel == 'line':
self._process_segment(line, line_image, line_coords, maxsize,
"line '%s'" % line.id, None,
file_id + '.IMG-DEN_' + line.id)
continue
words = line.get_Word()
if not words:
LOG.warning("Line '%s' contains no words", line.id)
for word in words:
word_image, word_coords = self.workspace.image_from_segment(
word, line_image, line_coords, feature_selector='binarized')
if oplevel == 'word':
self._process_segment(word, word_image, word_coords, maxsize,
"word '%s'" % word.id, None,
file_id + '.IMG-DEN_' + word.id)
continue
glyphs = word.get_Glyph()
if not glyphs:
LOG.warning("Word '%s' contains no glyphs", word.id)
for glyph in glyphs:
glyph_image, glyph_coords = self.workspace.image_from_segment(
glyph, word_image, word_coords, feature_selector='binarized')
self._process_segment(glyph, glyph_image, glyph_coords, maxsize,
"glyph '%s'" % glyph.id, None,
file_id + '.IMG-DEN_' + glyph.id)
pcgts.set_pcGtsId(file_id)
self.workspace.add_file(
ID=file_id,
file_grp=self.output_file_grp,
pageId=input_file.pageId,
mimetype=MIMETYPE_PAGE,
local_filename=os.path.join(self.output_file_grp,
file_id + '.xml'),
content=to_xml(pcgts))
def _process_segment(self, segment, image, coords, maxsize, where, page_id, file_id):
features = coords['features'] # features already applied to image
features += ',despeckled'
array = np.array(image)
# suppress bg specks in fg (holes in binary-inverted)
remove_small_objects(array, min_size=maxsize, in_place=True)
# suppress fg specks in bg (blobs in binary-inverted)
remove_small_holes(array, area_threshold=maxsize, in_place=True)
image = Image.fromarray(array)
# annotate results
file_path = self.workspace.save_image_file(
image,
file_id,
file_grp=self.output_file_grp,
page_id=page_id)
segment.add_AlternativeImage(AlternativeImageType(
filename=file_path, comments=features))
LOG.debug("Despeckled image for %s saved as '%s'", where, file_path)
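# A minimal standalone sketch (not used by the processor itself) of the
# despeckling step performed in _process_segment above: remove tiny foreground
# specks and fill tiny background holes in a binarized image. The toy array and
# the 9 px threshold below are illustrative values only.
if __name__ == '__main__':
    demo = np.zeros((64, 64), dtype=bool)
    demo[8:40, 8:40] = True   # one large foreground object
    demo[2, 2] = True         # isolated 1-px speck (a "small object")
    demo[20, 20] = False      # 1-px hole inside the object (a "small hole")
    cleaned = remove_small_objects(demo, min_size=9)
    cleaned = remove_small_holes(cleaned, area_threshold=9)
    print("speck removed:", not cleaned[2, 2])
    print("hole filled:", bool(cleaned[20, 20]))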
| 49.24
| 102
| 0.525357
|
e58385fb835ed9c0152c4ca0775a294f61634b58
| 4,706
|
py
|
Python
|
util/data_processing.py
|
tonyqtian/tagpredictor
|
595734bfa80ac213268d36be84f082034ff4f014
|
[
"MIT"
] | 1
|
2017-11-17T00:18:14.000Z
|
2017-11-17T00:18:14.000Z
|
util/data_processing.py
|
tonyqtian/tagpredictor
|
595734bfa80ac213268d36be84f082034ff4f014
|
[
"MIT"
] | null | null | null |
util/data_processing.py
|
tonyqtian/tagpredictor
|
595734bfa80ac213268d36be84f082034ff4f014
|
[
"MIT"
] | null | null | null |
'''
Created on Mar 17, 2017
@author: tonyq
'''
import pandas as pd
import numpy as np
import re, sys
from bs4 import BeautifulSoup
import logging
from keras.preprocessing.sequence import pad_sequences
from tqdm._tqdm import tqdm
from nltk.tokenize import word_tokenize
from numpy import array, zeros
import operator
logger = logging.getLogger(__name__)
uri_re = r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))'
def stripTagsAndUris(x):
if x:
# BeautifulSoup on content
soup = BeautifulSoup(x, "html.parser")
# Stripping all <code> tags with their content if any
if soup.code:
soup.code.decompose()
# Get all the text out of the html
text = soup.get_text()
# Returning text stripping out all uris
return re.sub(uri_re, "", text)
else:
return ""
def get_words(text):
# word_split = re.compile('[^a-zA-Z0-9_\\+\\-]')
# return [word.strip().lower() for word in word_split.split(text)]
text = text.replace("\\", " ")
return word_tokenize(text)
def get_pdTable(path, notag=False):
logger.info(' Processing pandas csv ')
pdtable = pd.read_csv(path)
if notag:
return pdtable.id, pdtable.title, pdtable.content
else:
return pdtable.id, pdtable.title, pdtable.content, pdtable.tags
def tableMerge(tableList):
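	# Merge parallel columns (e.g. title and content) row by row into single
	# space-joined strings.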
return [' '.join(str1) for str1 in zip(*tableList)]
def tokenizeIt(table, clean=False):
tokenizedTable = []
maxLen = 0
for content in tqdm(table, file=sys.stdout):
if clean:
text = stripTagsAndUris(content)
text = get_words(text)
tokenizedTable.append(text)
if len(text) > maxLen:
maxLen = len(text)
else:
text = content.split(' ')
tokenizedTable.append(text)
if len(text) > maxLen:
maxLen = len(text)
return tokenizedTable, maxLen
def createVocab(tableList, min_count=1, reservedList=['<pad>', '<EOF>', '<unk>']):
logger.info(' Creating vocabulary ')
contentList = []
for list1 in tableList:
contentList.extend(list1)
wdFrq = {}
total_words = 0
for line in contentList:
for wd in line:
try:
wdFrq[wd] += 1
except KeyError:
wdFrq[wd] = 1
total_words += 1
logger.info(' %i total words, %i unique words ' % (total_words, len(wdFrq)))
sorted_word_freqs = sorted(wdFrq.items(), key=operator.itemgetter(1), reverse=True)
vocab_size = 0
for _, freq in sorted_word_freqs:
if freq >= min_count:
vocab_size += 1
vocabDict = {}
vocabReverseDict = []
idx = 0
for item1 in reservedList:
vocabDict[item1] = idx
vocabReverseDict.append(item1)
idx += 1
for word, _ in sorted_word_freqs[:vocab_size]:
vocabDict[word] = idx
vocabReverseDict.append(word)
idx += 1
logger.info(' vocab size %i ' % len(vocabReverseDict))
return vocabDict, vocabReverseDict
def word2num(contentTable, vocab, unk, maxLen, padding=None, eof=None):
unk_hit = 0
totalword = 0
data = []
for line in contentTable:
w2num = []
for word in line:
if word in vocab:
w2num.append(vocab[word])
else:
if not type(unk) is type(None):
w2num.append(vocab[unk])
unk_hit += 1
totalword += 1
if not type(eof) is type(None):
w2num.append(vocab[eof])
data.append(w2num)
logger.info(' total %i tokens processed, %i unk hit ' % (totalword, unk_hit))
# pad to np array
if not type(padding) is type(None):
logger.info(' padding data to width %d by %s padding' % (maxLen, padding))
np_ary = pad_sequences(data, maxlen=maxLen, padding=padding)
else:
np_ary = array(data)
return np_ary
def to_categorical2D(y, nb_classes=None):
if not nb_classes:
nb_classes = y.max()
return (np.arange(nb_classes) == y[:,:,None]).astype(int)
def to_categoricalAll(y, nb_classes):
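	# Multi-hot encode: for each line of label indices, set the corresponding
	# columns of a (len(y), nb_classes) matrix to 1.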
categorical = zeros((len(y),nb_classes))
line_idx = 0
for line in y:
for elem in line:
categorical[line_idx][elem] = 1
line_idx += 1
return categorical
def categorical_toary(y, round01=False):
(length, nb_classes) = y.shape
if round01:
y = np.around(y)
y_ary = []
for i in range(length):
y_ary.append(np.argwhere(y[i,:] == 1).ravel().tolist())
return y_ary
def prob_top_n(y, top=5):
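	# For each row, look at the `top` most probable classes and keep only those
	# whose probability rounds to 1 (i.e. >= 0.5); return them as index lists.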
(length, nb_classes) = y.shape
y_ary = []
for i in range(length):
idx_prob = list(zip(list(range(nb_classes)), y[i,:]))
sorted_idx_prob = sorted(idx_prob, key=operator.itemgetter(1), reverse=True)[:top]
idx_round = np.around(sorted_idx_prob).astype(int)
idx_pos = []
for (idx, prob) in idx_round:
if prob == 1:
idx_pos.append(idx)
y_ary.append(idx_pos)
return y_ary
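# Illustrative usage sketch (toy data, not part of the original pipeline):
# build a vocabulary from tokenized tables, then map one table to padded
# index arrays. The token lists and lengths below are made up.
if __name__ == '__main__':
    train_tokens = [['how', 'to', 'tag'], ['tag', 'prediction', 'demo']]
    test_tokens = [['another', 'tag', 'question']]
    vocab, rev_vocab = createVocab([train_tokens, test_tokens], min_count=1)
    ids = word2num(train_tokens, vocab, '<unk>', maxLen=4,
                   padding='post', eof='<EOF>')
    print(ids)
    print([rev_vocab[i] for i in ids[0]])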
| 28.179641
| 195
| 0.645771
|
8e48b92f7a0805a175511fefe3b07c3e9e6087a2
| 4,776
|
py
|
Python
|
release/src-rt-6.x.4708/router/samba3/source4/torture/drs/python/drs_base.py
|
zaion520/ATtomato
|
4d48bb79f8d147f89a568cf18da9e0edc41f93fb
|
[
"FSFAP"
] | 2
|
2019-01-13T09:16:31.000Z
|
2019-02-15T03:30:28.000Z
|
release/src-rt-6.x.4708/router/samba3/source4/torture/drs/python/drs_base.py
|
zaion520/ATtomato
|
4d48bb79f8d147f89a568cf18da9e0edc41f93fb
|
[
"FSFAP"
] | null | null | null |
release/src-rt-6.x.4708/router/samba3/source4/torture/drs/python/drs_base.py
|
zaion520/ATtomato
|
4d48bb79f8d147f89a568cf18da9e0edc41f93fb
|
[
"FSFAP"
] | 2
|
2020-03-08T01:58:25.000Z
|
2020-12-20T10:34:54.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Unix SMB/CIFS implementation.
# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import time
import os
sys.path.insert(0, "bin/python")
import samba
samba.ensure_external_module("testtools", "testtools")
samba.ensure_external_module("subunit", "subunit/python")
from ldb import (
SCOPE_BASE,
Message,
FLAG_MOD_REPLACE,
)
import samba.tests
class DrsBaseTestCase(samba.tests.BlackboxTestCase):
"""Base class implementation for all DRS python tests.
It is intended to provide common initialization and
functionality used by all DRS tests in drs/python
test package. For instance, DC1 and DC2 are always used
to pass URLs for DCs to test against"""
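    # The DC1/DC2 environment variables are expected to point at the two DCs
    # under test, for example (illustrative values only):
    #   DC1=ldap://dc1.samdom.example.com
    #   DC2=ldap://dc2.samdom.example.com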
def setUp(self):
super(DrsBaseTestCase, self).setUp()
# connect to DCs
url_dc = samba.tests.env_get_var_value("DC1")
(self.ldb_dc1, self.info_dc1) = samba.tests.connect_samdb_ex(url_dc,
ldap_only=True)
url_dc = samba.tests.env_get_var_value("DC2")
(self.ldb_dc2, self.info_dc2) = samba.tests.connect_samdb_ex(url_dc,
ldap_only=True)
# cache some of RootDSE props
self.schema_dn = self.info_dc1["schemaNamingContext"][0]
self.domain_dn = self.info_dc1["defaultNamingContext"][0]
self.config_dn = self.info_dc1["configurationNamingContext"][0]
self.forest_level = int(self.info_dc1["forestFunctionality"][0])
# we will need DCs DNS names for 'samba-tool drs' command
self.dnsname_dc1 = self.info_dc1["dnsHostName"][0]
self.dnsname_dc2 = self.info_dc2["dnsHostName"][0]
def tearDown(self):
super(DrsBaseTestCase, self).tearDown()
def _GUID_string(self, guid):
return self.ldb_dc1.schema_format_value("objectGUID", guid)
def _ldap_schemaUpdateNow(self, sam_db):
rec = {"dn": "",
"schemaUpdateNow": "1"}
m = Message.from_dict(sam_db, rec, FLAG_MOD_REPLACE)
sam_db.modify(m)
def _deleted_objects_dn(self, sam_ldb):
wkdn = "<WKGUID=18E2EA80684F11D2B9AA00C04F79F805,%s>" % self.domain_dn
res = sam_ldb.search(base=wkdn,
scope=SCOPE_BASE,
controls=["show_deleted:1"])
self.assertEquals(len(res), 1)
return str(res[0]["dn"])
def _make_obj_name(self, prefix):
return prefix + time.strftime("%s", time.gmtime())
def _samba_tool_cmdline(self, drs_command):
# find out where is net command
samba_tool_cmd = os.path.abspath("./bin/samba-tool")
# make command line credentials string
creds = self.get_credentials()
cmdline_auth = "-U%s/%s%%%s" % (creds.get_domain(),
creds.get_username(), creds.get_password())
# bin/samba-tool drs <drs_command> <cmdline_auth>
return "%s drs %s %s" % (samba_tool_cmd, drs_command, cmdline_auth)
def _net_drs_replicate(self, DC, fromDC, nc_dn=None, forced=True):
if nc_dn is None:
nc_dn = self.domain_dn
# make base command line
samba_tool_cmdline = self._samba_tool_cmdline("replicate")
if forced:
samba_tool_cmdline += " --sync-forced"
# bin/samba-tool drs replicate <Dest_DC_NAME> <Src_DC_NAME> <Naming Context>
cmd_line = "%s %s %s %s" % (samba_tool_cmdline, DC, fromDC, nc_dn)
return self.check_output(cmd_line)
def _enable_inbound_repl(self, DC):
# make base command line
samba_tool_cmd = self._samba_tool_cmdline("options")
# disable replication
self.check_run("%s %s --dsa-option=-DISABLE_INBOUND_REPL" %(samba_tool_cmd, DC))
def _disable_inbound_repl(self, DC):
# make base command line
samba_tool_cmd = self._samba_tool_cmdline("options")
# disable replication
self.check_run("%s %s --dsa-option=+DISABLE_INBOUND_REPL" %(samba_tool_cmd, DC))
| 38.829268
| 88
| 0.647613
|
c0c7ee9a0323b6ec3f4252fdfe897de4d50ecc85
| 1,726
|
py
|
Python
|
examples/figures.py
|
pbmanis/cnmodel
|
eee593c673752c19137658d5b9a381ea9ad4580f
|
[
"BSD-3-Clause"
] | 5
|
2017-07-26T21:46:14.000Z
|
2020-11-27T07:53:14.000Z
|
examples/figures.py
|
pbmanis/cnmodel
|
eee593c673752c19137658d5b9a381ea9ad4580f
|
[
"BSD-3-Clause"
] | 12
|
2017-07-26T07:16:16.000Z
|
2021-07-14T13:41:37.000Z
|
examples/figures.py
|
pbmanis/cnmodel
|
eee593c673752c19137658d5b9a381ea9ad4580f
|
[
"BSD-3-Clause"
] | 10
|
2017-07-26T07:03:29.000Z
|
2021-06-23T15:52:37.000Z
|
from __future__ import print_function
"""
"""
import sys
import subprocess
if len(sys.argv) < 2: # if no argument, print helpful message
print("Plot selected figures from paper, Manis and Campagnola, Hearing Research. 2018")
print("Usage: figures.py [2a | 2b | 2c | 3 | 4 | 5 | 6a | 6d | 7]")
exit(1)
arg = sys.argv[1] # get argument, check that it is valid
if arg not in ['2a', '2b', '2c', '3', '4', '5', '6a', '6d', '7']:
print("Usage: figures.py [2a | 2b | 2c | 3 | 4 | 5 | 6a | 6d | 7]")
exit(1)
if arg == '2a':
proc = subprocess.Popen(['python', 'examples/test_mechanisms.py', 'klt'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.wait()
print (proc.stdout.read())
# ;;
# 2b)
# python examples/test_mechanisms.py kht
# ;;
# 2c)
# python examples/test_mechanisms.py ka
# ;;
# 3)
# python examples/toy_model.py
# ;;
# 4)
# python examples/test_synapses.py sgc bushy
# ;;
# 5)
# python examples/test_decorator.py
# ;;
# 6a)
# python examples/test_bushy_variation.py a
# ;;
# 6d)
# python examples/test_bushy_variation.py d
# ;;
#
# 7)
# while true; do
# echo "This figure may take hours to generate!"
# read -p "Are you sure you want to run the script?" yn
# case $yn in
# [Yy]* ) python examples/test_physiology.py; break;;
# [Nn]* ) exit;;
# * ) echo "Please answer yes or no.";;
# esac
# done
# ;;
#
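# A sketch (reconstructed from the shell fragments kept above, not part of the
# original script) of how the remaining figure arguments could be dispatched
# the same way as '2a'. Figure 7 is omitted here because it can take hours and
# the original asks for confirmation before running it.
figure_commands = {
    '2b': ['examples/test_mechanisms.py', 'kht'],
    '2c': ['examples/test_mechanisms.py', 'ka'],
    '3':  ['examples/toy_model.py'],
    '4':  ['examples/test_synapses.py', 'sgc', 'bushy'],
    '5':  ['examples/test_decorator.py'],
    '6a': ['examples/test_bushy_variation.py', 'a'],
    '6d': ['examples/test_bushy_variation.py', 'd'],
}
if arg in figure_commands:
    proc = subprocess.Popen(['python'] + figure_commands[arg],
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    proc.wait()
    print(proc.stdout.read())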
| 27.83871
| 91
| 0.505214
|
4ebb9c9334d36b725e072f9ec0586c516d0c5abc
| 68
|
py
|
Python
|
pyxb/bundles/reqif/driver.py
|
eLBati/pyxb
|
14737c23a125fd12c954823ad64fc4497816fae3
|
[
"Apache-2.0"
] | 123
|
2015-01-12T06:43:22.000Z
|
2022-03-20T18:06:46.000Z
|
pyxb/bundles/reqif/driver.py
|
eLBati/pyxb
|
14737c23a125fd12c954823ad64fc4497816fae3
|
[
"Apache-2.0"
] | 103
|
2015-01-08T18:35:57.000Z
|
2022-01-18T01:44:14.000Z
|
pyxb/bundles/reqif/driver.py
|
eLBati/pyxb
|
14737c23a125fd12c954823ad64fc4497816fae3
|
[
"Apache-2.0"
] | 54
|
2015-02-15T17:12:00.000Z
|
2022-03-07T23:02:32.000Z
|
# -*- coding: utf-8 -*-
from pyxb.bundles.reqif.raw.driver import *
| 22.666667
| 43
| 0.661765
|
a8476f13aa7ec8aead3ddfc3532185a0782ed373
| 1,254
|
py
|
Python
|
updater/reports/ReportOrgsAbandoned.py
|
rashamalek/hubble
|
508cc0ce4ef60fb968df5e7252ee878bf83c929f
|
[
"MIT"
] | null | null | null |
updater/reports/ReportOrgsAbandoned.py
|
rashamalek/hubble
|
508cc0ce4ef60fb968df5e7252ee878bf83c929f
|
[
"MIT"
] | 1
|
2022-03-02T10:13:49.000Z
|
2022-03-02T10:13:49.000Z
|
updater/reports/ReportOrgsAbandoned.py
|
rashamalek/hubble
|
508cc0ce4ef60fb968df5e7252ee878bf83c929f
|
[
"MIT"
] | null | null | null |
from .ReportDaily import *
# Find the organizations that have not received a push for the longest time.
# Only look at organizations that have not received a push for at least one
# year. Only look at repositories that are still maintained (not archived!).
class ReportOrgsAbandoned(ReportDaily):
def name(self):
return "organizations-abandoned"
def updateDailyData(self):
self.detailedHeader, self.detailedData = self.parseData(self.executeQuery(self.query()))
if len(self.data) == 0:
self.header = ["date", "abandoned organizations"]
self.data.append([str(self.yesterday()), len(self.detailedData)])
self.truncateData(self.timeRangeTotal())
self.sortDataByDate()
def query(self):
query = '''
SELECT
users.login AS "organization",
DATE(MAX(pushes.created_at)) AS "last push"
FROM
repositories
JOIN users ON repositories.owner_id = users.id
JOIN pushes ON pushes.repository_id = repositories.id
WHERE
users.type = "organization"
AND repositories.maintained = 1 ''' + \
self.andExcludedEntities("users.login") + '''
GROUP BY
users.id
HAVING
CAST(MAX(pushes.created_at) AS DATE) < "''' + str(self.daysAgo(365)) + '''"
ORDER BY
MAX(pushes.created_at)
'''
return query
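# For illustration (hypothetical values): updateDailyData() above appends one
# summary row per day, e.g. ["2022-03-01", 42] under the header
# ["date", "abandoned organizations"], while self.detailedData holds one
# ("organization", "last push") row per organization returned by the query.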
| 32.153846 | 90 | 0.707337 |
c6a2852d37e984a32f50323523da5363f7038d6d | 225,669 | py | Python |
spyder/plugins/editor/widgets/codeeditor.py | skjerns/spyder | c130a2e2f8782d8a27886da100e70e1e47d903f5 | ["MIT"] | null | null | null |
spyder/plugins/editor/widgets/codeeditor.py | skjerns/spyder | c130a2e2f8782d8a27886da100e70e1e47d903f5 | ["MIT"] | null | null | null |
spyder/plugins/editor/widgets/codeeditor.py | skjerns/spyder | c130a2e2f8782d8a27886da100e70e1e47d903f5 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Editor widget based on QtGui.QPlainTextEdit
"""
# TODO: Try to separate this module from spyder to create a self
# consistent editor module (Qt source code and shell widgets library)
# pylint: disable=C0103
# pylint: disable=R0903
# pylint: disable=R0911
# pylint: disable=R0201
# Standard library imports
from unicodedata import category
import logging
import functools
import os.path as osp
import re
import sre_constants
import sys
import textwrap
# Third party imports
from diff_match_patch import diff_match_patch
from IPython.core.inputtransformer2 import TransformerManager
from qtpy.compat import to_qvariant
from qtpy.QtCore import (QEvent, QRegExp, Qt, QTimer, QThread, QUrl, Signal,
Slot)
from qtpy.QtGui import (QColor, QCursor, QFont, QKeySequence, QPaintEvent,
QPainter, QMouseEvent, QTextCursor, QDesktopServices,
QKeyEvent, QTextDocument, QTextFormat, QTextOption,
QTextCharFormat, QTextLayout)
from qtpy.QtWidgets import (QApplication, QMenu, QMessageBox, QSplitter,
QScrollBar)
from spyder_kernels.utils.dochelpers import getobj
from three_merge import merge
# Local imports
from spyder.api.panel import Panel
from spyder.config.base import _, get_debug_level, running_under_pytest
from spyder.config.manager import CONF
from spyder.plugins.editor.api.decoration import TextDecoration
from spyder.plugins.editor.extensions import (CloseBracketsExtension,
CloseQuotesExtension,
DocstringWriterExtension,
QMenuOnlyForEnter,
EditorExtensionsManager,
SnippetsExtension)
from spyder.plugins.completion.providers.kite.widgets import KiteCallToAction
from spyder.plugins.completion.api import (CompletionRequestTypes,
TextDocumentSyncKind,
DiagnosticSeverity)
from spyder.plugins.editor.panels import (ClassFunctionDropdown,
DebuggerPanel, EdgeLine,
FoldingPanel, IndentationGuide,
LineNumberArea, PanelsManager,
ScrollFlagArea)
from spyder.plugins.editor.utils.editor import (TextHelper, BlockUserData,
get_file_language)
from spyder.plugins.editor.utils.debugger import DebuggerManager
from spyder.plugins.editor.utils.kill_ring import QtKillRing
from spyder.plugins.editor.utils.languages import ALL_LANGUAGES, CELL_LANGUAGES
from spyder.plugins.editor.panels.utils import (
merge_folding, collect_folding_regions)
from spyder.plugins.completion.decorators import (
request, handles, class_register)
from spyder.plugins.editor.widgets.codeeditor_widgets import GoToLineDialog
from spyder.plugins.editor.widgets.base import TextEditBaseWidget
from spyder.plugins.outlineexplorer.api import (OutlineExplorerData as OED,
is_cell_header)
from spyder.py3compat import PY2, to_text_string, is_string, is_text_string
from spyder.utils import encoding, sourcecode
from spyder.utils.clipboard_helper import CLIPBOARD_HELPER
from spyder.utils.icon_manager import ima
from spyder.utils import syntaxhighlighters as sh
from spyder.utils.palette import SpyderPalette, QStylePalette
from spyder.utils.qthelpers import (add_actions, create_action, file_uri,
mimedata2url, start_file)
from spyder.utils.vcs import get_git_remotes, remote_to_url
from spyder.utils.qstringhelpers import qstring_length
try:
import nbformat as nbformat
from nbconvert import PythonExporter as nbexporter
except Exception:
nbformat = None # analysis:ignore
logger = logging.getLogger(__name__)
# Regexp to detect noqa inline comments.
NOQA_INLINE_REGEXP = re.compile(r"#?noqa", re.IGNORECASE)
@class_register
class CodeEditor(TextEditBaseWidget):
"""Source Code Editor Widget based exclusively on Qt"""
LANGUAGES = {
'Python': (sh.PythonSH, '#'),
'IPython': (sh.IPythonSH, '#'),
'Cython': (sh.CythonSH, '#'),
'Fortran77': (sh.Fortran77SH, 'c'),
'Fortran': (sh.FortranSH, '!'),
'Idl': (sh.IdlSH, ';'),
'Diff': (sh.DiffSH, ''),
'GetText': (sh.GetTextSH, '#'),
'Nsis': (sh.NsisSH, '#'),
'Html': (sh.HtmlSH, ''),
'Yaml': (sh.YamlSH, '#'),
'Cpp': (sh.CppSH, '//'),
'OpenCL': (sh.OpenCLSH, '//'),
'Enaml': (sh.EnamlSH, '#'),
'Markdown': (sh.MarkdownSH, '#'),
# Every other language
'None': (sh.TextSH, ''),
}
TAB_ALWAYS_INDENTS = (
'py', 'pyw', 'python', 'ipy', 'c', 'cpp', 'cl', 'h', 'pyt', 'pyi'
)
# Timeout to update decorations (through a QTimer) when a position
# changed is detected in the vertical scrollbar or when releasing
# the up/down arrow keys.
UPDATE_DECORATIONS_TIMEOUT = 500 # milliseconds
    # Timeouts (in milliseconds) to synchronize symbols and folding after
# linting results arrive, according to the number of lines in the file.
SYNC_SYMBOLS_AND_FOLDING_TIMEOUTS = {
# Lines: Timeout
500: 500,
1500: 1200,
2500: 3200,
6500: 4500
}
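    # For example (illustrative file sizes): with the thresholds above, a
    # 400-line file syncs after 500 ms, a 2,000-line file after 3200 ms, and
    # any file above the last threshold falls back to the largest timeout
    # (4500 ms); see set_sync_symbols_and_folding_timeout below.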
# Custom signal to be emitted upon completion of the editor's paintEvent
painted = Signal(QPaintEvent)
    # To have these attrs when early viewportEvent calls are triggered
edge_line = None
indent_guides = None
sig_breakpoints_changed = Signal()
sig_debug_stop = Signal((int,), ())
sig_debug_start = Signal()
sig_breakpoints_saved = Signal()
sig_filename_changed = Signal(str)
sig_bookmarks_changed = Signal()
go_to_definition = Signal(str, int, int)
sig_show_object_info = Signal(int)
sig_run_selection = Signal()
sig_run_cell_and_advance = Signal()
sig_run_cell = Signal()
sig_re_run_last_cell = Signal()
sig_debug_cell = Signal()
sig_cursor_position_changed = Signal(int, int)
sig_new_file = Signal(str)
sig_refresh_formatting = Signal(bool)
#: Signal emitted when the editor loses focus
sig_focus_changed = Signal()
#: Signal emitted when a key is pressed
sig_key_pressed = Signal(QKeyEvent)
#: Signal emitted when a key is released
sig_key_released = Signal(QKeyEvent)
#: Signal emitted when the alt key is pressed and the left button of the
# mouse is clicked
sig_alt_left_mouse_pressed = Signal(QMouseEvent)
#: Signal emitted when the alt key is pressed and the cursor moves over
# the editor
sig_alt_mouse_moved = Signal(QMouseEvent)
#: Signal emitted when the cursor leaves the editor
sig_leave_out = Signal()
#: Signal emitted when the flags need to be updated in the scrollflagarea
sig_flags_changed = Signal()
    #: Signal emitted when the syntax color theme of the editor changes.
sig_theme_colors_changed = Signal(dict)
#: Signal emitted when a new text is set on the widget
new_text_set = Signal()
# -- LSP signals
#: Signal emitted when an LSP request is sent to the LSP manager
sig_perform_completion_request = Signal(str, str, dict)
#: Signal emitted when a response is received from the completion plugin
# For now it's only used on tests, but it could be used to track
# and profile completion diagnostics.
completions_response_signal = Signal(str, object)
#: Signal to display object information on the Help plugin
sig_display_object_info = Signal(str, bool)
#: Signal only used for tests
# TODO: Remove it!
sig_signature_invoked = Signal(dict)
#: Signal emitted when processing code analysis warnings is finished
sig_process_code_analysis = Signal()
# Used for testing. When the mouse moves with Ctrl/Cmd pressed and
# a URI is found, this signal is emitted
sig_uri_found = Signal(str)
sig_file_uri_preprocessed = Signal(str)
"""
This signal is emitted when the go to uri for a file has been
preprocessed.
Parameters
----------
fpath: str
The preprocessed file path.
"""
# Signal with the info about the current completion item documentation
# str: object name
# str: object signature/documentation
# bool: force showing the info
sig_show_completion_object_info = Signal(str, str, bool)
# Used to indicate if text was inserted into the editor
sig_text_was_inserted = Signal()
# Used to indicate that text will be inserted into the editor
sig_will_insert_text = Signal(str)
# Used to indicate that a text selection will be removed
sig_will_remove_selection = Signal(tuple, tuple)
# Used to indicate that text will be pasted
sig_will_paste_text = Signal(str)
# Used to indicate that an undo operation will take place
sig_undo = Signal()
# Used to indicate that an undo operation will take place
sig_redo = Signal()
# Used to start the status spinner in the editor
sig_start_operation_in_progress = Signal()
# Used to start the status spinner in the editor
sig_stop_operation_in_progress = Signal()
def __init__(self, parent=None):
TextEditBaseWidget.__init__(self, parent)
self.setFocusPolicy(Qt.StrongFocus)
# Projects
self.current_project_path = None
# Caret (text cursor)
self.setCursorWidth(CONF.get('main', 'cursor/width'))
self.text_helper = TextHelper(self)
self._panels = PanelsManager(self)
# Mouse moving timer / Hover hints handling
# See: mouseMoveEvent
self.tooltip_widget.sig_help_requested.connect(
self.show_object_info)
self.tooltip_widget.sig_completion_help_requested.connect(
self.show_completion_object_info)
self._last_point = None
self._last_hover_word = None
self._last_hover_cursor = None
self._timer_mouse_moving = QTimer(self)
self._timer_mouse_moving.setInterval(350)
self._timer_mouse_moving.setSingleShot(True)
self._timer_mouse_moving.timeout.connect(self._handle_hover)
# Typing keys / handling on the fly completions
# See: keyPressEvent
self._last_key_pressed_text = ''
self._last_pressed_key = None
self._timer_autocomplete = QTimer(self)
self._timer_autocomplete.setSingleShot(True)
self._timer_autocomplete.timeout.connect(self._handle_completions)
# Handle completions hints
self._completions_hint_idle = False
self._timer_completions_hint = QTimer(self)
self._timer_completions_hint.setSingleShot(True)
self._timer_completions_hint.timeout.connect(
self._set_completions_hint_idle)
self.completion_widget.sig_completion_hint.connect(
self.show_hint_for_completion)
# Request symbols and folding after a timeout.
# See: process_diagnostics
self._timer_sync_symbols_and_folding = QTimer(self)
self._timer_sync_symbols_and_folding.setSingleShot(True)
self._timer_sync_symbols_and_folding.timeout.connect(
self.sync_symbols_and_folding)
self.blockCountChanged.connect(
self.set_sync_symbols_and_folding_timeout)
# Goto uri
self._last_hover_pattern_key = None
self._last_hover_pattern_text = None
# 79-col edge line
self.edge_line = self.panels.register(EdgeLine(),
Panel.Position.FLOATING)
# indent guides
self.indent_guides = self.panels.register(IndentationGuide(),
Panel.Position.FLOATING)
# Blanks enabled
self.blanks_enabled = False
# Underline errors and warnings
self.underline_errors_enabled = False
# Scrolling past the end of the document
self.scrollpastend_enabled = False
self.background = QColor('white')
# Folding
self.panels.register(FoldingPanel())
# Debugger panel (Breakpoints)
self.debugger = DebuggerManager(self)
self.panels.register(DebuggerPanel())
# Update breakpoints if the number of lines in the file changes
self.blockCountChanged.connect(self.debugger.update_breakpoints)
# Line number area management
self.linenumberarea = self.panels.register(LineNumberArea())
# Class and Method/Function Dropdowns
self.classfuncdropdown = self.panels.register(
ClassFunctionDropdown(),
Panel.Position.TOP,
)
# Colors to be defined in _apply_highlighter_color_scheme()
        # Current cell color and current line color are defined in base.py
self.occurrence_color = None
self.ctrl_click_color = None
self.sideareas_color = None
self.matched_p_color = None
self.unmatched_p_color = None
self.normal_color = None
self.comment_color = None
# --- Syntax highlight entrypoint ---
#
# - if set, self.highlighter is responsible for
# - coloring raw text data inside editor on load
# - coloring text data when editor is cloned
# - updating document highlight on line edits
# - providing color palette (scheme) for the editor
# - providing data for Outliner
# - self.highlighter is not responsible for
# - background highlight for current line
# - background highlight for search / current line occurrences
self.highlighter_class = sh.TextSH
self.highlighter = None
ccs = 'Spyder'
if ccs not in sh.COLOR_SCHEME_NAMES:
ccs = sh.COLOR_SCHEME_NAMES[0]
self.color_scheme = ccs
self.highlight_current_line_enabled = False
# Vertical scrollbar
# This is required to avoid a "RuntimeError: no access to protected
# functions or signals for objects not created from Python" in
# Linux Ubuntu. See spyder-ide/spyder#5215.
self.setVerticalScrollBar(QScrollBar())
# Highlights and flag colors
self.warning_color = SpyderPalette.COLOR_WARN_2
self.error_color = SpyderPalette.COLOR_ERROR_1
self.todo_color = SpyderPalette.GROUP_9
self.breakpoint_color = SpyderPalette.ICON_3
self.occurrence_color = QColor(SpyderPalette.GROUP_2).lighter(160)
self.found_results_color = QColor(SpyderPalette.COLOR_OCCURRENCE_4)
# Scrollbar flag area
self.scrollflagarea = self.panels.register(ScrollFlagArea(),
Panel.Position.RIGHT)
self.panels.refresh()
self.document_id = id(self)
# Indicate occurrences of the selected word
self.cursorPositionChanged.connect(self.__cursor_position_changed)
self.__find_first_pos = None
self.language = None
self.supported_language = False
self.supported_cell_language = False
self.comment_string = None
self._kill_ring = QtKillRing(self)
# Block user data
self.blockCountChanged.connect(self.update_bookmarks)
# Highlight using Pygments highlighter timer
# ---------------------------------------------------------------------
# For files that use the PygmentsSH we parse the full file inside
# the highlighter in order to generate the correct coloring.
self.timer_syntax_highlight = QTimer(self)
self.timer_syntax_highlight.setSingleShot(True)
self.timer_syntax_highlight.timeout.connect(
self.run_pygments_highlighter)
# Mark occurrences timer
self.occurrence_highlighting = None
self.occurrence_timer = QTimer(self)
self.occurrence_timer.setSingleShot(True)
self.occurrence_timer.setInterval(1500)
self.occurrence_timer.timeout.connect(self.__mark_occurrences)
self.occurrences = []
# Update decorations
self.update_decorations_timer = QTimer(self)
self.update_decorations_timer.setSingleShot(True)
self.update_decorations_timer.setInterval(
self.UPDATE_DECORATIONS_TIMEOUT)
self.update_decorations_timer.timeout.connect(
self.update_decorations)
self.verticalScrollBar().valueChanged.connect(
lambda value: self.update_decorations_timer.start())
# Mark found results
self.textChanged.connect(self.__text_has_changed)
self.found_results = []
# Docstring
self.writer_docstring = DocstringWriterExtension(self)
# Context menu
self.gotodef_action = None
self.setup_context_menu()
# Tab key behavior
self.tab_indents = None
self.tab_mode = True # see CodeEditor.set_tab_mode
# Intelligent backspace mode
self.intelligent_backspace = True
# Automatic (on the fly) completions
self.automatic_completions = True
self.automatic_completions_after_chars = 3
self.automatic_completions_after_ms = 300
# Code Folding
self.code_folding = True
self.update_folding_thread = QThread()
# Completions hint
self.completions_hint = True
self.completions_hint_after_ms = 500
self.close_parentheses_enabled = True
self.close_quotes_enabled = False
self.add_colons_enabled = True
self.auto_unindent_enabled = True
# Autoformat on save
self.format_on_save = False
# Mouse tracking
self.setMouseTracking(True)
self.__cursor_changed = False
self._mouse_left_button_pressed = False
self.ctrl_click_color = QColor(Qt.blue)
self.bookmarks = self.get_bookmarks()
# Keyboard shortcuts
self.shortcuts = self.create_shortcuts()
# Paint event
self.__visible_blocks = [] # Visible blocks, update with repaint
self.painted.connect(self._draw_editor_cell_divider)
# Outline explorer
self.oe_proxy = None
# Line stripping
self.last_change_position = None
self.last_position = None
self.last_auto_indent = None
self.skip_rstrip = False
self.strip_trailing_spaces_on_modify = True
# Hover hints
self.hover_hints_enabled = None
# Language Server
self.filename = None
self.completions_available = False
self.text_version = 0
self.save_include_text = True
self.open_close_notifications = True
self.sync_mode = TextDocumentSyncKind.FULL
self.will_save_notify = False
self.will_save_until_notify = False
self.enable_hover = True
self.auto_completion_characters = []
self.resolve_completions_enabled = False
self.signature_completion_characters = []
self.go_to_definition_enabled = False
self.find_references_enabled = False
self.highlight_enabled = False
self.formatting_enabled = False
self.range_formatting_enabled = False
self.document_symbols_enabled = False
self.formatting_characters = []
self.completion_args = None
self.folding_supported = False
self.is_cloned = False
self.operation_in_progress = False
# Diagnostics
self.update_diagnostics_thread = QThread()
self._diagnostics = []
# Editor Extensions
self.editor_extensions = EditorExtensionsManager(self)
self.editor_extensions.add(CloseQuotesExtension())
self.editor_extensions.add(SnippetsExtension())
self.editor_extensions.add(CloseBracketsExtension())
# Text diffs across versions
self.differ = diff_match_patch()
self.previous_text = ''
self.patch = []
self.leading_whitespaces = {}
# re-use parent of completion_widget (usually the main window)
completion_parent = self.completion_widget.parent()
self.kite_call_to_action = KiteCallToAction(self, completion_parent)
# Some events should not be triggered during undo/redo
# such as line stripping
self.is_undoing = False
self.is_redoing = False
# --- Helper private methods
# ------------------------------------------------------------------------
# --- Hover/Hints
def _should_display_hover(self, point):
"""Check if a hover hint should be displayed:"""
if not self._mouse_left_button_pressed:
return (self.hover_hints_enabled and point
and self.get_word_at(point))
def _handle_hover(self):
"""Handle hover hint trigger after delay."""
self._timer_mouse_moving.stop()
pos = self._last_point
# These are textual characters but should not trigger a completion
# FIXME: update per language
ignore_chars = ['(', ')', '.']
if self._should_display_hover(pos):
key, pattern_text, cursor = self.get_pattern_at(pos)
text = self.get_word_at(pos)
if pattern_text:
ctrl_text = 'Cmd' if sys.platform == "darwin" else 'Ctrl'
if key in ['file']:
hint_text = ctrl_text + ' + ' + _('click to open file')
elif key in ['mail']:
hint_text = ctrl_text + ' + ' + _('click to send email')
elif key in ['url']:
hint_text = ctrl_text + ' + ' + _('click to open url')
else:
hint_text = ctrl_text + ' + ' + _('click to open')
hint_text = '<span> {} </span>'.format(hint_text)
self.show_tooltip(text=hint_text, at_point=pos)
return
cursor = self.cursorForPosition(pos)
cursor_offset = cursor.position()
line, col = cursor.blockNumber(), cursor.columnNumber()
self._last_point = pos
if text and self._last_hover_word != text:
if all(char not in text for char in ignore_chars):
self._last_hover_word = text
self.request_hover(line, col, cursor_offset)
else:
self.hide_tooltip()
elif not self.is_completion_widget_visible():
self.hide_tooltip()
def blockuserdata_list(self):
"""Get the list of all user data in document."""
block = self.document().firstBlock()
while block.isValid():
data = block.userData()
if data:
yield data
block = block.next()
def outlineexplorer_data_list(self):
"""Get the list of all user data in document."""
for data in self.blockuserdata_list():
if data.oedata:
yield data.oedata
# ---- Keyboard Shortcuts
def create_cursor_callback(self, attr):
"""Make a callback for cursor move event type, (e.g. "Start")"""
def cursor_move_event():
cursor = self.textCursor()
move_type = getattr(QTextCursor, attr)
cursor.movePosition(move_type)
self.setTextCursor(cursor)
return cursor_move_event
def create_shortcuts(self):
"""Create the local shortcuts for the CodeEditor."""
shortcut_context_name_callbacks = (
('editor', 'code completion', self.do_completion),
('editor', 'duplicate line down', self.duplicate_line_down),
('editor', 'duplicate line up', self.duplicate_line_up),
('editor', 'delete line', self.delete_line),
('editor', 'move line up', self.move_line_up),
('editor', 'move line down', self.move_line_down),
('editor', 'go to new line', self.go_to_new_line),
('editor', 'go to definition', self.go_to_definition_from_cursor),
('editor', 'toggle comment', self.toggle_comment),
('editor', 'blockcomment', self.blockcomment),
('editor', 'unblockcomment', self.unblockcomment),
('editor', 'transform to uppercase', self.transform_to_uppercase),
('editor', 'transform to lowercase', self.transform_to_lowercase),
('editor', 'indent', lambda: self.indent(force=True)),
('editor', 'unindent', lambda: self.unindent(force=True)),
('editor', 'start of line',
self.create_cursor_callback('StartOfLine')),
('editor', 'end of line',
self.create_cursor_callback('EndOfLine')),
('editor', 'previous line', self.create_cursor_callback('Up')),
('editor', 'next line', self.create_cursor_callback('Down')),
('editor', 'previous char', self.create_cursor_callback('Left')),
('editor', 'next char', self.create_cursor_callback('Right')),
('editor', 'previous word',
self.create_cursor_callback('PreviousWord')),
('editor', 'next word', self.create_cursor_callback('NextWord')),
('editor', 'kill to line end', self.kill_line_end),
('editor', 'kill to line start', self.kill_line_start),
('editor', 'yank', self._kill_ring.yank),
('editor', 'rotate kill ring', self._kill_ring.rotate),
('editor', 'kill previous word', self.kill_prev_word),
('editor', 'kill next word', self.kill_next_word),
('editor', 'start of document',
self.create_cursor_callback('Start')),
('editor', 'end of document',
self.create_cursor_callback('End')),
('editor', 'undo', self.undo),
('editor', 'redo', self.redo),
('editor', 'cut', self.cut),
('editor', 'copy', self.copy),
('editor', 'paste', self.paste),
('editor', 'delete', self.delete),
('editor', 'select all', self.selectAll),
('editor', 'docstring',
self.writer_docstring.write_docstring_for_shortcut),
('editor', 'autoformatting', self.format_document_or_range),
('array_builder', 'enter array inline', self.enter_array_inline),
('array_builder', 'enter array table', self.enter_array_table)
)
shortcuts = []
for context, name, callback in shortcut_context_name_callbacks:
shortcuts.append(
CONF.config_shortcut(
callback, context=context, name=name, parent=self))
return shortcuts
def get_shortcut_data(self):
"""
Returns shortcut data, a list of tuples (shortcut, text, default)
shortcut (QShortcut or QAction instance)
text (string): action/shortcut description
default (string): default key sequence
"""
return [sc.data for sc in self.shortcuts]
def closeEvent(self, event):
TextEditBaseWidget.closeEvent(self, event)
def get_document_id(self):
return self.document_id
def set_as_clone(self, editor):
"""Set as clone editor"""
self.setDocument(editor.document())
self.document_id = editor.get_document_id()
self.highlighter = editor.highlighter
self.eol_chars = editor.eol_chars
self._apply_highlighter_color_scheme()
# ---- Widget setup and options
def toggle_wrap_mode(self, enable):
"""Enable/disable wrap mode"""
self.set_wrap_mode('word' if enable else None)
def toggle_line_numbers(self, linenumbers=True, markers=False):
"""Enable/disable line numbers."""
self.linenumberarea.setup_margins(linenumbers, markers)
@property
def panels(self):
"""
Returns a reference to the
:class:`spyder.widgets.panels.managers.PanelsManager`
used to manage the collection of installed panels
"""
return self._panels
def setup_editor(self,
linenumbers=True,
language=None,
markers=False,
font=None,
color_scheme=None,
wrap=False,
tab_mode=True,
strip_mode=False,
intelligent_backspace=True,
automatic_completions=True,
automatic_completions_after_chars=3,
automatic_completions_after_ms=300,
completions_hint=True,
completions_hint_after_ms=500,
hover_hints=True,
code_snippets=True,
highlight_current_line=True,
highlight_current_cell=True,
occurrence_highlighting=True,
scrollflagarea=True,
edge_line=True,
edge_line_columns=(79,),
show_blanks=False,
underline_errors=False,
close_parentheses=True,
close_quotes=False,
add_colons=True,
auto_unindent=True,
indent_chars=" "*4,
tab_stop_width_spaces=4,
cloned_from=None,
filename=None,
occurrence_timeout=1500,
show_class_func_dropdown=False,
indent_guides=False,
scroll_past_end=False,
show_debug_panel=True,
folding=True,
remove_trailing_spaces=False,
remove_trailing_newlines=False,
add_newline=False,
format_on_save=False):
"""
        Set up the configuration for the CodeEditor instance.
        Usually the parameters here are related to a configurable preference
        in the Preferences dialog and Editor configurations:
linenumbers: Enable/Disable line number panel. Default True.
language: Set editor language for example python. Default None.
markers: Enable/Disable markers panel. Used to show elements like
Code Analysis. Default False.
font: Base font for the Editor to use. Default None.
color_scheme: Initial color scheme for the Editor to use. Default None.
wrap: Enable/Disable line wrap. Default False.
        tab_mode: Enable/Disable using Tab as a delimiter between words.
            Default True.
        strip_mode: Enable/Disable stripping trailing spaces when
            modifying the file. Default False.
        intelligent_backspace: Enable/Disable automatically unindenting
            inserted text (unindenting happens if the leading text length of
            the line isn't a multiple of the length of the indentation chars
            being used). Default True.
        automatic_completions: Enable/Disable automatic completions.
            How these completions are triggered can be adjusted with the two
            following kwargs. Default True.
        automatic_completions_after_chars: Number of characters to type to
            trigger an automatic completion. Default 3.
automatic_completions_after_ms: Number of milliseconds to pass before
an autocompletion is triggered. Default 300.
completions_hint: Enable/Disable documentation hints for completions.
Default True.
completions_hint_after_ms: Number of milliseconds over a completion
item to show the documentation hint. Default 500.
hover_hints: Enable/Disable documentation hover hints. Default True.
code_snippets: Enable/Disable code snippets completions. Default True.
highlight_current_line: Enable/Disable current line highlighting.
Default True.
highlight_current_cell: Enable/Disable current cell highlighting.
Default True.
occurrence_highlighting: Enable/Disable highlighting of current word
occurrence in the file. Default True.
scrollflagarea : Enable/Disable flag area that shows at the left of
the scroll bar. Default True.
edge_line: Enable/Disable vertical line to show max number of
characters per line. Customizable number of columns in the
following kwarg. Default True.
edge_line_columns: Number of columns/characters where the editor
horizontal edge line will show. Default (79,).
show_blanks: Enable/Disable blanks highlighting. Default False.
        underline_errors: Enable/Disable showing an underline to highlight
errors. Default False.
close_parentheses: Enable/Disable automatic parentheses closing
insertion. Default True.
close_quotes: Enable/Disable automatic closing of quotes.
Default False.
add_colons: Enable/Disable automatic addition of colons. Default True.
        auto_unindent: Enable/Disable automatic unindentation before else,
elif, finally or except statements. Default True.
indent_chars: Characters to use for indentation. Default " "*4.
        tab_stop_width_spaces: Width of a tab stop, in number of spaces.
            Default 4.
cloned_from: Editor instance used as template to instantiate this
CodeEditor instance. Default None.
filename: Initial filename to show. Default None.
occurrence_timeout : Timeout in milliseconds to start highlighting
matches/occurrences for the current word under the cursor.
Default 1500 ms.
show_class_func_dropdown: Enable/Disable a Matlab like widget to show
classes and functions available in the current file. Default False.
indent_guides: Enable/Disable highlighting of code indentation.
Default False.
        scroll_past_end: Enable/Disable the possibility to scroll the file
            past its end. Default False.
show_debug_panel: Enable/Disable debug panel. Default True.
folding: Enable/Disable code folding. Default True.
remove_trailing_spaces: Remove trailing whitespaces on lines.
Default False.
remove_trailing_newlines: Remove extra lines at the end of the file.
Default False.
add_newline: Add a newline at the end of the file if there is not one.
Default False.
format_on_save: Autoformat file automatically when saving.
Default False.
"""
self.set_close_parentheses_enabled(close_parentheses)
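        # A typical call to this method (illustrative values only; every
        # kwarg above is optional):
        #   editor.setup_editor(linenumbers=True, language='Python',
        #                       font=QFont('Monospace', 10), wrap=False,
        #                       indent_chars=' ' * 4, folding=True)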
self.set_close_quotes_enabled(close_quotes)
self.set_add_colons_enabled(add_colons)
self.set_auto_unindent_enabled(auto_unindent)
self.set_indent_chars(indent_chars)
# Show/hide the debug panel depending on the language and parameter
self.set_debug_panel(show_debug_panel, language)
# Show/hide folding panel depending on parameter
self.toggle_code_folding(folding)
# Scrollbar flag area
self.scrollflagarea.set_enabled(scrollflagarea)
# Debugging
self.debugger.set_filename(filename)
# Edge line
self.edge_line.set_enabled(edge_line)
self.edge_line.set_columns(edge_line_columns)
# Indent guides
self.toggle_identation_guides(indent_guides)
if self.indent_chars == '\t':
self.indent_guides.set_indentation_width(
tab_stop_width_spaces)
else:
self.indent_guides.set_indentation_width(len(self.indent_chars))
# Blanks
self.set_blanks_enabled(show_blanks)
# Remove trailing whitespaces
self.set_remove_trailing_spaces(remove_trailing_spaces)
# Remove trailing newlines
self.set_remove_trailing_newlines(remove_trailing_newlines)
# Add newline at the end
self.set_add_newline(add_newline)
# Scrolling past the end
self.set_scrollpastend_enabled(scroll_past_end)
# Line number area and indent guides
if cloned_from:
self.setFont(font) # this is required for line numbers area
            # Needed to show indent guides for split editor panels
# See spyder-ide/spyder#10900
self.patch = cloned_from.patch
self.is_cloned = True
self.toggle_line_numbers(linenumbers, markers)
# Lexer
self.filename = filename
self.set_language(language, filename)
# Underline errors and warnings
self.set_underline_errors_enabled(underline_errors)
# Highlight current cell
self.set_highlight_current_cell(highlight_current_cell)
# Highlight current line
self.set_highlight_current_line(highlight_current_line)
# Occurrence highlighting
self.set_occurrence_highlighting(occurrence_highlighting)
self.set_occurrence_timeout(occurrence_timeout)
        # Tab always indents (even when cursor is not at the start of the line)
self.set_tab_mode(tab_mode)
# Intelligent backspace
self.toggle_intelligent_backspace(intelligent_backspace)
# Automatic completions
self.toggle_automatic_completions(automatic_completions)
self.set_automatic_completions_after_chars(
automatic_completions_after_chars)
self.set_automatic_completions_after_ms(automatic_completions_after_ms)
# Completions hint
self.toggle_completions_hint(completions_hint)
self.set_completions_hint_after_ms(completions_hint_after_ms)
# Hover hints
self.toggle_hover_hints(hover_hints)
# Code snippets
self.toggle_code_snippets(code_snippets)
# Autoformat on save
self.toggle_format_on_save(format_on_save)
if cloned_from is not None:
self.set_as_clone(cloned_from)
self.panels.refresh()
elif font is not None:
self.set_font(font, color_scheme)
elif color_scheme is not None:
self.set_color_scheme(color_scheme)
# Set tab spacing after font is set
self.set_tab_stop_width_spaces(tab_stop_width_spaces)
self.toggle_wrap_mode(wrap)
# Class/Function dropdown will be disabled if we're not in a Python
# file.
self.classfuncdropdown.setVisible(show_class_func_dropdown
and self.is_python_like())
self.set_strip_mode(strip_mode)
# --- Language Server Protocol methods -----------------------------------
# ------------------------------------------------------------------------
@Slot(str, dict)
def handle_response(self, method, params):
if method in self.handler_registry:
handler_name = self.handler_registry[method]
handler = getattr(self, handler_name)
handler(params)
# This signal is only used on tests.
# It could be used to track and profile LSP diagnostics.
self.completions_response_signal.emit(method, params)
def emit_request(self, method, params, requires_response):
"""Send request to LSP manager."""
params['requires_response'] = requires_response
params['response_instance'] = self
self.sig_perform_completion_request.emit(
self.language.lower(), method, params)
def log_lsp_handle_errors(self, message):
"""
Log errors when handling LSP responses.
This works when debugging is on or off.
"""
if get_debug_level() > 0:
# We log the error normally when running on debug mode.
logger.error(message, exc_info=True)
else:
# We need this because logger.error activates our error
# report dialog but it doesn't show the entire traceback
# there. So we intentionally leave an error in this call
# to get the entire stack info generated by it, which
# gives the info we need from users.
if PY2:
logger.error(message, exc_info=True)
print(message, file=sys.stderr)
else:
logger.error('%', 1, stack_info=True)
# ------------- LSP: Configuration and protocol start/end ----------------
def start_completion_services(self):
"""Start completion services for this instance."""
self.completions_available = True
if self.is_cloned:
additional_msg = " cloned editor"
else:
additional_msg = ""
self.document_did_open()
logger.debug(u"Completion services available for {0}: {1}".format(
additional_msg, self.filename))
def register_completion_capabilities(self, capabilities):
"""
Register completion server capabilities.
Parameters
----------
capabilities: dict
Capabilities supported by a language server.
"""
sync_options = capabilities['textDocumentSync']
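        # The `capabilities` dict mirrors an LSP initialize response. An
        # illustrative (hypothetical, not exhaustive) example of the keys
        # read in this method:
        #   {'textDocumentSync': {'openClose': True, 'change': 2,
        #                         'save': {'includeText': True}},
        #    'completionProvider': {'triggerCharacters': ['.'],
        #                           'resolveProvider': True},
        #    'signatureHelpProvider': {'triggerCharacters': ['(', ',']},
        #    'documentOnTypeFormattingProvider': {'firstTriggerCharacter': ':'},
        #    'hoverProvider': True, 'definitionProvider': True,
        #    'foldingRangeProvider': True, ...}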
completion_options = capabilities['completionProvider']
signature_options = capabilities['signatureHelpProvider']
range_formatting_options = (
capabilities['documentOnTypeFormattingProvider'])
self.open_close_notifications = sync_options.get('openClose', False)
self.sync_mode = sync_options.get('change', TextDocumentSyncKind.NONE)
self.will_save_notify = sync_options.get('willSave', False)
self.will_save_until_notify = sync_options.get('willSaveWaitUntil',
False)
self.save_include_text = sync_options['save']['includeText']
self.enable_hover = capabilities['hoverProvider']
self.folding_supported = capabilities.get(
'foldingRangeProvider', False)
self.auto_completion_characters = (
completion_options['triggerCharacters'])
self.resolve_completions_enabled = (
completion_options.get('resolveProvider', False))
self.signature_completion_characters = (
signature_options['triggerCharacters'] + ['=']) # FIXME:
self.go_to_definition_enabled = capabilities['definitionProvider']
self.find_references_enabled = capabilities['referencesProvider']
self.highlight_enabled = capabilities['documentHighlightProvider']
self.formatting_enabled = capabilities['documentFormattingProvider']
self.range_formatting_enabled = (
capabilities['documentRangeFormattingProvider'])
self.document_symbols_enabled = (
capabilities['documentSymbolProvider']
)
self.formatting_characters.append(
range_formatting_options['firstTriggerCharacter'])
self.formatting_characters += (
range_formatting_options.get('moreTriggerCharacter', []))
if self.formatting_enabled:
self.format_action.setEnabled(True)
self.sig_refresh_formatting.emit(True)
self.completions_available = True
def stop_completion_services(self):
logger.debug('Stopping completion services for %s' % self.filename)
self.completions_available = False
@request(method=CompletionRequestTypes.DOCUMENT_DID_OPEN, requires_response=False)
def document_did_open(self):
"""Send textDocument/didOpen request to the server."""
cursor = self.textCursor()
text = self.get_text_with_eol()
if self.is_ipython():
# Send valid python text to LSP as it doesn't support IPython
text = self.ipython_to_python(text)
params = {
'file': self.filename,
'language': self.language,
'version': self.text_version,
'text': text,
'codeeditor': self,
'offset': cursor.position(),
'selection_start': cursor.selectionStart(),
'selection_end': cursor.selectionEnd(),
}
return params
# ------------- LSP: Symbols ---------------------------------------
@request(method=CompletionRequestTypes.DOCUMENT_SYMBOL)
def request_symbols(self):
"""Request document symbols."""
if not self.document_symbols_enabled:
return
if self.oe_proxy is not None:
self.oe_proxy.emit_request_in_progress()
params = {'file': self.filename}
return params
@handles(CompletionRequestTypes.DOCUMENT_SYMBOL)
def process_symbols(self, params):
"""Handle symbols response."""
try:
symbols = params['params']
symbols = [] if symbols is None else symbols
self.classfuncdropdown.update_data(symbols)
if self.oe_proxy is not None:
self.oe_proxy.update_outline_info(symbols)
except RuntimeError:
# This is triggered when a codeeditor instance was removed
# before the response can be processed.
return
except Exception:
self.log_lsp_handle_errors("Error when processing symbols")
# ------------- LSP: Linting ---------------------------------------
@request(
method=CompletionRequestTypes.DOCUMENT_DID_CHANGE, requires_response=False)
def document_did_change(self, text=None):
"""Send textDocument/didChange request to the server."""
self.text_version += 1
text = self.get_text_with_eol()
if self.is_ipython():
# Send valid python text to LSP
text = self.ipython_to_python(text)
self.patch = self.differ.patch_make(self.previous_text, text)
self.previous_text = text
cursor = self.textCursor()
params = {
'file': self.filename,
'version': self.text_version,
'text': text,
'diff': self.patch,
'offset': cursor.position(),
'selection_start': cursor.selectionStart(),
'selection_end': cursor.selectionEnd(),
}
return params
@handles(CompletionRequestTypes.DOCUMENT_PUBLISH_DIAGNOSTICS)
def process_diagnostics(self, params):
"""Handle linting response."""
# The LSP spec doesn't require that folding and symbols
# are treated in the same way as linting, i.e. to be
# recomputed on didChange, didOpen and didSave. However,
# we think that's necessary to maintain accurate folding
# and symbols all the time. Therefore, we decided to call
# those requests here, but after a certain timeout to
# avoid performance issues.
self._timer_sync_symbols_and_folding.start()
# Process results (runs in a thread)
self.process_code_analysis(params['params'])
def set_sync_symbols_and_folding_timeout(self):
"""
Set timeout to sync symbols and folding according to the file
size.
"""
current_lines = self.get_line_count()
timeout = None
for lines in self.SYNC_SYMBOLS_AND_FOLDING_TIMEOUTS.keys():
if (current_lines // lines) == 0:
timeout = self.SYNC_SYMBOLS_AND_FOLDING_TIMEOUTS[lines]
break
if not timeout:
timeouts = self.SYNC_SYMBOLS_AND_FOLDING_TIMEOUTS.values()
timeout = list(timeouts)[-1]
self._timer_sync_symbols_and_folding.setInterval(timeout)
def sync_symbols_and_folding(self):
"""
Synchronize symbols and folding after linting results arrive.
"""
self.request_folding()
self.request_symbols()
def process_code_analysis(self, diagnostics):
"""Process code analysis results in a thread."""
self.cleanup_code_analysis()
self._diagnostics = diagnostics
# Process diagnostics in a thread to improve performance.
self.update_diagnostics_thread.run = self.set_errors
self.update_diagnostics_thread.finished.connect(
self.finish_code_analysis)
self.update_diagnostics_thread.start()
def cleanup_code_analysis(self):
"""Remove all code analysis markers"""
self.setUpdatesEnabled(False)
self.clear_extra_selections('code_analysis_highlight')
self.clear_extra_selections('code_analysis_underline')
for data in self.blockuserdata_list():
data.code_analysis = []
self.setUpdatesEnabled(True)
# When the new code analysis results are empty, it is necessary
# to update manually the scrollflag and linenumber areas (otherwise,
# the old flags will still be displayed):
self.sig_flags_changed.emit()
self.linenumberarea.update()
def set_errors(self):
"""Set errors and warnings in the line number area."""
try:
self._process_code_analysis(underline=False)
except RuntimeError:
# This is triggered when a codeeditor instance was removed
# before the response can be processed.
return
except Exception:
self.log_lsp_handle_errors("Error when processing linting")
def underline_errors(self):
"""Underline errors and warnings."""
try:
self._process_code_analysis(underline=True)
except RuntimeError:
# This is triggered when a codeeditor instance was removed
# before the response can be processed.
return
except Exception:
self.log_lsp_handle_errors("Error when processing linting")
def finish_code_analysis(self):
"""Finish processing code analysis results."""
self.linenumberarea.update()
self.underline_errors()
self.update_extra_selections()
self.sig_process_code_analysis.emit()
self.sig_flags_changed.emit()
def errors_present(self):
"""
Return True if there are errors or warnings present in the file.
"""
return bool(len(self._diagnostics))
def _process_code_analysis(self, underline):
"""
Process all code analysis results.
Parameters
----------
underline: bool
Determines if errors and warnings are going to be set in
the line number area or underlined. It's better to separate
            these two processes for performance reasons. That's because
setting errors can be done in a thread whereas underlining
them can't.
"""
document = self.document()
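        # Each entry in self._diagnostics follows the LSP diagnostic shape;
        # an illustrative (hypothetical) example matching the keys accessed
        # below:
        #   {'source': 'pycodestyle', 'code': 'E501', 'severity': 2,
        #    'message': 'line too long (85 > 79 characters)',
        #    'range': {'start': {'line': 10, 'character': 79},
        #              'end': {'line': 10, 'character': 85}}}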
for diagnostic in self._diagnostics:
if self.is_ipython() and (
diagnostic["message"] == "undefined name 'get_ipython'"):
# get_ipython is defined in IPython files
continue
source = diagnostic.get('source', '')
msg_range = diagnostic['range']
start = msg_range['start']
end = msg_range['end']
code = diagnostic.get('code', 'E')
message = diagnostic['message']
severity = diagnostic.get(
'severity', DiagnosticSeverity.ERROR)
block = document.findBlockByNumber(start['line'])
data = block.userData()
# Skip messages according to certain criteria.
# This one works for any programming language
if 'analysis:ignore' in block.text():
continue
# This only works for Python.
if self.language == 'Python':
if NOQA_INLINE_REGEXP.search(block.text()) is not None:
continue
if not data:
data = BlockUserData(self)
if underline:
block_nb = block.blockNumber()
first, last = self.get_buffer_block_numbers()
if (self.underline_errors_enabled and
first <= block_nb <= last):
error = severity == DiagnosticSeverity.ERROR
color = self.error_color if error else self.warning_color
color = QColor(color)
color.setAlpha(255)
block.color = color
data.selection_start = start
data.selection_end = end
# Don't call highlight_selection with `update=True` so that
# all underline selections are updated in bulk in
# finish_code_analysis or update_decorations.
self.highlight_selection('code_analysis_underline',
data._selection(),
underline_color=block.color)
else:
# Don't append messages to data for cloned editors to avoid
# showing them twice or more times on hover.
# Fixes spyder-ide/spyder#15618
if not self.is_cloned:
data.code_analysis.append(
(source, code, severity, message)
)
block.setUserData(data)
# ------------- LSP: Completion ---------------------------------------
@request(method=CompletionRequestTypes.DOCUMENT_COMPLETION)
def do_completion(self, automatic=False):
"""Trigger completion."""
cursor = self.textCursor()
current_word = self.get_current_word(
completion=True,
valid_python_variable=False
)
params = {
'file': self.filename,
'line': cursor.blockNumber(),
'column': cursor.columnNumber(),
'offset': cursor.position(),
'selection_start': cursor.selectionStart(),
'selection_end': cursor.selectionEnd(),
'current_word': current_word
}
self.completion_args = (self.textCursor().position(), automatic)
return params
@handles(CompletionRequestTypes.DOCUMENT_COMPLETION)
def process_completion(self, params):
"""Handle completion response."""
args = self.completion_args
if args is None:
# This should not happen
return
self.completion_args = None
position, automatic = args
start_cursor = self.textCursor()
start_cursor.movePosition(QTextCursor.StartOfBlock)
line_text = self.get_text(start_cursor.position(), 'eol')
leading_whitespace = self.compute_whitespace(line_text)
indentation_whitespace = ' ' * leading_whitespace
eol_char = self.get_line_separator()
try:
completions = params['params']
completions = ([] if completions is None else
[completion for completion in completions
if completion.get('insertText')
or completion.get('textEdit', {}).get('newText')])
prefix = self.get_current_word(completion=True,
valid_python_variable=False)
if (len(completions) == 1
and completions[0].get('insertText') == prefix
and not completions[0].get('textEdit', {}).get('newText')):
completions.pop()
replace_end = self.textCursor().position()
under_cursor = self.get_current_word_and_position(completion=True)
if under_cursor:
word, replace_start = under_cursor
else:
word = ''
replace_start = replace_end
first_letter = ''
if len(word) > 0:
first_letter = word[0]
def sort_key(completion):
if 'textEdit' in completion:
text_insertion = completion['textEdit']['newText']
else:
text_insertion = completion['insertText']
first_insert_letter = text_insertion[0]
case_mismatch = (
(first_letter.isupper() and first_insert_letter.islower())
or
(first_letter.islower() and first_insert_letter.isupper())
)
# False < True, so case matches go first
return (case_mismatch, completion['sortText'])
completion_list = sorted(completions, key=sort_key)
# Allow for textEdit completions to be filtered by Spyder
# if on-the-fly completions are disabled, only if the
# textEdit range matches the word under the cursor.
for completion in completion_list:
if 'textEdit' in completion:
c_replace_start = completion['textEdit']['range']['start']
c_replace_end = completion['textEdit']['range']['end']
if (c_replace_start == replace_start
and c_replace_end == replace_end):
insert_text = completion['textEdit']['newText']
completion['filterText'] = insert_text
completion['insertText'] = insert_text
del completion['textEdit']
if 'insertText' in completion:
insert_text = completion['insertText']
insert_text_lines = insert_text.splitlines()
reindented_text = [insert_text_lines[0]]
for insert_line in insert_text_lines[1:]:
insert_line = indentation_whitespace + insert_line
reindented_text.append(insert_line)
reindented_text = eol_char.join(reindented_text)
completion['insertText'] = reindented_text
self.completion_widget.show_list(
completion_list, position, automatic)
self.kite_call_to_action.handle_processed_completions(completions)
except RuntimeError:
# This is triggered when a codeeditor instance was removed
# before the response can be processed.
self.kite_call_to_action.hide_coverage_cta()
return
except Exception:
self.log_lsp_handle_errors('Error when processing completions')
@request(method=CompletionRequestTypes.COMPLETION_RESOLVE)
def resolve_completion_item(self, item):
return {
'file': self.filename,
'completion_item': item
}
@handles(CompletionRequestTypes.COMPLETION_RESOLVE)
def handle_completion_item_resolution(self, response):
try:
response = response['params']
if not response:
return
self.completion_widget.augment_completion_info(response)
except RuntimeError:
# This is triggered when a codeeditor instance was removed
# before the response can be processed.
return
except Exception:
self.log_lsp_handle_errors(
"Error when handling completion item resolution")
# ------------- LSP: Signature Hints ------------------------------------
@request(method=CompletionRequestTypes.DOCUMENT_SIGNATURE)
def request_signature(self):
"""Ask for signature."""
self.document_did_change('')
line, column = self.get_cursor_line_column()
offset = self.get_position('cursor')
params = {
'file': self.filename,
'line': line,
'column': column,
'offset': offset
}
return params
@handles(CompletionRequestTypes.DOCUMENT_SIGNATURE)
def process_signatures(self, params):
"""Handle signature response."""
try:
signature_params = params['params']
if (signature_params is not None and
'activeParameter' in signature_params):
self.sig_signature_invoked.emit(signature_params)
signature_data = signature_params['signatures']
documentation = signature_data['documentation']
if isinstance(documentation, dict):
documentation = documentation['value']
# The language server returns encoded text with
# spaces defined as `\xa0`
documentation = documentation.replace(u'\xa0', ' ')
parameter_idx = signature_params['activeParameter']
parameters = signature_data['parameters']
parameter = None
if len(parameters) > 0 and parameter_idx < len(parameters):
parameter_data = parameters[parameter_idx]
parameter = parameter_data['label']
signature = signature_data['label']
# This method is part of spyder/widgets/mixins
self.show_calltip(
signature=signature,
parameter=parameter,
language=self.language,
documentation=documentation,
)
except RuntimeError:
# This is triggered when a codeeditor instance was removed
# before the response can be processed.
return
except Exception:
self.log_lsp_handle_errors("Error when processing signature")
# ------------- LSP: Hover/Mouse ---------------------------------------
@request(method=CompletionRequestTypes.DOCUMENT_CURSOR_EVENT)
def request_cursor_event(self):
text = self.get_text_with_eol()
cursor = self.textCursor()
params = {
'file': self.filename,
'version': self.text_version,
'text': text,
'offset': cursor.position(),
'selection_start': cursor.selectionStart(),
'selection_end': cursor.selectionEnd(),
}
return params
@request(method=CompletionRequestTypes.DOCUMENT_HOVER)
def request_hover(self, line, col, offset, show_hint=True, clicked=True):
"""Request hover information."""
params = {
'file': self.filename,
'line': line,
'column': col,
'offset': offset
}
self._show_hint = show_hint
self._request_hover_clicked = clicked
return params
@handles(CompletionRequestTypes.DOCUMENT_HOVER)
def handle_hover_response(self, contents):
"""Handle hover response."""
if running_under_pytest():
from unittest.mock import Mock
# On some tests this is returning a Mock
if isinstance(contents, Mock):
return
try:
content = contents['params']
# - Don't display hover if there's no content to display.
# - Prevent spurious errors when a client returns a list.
if not content or isinstance(content, list):
return
self.sig_display_object_info.emit(
content,
self._request_hover_clicked
)
if content is not None and self._show_hint and self._last_point:
# This is located in spyder/widgets/mixins.py
word = self._last_hover_word
content = content.replace(u'\xa0', ' ')
self.show_hint(content, inspect_word=word,
at_point=self._last_point)
self._last_point = None
except RuntimeError:
# This is triggered when a codeeditor instance was removed
# before the response can be processed.
return
except Exception:
self.log_lsp_handle_errors("Error when processing hover")
# ------------- LSP: Go To Definition ----------------------------
@Slot()
@request(method=CompletionRequestTypes.DOCUMENT_DEFINITION)
def go_to_definition_from_cursor(self, cursor=None):
"""Go to definition from cursor instance (QTextCursor)."""
if (not self.go_to_definition_enabled or
self.in_comment_or_string()):
return
if cursor is None:
cursor = self.textCursor()
text = to_text_string(cursor.selectedText())
if len(text) == 0:
cursor.select(QTextCursor.WordUnderCursor)
text = to_text_string(cursor.selectedText())
if text is not None:
line, column = self.get_cursor_line_column()
params = {
'file': self.filename,
'line': line,
'column': column
}
return params
@handles(CompletionRequestTypes.DOCUMENT_DEFINITION)
def handle_go_to_definition(self, position):
"""Handle go to definition response."""
try:
position = position['params']
if position is not None:
def_range = position['range']
start = def_range['start']
if self.filename == position['file']:
self.go_to_line(start['line'] + 1,
start['character'],
None,
word=None)
else:
self.go_to_definition.emit(position['file'],
start['line'] + 1,
start['character'])
except RuntimeError:
# This is triggered when a codeeditor instance was removed
# before the response can be processed.
return
except Exception:
self.log_lsp_handle_errors(
"Error when processing go to definition")
# ------------- LSP: Document/Selection formatting --------------------
def format_document_or_range(self):
if self.has_selected_text() and self.range_formatting_enabled:
self.format_document_range()
else:
self.format_document()
@request(method=CompletionRequestTypes.DOCUMENT_FORMATTING)
def format_document(self):
if not self.formatting_enabled:
return
using_spaces = self.indent_chars != '\t'
tab_size = (len(self.indent_chars) if using_spaces else
self.tab_stop_width_spaces)
params = {
'file': self.filename,
'options': {
'tab_size': tab_size,
'insert_spaces': using_spaces,
'trim_trailing_whitespace': self.remove_trailing_spaces,
'insert_final_new_line': self.add_newline,
'trim_final_new_lines': self.remove_trailing_newlines
}
}
        # Set the document to read-only and update its corresponding
        # tab name to display the filename in parentheses
self.setReadOnly(True)
self.document().setModified(True)
self.sig_start_operation_in_progress.emit()
self.operation_in_progress = True
return params
@request(method=CompletionRequestTypes.DOCUMENT_RANGE_FORMATTING)
def format_document_range(self):
if not self.range_formatting_enabled or not self.has_selected_text():
return
start, end = self.get_selection_start_end()
start_line, start_col = start
end_line, end_col = end
using_spaces = self.indent_chars != '\t'
tab_size = (len(self.indent_chars) if using_spaces else
self.tab_stop_width_spaces)
fmt_range = {
'start': {
'line': start_line,
'character': start_col
},
'end': {
'line': end_line,
'character': end_col
}
}
params = {
'file': self.filename,
'range': fmt_range,
'options': {
'tab_size': tab_size,
'insert_spaces': using_spaces,
'trim_trailing_whitespace': self.remove_trailing_spaces,
'insert_final_new_line': self.add_newline,
'trim_final_new_lines': self.remove_trailing_newlines
}
}
        # Set the document to read-only and update its corresponding
        # tab name to display the filename in parentheses
self.setReadOnly(True)
self.document().setModified(True)
self.sig_start_operation_in_progress.emit()
self.operation_in_progress = True
return params
@handles(CompletionRequestTypes.DOCUMENT_FORMATTING)
def handle_document_formatting(self, edits):
try:
self._apply_document_edits(edits)
except RuntimeError:
# This is triggered when a codeeditor instance was removed
# before the response can be processed.
return
except Exception:
self.log_lsp_handle_errors("Error when processing document "
"formatting")
finally:
            # Remove the read-only state and tab-name parentheses, and mark
            # the document as modified
self.setReadOnly(False)
self.document().setModified(False)
self.document().setModified(True)
self.sig_stop_operation_in_progress.emit()
self.operation_in_progress = False
@handles(CompletionRequestTypes.DOCUMENT_RANGE_FORMATTING)
def handle_document_range_formatting(self, edits):
try:
self._apply_document_edits(edits)
except RuntimeError:
# This is triggered when a codeeditor instance was removed
# before the response can be processed.
return
except Exception:
self.log_lsp_handle_errors("Error when processing document "
"selection formatting")
finally:
            # Remove the read-only state and tab-name parentheses, and mark
            # the document as modified
self.setReadOnly(False)
self.document().setModified(False)
self.document().setModified(True)
self.sig_stop_operation_in_progress.emit()
self.operation_in_progress = False
def _apply_document_edits(self, edits):
"""Apply a set of atomic document edits to the current editor text."""
edits = edits['params']
if edits is None:
return
        # We need to use toPlainText here, and not get_text_with_eol, so as
        # not to mess up the code when applying formatting.
# See spyder-ide/spyder#16180
text = self.toPlainText()
text_tokens = list(text)
merged_text = None
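        # Each edit is expected to follow the LSP TextEdit shape used below,
        # roughly:
        #     {'range': {'start': {'line': 0, 'character': 0},
        #                'end': {'line': 0, 'character': 4}},
        #      'newText': '    '}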
for edit in edits:
edit_range = edit['range']
repl_text = edit['newText']
start, end = edit_range['start'], edit_range['end']
start_line, start_col = start['line'], start['character']
end_line, end_col = end['line'], end['character']
start_pos = self.get_position_line_number(start_line, start_col)
end_pos = self.get_position_line_number(end_line, end_col)
text_tokens = list(text_tokens)
this_edit = list(repl_text)
if end_line == self.document().blockCount():
end_pos = self.get_position('eof')
end_pos += 1
if (end_pos == len(text_tokens) and
text_tokens[end_pos - 1] == '\n'):
end_pos += 1
this_edition = (text_tokens[:max(start_pos - 1, 0)] +
this_edit +
text_tokens[end_pos - 1:])
text_edit = ''.join(this_edition)
if merged_text is None:
merged_text = text_edit
else:
merged_text = merge(text_edit, merged_text, text)
if merged_text is not None:
cursor = self.textCursor()
cursor.beginEditBlock()
cursor.movePosition(QTextCursor.Start)
cursor.movePosition(QTextCursor.End,
QTextCursor.KeepAnchor)
cursor.insertText(merged_text)
cursor.endEditBlock()
self.document_did_change()
# ------------- LSP: Code folding ranges -------------------------------
def compute_whitespace(self, line):
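        """Return the width of *line*'s leading whitespace, expanding tabs to spaces."""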
tab_size = self.tab_stop_width_spaces
whitespace_regex = re.compile(r'(\s+).*')
whitespace_match = whitespace_regex.match(line)
total_whitespace = 0
if whitespace_match is not None:
whitespace_chars = whitespace_match.group(1)
whitespace_chars = whitespace_chars.replace(
'\t', tab_size * ' ')
total_whitespace = len(whitespace_chars)
return total_whitespace
def update_whitespace_count(self, line, column):
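        """Recompute the leading whitespace width of every line in the document."""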
self.leading_whitespaces = {}
lines = to_text_string(self.toPlainText()).splitlines()
for i, text in enumerate(lines):
total_whitespace = self.compute_whitespace(text)
self.leading_whitespaces[i] = total_whitespace
def cleanup_folding(self):
"""Cleanup folding pane."""
folding_panel = self.panels.get(FoldingPanel)
folding_panel.folding_regions = {}
@request(method=CompletionRequestTypes.DOCUMENT_FOLDING_RANGE)
def request_folding(self):
"""Request folding."""
if not self.folding_supported or not self.code_folding:
return
params = {'file': self.filename}
return params
@handles(CompletionRequestTypes.DOCUMENT_FOLDING_RANGE)
def handle_folding_range(self, response):
"""Handle folding response."""
ranges = response['params']
if ranges is None:
return
# Compute extended_ranges here because get_text_region ends up
# calling paintEvent and that method can't be called in a
# thread due to Qt restrictions.
try:
extended_ranges = []
for start, end in ranges:
text_region = self.get_text_region(start, end)
extended_ranges.append((start, end, text_region))
except RuntimeError:
# This is triggered when a codeeditor instance was removed
# before the response can be processed.
return
except Exception:
self.log_lsp_handle_errors("Error when processing folding")
# Update folding in a thread
self.update_folding_thread.run = functools.partial(
self.update_and_merge_folding, extended_ranges)
self.update_folding_thread.finished.connect(
self.finish_code_folding)
self.update_folding_thread.start()
def update_and_merge_folding(self, extended_ranges):
"""Update and merge new folding information."""
try:
folding_panel = self.panels.get(FoldingPanel)
current_tree, root = merge_folding(
extended_ranges, folding_panel.current_tree,
folding_panel.root)
folding_info = collect_folding_regions(root)
self._folding_info = (current_tree, root, *folding_info)
except RuntimeError:
# This is triggered when a codeeditor instance was removed
# before the response can be processed.
return
except Exception:
self.log_lsp_handle_errors("Error when processing folding")
def finish_code_folding(self):
"""Finish processing code folding."""
folding_panel = self.panels.get(FoldingPanel)
folding_panel.update_folding(self._folding_info)
# Update indent guides, which depend on folding
if self.indent_guides._enabled and len(self.patch) > 0:
line, column = self.get_cursor_line_column()
self.update_whitespace_count(line, column)
# ------------- LSP: Save/close file -----------------------------------
@request(method=CompletionRequestTypes.DOCUMENT_DID_SAVE,
requires_response=False)
def notify_save(self):
"""Send save request."""
params = {'file': self.filename}
if self.save_include_text:
params['text'] = self.get_text_with_eol()
return params
@request(method=CompletionRequestTypes.DOCUMENT_DID_CLOSE,
requires_response=False)
def notify_close(self):
"""Send close request."""
if self.completions_available:
params = {
'file': self.filename,
'codeeditor': self
}
return params
# -------------------------------------------------------------------------
def set_debug_panel(self, show_debug_panel, language):
"""Enable/disable debug panel."""
debugger_panel = self.panels.get(DebuggerPanel)
if (is_text_string(language) and
language.lower() in ALL_LANGUAGES['Python'] and
show_debug_panel):
debugger_panel.setVisible(True)
else:
debugger_panel.setVisible(False)
def update_debugger_panel_state(self, state, last_step, force=False):
"""Update debugger panel state."""
debugger_panel = self.panels.get(DebuggerPanel)
if force:
debugger_panel.start_clean()
return
elif state and 'fname' in last_step:
fname = last_step['fname']
if (fname and self.filename
and osp.normcase(fname) == osp.normcase(self.filename)):
debugger_panel.start_clean()
return
debugger_panel.stop_clean()
def set_folding_panel(self, folding):
"""Enable/disable folding panel."""
folding_panel = self.panels.get(FoldingPanel)
folding_panel.setVisible(folding)
def set_tab_mode(self, enable):
"""
        enabled = tab always indents
(otherwise tab indents only when cursor is at the beginning of a line)
"""
self.tab_mode = enable
def set_strip_mode(self, enable):
"""
Strip all trailing spaces if enabled.
"""
self.strip_trailing_spaces_on_modify = enable
def toggle_intelligent_backspace(self, state):
self.intelligent_backspace = state
def toggle_automatic_completions(self, state):
self.automatic_completions = state
def toggle_hover_hints(self, state):
self.hover_hints_enabled = state
def toggle_code_snippets(self, state):
self.code_snippets = state
def toggle_format_on_save(self, state):
self.format_on_save = state
def toggle_code_folding(self, state):
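        """Enable/disable code folding (kept on while indent guides are enabled)."""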
self.code_folding = state
self.set_folding_panel(state)
if not state and self.indent_guides._enabled:
self.code_folding = True
def toggle_identation_guides(self, state):
if state and not self.code_folding:
self.code_folding = True
self.indent_guides.set_enabled(state)
def toggle_completions_hint(self, state):
"""Enable/disable completion hint."""
self.completions_hint = state
def set_automatic_completions_after_chars(self, number):
"""
Set the number of characters after which auto completion is fired.
"""
self.automatic_completions_after_chars = number
def set_automatic_completions_after_ms(self, ms):
"""
Set the amount of time in ms after which auto completion is fired.
"""
self.automatic_completions_after_ms = ms
def set_completions_hint_after_ms(self, ms):
"""
Set the amount of time in ms after which the completions hint is shown.
"""
self.completions_hint_after_ms = ms
def set_close_parentheses_enabled(self, enable):
"""Enable/disable automatic parentheses insertion feature"""
self.close_parentheses_enabled = enable
bracket_extension = self.editor_extensions.get(CloseBracketsExtension)
if bracket_extension is not None:
bracket_extension.enabled = enable
def set_close_quotes_enabled(self, enable):
"""Enable/disable automatic quote insertion feature"""
self.close_quotes_enabled = enable
quote_extension = self.editor_extensions.get(CloseQuotesExtension)
if quote_extension is not None:
quote_extension.enabled = enable
def set_add_colons_enabled(self, enable):
"""Enable/disable automatic colons insertion feature"""
self.add_colons_enabled = enable
def set_auto_unindent_enabled(self, enable):
"""Enable/disable automatic unindent after else/elif/finally/except"""
self.auto_unindent_enabled = enable
def set_occurrence_highlighting(self, enable):
"""Enable/disable occurrence highlighting"""
self.occurrence_highlighting = enable
if not enable:
self.__clear_occurrences()
def set_occurrence_timeout(self, timeout):
"""Set occurrence highlighting timeout (ms)"""
self.occurrence_timer.setInterval(timeout)
def set_underline_errors_enabled(self, state):
"""Toggle the underlining of errors and warnings."""
self.underline_errors_enabled = state
if state:
self.document_did_change()
else:
self.clear_extra_selections('code_analysis_underline')
def set_highlight_current_line(self, enable):
"""Enable/disable current line highlighting"""
self.highlight_current_line_enabled = enable
if self.highlight_current_line_enabled:
self.highlight_current_line()
else:
self.unhighlight_current_line()
def set_highlight_current_cell(self, enable):
"""Enable/disable current line highlighting"""
hl_cell_enable = enable and self.supported_cell_language
self.highlight_current_cell_enabled = hl_cell_enable
if self.highlight_current_cell_enabled:
self.highlight_current_cell()
else:
self.unhighlight_current_cell()
def set_language(self, language, filename=None):
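        """Set the editor language and install a matching syntax highlighter."""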
extra_supported_languages = {'stil': 'STIL'}
self.tab_indents = language in self.TAB_ALWAYS_INDENTS
self.comment_string = ''
self.language = 'Text'
self.supported_language = False
sh_class = sh.TextSH
language = 'None' if language is None else language
if language is not None:
for (key, value) in ALL_LANGUAGES.items():
if language.lower() in value:
self.supported_language = True
sh_class, comment_string = self.LANGUAGES[key]
if key == 'IPython':
self.language = 'Python'
else:
self.language = key
self.comment_string = comment_string
if key in CELL_LANGUAGES:
self.supported_cell_language = True
self.has_cell_separators = True
break
if filename is not None and not self.supported_language:
sh_class = sh.guess_pygments_highlighter(filename)
self.support_language = sh_class is not sh.TextSH
if self.support_language:
                # Pygments reports 'S' as the lexer name for R files
if sh_class._lexer.name == 'S':
self.language = 'R'
else:
self.language = sh_class._lexer.name
else:
_, ext = osp.splitext(filename)
ext = ext.lower()
if ext in extra_supported_languages:
self.language = extra_supported_languages[ext]
self._set_highlighter(sh_class)
self.completion_widget.set_language(self.language)
def _set_highlighter(self, sh_class):
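        """Set the syntax highlighter class and attach a new instance to the document."""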
self.highlighter_class = sh_class
if self.highlighter is not None:
# Removing old highlighter
# TODO: test if leaving parent/document as is eats memory
self.highlighter.setParent(None)
self.highlighter.setDocument(None)
self.highlighter = self.highlighter_class(self.document(),
self.font(),
self.color_scheme)
self.highlighter._cell_list = []
self.highlighter.sig_new_cell.connect(self.add_to_cell_list)
self._apply_highlighter_color_scheme()
self.highlighter.editor = self
def add_to_cell_list(self, oedata):
"""Add new cell to cell list."""
if self.highlighter is None:
return
self.highlighter._cell_list.append(oedata)
def get_cell_list(self):
"""Get all cells."""
if self.highlighter is None:
return []
# Filter out old cells
def good(oedata):
return oedata.is_valid() and oedata.def_type == oedata.CELL
self.highlighter._cell_list = [
oedata for oedata in self.highlighter._cell_list if good(oedata)]
return sorted(
{oedata.get_block_number(): oedata
for oedata in self.highlighter._cell_list}.items())
def is_json(self):
return (isinstance(self.highlighter, sh.PygmentsSH) and
self.highlighter._lexer.name == 'JSON')
def is_python(self):
return self.highlighter_class is sh.PythonSH
def is_ipython(self):
return self.highlighter_class is sh.IPythonSH
def is_python_or_ipython(self):
return self.is_python() or self.is_ipython()
def is_cython(self):
return self.highlighter_class is sh.CythonSH
def is_enaml(self):
return self.highlighter_class is sh.EnamlSH
def is_python_like(self):
return (self.is_python() or self.is_ipython()
or self.is_cython() or self.is_enaml())
def intelligent_tab(self):
"""Provide intelligent behavior for Tab key press."""
leading_text = self.get_text('sol', 'cursor')
if not leading_text.strip() or leading_text.endswith('#'):
# blank line or start of comment
self.indent_or_replace()
elif self.in_comment_or_string() and not leading_text.endswith(' '):
# in a word in a comment
self.do_completion()
elif leading_text.endswith('import ') or leading_text[-1] == '.':
# blank import or dot completion
self.do_completion()
elif (leading_text.split()[0] in ['from', 'import'] and
';' not in leading_text):
# import line with a single statement
# (prevents lines like: `import pdb; pdb.set_trace()`)
self.do_completion()
elif leading_text[-1] in '(,' or leading_text.endswith(', '):
self.indent_or_replace()
elif leading_text.endswith(' '):
# if the line ends with a space, indent
self.indent_or_replace()
elif re.search(r"[^\d\W]\w*\Z", leading_text, re.UNICODE):
            # if the line ends with an identifier, try completion
self.do_completion()
else:
self.indent_or_replace()
def intelligent_backtab(self):
"""Provide intelligent behavior for Shift+Tab key press"""
leading_text = self.get_text('sol', 'cursor')
if not leading_text.strip():
# blank line
self.unindent()
elif self.in_comment_or_string():
self.unindent()
elif leading_text[-1] in '(,' or leading_text.endswith(', '):
position = self.get_position('cursor')
self.show_object_info(position)
else:
# if the line ends with any other character but comma
self.unindent()
def rehighlight(self):
"""Rehighlight the whole document."""
if self.highlighter is not None:
self.highlighter.rehighlight()
if self.highlight_current_cell_enabled:
self.highlight_current_cell()
else:
self.unhighlight_current_cell()
if self.highlight_current_line_enabled:
self.highlight_current_line()
else:
self.unhighlight_current_line()
def trim_trailing_spaces(self):
"""Remove trailing spaces"""
cursor = self.textCursor()
cursor.beginEditBlock()
cursor.movePosition(QTextCursor.Start)
while True:
cursor.movePosition(QTextCursor.EndOfBlock)
text = to_text_string(cursor.block().text())
length = len(text)-len(text.rstrip())
if length > 0:
cursor.movePosition(QTextCursor.Left, QTextCursor.KeepAnchor,
length)
cursor.removeSelectedText()
if cursor.atEnd():
break
cursor.movePosition(QTextCursor.NextBlock)
cursor.endEditBlock()
self.document_did_change()
def trim_trailing_newlines(self):
"""Remove extra newlines at the end of the document."""
cursor = self.textCursor()
cursor.beginEditBlock()
cursor.movePosition(QTextCursor.End)
line = cursor.blockNumber()
this_line = self.get_text_line(line)
previous_line = self.get_text_line(line - 1)
while this_line == '':
cursor.movePosition(QTextCursor.PreviousBlock,
QTextCursor.KeepAnchor)
if self.add_newline:
if this_line == '' and previous_line != '':
cursor.movePosition(QTextCursor.NextBlock,
QTextCursor.KeepAnchor)
line -= 1
if line == 0:
break
this_line = self.get_text_line(line)
previous_line = self.get_text_line(line - 1)
if not self.add_newline:
cursor.movePosition(QTextCursor.EndOfBlock,
QTextCursor.KeepAnchor)
cursor.removeSelectedText()
cursor.endEditBlock()
self.document_did_change()
def add_newline_to_file(self):
"""Add a newline to the end of the file if it does not exist."""
cursor = self.textCursor()
cursor.movePosition(QTextCursor.End)
line = cursor.blockNumber()
this_line = self.get_text_line(line)
if this_line != '':
cursor.beginEditBlock()
cursor.movePosition(QTextCursor.EndOfBlock)
cursor.insertText(self.get_line_separator())
cursor.endEditBlock()
self.document_did_change()
def fix_indentation(self):
"""Replace tabs by spaces."""
text_before = to_text_string(self.toPlainText())
text_after = sourcecode.fix_indentation(text_before, self.indent_chars)
if text_before != text_after:
# We do the following rather than using self.setPlainText
# to benefit from QTextEdit's undo/redo feature.
self.selectAll()
self.skip_rstrip = True
self.insertPlainText(text_after)
self.document_did_change()
self.skip_rstrip = False
def get_current_object(self):
"""Return current object (string) """
source_code = to_text_string(self.toPlainText())
offset = self.get_position('cursor')
return sourcecode.get_primary_at(source_code, offset)
def next_cursor_position(self, position=None,
mode=QTextLayout.SkipCharacters):
"""
Get next valid cursor position.
Adapted from:
https://github.com/qt/qtbase/blob/5.15.2/src/gui/text/qtextdocument_p.cpp#L1361
"""
cursor = self.textCursor()
if cursor.atEnd():
return position
if position is None:
position = cursor.position()
else:
cursor.setPosition(position)
it = cursor.block()
start = it.position()
end = start + it.length() - 1
if (position == end):
return end + 1
return it.layout().nextCursorPosition(position - start, mode) + start
@Slot()
def delete(self):
"""Remove selected text or next character."""
if not self.has_selected_text():
cursor = self.textCursor()
if not cursor.atEnd():
cursor.setPosition(
self.next_cursor_position(), QTextCursor.KeepAnchor)
self.setTextCursor(cursor)
self.remove_selected_text()
#------Find occurrences
def __find_first(self, text):
"""Find first occurrence: scan whole document"""
flags = QTextDocument.FindCaseSensitively|QTextDocument.FindWholeWords
cursor = self.textCursor()
# Scanning whole document
cursor.movePosition(QTextCursor.Start)
regexp = QRegExp(r"\b%s\b" % QRegExp.escape(text), Qt.CaseSensitive)
cursor = self.document().find(regexp, cursor, flags)
self.__find_first_pos = cursor.position()
return cursor
def __find_next(self, text, cursor):
"""Find next occurrence"""
flags = QTextDocument.FindCaseSensitively|QTextDocument.FindWholeWords
regexp = QRegExp(r"\b%s\b" % QRegExp.escape(text), Qt.CaseSensitive)
cursor = self.document().find(regexp, cursor, flags)
if cursor.position() != self.__find_first_pos:
return cursor
def __cursor_position_changed(self):
"""Cursor position has changed"""
line, column = self.get_cursor_line_column()
self.sig_cursor_position_changed.emit(line, column)
if self.highlight_current_cell_enabled:
self.highlight_current_cell()
else:
self.unhighlight_current_cell()
if self.highlight_current_line_enabled:
self.highlight_current_line()
else:
self.unhighlight_current_line()
if self.occurrence_highlighting:
self.occurrence_timer.stop()
self.occurrence_timer.start()
# Strip if needed
self.strip_trailing_spaces()
def __clear_occurrences(self):
"""Clear occurrence markers"""
self.occurrences = []
self.clear_extra_selections('occurrences')
self.sig_flags_changed.emit()
def get_selection(self, cursor, foreground_color=None,
background_color=None, underline_color=None,
outline_color=None,
underline_style=QTextCharFormat.SingleUnderline):
"""Get selection."""
if cursor is None:
return
selection = TextDecoration(cursor)
if foreground_color is not None:
selection.format.setForeground(foreground_color)
if background_color is not None:
selection.format.setBackground(background_color)
if underline_color is not None:
selection.format.setProperty(QTextFormat.TextUnderlineStyle,
to_qvariant(underline_style))
selection.format.setProperty(QTextFormat.TextUnderlineColor,
to_qvariant(underline_color))
if outline_color is not None:
selection.set_outline(outline_color)
return selection
def highlight_selection(self, key, cursor, foreground_color=None,
background_color=None, underline_color=None,
outline_color=None,
underline_style=QTextCharFormat.SingleUnderline,
update=False):
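        """Add a decoration to the extra selections stored under *key*."""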
selection = self.get_selection(
cursor, foreground_color, background_color, underline_color,
outline_color, underline_style)
if selection is None:
return
extra_selections = self.get_extra_selections(key)
extra_selections.append(selection)
self.set_extra_selections(key, extra_selections)
if update:
self.update_extra_selections()
def __mark_occurrences(self):
"""Marking occurrences of the currently selected word"""
self.__clear_occurrences()
if not self.supported_language:
return
text = self.get_selected_text().strip()
if not text:
text = self.get_current_word()
if text is None:
return
if (self.has_selected_text() and
self.get_selected_text().strip() != text):
return
if (self.is_python_like() and
(sourcecode.is_keyword(to_text_string(text)) or
to_text_string(text) == 'self')):
return
# Highlighting all occurrences of word *text*
cursor = self.__find_first(text)
self.occurrences = []
extra_selections = self.get_extra_selections('occurrences')
first_occurrence = None
while cursor:
self.occurrences.append(cursor.blockNumber())
selection = self.get_selection(cursor)
if len(selection.cursor.selectedText()) > 0:
extra_selections.append(selection)
if len(extra_selections) == 1:
first_occurrence = selection
else:
selection.format.setBackground(self.occurrence_color)
first_occurrence.format.setBackground(
self.occurrence_color)
cursor = self.__find_next(text, cursor)
self.set_extra_selections('occurrences', extra_selections)
self.update_extra_selections()
if len(self.occurrences) > 1 and self.occurrences[-1] == 0:
            # XXX: this never happens with PySide but it's necessary
            # for PyQt4... this must be related to a different behavior of
            # the QTextDocument.find function between those two libraries
self.occurrences.pop(-1)
self.sig_flags_changed.emit()
#-----highlight found results (find/replace widget)
def highlight_found_results(self, pattern, word=False, regexp=False,
case=False):
"""Highlight all found patterns"""
pattern = to_text_string(pattern)
if not pattern:
return
if not regexp:
pattern = re.escape(to_text_string(pattern))
pattern = r"\b%s\b" % pattern if word else pattern
text = to_text_string(self.toPlainText())
re_flags = re.MULTILINE if case else re.IGNORECASE | re.MULTILINE
try:
regobj = re.compile(pattern, flags=re_flags)
except sre_constants.error:
return
extra_selections = []
self.found_results = []
for match in regobj.finditer(text):
pos1, pos2 = sh.get_span(match)
selection = TextDecoration(self.textCursor())
selection.format.setBackground(self.found_results_color)
selection.cursor.setPosition(pos1)
self.found_results.append(selection.cursor.blockNumber())
selection.cursor.setPosition(pos2, QTextCursor.KeepAnchor)
extra_selections.append(selection)
self.set_extra_selections('find', extra_selections)
self.update_extra_selections()
def clear_found_results(self):
"""Clear found results highlighting"""
self.found_results = []
self.clear_extra_selections('find')
self.sig_flags_changed.emit()
def __text_has_changed(self):
"""Text has changed, eventually clear found results highlighting"""
self.last_change_position = self.textCursor().position()
if self.found_results:
self.clear_found_results()
def get_linenumberarea_width(self):
"""
        Return the current line number area width.
        This method is left for backward compatibility (BaseEditMixin
        defines it); any changes should be made in the LineNumberArea class.
"""
return self.linenumberarea.get_width()
def calculate_real_position(self, point):
"""Add offset to a point, to take into account the panels."""
point.setX(point.x() + self.panels.margin_size(Panel.Position.LEFT))
point.setY(point.y() + self.panels.margin_size(Panel.Position.TOP))
return point
def calculate_real_position_from_global(self, point):
"""Add offset to a point, to take into account the panels."""
point.setX(point.x() - self.panels.margin_size(Panel.Position.LEFT))
point.setY(point.y() + self.panels.margin_size(Panel.Position.TOP))
return point
def get_linenumber_from_mouse_event(self, event):
"""Return line number from mouse event"""
block = self.firstVisibleBlock()
line_number = block.blockNumber()
top = self.blockBoundingGeometry(block).translated(
self.contentOffset()).top()
bottom = top + self.blockBoundingRect(block).height()
while block.isValid() and top < event.pos().y():
block = block.next()
if block.isVisible(): # skip collapsed blocks
top = bottom
bottom = top + self.blockBoundingRect(block).height()
line_number += 1
return line_number
def select_lines(self, linenumber_pressed, linenumber_released):
"""Select line(s) after a mouse press/mouse press drag event"""
find_block_by_number = self.document().findBlockByNumber
move_n_blocks = (linenumber_released - linenumber_pressed)
start_line = linenumber_pressed
start_block = find_block_by_number(start_line - 1)
cursor = self.textCursor()
cursor.setPosition(start_block.position())
# Select/drag downwards
if move_n_blocks > 0:
for n in range(abs(move_n_blocks) + 1):
cursor.movePosition(cursor.NextBlock, cursor.KeepAnchor)
# Select/drag upwards or select single line
else:
cursor.movePosition(cursor.NextBlock)
for n in range(abs(move_n_blocks) + 1):
cursor.movePosition(cursor.PreviousBlock, cursor.KeepAnchor)
# Account for last line case
if linenumber_released == self.blockCount():
cursor.movePosition(cursor.EndOfBlock, cursor.KeepAnchor)
else:
cursor.movePosition(cursor.StartOfBlock, cursor.KeepAnchor)
self.setTextCursor(cursor)
# ----- Code bookmarks
def add_bookmark(self, slot_num, line=None, column=None):
"""Add bookmark to current block's userData."""
if line is None:
            # Triggered by a shortcut; otherwise set when Spyder starts
line, column = self.get_cursor_line_column()
block = self.document().findBlockByNumber(line)
data = block.userData()
if not data:
data = BlockUserData(self)
if slot_num not in data.bookmarks:
data.bookmarks.append((slot_num, column))
block.setUserData(data)
self.sig_bookmarks_changed.emit()
def get_bookmarks(self):
"""Get bookmarks by going over all blocks."""
bookmarks = {}
block = self.document().firstBlock()
for line_number in range(0, self.document().blockCount()):
data = block.userData()
if data and data.bookmarks:
for slot_num, column in data.bookmarks:
bookmarks[slot_num] = [line_number, column]
block = block.next()
return bookmarks
def clear_bookmarks(self):
"""Clear bookmarks for all blocks."""
self.bookmarks = {}
for data in self.blockuserdata_list():
data.bookmarks = []
def set_bookmarks(self, bookmarks):
"""Set bookmarks when opening file."""
self.clear_bookmarks()
for slot_num, bookmark in bookmarks.items():
self.add_bookmark(slot_num, bookmark[1], bookmark[2])
def update_bookmarks(self):
"""Emit signal to update bookmarks."""
self.sig_bookmarks_changed.emit()
# -----Code introspection
def show_completion_object_info(self, name, signature):
"""Trigger show completion info in Help Pane."""
force = True
self.sig_show_completion_object_info.emit(name, signature, force)
def show_object_info(self, position):
"""Trigger a calltip"""
self.sig_show_object_info.emit(position)
# -----blank spaces
def set_blanks_enabled(self, state):
"""Toggle blanks visibility"""
self.blanks_enabled = state
option = self.document().defaultTextOption()
option.setFlags(option.flags() | \
QTextOption.AddSpaceForLineAndParagraphSeparators)
if self.blanks_enabled:
option.setFlags(option.flags() | QTextOption.ShowTabsAndSpaces)
else:
option.setFlags(option.flags() & ~QTextOption.ShowTabsAndSpaces)
self.document().setDefaultTextOption(option)
# Rehighlight to make the spaces less apparent.
self.rehighlight()
def set_scrollpastend_enabled(self, state):
"""
Allow user to scroll past the end of the document to have the last
line on top of the screen
"""
self.scrollpastend_enabled = state
self.setCenterOnScroll(state)
self.setDocument(self.document())
def resizeEvent(self, event):
"""Reimplemented Qt method to handle p resizing"""
TextEditBaseWidget.resizeEvent(self, event)
self.panels.resize()
def showEvent(self, event):
"""Overrides showEvent to update the viewport margins."""
super(CodeEditor, self).showEvent(event)
self.panels.refresh()
#-----Misc.
def _apply_highlighter_color_scheme(self):
"""Apply color scheme from syntax highlighter to the editor"""
hl = self.highlighter
if hl is not None:
self.set_palette(background=hl.get_background_color(),
foreground=hl.get_foreground_color())
self.currentline_color = hl.get_currentline_color()
self.currentcell_color = hl.get_currentcell_color()
self.occurrence_color = hl.get_occurrence_color()
self.ctrl_click_color = hl.get_ctrlclick_color()
self.sideareas_color = hl.get_sideareas_color()
self.comment_color = hl.get_comment_color()
self.normal_color = hl.get_foreground_color()
self.matched_p_color = hl.get_matched_p_color()
self.unmatched_p_color = hl.get_unmatched_p_color()
self.edge_line.update_color()
self.indent_guides.update_color()
self.sig_theme_colors_changed.emit(
{'occurrence': self.occurrence_color})
def apply_highlighter_settings(self, color_scheme=None):
"""Apply syntax highlighter settings"""
if self.highlighter is not None:
# Updating highlighter settings (font and color scheme)
self.highlighter.setup_formats(self.font())
if color_scheme is not None:
self.set_color_scheme(color_scheme)
else:
self.highlighter.rehighlight()
def set_font(self, font, color_scheme=None):
"""Set font"""
# Note: why using this method to set color scheme instead of
# 'set_color_scheme'? To avoid rehighlighting the document twice
# at startup.
if color_scheme is not None:
self.color_scheme = color_scheme
self.setFont(font)
self.panels.refresh()
self.apply_highlighter_settings(color_scheme)
def set_color_scheme(self, color_scheme):
"""Set color scheme for syntax highlighting"""
self.color_scheme = color_scheme
if self.highlighter is not None:
# this calls self.highlighter.rehighlight()
self.highlighter.set_color_scheme(color_scheme)
self._apply_highlighter_color_scheme()
if self.highlight_current_cell_enabled:
self.highlight_current_cell()
else:
self.unhighlight_current_cell()
if self.highlight_current_line_enabled:
self.highlight_current_line()
else:
self.unhighlight_current_line()
def set_text(self, text):
"""Set the text of the editor"""
self.setPlainText(text)
self.set_eol_chars(text)
self.document_did_change(text)
if (isinstance(self.highlighter, sh.PygmentsSH)
and not running_under_pytest()):
self.highlighter.make_charlist()
def set_text_from_file(self, filename, language=None):
"""Set the text of the editor from file *fname*"""
self.filename = filename
text, _enc = encoding.read(filename)
if language is None:
language = get_file_language(filename, text)
self.set_language(language, filename)
self.set_text(text)
def append(self, text):
"""Append text to the end of the text widget"""
cursor = self.textCursor()
cursor.movePosition(QTextCursor.End)
cursor.insertText(text)
self.document_did_change()
def adjust_indentation(self, line, indent_adjustment):
"""Adjust indentation."""
if indent_adjustment == 0 or line == "":
return line
using_spaces = self.indent_chars != '\t'
if indent_adjustment > 0:
if using_spaces:
return ' ' * indent_adjustment + line
else:
return (
self.indent_chars
* (indent_adjustment // self.tab_stop_width_spaces)
+ line)
max_indent = self.get_line_indentation(line)
indent_adjustment = min(max_indent, -indent_adjustment)
indent_adjustment = (indent_adjustment if using_spaces else
indent_adjustment // self.tab_stop_width_spaces)
return line[indent_adjustment:]
@Slot()
def paste(self):
"""
Insert text or file/folder path copied from clipboard.
Reimplement QPlainTextEdit's method to fix the following issue:
on Windows, pasted text has only 'LF' EOL chars even if the original
text has 'CRLF' EOL chars.
        The method also adapts the pasted content when it consists of copied
        file/folder paths, and when plain text spans multiple lines. Since
        the inserted text can differ from the clipboard content, we cannot
        use paste(), which pastes directly from the clipboard; instead we use
        insertPlainText and pass the formatted/changed text without modifying
        the clipboard content.
"""
clipboard = QApplication.clipboard()
text = to_text_string(clipboard.text())
if clipboard.mimeData().hasUrls():
# Have copied file and folder urls pasted as text paths.
# See spyder-ide/spyder#8644 for details.
urls = clipboard.mimeData().urls()
if all([url.isLocalFile() for url in urls]):
if len(urls) > 1:
sep_chars = ',' + self.get_line_separator()
text = sep_chars.join('"' + url.toLocalFile().
replace(osp.os.sep, '/')
+ '"' for url in urls)
else:
text = urls[0].toLocalFile().replace(osp.os.sep, '/')
eol_chars = self.get_line_separator()
if len(text.splitlines()) > 1:
text = eol_chars.join((text + eol_chars).splitlines())
# Align multiline text based on first line
cursor = self.textCursor()
cursor.beginEditBlock()
cursor.removeSelectedText()
cursor.setPosition(cursor.selectionStart())
cursor.setPosition(cursor.block().position(),
QTextCursor.KeepAnchor)
preceding_text = cursor.selectedText()
first_line_selected, *remaining_lines = (text + eol_chars).splitlines()
first_line = preceding_text + first_line_selected
first_line_adjustment = 0
# Dedent if automatic indentation makes code invalid
        # Minimum indentation = max of current and pasted indentation
if (self.is_python_like() and len(preceding_text.strip()) == 0
and len(first_line.strip()) > 0):
# Correct indentation
desired_indent = self.find_indentation()
if desired_indent:
# minimum indentation is either the current indentation
                # or the indentation of the pasted text
desired_indent = max(
desired_indent,
self.get_line_indentation(first_line_selected),
self.get_line_indentation(preceding_text))
first_line_adjustment = (
desired_indent - self.get_line_indentation(first_line))
# Only dedent, don't indent
first_line_adjustment = min(first_line_adjustment, 0)
# Only dedent, don't indent
first_line = self.adjust_indentation(
first_line, first_line_adjustment)
# Fix indentation of multiline text based on first line
if len(remaining_lines) > 0 and len(first_line.strip()) > 0:
lines_adjustment = first_line_adjustment
lines_adjustment += CLIPBOARD_HELPER.remaining_lines_adjustment(
preceding_text)
# Make sure the code is not flattened
indentations = [
self.get_line_indentation(line)
for line in remaining_lines if line.strip() != ""]
if indentations:
max_dedent = min(indentations)
lines_adjustment = max(lines_adjustment, -max_dedent)
# Get new text
remaining_lines = [
self.adjust_indentation(line, lines_adjustment)
for line in remaining_lines]
text = eol_chars.join([first_line, *remaining_lines])
self.skip_rstrip = True
self.sig_will_paste_text.emit(text)
cursor.removeSelectedText()
cursor.insertText(text)
cursor.endEditBlock()
self.sig_text_was_inserted.emit()
self.document_did_change(text)
self.skip_rstrip = False
def _save_clipboard_indentation(self):
"""
Save the indentation corresponding to the clipboard data.
Must be called right after copying.
"""
cursor = self.textCursor()
cursor.setPosition(cursor.selectionStart())
cursor.setPosition(cursor.block().position(),
QTextCursor.KeepAnchor)
preceding_text = cursor.selectedText()
CLIPBOARD_HELPER.save_indentation(
preceding_text, self.tab_stop_width_spaces)
@Slot()
def cut(self):
"""Reimplement cut to signal listeners about changes on the text."""
has_selected_text = self.has_selected_text()
if not has_selected_text:
return
start, end = self.get_selection_start_end()
self.sig_will_remove_selection.emit(start, end)
TextEditBaseWidget.cut(self)
self._save_clipboard_indentation()
self.sig_text_was_inserted.emit()
self.document_did_change('')
@Slot()
def copy(self):
"""Reimplement copy to save indentation."""
TextEditBaseWidget.copy(self)
self._save_clipboard_indentation()
@Slot()
def undo(self):
"""Reimplement undo to decrease text version number."""
if self.document().isUndoAvailable():
self.text_version -= 1
self.skip_rstrip = True
self.is_undoing = True
TextEditBaseWidget.undo(self)
self.document_did_change('')
self.sig_undo.emit()
self.sig_text_was_inserted.emit()
self.is_undoing = False
self.skip_rstrip = False
@Slot()
def redo(self):
"""Reimplement redo to increase text version number."""
if self.document().isRedoAvailable():
self.text_version += 1
self.skip_rstrip = True
self.is_redoing = True
TextEditBaseWidget.redo(self)
self.document_did_change('text')
self.sig_redo.emit()
self.sig_text_was_inserted.emit()
self.is_redoing = False
self.skip_rstrip = False
# =========================================================================
# High-level editor features
# =========================================================================
@Slot()
def center_cursor_on_next_focus(self):
"""QPlainTextEdit's "centerCursor" requires the widget to be visible"""
self.centerCursor()
self.focus_in.disconnect(self.center_cursor_on_next_focus)
def go_to_line(self, line, start_column=0, end_column=0, word=''):
"""Go to line number *line* and eventually highlight it"""
self.text_helper.goto_line(line, column=start_column,
end_column=end_column, move=True,
word=word)
def exec_gotolinedialog(self):
"""Execute the GoToLineDialog dialog box"""
dlg = GoToLineDialog(self)
if dlg.exec_():
self.go_to_line(dlg.get_line_number())
def hide_tooltip(self):
"""
Hide the tooltip widget.
        The tooltip widget is a special QLabel that looks like a tooltip;
        this method is here so it can be hidden as necessary. For example,
        when the user leaves the line number area while hovering over lint
warnings and errors.
"""
self._timer_mouse_moving.stop()
self._last_hover_word = None
self.clear_extra_selections('code_analysis_highlight')
if self.tooltip_widget.isVisible():
self.tooltip_widget.hide()
def _set_completions_hint_idle(self):
self._completions_hint_idle = True
self.completion_widget.trigger_completion_hint()
# --- Hint for completions
def show_hint_for_completion(self, word, documentation, at_point):
"""Show hint for completion element."""
if self.completions_hint and self._completions_hint_idle:
documentation = documentation.replace(u'\xa0', ' ')
completion_doc = {'name': word,
'signature': documentation}
if documentation and len(documentation) > 0:
self.show_hint(
documentation,
inspect_word=word,
at_point=at_point,
completion_doc=completion_doc,
max_lines=self._DEFAULT_MAX_LINES,
max_width=self._DEFAULT_COMPLETION_HINT_MAX_WIDTH)
self.tooltip_widget.move(at_point)
return
self.hide_tooltip()
def update_decorations(self):
"""Update decorations on the visible portion of the screen."""
if self.underline_errors_enabled:
# Clear current selections before painting the new ones.
# This prevents accumulating them when moving around in the file,
# which generated a memory leak and sluggishness in the editor
# after some time.
self.clear_extra_selections('code_analysis_underline')
self.underline_errors()
self.update_extra_selections()
        # This is required to update decorations whether or not there are
        # underline errors in the visible portion of the screen.
# See spyder-ide/spyder#14268.
self.decorations.update()
def show_code_analysis_results(self, line_number, block_data):
"""Show warning/error messages."""
# Diagnostic severity
icons = {
DiagnosticSeverity.ERROR: 'error',
DiagnosticSeverity.WARNING: 'warning',
DiagnosticSeverity.INFORMATION: 'information',
DiagnosticSeverity.HINT: 'hint',
}
code_analysis = block_data.code_analysis
# Size must be adapted from font
fm = self.fontMetrics()
size = fm.height()
template = (
'<img src="data:image/png;base64, {}"'
' height="{size}" width="{size}" /> '
'{} <i>({} {})</i>'
)
msglist = []
max_lines_msglist = 25
sorted_code_analysis = sorted(code_analysis, key=lambda i: i[2])
for src, code, sev, msg in sorted_code_analysis:
if src == 'pylint' and '[' in msg and ']' in msg:
# Remove extra redundant info from pylint messages
msg = msg.split(']')[-1]
msg = msg.strip()
            # Avoid messing with TODO, FIXME
            # Prevent an error if msg has only one character
if len(msg) > 1:
msg = msg[0].upper() + msg[1:]
            # Get individual lines following paragraph format and escape
            # '<' and '>' so they don't interfere with the <br> tags
            msg = msg.replace('<', '&lt;').replace('>', '&gt;')
paragraphs = msg.splitlines()
new_paragraphs = []
long_paragraphs = 0
lines_per_message = 6
for paragraph in paragraphs:
new_paragraph = textwrap.wrap(
paragraph,
width=self._DEFAULT_MAX_HINT_WIDTH)
if lines_per_message > 2:
if len(new_paragraph) > 1:
new_paragraph = '<br>'.join(new_paragraph[:2]) + '...'
long_paragraphs += 1
lines_per_message -= 2
else:
new_paragraph = '<br>'.join(new_paragraph)
lines_per_message -= 1
new_paragraphs.append(new_paragraph)
if len(new_paragraphs) > 1:
# Define max lines taking into account that in the same
# tooltip you can find multiple warnings and messages
# and each one can have multiple lines
if long_paragraphs != 0:
max_lines = 3
max_lines_msglist -= max_lines * 2
else:
max_lines = 5
max_lines_msglist -= max_lines
msg = '<br>'.join(new_paragraphs[:max_lines]) + '<br>'
else:
msg = '<br>'.join(new_paragraphs)
base_64 = ima.base64_from_icon(icons[sev], size, size)
if max_lines_msglist >= 0:
msglist.append(template.format(base_64, msg, src,
code, size=size))
if msglist:
self.show_tooltip(
title=_("Code analysis"),
text='\n'.join(msglist),
title_color=QStylePalette.COLOR_ACCENT_4,
at_line=line_number,
with_html_format=True
)
self.highlight_line_warning(block_data)
def highlight_line_warning(self, block_data):
"""Highlight errors and warnings in this editor."""
self.clear_extra_selections('code_analysis_highlight')
self.highlight_selection('code_analysis_highlight',
block_data._selection(),
background_color=block_data.color,
update=True)
self.linenumberarea.update()
def get_current_warnings(self):
"""
Get all warnings for the current editor and return
a list with the message and line number.
"""
block = self.document().firstBlock()
line_count = self.document().blockCount()
warnings = []
while True:
data = block.userData()
if data and data.code_analysis:
for warning in data.code_analysis:
warnings.append([warning[-1], block.blockNumber() + 1])
# See spyder-ide/spyder#9924
if block.blockNumber() + 1 == line_count:
break
block = block.next()
return warnings
def go_to_next_warning(self):
"""
Go to next code warning message and return new cursor position.
"""
block = self.textCursor().block()
line_count = self.document().blockCount()
for __ in range(line_count):
line_number = block.blockNumber() + 1
if line_number < line_count:
block = block.next()
else:
block = self.document().firstBlock()
data = block.userData()
if data and data.code_analysis:
line_number = block.blockNumber() + 1
self.go_to_line(line_number)
self.show_code_analysis_results(line_number, data)
return self.get_position('cursor')
def go_to_previous_warning(self):
"""
Go to previous code warning message and return new cursor position.
"""
block = self.textCursor().block()
line_count = self.document().blockCount()
for __ in range(line_count):
line_number = block.blockNumber() + 1
if line_number > 1:
block = block.previous()
else:
block = self.document().lastBlock()
data = block.userData()
if data and data.code_analysis:
line_number = block.blockNumber() + 1
self.go_to_line(line_number)
self.show_code_analysis_results(line_number, data)
return self.get_position('cursor')
def cell_list(self):
"""Get the outline explorer data for all cells."""
for oedata in self.outlineexplorer_data_list():
if oedata.def_type == OED.CELL:
yield oedata
def get_cell_code(self, cell):
"""
Get cell code for a given cell.
        If the cell doesn't exist, a RuntimeError is raised.
"""
selected_block = None
if is_string(cell):
for oedata in self.cell_list():
if oedata.def_name == cell:
selected_block = oedata.block
break
else:
if cell == 0:
selected_block = self.document().firstBlock()
else:
cell_list = list(self.cell_list())
if cell <= len(cell_list):
selected_block = cell_list[cell - 1].block
if not selected_block:
raise RuntimeError("Cell {} not found.".format(repr(cell)))
cursor = QTextCursor(selected_block)
cell_code, _ = self.get_cell_as_executable_code(cursor)
return cell_code
def get_cell_count(self):
"""Get number of cells in document."""
return 1 + len(list(self.cell_list()))
#------Tasks management
def go_to_next_todo(self):
"""Go to next todo and return new cursor position"""
block = self.textCursor().block()
line_count = self.document().blockCount()
while True:
if block.blockNumber()+1 < line_count:
block = block.next()
else:
block = self.document().firstBlock()
data = block.userData()
if data and data.todo:
break
line_number = block.blockNumber()+1
self.go_to_line(line_number)
self.show_tooltip(
title=_("To do"),
text=data.todo,
title_color=QStylePalette.COLOR_ACCENT_4,
at_line=line_number,
)
return self.get_position('cursor')
def process_todo(self, todo_results):
"""Process todo finder results"""
for data in self.blockuserdata_list():
data.todo = ''
for message, line_number in todo_results:
block = self.document().findBlockByNumber(line_number - 1)
data = block.userData()
if not data:
data = BlockUserData(self)
data.todo = message
block.setUserData(data)
self.sig_flags_changed.emit()
#------Comments/Indentation
def add_prefix(self, prefix):
"""Add prefix to current line or selected line(s)"""
cursor = self.textCursor()
if self.has_selected_text():
# Add prefix to selected line(s)
start_pos, end_pos = cursor.selectionStart(), cursor.selectionEnd()
# Let's see if selection begins at a block start
first_pos = min([start_pos, end_pos])
first_cursor = self.textCursor()
first_cursor.setPosition(first_pos)
cursor.beginEditBlock()
cursor.setPosition(end_pos)
# Check if end_pos is at the start of a block: if so, starting
# changes from the previous block
if cursor.atBlockStart():
cursor.movePosition(QTextCursor.PreviousBlock)
if cursor.position() < start_pos:
cursor.setPosition(start_pos)
move_number = self.__spaces_for_prefix()
while cursor.position() >= start_pos:
cursor.movePosition(QTextCursor.StartOfBlock)
line_text = to_text_string(cursor.block().text())
if (self.get_character(cursor.position()) == ' '
and '#' in prefix and not line_text.isspace()
or (not line_text.startswith(' ')
and line_text != '')):
cursor.movePosition(QTextCursor.Right,
QTextCursor.MoveAnchor,
move_number)
cursor.insertText(prefix)
elif '#' not in prefix:
cursor.insertText(prefix)
if cursor.blockNumber() == 0:
# Avoid infinite loop when indenting the very first line
break
cursor.movePosition(QTextCursor.PreviousBlock)
cursor.movePosition(QTextCursor.EndOfBlock)
cursor.endEditBlock()
else:
# Add prefix to current line
cursor.beginEditBlock()
cursor.movePosition(QTextCursor.StartOfBlock)
if self.get_character(cursor.position()) == ' ' and '#' in prefix:
cursor.movePosition(QTextCursor.NextWord)
cursor.insertText(prefix)
cursor.endEditBlock()
self.document_did_change()
def __spaces_for_prefix(self):
"""Find the less indented level of text."""
cursor = self.textCursor()
if self.has_selected_text():
# Add prefix to selected line(s)
start_pos, end_pos = cursor.selectionStart(), cursor.selectionEnd()
# Let's see if selection begins at a block start
first_pos = min([start_pos, end_pos])
first_cursor = self.textCursor()
first_cursor.setPosition(first_pos)
cursor.beginEditBlock()
cursor.setPosition(end_pos)
# Check if end_pos is at the start of a block: if so, starting
# changes from the previous block
if cursor.atBlockStart():
cursor.movePosition(QTextCursor.PreviousBlock)
if cursor.position() < start_pos:
cursor.setPosition(start_pos)
number_spaces = -1
while cursor.position() >= start_pos:
cursor.movePosition(QTextCursor.StartOfBlock)
line_text = to_text_string(cursor.block().text())
start_with_space = line_text.startswith(' ')
left_number_spaces = self.__number_of_spaces(line_text)
if not start_with_space:
left_number_spaces = 0
if ((number_spaces == -1
or number_spaces > left_number_spaces)
and not line_text.isspace() and line_text != ''):
number_spaces = left_number_spaces
if cursor.blockNumber() == 0:
# Avoid infinite loop when indenting the very first line
break
cursor.movePosition(QTextCursor.PreviousBlock)
cursor.movePosition(QTextCursor.EndOfBlock)
cursor.endEditBlock()
return number_spaces
def remove_suffix(self, suffix):
"""
Remove suffix from current line (there should not be any selection)
"""
cursor = self.textCursor()
cursor.setPosition(cursor.position() - qstring_length(suffix),
QTextCursor.KeepAnchor)
if to_text_string(cursor.selectedText()) == suffix:
cursor.removeSelectedText()
self.document_did_change()
def remove_prefix(self, prefix):
"""Remove prefix from current line or selected line(s)"""
cursor = self.textCursor()
if self.has_selected_text():
# Remove prefix from selected line(s)
start_pos, end_pos = sorted([cursor.selectionStart(),
cursor.selectionEnd()])
cursor.setPosition(start_pos)
if not cursor.atBlockStart():
cursor.movePosition(QTextCursor.StartOfBlock)
start_pos = cursor.position()
cursor.beginEditBlock()
cursor.setPosition(end_pos)
# Check if end_pos is at the start of a block: if so, starting
# changes from the previous block
if cursor.atBlockStart():
cursor.movePosition(QTextCursor.PreviousBlock)
if cursor.position() < start_pos:
cursor.setPosition(start_pos)
cursor.movePosition(QTextCursor.StartOfBlock)
old_pos = None
while cursor.position() >= start_pos:
new_pos = cursor.position()
if old_pos == new_pos:
break
else:
old_pos = new_pos
line_text = to_text_string(cursor.block().text())
self.__remove_prefix(prefix, cursor, line_text)
cursor.movePosition(QTextCursor.PreviousBlock)
cursor.endEditBlock()
else:
# Remove prefix from current line
cursor.movePosition(QTextCursor.StartOfBlock)
line_text = to_text_string(cursor.block().text())
self.__remove_prefix(prefix, cursor, line_text)
def __remove_prefix(self, prefix, cursor, line_text):
"""Handle the removal of the prefix for a single line."""
start_with_space = line_text.startswith(' ')
if start_with_space:
left_spaces = self.__even_number_of_spaces(line_text)
else:
left_spaces = False
if start_with_space:
right_number_spaces = self.__number_of_spaces(line_text, group=1)
else:
right_number_spaces = self.__number_of_spaces(line_text)
# Handle prefix remove for comments with spaces
if (prefix.strip() and line_text.lstrip().startswith(prefix + ' ')
or line_text.startswith(prefix + ' ') and '#' in prefix):
cursor.movePosition(QTextCursor.Right,
QTextCursor.MoveAnchor,
line_text.find(prefix))
if (right_number_spaces == 1
and (left_spaces or not start_with_space)
or (not start_with_space and right_number_spaces % 2 != 0)
or (left_spaces and right_number_spaces % 2 != 0)):
# Handle inserted '# ' with the count of the number of spaces
# at the right and left of the prefix.
cursor.movePosition(QTextCursor.Right,
QTextCursor.KeepAnchor, len(prefix + ' '))
else:
# Handle manual insertion of '#'
cursor.movePosition(QTextCursor.Right,
QTextCursor.KeepAnchor, len(prefix))
cursor.removeSelectedText()
# Check for prefix without space
elif (prefix.strip() and line_text.lstrip().startswith(prefix)
or line_text.startswith(prefix)):
cursor.movePosition(QTextCursor.Right,
QTextCursor.MoveAnchor,
line_text.find(prefix))
cursor.movePosition(QTextCursor.Right,
QTextCursor.KeepAnchor, len(prefix))
cursor.removeSelectedText()
self.document_did_change()
def __even_number_of_spaces(self, line_text, group=0):
"""
        Return whether a group of spaces in a line is a whole multiple of the
        indentation width.
"""
spaces = re.findall(r'\s+', line_text)
if len(spaces) - 1 >= group:
return len(spaces[group]) % len(self.indent_chars) == 0
def __number_of_spaces(self, line_text, group=0):
"""Get the number of spaces from a group of spaces in a line."""
spaces = re.findall(r'\s+', line_text)
if len(spaces) - 1 >= group:
return len(spaces[group])
def __get_brackets(self, line_text, closing_brackets=[]):
"""
Return unmatched opening brackets and left-over closing brackets.
(str, []) -> ([(pos, bracket)], [bracket], comment_pos)
Iterate through line_text to find unmatched brackets.
Returns three objects as a tuple:
1) bracket_stack:
a list of tuples of pos and char of each unmatched opening bracket
2) closing brackets:
this line's unmatched closing brackets + arg closing_brackets.
                If this line had no closing brackets, arg closing_brackets
                might be matched with previously unmatched opening brackets
                in this line.
            3) comment_pos: position at which a # comment begins; -1 if
                there is no comment.
"""
# Remove inline comment and check brackets
        bracket_stack = []  # list containing this line's unmatched opening brackets
        # Same deal for the closing ones; they are ignored if the bracket
        # stack is not empty, since they are mismatched in that case.
bracket_unmatched_closing = []
comment_pos = -1
deactivate = None
escaped = False
pos, c = None, None
for pos, c in enumerate(line_text):
# Handle '\' inside strings
if escaped:
escaped = False
# Handle strings
elif deactivate:
if c == deactivate:
deactivate = None
elif c == "\\":
escaped = True
elif c in ["'", '"']:
deactivate = c
# Handle comments
elif c == "#":
comment_pos = pos
break
# Handle brackets
elif c in ('(', '[', '{'):
bracket_stack.append((pos, c))
elif c in (')', ']', '}'):
if bracket_stack and bracket_stack[-1][1] == \
{')': '(', ']': '[', '}': '{'}[c]:
bracket_stack.pop()
else:
bracket_unmatched_closing.append(c)
del pos, deactivate, escaped
# If no closing brackets are left over from this line,
# check the ones from previous iterations' prevlines
if not bracket_unmatched_closing:
for c in list(closing_brackets):
if bracket_stack and bracket_stack[-1][1] == \
{')': '(', ']': '[', '}': '{'}[c]:
bracket_stack.pop()
closing_brackets.remove(c)
else:
break
del c
closing_brackets = bracket_unmatched_closing + closing_brackets
return (bracket_stack, closing_brackets, comment_pos)
def fix_indent(self, *args, **kwargs):
"""Indent line according to the preferences"""
if self.is_python_like():
ret = self.fix_indent_smart(*args, **kwargs)
else:
ret = self.simple_indentation(*args, **kwargs)
self.document_did_change()
return ret
def simple_indentation(self, forward=True, **kwargs):
"""
Simply preserve the indentation-level of the previous line.
"""
cursor = self.textCursor()
block_nb = cursor.blockNumber()
prev_block = self.document().findBlockByNumber(block_nb - 1)
prevline = to_text_string(prev_block.text())
indentation = re.match(r"\s*", prevline).group()
        # Unindent
if not forward:
indentation = indentation[len(self.indent_chars):]
cursor.insertText(indentation)
        return False  # simple indentation doesn't fix indentation
def find_indentation(self, forward=True, comment_or_string=False,
cur_indent=None):
"""
Find indentation (Python only, no text selection)
forward=True: fix indent only if text is not enough indented
(otherwise force indent)
forward=False: fix indent only if text is too much indented
(otherwise force unindent)
comment_or_string: Do not adjust indent level for
unmatched opening brackets and keywords
max_blank_lines: maximum number of blank lines to search before giving
up
cur_indent: current indent. This is the indent before we started
processing. E.g. when returning, indent before rstrip.
Returns the indentation for the current line
Assumes self.is_python_like() to return True
"""
cursor = self.textCursor()
block_nb = cursor.blockNumber()
# find the line that contains our scope
line_in_block = False
visual_indent = False
add_indent = 0 # How many levels of indent to add
prevline = None
prevtext = ""
empty_lines = True
closing_brackets = []
for prevline in range(block_nb - 1, -1, -1):
cursor.movePosition(QTextCursor.PreviousBlock)
prevtext = to_text_string(cursor.block().text()).rstrip()
bracket_stack, closing_brackets, comment_pos = self.__get_brackets(
prevtext, closing_brackets)
if not prevtext:
continue
if prevtext.endswith((':', '\\')):
# Presume a block was started
line_in_block = True # add one level of indent to correct_indent
# Does this variable actually do *anything* of relevance?
# comment_or_string = True
if bracket_stack or not closing_brackets:
break
if prevtext.strip() != '':
empty_lines = False
if empty_lines and prevline is not None and prevline < block_nb - 2:
# The previous line is too far, ignore
prevtext = ''
prevline = block_nb - 2
line_in_block = False
# splits of prevtext happen a few times. Let's just do it once
words = re.split(r'[\s\(\[\{\}\]\)]', prevtext.lstrip())
if line_in_block:
add_indent += 1
if prevtext and not comment_or_string:
if bracket_stack:
# Hanging indent
if prevtext.endswith(('(', '[', '{')):
add_indent += 1
if words[0] in ('class', 'def', 'elif', 'except', 'for',
'if', 'while', 'with'):
add_indent += 1
elif not ( # I'm not sure this block should exist here
(
self.tab_stop_width_spaces
if self.indent_chars == '\t' else
len(self.indent_chars)
) * 2 < len(prevtext)):
visual_indent = True
else:
# There's stuff after unmatched opening brackets
visual_indent = True
elif (words[-1] in ('continue', 'break', 'pass',)
or words[0] == "return" and not line_in_block
):
add_indent -= 1
if prevline:
prevline_indent = self.get_block_indentation(prevline)
else:
prevline_indent = 0
if visual_indent: # can only be true if bracket_stack
correct_indent = bracket_stack[-1][0] + 1
elif add_indent:
# Indent
if self.indent_chars == '\t':
correct_indent = prevline_indent + self.tab_stop_width_spaces * add_indent
else:
correct_indent = prevline_indent + len(self.indent_chars) * add_indent
else:
correct_indent = prevline_indent
# TODO untangle this block
if prevline and not bracket_stack and not prevtext.endswith(':'):
if forward:
# Keep indentation of previous line
ref_line = block_nb - 1
else:
# Find indentation context
ref_line = prevline
if cur_indent is None:
cur_indent = self.get_block_indentation(ref_line)
is_blank = not self.get_text_line(ref_line).strip()
trailing_text = self.get_text_line(block_nb).strip()
# If brackets are matched and no block gets opened
            # Match the above line's indent and nudge to the next multiple
            # of the indent width
if cur_indent < prevline_indent and (trailing_text or is_blank):
# if line directly above is blank or there is text after cursor
# Ceiling division
correct_indent = -(-cur_indent // len(self.indent_chars)) * \
len(self.indent_chars)
return correct_indent
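    # Illustrative example (not part of the original source), assuming
    # 4-space indents: after a previous line "def f(" both the hanging
    # bracket and the "def" keyword add one level each, so find_indentation()
    # returns the previous indentation + 8; after "x = f(1," the text that
    # follows the unmatched "(" triggers visual indentation instead, and the
    # result is the column just after that bracket.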
def fix_indent_smart(self, forward=True, comment_or_string=False,
cur_indent=None):
"""
Fix indentation (Python only, no text selection)
forward=True: fix indent only if text is not enough indented
(otherwise force indent)
forward=False: fix indent only if text is too much indented
(otherwise force unindent)
comment_or_string: Do not adjust indent level for
unmatched opening brackets and keywords
max_blank_lines: maximum number of blank lines to search before giving
up
cur_indent: current indent. This is the indent before we started
processing. E.g. when returning, indent before rstrip.
Returns True if indent needed to be fixed
Assumes self.is_python_like() to return True
"""
cursor = self.textCursor()
block_nb = cursor.blockNumber()
indent = self.get_block_indentation(block_nb)
correct_indent = self.find_indentation(
forward, comment_or_string, cur_indent)
if correct_indent >= 0 and not (
indent == correct_indent or
forward and indent > correct_indent or
not forward and indent < correct_indent
):
# Insert the determined indent
cursor = self.textCursor()
cursor.movePosition(QTextCursor.StartOfBlock)
if self.indent_chars == '\t':
indent = indent // self.tab_stop_width_spaces
cursor.setPosition(cursor.position()+indent, QTextCursor.KeepAnchor)
cursor.removeSelectedText()
if self.indent_chars == '\t':
indent_text = (
'\t' * (correct_indent // self.tab_stop_width_spaces) +
' ' * (correct_indent % self.tab_stop_width_spaces)
)
else:
indent_text = ' '*correct_indent
cursor.insertText(indent_text)
return True
return False
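    # Annotation (not in the original source): fix_indent_smart() applies the
    # indentation computed by find_indentation() only when it disagrees with
    # the current one in the requested direction (forward=True never
    # unindents, forward=False never indents) and returns False otherwise, so
    # callers such as indent()/unindent() (via fix_indent) can fall back to
    # adding or removing a plain indent level.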
@Slot()
def clear_all_output(self):
"""Removes all output in the ipynb format (Json only)"""
try:
nb = nbformat.reads(self.toPlainText(), as_version=4)
if nb.cells:
for cell in nb.cells:
if 'outputs' in cell:
cell['outputs'] = []
if 'prompt_number' in cell:
cell['prompt_number'] = None
# We do the following rather than using self.setPlainText
# to benefit from QTextEdit's undo/redo feature.
self.selectAll()
self.skip_rstrip = True
self.insertPlainText(nbformat.writes(nb))
self.skip_rstrip = False
except Exception as e:
QMessageBox.critical(self, _('Removal error'),
_("It was not possible to remove outputs from "
"this notebook. The error is:\n\n") + \
to_text_string(e))
return
@Slot()
def convert_notebook(self):
"""Convert an IPython notebook to a Python script in editor"""
try:
nb = nbformat.reads(self.toPlainText(), as_version=4)
script = nbexporter().from_notebook_node(nb)[0]
except Exception as e:
QMessageBox.critical(self, _('Conversion error'),
_("It was not possible to convert this "
"notebook. The error is:\n\n") + \
to_text_string(e))
return
self.sig_new_file.emit(script)
def indent(self, force=False):
"""
Indent current line or selection
        force=True: indent even if cursor is not at the beginning of the line
"""
leading_text = self.get_text('sol', 'cursor')
if self.has_selected_text():
self.add_prefix(self.indent_chars)
elif (force or not leading_text.strip() or
(self.tab_indents and self.tab_mode)):
if self.is_python_like():
if not self.fix_indent(forward=True):
self.add_prefix(self.indent_chars)
else:
self.add_prefix(self.indent_chars)
else:
if len(self.indent_chars) > 1:
length = len(self.indent_chars)
self.insert_text(" "*(length-(len(leading_text) % length)))
else:
self.insert_text(self.indent_chars)
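    # Illustrative example (not part of the original source): for a
    # non-Python buffer with indent_chars of four spaces and a leading text
    # of length 6, the branch above inserts 4 - (6 % 4) = 2 spaces, so the
    # cursor lands on the next indent-width multiple.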
def indent_or_replace(self):
"""Indent or replace by 4 spaces depending on selection and tab mode"""
if (self.tab_indents and self.tab_mode) or not self.has_selected_text():
self.indent()
else:
cursor = self.textCursor()
if (self.get_selected_text() ==
to_text_string(cursor.block().text())):
self.indent()
else:
cursor1 = self.textCursor()
cursor1.setPosition(cursor.selectionStart())
cursor2 = self.textCursor()
cursor2.setPosition(cursor.selectionEnd())
if cursor1.blockNumber() != cursor2.blockNumber():
self.indent()
else:
self.replace(self.indent_chars)
def unindent(self, force=False):
"""
Unindent current line or selection
        force=True: unindent even if cursor is not at the beginning of the line
"""
if self.has_selected_text():
if self.indent_chars == "\t":
# Tabs, remove one tab
self.remove_prefix(self.indent_chars)
else:
# Spaces
space_count = len(self.indent_chars)
leading_spaces = self.__spaces_for_prefix()
remainder = leading_spaces % space_count
if remainder:
# Get block on "space multiple grid".
# See spyder-ide/spyder#5734.
self.remove_prefix(" "*remainder)
else:
# Unindent one space multiple
self.remove_prefix(self.indent_chars)
else:
leading_text = self.get_text('sol', 'cursor')
if (force or not leading_text.strip() or
(self.tab_indents and self.tab_mode)):
if self.is_python_like():
if not self.fix_indent(forward=False):
self.remove_prefix(self.indent_chars)
elif leading_text.endswith('\t'):
self.remove_prefix('\t')
else:
self.remove_prefix(self.indent_chars)
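    # Illustrative example (not part of the original source): with 4-space
    # indents and a selection whose leading indentation is 6 spaces,
    # unindent() removes only the 2-space remainder to snap back onto the
    # indentation grid (see spyder-ide/spyder#5734); at 8 spaces it removes a
    # full 4-space level.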
@Slot()
def toggle_comment(self):
"""Toggle comment on current line or selection"""
cursor = self.textCursor()
start_pos, end_pos = sorted([cursor.selectionStart(),
cursor.selectionEnd()])
cursor.setPosition(end_pos)
last_line = cursor.block().blockNumber()
if cursor.atBlockStart() and start_pos != end_pos:
last_line -= 1
cursor.setPosition(start_pos)
first_line = cursor.block().blockNumber()
# If the selection contains only commented lines and surrounding
# whitespace, uncomment. Otherwise, comment.
is_comment_or_whitespace = True
at_least_one_comment = False
for _line_nb in range(first_line, last_line+1):
text = to_text_string(cursor.block().text()).lstrip()
is_comment = text.startswith(self.comment_string)
is_whitespace = (text == '')
is_comment_or_whitespace *= (is_comment or is_whitespace)
if is_comment:
at_least_one_comment = True
cursor.movePosition(QTextCursor.NextBlock)
if is_comment_or_whitespace and at_least_one_comment:
self.uncomment()
else:
self.comment()
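    # Annotation (not in the original source): a selection made only of
    # already-commented lines and blank lines is uncommented; as soon as one
    # selected line carries uncommented code, the whole selection is
    # commented instead.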
def is_comment(self, block):
"""Detect inline comments.
Return True if the block is an inline comment.
"""
if block is None:
return False
text = to_text_string(block.text()).lstrip()
return text.startswith(self.comment_string)
def comment(self):
"""Comment current line or selection."""
self.add_prefix(self.comment_string + ' ')
def uncomment(self):
"""Uncomment current line or selection."""
blockcomment = self.unblockcomment()
if not blockcomment:
self.remove_prefix(self.comment_string)
def __blockcomment_bar(self, compatibility=False):
"""Handle versions of blockcomment bar for backwards compatibility."""
# Blockcomment bar in Spyder version >= 4
blockcomment_bar = self.comment_string + ' ' + '=' * (
79 - len(self.comment_string + ' '))
if compatibility:
# Blockcomment bar in Spyder version < 4
blockcomment_bar = self.comment_string + '=' * (
79 - len(self.comment_string))
return blockcomment_bar
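    # Illustrative example (not part of the original source): with '#' as the
    # comment string the current bar is "# " followed by 77 '=' characters
    # (79 columns in total), while the pre-Spyder-4 compatibility bar is "#"
    # followed by 78 '=' characters.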
def transform_to_uppercase(self):
"""Change to uppercase current line or selection."""
cursor = self.textCursor()
prev_pos = cursor.position()
selected_text = to_text_string(cursor.selectedText())
if len(selected_text) == 0:
prev_pos = cursor.position()
cursor.select(QTextCursor.WordUnderCursor)
selected_text = to_text_string(cursor.selectedText())
s = selected_text.upper()
cursor.insertText(s)
self.set_cursor_position(prev_pos)
self.document_did_change()
def transform_to_lowercase(self):
"""Change to lowercase current line or selection."""
cursor = self.textCursor()
prev_pos = cursor.position()
selected_text = to_text_string(cursor.selectedText())
if len(selected_text) == 0:
prev_pos = cursor.position()
cursor.select(QTextCursor.WordUnderCursor)
selected_text = to_text_string(cursor.selectedText())
s = selected_text.lower()
cursor.insertText(s)
self.set_cursor_position(prev_pos)
self.document_did_change()
def blockcomment(self):
"""Block comment current line or selection."""
comline = self.__blockcomment_bar() + self.get_line_separator()
cursor = self.textCursor()
if self.has_selected_text():
self.extend_selection_to_complete_lines()
start_pos, end_pos = cursor.selectionStart(), cursor.selectionEnd()
else:
start_pos = end_pos = cursor.position()
cursor.beginEditBlock()
cursor.setPosition(start_pos)
cursor.movePosition(QTextCursor.StartOfBlock)
while cursor.position() <= end_pos:
cursor.insertText(self.comment_string + " ")
cursor.movePosition(QTextCursor.EndOfBlock)
if cursor.atEnd():
break
cursor.movePosition(QTextCursor.NextBlock)
end_pos += len(self.comment_string + " ")
cursor.setPosition(end_pos)
cursor.movePosition(QTextCursor.EndOfBlock)
if cursor.atEnd():
cursor.insertText(self.get_line_separator())
else:
cursor.movePosition(QTextCursor.NextBlock)
cursor.insertText(comline)
cursor.setPosition(start_pos)
cursor.movePosition(QTextCursor.StartOfBlock)
cursor.insertText(comline)
cursor.endEditBlock()
self.document_did_change()
def unblockcomment(self):
"""Un-block comment current line or selection."""
# Needed for backward compatibility with Spyder previous blockcomments.
# See spyder-ide/spyder#2845.
unblockcomment = self.__unblockcomment()
if not unblockcomment:
unblockcomment = self.__unblockcomment(compatibility=True)
else:
return unblockcomment
self.document_did_change()
def __unblockcomment(self, compatibility=False):
"""Un-block comment current line or selection helper."""
def __is_comment_bar(cursor):
return to_text_string(cursor.block().text()
).startswith(
self.__blockcomment_bar(compatibility=compatibility))
# Finding first comment bar
cursor1 = self.textCursor()
if __is_comment_bar(cursor1):
return
while not __is_comment_bar(cursor1):
cursor1.movePosition(QTextCursor.PreviousBlock)
if cursor1.blockNumber() == 0:
break
if not __is_comment_bar(cursor1):
return False
def __in_block_comment(cursor):
cs = self.comment_string
return to_text_string(cursor.block().text()).startswith(cs)
# Finding second comment bar
cursor2 = QTextCursor(cursor1)
cursor2.movePosition(QTextCursor.NextBlock)
while not __is_comment_bar(cursor2) and __in_block_comment(cursor2):
cursor2.movePosition(QTextCursor.NextBlock)
if cursor2.block() == self.document().lastBlock():
break
if not __is_comment_bar(cursor2):
return False
# Removing block comment
cursor3 = self.textCursor()
cursor3.beginEditBlock()
cursor3.setPosition(cursor1.position())
cursor3.movePosition(QTextCursor.NextBlock)
while cursor3.position() < cursor2.position():
cursor3.movePosition(QTextCursor.NextCharacter,
QTextCursor.KeepAnchor)
if not cursor3.atBlockEnd():
# standard commenting inserts '# ' but a trailing space on an
# empty line might be stripped.
if not compatibility:
cursor3.movePosition(QTextCursor.NextCharacter,
QTextCursor.KeepAnchor)
cursor3.removeSelectedText()
cursor3.movePosition(QTextCursor.NextBlock)
for cursor in (cursor2, cursor1):
cursor3.setPosition(cursor.position())
cursor3.select(QTextCursor.BlockUnderCursor)
cursor3.removeSelectedText()
cursor3.endEditBlock()
return True
#------Kill ring handlers
# Taken from Jupyter's QtConsole
# Copyright (c) 2001-2015, IPython Development Team
# Copyright (c) 2015-, Jupyter Development Team
def kill_line_end(self):
"""Kill the text on the current line from the cursor forward"""
cursor = self.textCursor()
cursor.clearSelection()
cursor.movePosition(QTextCursor.EndOfLine, QTextCursor.KeepAnchor)
if not cursor.hasSelection():
# Line deletion
cursor.movePosition(QTextCursor.NextBlock,
QTextCursor.KeepAnchor)
self._kill_ring.kill_cursor(cursor)
self.setTextCursor(cursor)
self.document_did_change()
def kill_line_start(self):
"""Kill the text on the current line from the cursor backward"""
cursor = self.textCursor()
cursor.clearSelection()
cursor.movePosition(QTextCursor.StartOfBlock,
QTextCursor.KeepAnchor)
self._kill_ring.kill_cursor(cursor)
self.setTextCursor(cursor)
self.document_did_change()
def _get_word_start_cursor(self, position):
"""Find the start of the word to the left of the given position. If a
sequence of non-word characters precedes the first word, skip over
them. (This emulates the behavior of bash, emacs, etc.)
"""
document = self.document()
position -= 1
while (position and not
self.is_letter_or_number(document.characterAt(position))):
position -= 1
while position and self.is_letter_or_number(
document.characterAt(position)):
position -= 1
cursor = self.textCursor()
        cursor.setPosition(self.next_cursor_position(position))
return cursor
def _get_word_end_cursor(self, position):
"""Find the end of the word to the right of the given position. If a
sequence of non-word characters precedes the first word, skip over
them. (This emulates the behavior of bash, emacs, etc.)
"""
document = self.document()
cursor = self.textCursor()
position = cursor.position()
cursor.movePosition(QTextCursor.End)
end = cursor.position()
while (position < end and
not self.is_letter_or_number(document.characterAt(position))):
position = self.next_cursor_position(position)
while (position < end and
self.is_letter_or_number(document.characterAt(position))):
position = self.next_cursor_position(position)
cursor.setPosition(position)
return cursor
def kill_prev_word(self):
"""Kill the previous word"""
position = self.textCursor().position()
cursor = self._get_word_start_cursor(position)
cursor.setPosition(position, QTextCursor.KeepAnchor)
self._kill_ring.kill_cursor(cursor)
self.setTextCursor(cursor)
self.document_did_change()
def kill_next_word(self):
"""Kill the next word"""
position = self.textCursor().position()
cursor = self._get_word_end_cursor(position)
cursor.setPosition(position, QTextCursor.KeepAnchor)
self._kill_ring.kill_cursor(cursor)
self.setTextCursor(cursor)
self.document_did_change()
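    # Illustrative example (not part of the original source): with the text
    # "foo  bar" and the cursor at the end, kill_prev_word() removes only
    # "bar" -- word characters are consumed after skipping any run of
    # non-word characters, emulating bash/emacs word deletion.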
#------Autoinsertion of quotes/colons
def __get_current_color(self, cursor=None):
"""Get the syntax highlighting color for the current cursor position"""
if cursor is None:
cursor = self.textCursor()
block = cursor.block()
pos = cursor.position() - block.position() # relative pos within block
layout = block.layout()
block_formats = layout.additionalFormats()
if block_formats:
# To easily grab current format for autoinsert_colons
if cursor.atBlockEnd():
current_format = block_formats[-1].format
else:
current_format = None
for fmt in block_formats:
if (pos >= fmt.start) and (pos < fmt.start + fmt.length):
current_format = fmt.format
if current_format is None:
return None
color = current_format.foreground().color().name()
return color
else:
return None
def in_comment_or_string(self, cursor=None, position=None):
"""Is the cursor or position inside or next to a comment or string?
If *cursor* is None, *position* is used instead. If *position* is also
None, then the current cursor position is used.
"""
if self.highlighter:
if cursor is None:
cursor = self.textCursor()
if position:
cursor.setPosition(position)
current_color = self.__get_current_color(cursor=cursor)
comment_color = self.highlighter.get_color_name('comment')
string_color = self.highlighter.get_color_name('string')
if (current_color == comment_color) or (current_color == string_color):
return True
else:
return False
else:
return False
def __colon_keyword(self, text):
stmt_kws = ['def', 'for', 'if', 'while', 'with', 'class', 'elif',
'except']
whole_kws = ['else', 'try', 'except', 'finally']
text = text.lstrip()
words = text.split()
if any([text == wk for wk in whole_kws]):
return True
elif len(words) < 2:
return False
elif any([words[0] == sk for sk in stmt_kws]):
return True
else:
return False
def __forbidden_colon_end_char(self, text):
end_chars = [':', '\\', '[', '{', '(', ',']
text = text.rstrip()
if any([text.endswith(c) for c in end_chars]):
return True
else:
return False
def __has_colon_not_in_brackets(self, text):
"""
Return whether a string has a colon which is not between brackets.
This function returns True if the given string has a colon which is
not between a pair of (round, square or curly) brackets. It assumes
that the brackets in the string are balanced.
"""
bracket_ext = self.editor_extensions.get(CloseBracketsExtension)
for pos, char in enumerate(text):
if (char == ':' and
not bracket_ext.unmatched_brackets_in_line(text[:pos])):
return True
return False
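    # Illustrative example (not part of the original source): for
    # "for i in range(10):" the trailing colon follows balanced brackets, so
    # the helper above returns True, while in "d[a:b]" the only colon sits
    # inside the square brackets and it returns False.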
def __has_unmatched_opening_bracket(self):
"""
Checks if there are any unmatched opening brackets before the current
cursor position.
"""
position = self.textCursor().position()
for brace in [']', ')', '}']:
match = self.find_brace_match(position, brace, forward=False)
if match is not None:
return True
return False
def autoinsert_colons(self):
"""Decide if we want to autoinsert colons"""
bracket_ext = self.editor_extensions.get(CloseBracketsExtension)
self.completion_widget.hide()
line_text = self.get_text('sol', 'cursor')
if not self.textCursor().atBlockEnd():
return False
elif self.in_comment_or_string():
return False
elif not self.__colon_keyword(line_text):
return False
elif self.__forbidden_colon_end_char(line_text):
return False
elif bracket_ext.unmatched_brackets_in_line(line_text):
return False
elif self.__has_colon_not_in_brackets(line_text):
return False
elif self.__has_unmatched_opening_bracket():
return False
else:
return True
def next_char(self):
cursor = self.textCursor()
cursor.movePosition(QTextCursor.NextCharacter,
QTextCursor.KeepAnchor)
next_char = to_text_string(cursor.selectedText())
return next_char
def in_comment(self, cursor=None, position=None):
"""Returns True if the given position is inside a comment.
Parameters
----------
cursor : QTextCursor, optional
The position to check.
position : int, optional
The position to check if *cursor* is None. This parameter
is ignored when *cursor* is not None.
If both *cursor* and *position* are none, then the position returned
by self.textCursor() is used instead.
"""
if self.highlighter:
if cursor is None:
cursor = self.textCursor()
if position is not None:
cursor.setPosition(position)
current_color = self.__get_current_color(cursor)
comment_color = self.highlighter.get_color_name('comment')
return (current_color == comment_color)
else:
return False
def in_string(self, cursor=None, position=None):
"""Returns True if the given position is inside a string.
Parameters
----------
cursor : QTextCursor, optional
The position to check.
position : int, optional
The position to check if *cursor* is None. This parameter
is ignored when *cursor* is not None.
If both *cursor* and *position* are none, then the position returned
by self.textCursor() is used instead.
"""
if self.highlighter:
if cursor is None:
cursor = self.textCursor()
if position is not None:
cursor.setPosition(position)
current_color = self.__get_current_color(cursor)
string_color = self.highlighter.get_color_name('string')
return (current_color == string_color)
else:
return False
# ------ Qt Event handlers
def setup_context_menu(self):
"""Setup context menu"""
self.undo_action = create_action(
self, _("Undo"), icon=ima.icon('undo'),
shortcut=CONF.get_shortcut('editor', 'undo'), triggered=self.undo)
self.redo_action = create_action(
self, _("Redo"), icon=ima.icon('redo'),
shortcut=CONF.get_shortcut('editor', 'redo'), triggered=self.redo)
self.cut_action = create_action(
self, _("Cut"), icon=ima.icon('editcut'),
shortcut=CONF.get_shortcut('editor', 'cut'), triggered=self.cut)
self.copy_action = create_action(
self, _("Copy"), icon=ima.icon('editcopy'),
shortcut=CONF.get_shortcut('editor', 'copy'), triggered=self.copy)
self.paste_action = create_action(
self, _("Paste"), icon=ima.icon('editpaste'),
shortcut=CONF.get_shortcut('editor', 'paste'),
triggered=self.paste)
selectall_action = create_action(
self, _("Select All"), icon=ima.icon('selectall'),
shortcut=CONF.get_shortcut('editor', 'select all'),
triggered=self.selectAll)
toggle_comment_action = create_action(
self, _("Comment")+"/"+_("Uncomment"), icon=ima.icon('comment'),
shortcut=CONF.get_shortcut('editor', 'toggle comment'),
triggered=self.toggle_comment)
self.clear_all_output_action = create_action(
self, _("Clear all ouput"), icon=ima.icon('ipython_console'),
triggered=self.clear_all_output)
self.ipynb_convert_action = create_action(
self, _("Convert to Python script"), icon=ima.icon('python'),
triggered=self.convert_notebook)
self.gotodef_action = create_action(
self, _("Go to definition"),
shortcut=CONF.get_shortcut('editor', 'go to definition'),
triggered=self.go_to_definition_from_cursor)
# Run actions
self.run_cell_action = create_action(
self, _("Run cell"), icon=ima.icon('run_cell'),
shortcut=CONF.get_shortcut('editor', 'run cell'),
triggered=self.sig_run_cell.emit)
self.run_cell_and_advance_action = create_action(
self, _("Run cell and advance"), icon=ima.icon('run_cell'),
shortcut=CONF.get_shortcut('editor', 'run cell and advance'),
triggered=self.sig_run_cell_and_advance.emit)
self.re_run_last_cell_action = create_action(
self, _("Re-run last cell"), icon=ima.icon('run_cell'),
shortcut=CONF.get_shortcut('editor', 're-run last cell'),
triggered=self.sig_re_run_last_cell.emit)
self.run_selection_action = create_action(
self, _("Run &selection or current line"),
icon=ima.icon('run_selection'),
shortcut=CONF.get_shortcut('editor', 'run selection'),
triggered=self.sig_run_selection.emit)
self.debug_cell_action = create_action(
self, _("Debug cell"), icon=ima.icon('debug_cell'),
shortcut=CONF.get_shortcut('editor', 'debug cell'),
triggered=self.sig_debug_cell.emit)
# Zoom actions
zoom_in_action = create_action(
self, _("Zoom in"), icon=ima.icon('zoom_in'),
shortcut=QKeySequence(QKeySequence.ZoomIn),
triggered=self.zoom_in.emit)
zoom_out_action = create_action(
self, _("Zoom out"), icon=ima.icon('zoom_out'),
shortcut=QKeySequence(QKeySequence.ZoomOut),
triggered=self.zoom_out.emit)
zoom_reset_action = create_action(
self, _("Zoom reset"), shortcut=QKeySequence("Ctrl+0"),
triggered=self.zoom_reset.emit)
# Docstring
writer = self.writer_docstring
self.docstring_action = create_action(
self, _("Generate docstring"),
shortcut=CONF.get_shortcut('editor', 'docstring'),
triggered=writer.write_docstring_at_first_line_of_function)
# Document formatting
formatter = CONF.get(
'completions',
('provider_configuration', 'lsp', 'values', 'formatting'),
''
)
self.format_action = create_action(
self,
_('Format file or selection with {0}').format(
formatter.capitalize()),
shortcut=CONF.get_shortcut('editor', 'autoformatting'),
triggered=self.format_document_or_range)
self.format_action.setEnabled(False)
# Build menu
self.menu = QMenu(self)
actions_1 = [self.run_cell_action, self.run_cell_and_advance_action,
self.re_run_last_cell_action, self.run_selection_action,
self.gotodef_action, None, self.undo_action,
self.redo_action, None, self.cut_action,
self.copy_action, self.paste_action, selectall_action]
actions_2 = [None, zoom_in_action, zoom_out_action, zoom_reset_action,
None, toggle_comment_action, self.docstring_action,
self.format_action]
if nbformat is not None:
nb_actions = [self.clear_all_output_action,
self.ipynb_convert_action, None]
actions = actions_1 + nb_actions + actions_2
add_actions(self.menu, actions)
else:
actions = actions_1 + actions_2
add_actions(self.menu, actions)
# Read-only context-menu
self.readonly_menu = QMenu(self)
add_actions(self.readonly_menu,
(self.copy_action, None, selectall_action,
self.gotodef_action))
def keyReleaseEvent(self, event):
"""Override Qt method."""
self.sig_key_released.emit(event)
key = event.key()
direction_keys = {Qt.Key_Up, Qt.Key_Left, Qt.Key_Right, Qt.Key_Down}
if key in direction_keys:
self.request_cursor_event()
# Update decorations after releasing these keys because they don't
# trigger the emission of the valueChanged signal in
# verticalScrollBar.
# See https://bugreports.qt.io/browse/QTBUG-25365
if key in {Qt.Key_Up, Qt.Key_Down}:
self.update_decorations_timer.start()
        # This is necessary to run our Pygments highlighter again after
        # user-generated text changes
if event.text():
# Stop the active timer and start it again to not run it on
# every event
if self.timer_syntax_highlight.isActive():
self.timer_syntax_highlight.stop()
# Adjust interval to rehighlight according to the lines
# present in the file
total_lines = self.get_line_count()
if total_lines < 1000:
self.timer_syntax_highlight.setInterval(600)
elif total_lines < 2000:
self.timer_syntax_highlight.setInterval(800)
else:
self.timer_syntax_highlight.setInterval(1000)
self.timer_syntax_highlight.start()
self._restore_editor_cursor_and_selections()
super(CodeEditor, self).keyReleaseEvent(event)
event.ignore()
def event(self, event):
"""Qt method override."""
if event.type() == QEvent.ShortcutOverride:
event.ignore()
return False
else:
return super(CodeEditor, self).event(event)
def _start_completion_timer(self):
"""Helper to start timer or complete."""
if self.automatic_completions_after_ms > 0:
self._timer_autocomplete.start(
self.automatic_completions_after_ms)
else:
self._handle_completions()
def _handle_keypress_event(self, event):
"""Handle keypress events."""
TextEditBaseWidget.keyPressEvent(self, event)
# Trigger the following actions only if the event generates
# a text change.
text = to_text_string(event.text())
if text:
# The next three lines are a workaround for a quirk of
# QTextEdit. See spyder-ide/spyder#12663 and
# https://bugreports.qt.io/browse/QTBUG-35861
cursor = self.textCursor()
cursor.setPosition(cursor.position())
self.setTextCursor(cursor)
self.document_did_change()
self.sig_text_was_inserted.emit()
def keyPressEvent(self, event):
"""Reimplement Qt method."""
tab_pressed = False
if self.completions_hint_after_ms > 0:
self._completions_hint_idle = False
self._timer_completions_hint.start(self.completions_hint_after_ms)
else:
self._set_completions_hint_idle()
# Send the signal to the editor's extension.
event.ignore()
self.sig_key_pressed.emit(event)
self.kite_call_to_action.handle_key_press(event)
key = event.key()
text = to_text_string(event.text())
has_selection = self.has_selected_text()
ctrl = event.modifiers() & Qt.ControlModifier
shift = event.modifiers() & Qt.ShiftModifier
if text:
self.__clear_occurrences()
# Only ask for completions if there's some text generated
            # as part of the event. Events such as pressing Ctrl,
# Shift or Alt don't generate any text.
# Fixes spyder-ide/spyder#11021
self._start_completion_timer()
if key in {Qt.Key_Up, Qt.Key_Left, Qt.Key_Right, Qt.Key_Down}:
self.hide_tooltip()
if event.isAccepted():
# The event was handled by one of the editor extension.
return
if key in [Qt.Key_Control, Qt.Key_Shift, Qt.Key_Alt,
Qt.Key_Meta, Qt.KeypadModifier]:
# The user pressed only a modifier key.
if ctrl:
pos = self.mapFromGlobal(QCursor.pos())
pos = self.calculate_real_position_from_global(pos)
if self._handle_goto_uri_event(pos):
event.accept()
return
if self._handle_goto_definition_event(pos):
event.accept()
return
return
# ---- Handle hard coded and builtin actions
operators = {'+', '-', '*', '**', '/', '//', '%', '@', '<<', '>>',
'&', '|', '^', '~', '<', '>', '<=', '>=', '==', '!='}
delimiters = {',', ':', ';', '@', '=', '->', '+=', '-=', '*=', '/=',
'//=', '%=', '@=', '&=', '|=', '^=', '>>=', '<<=', '**='}
if text not in self.auto_completion_characters:
if text in operators or text in delimiters:
self.completion_widget.hide()
if key in (Qt.Key_Enter, Qt.Key_Return):
if not shift and not ctrl:
if (self.add_colons_enabled and self.is_python_like() and
self.autoinsert_colons()):
self.textCursor().beginEditBlock()
self.insert_text(':' + self.get_line_separator())
if self.strip_trailing_spaces_on_modify:
self.fix_and_strip_indent()
else:
self.fix_indent()
self.textCursor().endEditBlock()
elif self.is_completion_widget_visible():
self.select_completion_list()
else:
self.textCursor().beginEditBlock()
cur_indent = self.get_block_indentation(
self.textCursor().blockNumber())
self._handle_keypress_event(event)
# Check if we're in a comment or a string at the
# current position
cmt_or_str_cursor = self.in_comment_or_string()
# Check if the line start with a comment or string
cursor = self.textCursor()
cursor.setPosition(cursor.block().position(),
QTextCursor.KeepAnchor)
cmt_or_str_line_begin = self.in_comment_or_string(
cursor=cursor)
# Check if we are in a comment or a string
cmt_or_str = cmt_or_str_cursor and cmt_or_str_line_begin
if self.strip_trailing_spaces_on_modify:
self.fix_and_strip_indent(
comment_or_string=cmt_or_str,
cur_indent=cur_indent)
else:
self.fix_indent(comment_or_string=cmt_or_str,
cur_indent=cur_indent)
self.textCursor().endEditBlock()
elif key == Qt.Key_Insert and not shift and not ctrl:
self.setOverwriteMode(not self.overwriteMode())
elif key == Qt.Key_Backspace and not shift and not ctrl:
if has_selection or not self.intelligent_backspace:
self._handle_keypress_event(event)
else:
leading_text = self.get_text('sol', 'cursor')
leading_length = len(leading_text)
trailing_spaces = leading_length - len(leading_text.rstrip())
trailing_text = self.get_text('cursor', 'eol')
matches = ('()', '[]', '{}', '\'\'', '""')
if (not leading_text.strip() and
(leading_length > len(self.indent_chars))):
if leading_length % len(self.indent_chars) == 0:
self.unindent()
else:
self._handle_keypress_event(event)
elif trailing_spaces and not trailing_text.strip():
self.remove_suffix(leading_text[-trailing_spaces:])
elif (leading_text and trailing_text and
(leading_text[-1] + trailing_text[0] in matches)):
cursor = self.textCursor()
cursor.movePosition(QTextCursor.PreviousCharacter)
cursor.movePosition(QTextCursor.NextCharacter,
QTextCursor.KeepAnchor, 2)
cursor.removeSelectedText()
self.document_did_change()
else:
self._handle_keypress_event(event)
elif key == Qt.Key_Home:
self.stdkey_home(shift, ctrl)
elif key == Qt.Key_End:
# See spyder-ide/spyder#495: on MacOS X, it is necessary to
# redefine this basic action which should have been implemented
# natively
self.stdkey_end(shift, ctrl)
elif text in self.auto_completion_characters:
self.insert_text(text)
if text == ".":
if not self.in_comment_or_string():
text = self.get_text('sol', 'cursor')
last_obj = getobj(text)
prev_char = text[-2] if len(text) > 1 else ''
if (prev_char in {')', ']', '}'} or
(last_obj and not last_obj.isdigit())):
# Completions should be triggered immediately when
# an autocompletion character is introduced.
self.do_completion(automatic=True)
else:
self.do_completion(automatic=True)
elif (text in self.signature_completion_characters and
not self.has_selected_text()):
self.insert_text(text)
self.request_signature()
elif (key == Qt.Key_Colon and not has_selection and
self.auto_unindent_enabled):
leading_text = self.get_text('sol', 'cursor')
if leading_text.lstrip() in ('else', 'finally'):
ind = lambda txt: len(txt) - len(txt.lstrip())
prevtxt = (to_text_string(self.textCursor().block().
previous().text()))
if self.language == 'Python':
prevtxt = prevtxt.rstrip()
if ind(leading_text) == ind(prevtxt):
self.unindent(force=True)
self._handle_keypress_event(event)
elif (key == Qt.Key_Space and not shift and not ctrl and not
has_selection and self.auto_unindent_enabled):
self.completion_widget.hide()
leading_text = self.get_text('sol', 'cursor')
if leading_text.lstrip() in ('elif', 'except'):
ind = lambda txt: len(txt)-len(txt.lstrip())
prevtxt = (to_text_string(self.textCursor().block().
previous().text()))
if self.language == 'Python':
prevtxt = prevtxt.rstrip()
if ind(leading_text) == ind(prevtxt):
self.unindent(force=True)
self._handle_keypress_event(event)
elif key == Qt.Key_Tab and not ctrl:
# Important note: <TAB> can't be called with a QShortcut because
# of its singular role with respect to widget focus management
tab_pressed = True
if not has_selection and not self.tab_mode:
self.intelligent_tab()
else:
# indent the selected text
self.indent_or_replace()
elif key == Qt.Key_Backtab and not ctrl:
# Backtab, i.e. Shift+<TAB>, could be treated as a QShortcut but
# there is no point since <TAB> can't (see above)
tab_pressed = True
if not has_selection and not self.tab_mode:
self.intelligent_backtab()
else:
# indent the selected text
self.unindent()
event.accept()
elif not event.isAccepted():
self._handle_keypress_event(event)
self._last_key_pressed_text = text
self._last_pressed_key = key
if self.automatic_completions_after_ms == 0 and not tab_pressed:
self._handle_completions()
if not event.modifiers():
# Accept event to avoid it being handled by the parent.
# Modifiers should be passed to the parent because they
# could be shortcuts
event.accept()
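    # Annotation (not in the original source): on Enter the handler above
    # optionally auto-inserts a colon, lets the base class insert the newline,
    # and then calls fix_indent()/fix_and_strip_indent() with the indentation
    # captured before the edit; Tab and Backtab are handled here rather than
    # as QShortcuts because <TAB> is tied to widget focus management.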
def _handle_completions(self):
"""Handle on the fly completions after delay."""
cursor = self.textCursor()
pos = cursor.position()
cursor.select(QTextCursor.WordUnderCursor)
text = to_text_string(cursor.selectedText())
key = self._last_pressed_key
if key is not None:
if key in [Qt.Key_Return, Qt.Key_Escape,
Qt.Key_Tab, Qt.Key_Backtab, Qt.Key_Space]:
self._last_pressed_key = None
return
# Correctly handle completions when Backspace key is pressed.
# We should not show the widget if deleting a space before a word.
if key == Qt.Key_Backspace:
cursor.setPosition(pos - 1, QTextCursor.MoveAnchor)
cursor.select(QTextCursor.WordUnderCursor)
prev_text = to_text_string(cursor.selectedText())
cursor.setPosition(pos - 1, QTextCursor.MoveAnchor)
cursor.setPosition(pos, QTextCursor.KeepAnchor)
prev_char = cursor.selectedText()
if prev_text == '' or prev_char in (u'\u2029', ' ', '\t'):
return
# Text might be after a dot '.'
if text == '':
cursor.setPosition(pos - 1, QTextCursor.MoveAnchor)
cursor.select(QTextCursor.WordUnderCursor)
text = to_text_string(cursor.selectedText())
if text != '.':
text = ''
# WordUnderCursor fails if the cursor is next to a right brace.
# If the returned text starts with it, we move to the left.
if text.startswith((')', ']', '}')):
cursor.setPosition(pos - 1, QTextCursor.MoveAnchor)
cursor.select(QTextCursor.WordUnderCursor)
text = to_text_string(cursor.selectedText())
self.document_did_change(text)
is_backspace = (
self.is_completion_widget_visible() and key == Qt.Key_Backspace)
if (len(text) >= self.automatic_completions_after_chars
and self._last_key_pressed_text or is_backspace):
# Perform completion on the fly
if self.automatic_completions and not self.in_comment_or_string():
# Variables can include numbers and underscores
if (text.isalpha() or text.isalnum() or '_' in text
or '.' in text):
self.do_completion(automatic=True)
self._last_key_pressed_text = ''
self._last_pressed_key = None
def fix_and_strip_indent(self, *args, **kwargs):
"""
Automatically fix indent and strip previous automatic indent.
args and kwargs are forwarded to self.fix_indent
"""
# Fix indent
cursor_before = self.textCursor().position()
# A change just occurred on the last line (return was pressed)
if cursor_before > 0:
self.last_change_position = cursor_before - 1
self.fix_indent(*args, **kwargs)
cursor_after = self.textCursor().position()
# Remove previous spaces and update last_auto_indent
nspaces_removed = self.strip_trailing_spaces()
self.last_auto_indent = (cursor_before - nspaces_removed,
cursor_after - nspaces_removed)
self.document_did_change()
def run_pygments_highlighter(self):
"""Run pygments highlighter."""
if isinstance(self.highlighter, sh.PygmentsSH):
self.highlighter.make_charlist()
def get_pattern_at(self, coordinates):
"""
Return key, text and cursor for pattern (if found at coordinates).
"""
return self.get_pattern_cursor_at(self.highlighter.patterns,
coordinates)
def get_pattern_cursor_at(self, pattern, coordinates):
"""
Find pattern located at the line where the coordinate is located.
This returns the actual match and the cursor that selects the text.
"""
cursor, key, text = None, None, None
break_loop = False
# Check if the pattern is in line
line = self.get_line_at(coordinates)
match = pattern.search(line)
while match:
for key, value in list(match.groupdict().items()):
if value:
start, end = sh.get_span(match)
# Get cursor selection if pattern found
cursor = self.cursorForPosition(coordinates)
cursor.movePosition(QTextCursor.StartOfBlock)
line_start_position = cursor.position()
cursor.setPosition(line_start_position + start,
cursor.MoveAnchor)
start_rect = self.cursorRect(cursor)
cursor.setPosition(line_start_position + end,
cursor.MoveAnchor)
end_rect = self.cursorRect(cursor)
bounding_rect = start_rect.united(end_rect)
# Check coordinates are located within the selection rect
if bounding_rect.contains(coordinates):
text = line[start:end]
cursor.setPosition(line_start_position + start,
cursor.KeepAnchor)
break_loop = True
break
if break_loop:
break
match = pattern.search(line, end)
return key, text, cursor
def _preprocess_file_uri(self, uri):
"""Format uri to conform to absolute or relative file paths."""
fname = uri.replace('file://', '')
if fname[-1] == '/':
fname = fname[:-1]
# ^/ is used to denote the current project root
if fname.startswith("^/"):
if self.current_project_path is not None:
fname = osp.join(self.current_project_path, fname[2:])
else:
fname = fname.replace("^/", "~/")
if fname.startswith("~/"):
fname = osp.expanduser(fname)
dirname = osp.dirname(osp.abspath(self.filename))
if osp.isdir(dirname):
if not osp.isfile(fname):
# Maybe relative
fname = osp.join(dirname, fname)
self.sig_file_uri_preprocessed.emit(fname)
return fname
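    # Illustrative example (not part of the original source): a hovered
    # "file://^/README.md" resolves to <project root>/README.md when a
    # project is active and to ~/README.md otherwise, while a plain relative
    # name that does not already exist as given is joined with the directory
    # of the file being edited before sig_file_uri_preprocessed is emitted.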
def _handle_goto_definition_event(self, pos):
"""Check if goto definition can be applied and apply highlight."""
text = self.get_word_at(pos)
if text and not sourcecode.is_keyword(to_text_string(text)):
if not self.__cursor_changed:
QApplication.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.__cursor_changed = True
cursor = self.cursorForPosition(pos)
cursor.select(QTextCursor.WordUnderCursor)
self.clear_extra_selections('ctrl_click')
self.highlight_selection(
'ctrl_click', cursor, update=True,
foreground_color=self.ctrl_click_color,
underline_color=self.ctrl_click_color,
underline_style=QTextCharFormat.SingleUnderline)
return True
else:
return False
def _handle_goto_uri_event(self, pos):
"""Check if go to uri can be applied and apply highlight."""
key, pattern_text, cursor = self.get_pattern_at(pos)
if key and pattern_text and cursor:
self._last_hover_pattern_key = key
self._last_hover_pattern_text = pattern_text
color = self.ctrl_click_color
if key in ['file']:
fname = self._preprocess_file_uri(pattern_text)
if not osp.isfile(fname):
color = QColor(SpyderPalette.COLOR_ERROR_2)
self.clear_extra_selections('ctrl_click')
self.highlight_selection(
'ctrl_click', cursor, update=True,
foreground_color=color,
underline_color=color,
underline_style=QTextCharFormat.SingleUnderline)
if not self.__cursor_changed:
QApplication.setOverrideCursor(
QCursor(Qt.PointingHandCursor))
self.__cursor_changed = True
self.sig_uri_found.emit(pattern_text)
return True
else:
self._last_hover_pattern_key = key
self._last_hover_pattern_text = pattern_text
return False
def go_to_uri_from_cursor(self, uri):
"""Go to url from cursor and defined hover patterns."""
key = self._last_hover_pattern_key
full_uri = uri
if key in ['file']:
fname = self._preprocess_file_uri(uri)
if osp.isfile(fname) and encoding.is_text_file(fname):
# Open in editor
self.go_to_definition.emit(fname, 0, 0)
else:
# Use external program
fname = file_uri(fname)
start_file(fname)
elif key in ['mail', 'url']:
if '@' in uri and not uri.startswith('mailto:'):
full_uri = 'mailto:' + uri
quri = QUrl(full_uri)
QDesktopServices.openUrl(quri)
elif key in ['issue']:
# Issue URI
repo_url = uri.replace('#', '/issues/')
if uri.startswith(('gh-', 'bb-', 'gl-')):
number = uri[3:]
remotes = get_git_remotes(self.filename)
remote = remotes.get('upstream', remotes.get('origin'))
if remote:
full_uri = remote_to_url(remote) + '/issues/' + number
else:
full_uri = None
elif uri.startswith('gh:') or ':' not in uri:
# Github
repo_and_issue = repo_url
if uri.startswith('gh:'):
repo_and_issue = repo_url[3:]
full_uri = 'https://github.com/' + repo_and_issue
elif uri.startswith('gl:'):
# Gitlab
full_uri = 'https://gitlab.com/' + repo_url[3:]
elif uri.startswith('bb:'):
# Bitbucket
full_uri = 'https://bitbucket.org/' + repo_url[3:]
if full_uri:
quri = QUrl(full_uri)
QDesktopServices.openUrl(quri)
else:
QMessageBox.information(
self,
_('Information'),
_('This file is not part of a local repository or '
'upstream/origin remotes are not defined!'),
QMessageBox.Ok,
)
self.hide_tooltip()
return full_uri
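    # Illustrative example (not part of the original source): an issue URI
    # such as "gh:spyder-ide/spyder#123" opens
    # https://github.com/spyder-ide/spyder/issues/123, while the short form
    # "gh-123" is resolved against the upstream/origin git remote of the
    # current file; if no remote is defined, an information dialog is shown
    # instead.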
def line_range(self, position):
"""
Get line range from position.
"""
if position is None:
return None
if position >= self.document().characterCount():
return None
# Check if still on the line
cursor = self.textCursor()
cursor.setPosition(position)
line_range = (cursor.block().position(),
cursor.block().position()
+ cursor.block().length() - 1)
return line_range
def strip_trailing_spaces(self):
"""
Strip trailing spaces if needed.
Remove trailing whitespace on leaving a non-string line containing it.
Return the number of removed spaces.
"""
if not running_under_pytest():
if not self.hasFocus():
# Avoid problem when using split editor
return 0
# Update current position
current_position = self.textCursor().position()
last_position = self.last_position
self.last_position = current_position
if self.skip_rstrip:
return 0
line_range = self.line_range(last_position)
if line_range is None:
# Doesn't apply
return 0
def pos_in_line(pos):
"""Check if pos is in last line."""
if pos is None:
return False
return line_range[0] <= pos <= line_range[1]
if pos_in_line(current_position):
# Check if still on the line
return 0
# Check if end of line in string
cursor = self.textCursor()
cursor.setPosition(line_range[1])
if (not self.strip_trailing_spaces_on_modify
or self.in_string(cursor=cursor)):
if self.last_auto_indent is None:
return 0
elif (self.last_auto_indent !=
self.line_range(self.last_auto_indent[0])):
# line not empty
self.last_auto_indent = None
return 0
line_range = self.last_auto_indent
self.last_auto_indent = None
elif not pos_in_line(self.last_change_position):
# Should process if pressed return or made a change on the line:
return 0
cursor.setPosition(line_range[0])
cursor.setPosition(line_range[1],
QTextCursor.KeepAnchor)
# remove spaces on the right
text = cursor.selectedText()
strip = text.rstrip()
# I think all the characters we can strip are in a single QChar.
# Therefore there shouldn't be any length problems.
N_strip = qstring_length(text[len(strip):])
if N_strip > 0:
# Select text to remove
cursor.setPosition(line_range[1] - N_strip)
cursor.setPosition(line_range[1],
QTextCursor.KeepAnchor)
cursor.removeSelectedText()
self.document_did_change()
# Correct last change position
self.last_change_position = line_range[1]
self.last_position = self.textCursor().position()
return N_strip
return 0
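    # Annotation (not in the original source): stripping is skipped when the
    # editor has lost focus (except under pytest), while self.skip_rstrip is
    # set (e.g. by clear_all_output above), and when the end of the line lies
    # inside a string, so trailing whitespace in docstrings is normally
    # preserved unless the line only contains automatic indentation.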
def move_line_up(self):
"""Move up current line or selected text"""
self.__move_line_or_selection(after_current_line=False)
def move_line_down(self):
"""Move down current line or selected text"""
self.__move_line_or_selection(after_current_line=True)
def __move_line_or_selection(self, after_current_line=True):
cursor = self.textCursor()
# Unfold any folded code block before moving lines up/down
folding_panel = self.panels.get('FoldingPanel')
fold_start_line = cursor.blockNumber() + 1
block = cursor.block().next()
if fold_start_line in folding_panel.folding_status:
fold_status = folding_panel.folding_status[fold_start_line]
if fold_status:
folding_panel.toggle_fold_trigger(block)
if after_current_line:
# Unfold any folded region when moving lines down
fold_start_line = cursor.blockNumber() + 2
block = cursor.block().next().next()
if fold_start_line in folding_panel.folding_status:
fold_status = folding_panel.folding_status[fold_start_line]
if fold_status:
folding_panel.toggle_fold_trigger(block)
else:
# Unfold any folded region when moving lines up
block = cursor.block()
offset = 0
if self.has_selected_text():
((selection_start, _),
(selection_end)) = self.get_selection_start_end()
if selection_end != selection_start:
offset = 1
fold_start_line = block.blockNumber() - 1 - offset
# Find the innermost code folding region for the current position
enclosing_regions = sorted(list(
folding_panel.current_tree[fold_start_line]))
folding_status = folding_panel.folding_status
if len(enclosing_regions) > 0:
for region in enclosing_regions:
fold_start_line = region.begin
block = self.document().findBlockByNumber(fold_start_line)
if fold_start_line in folding_status:
fold_status = folding_status[fold_start_line]
if fold_status:
folding_panel.toggle_fold_trigger(block)
self._TextEditBaseWidget__move_line_or_selection(
after_current_line=after_current_line)
def mouseMoveEvent(self, event):
"""Underline words when pressing <CONTROL>"""
# Restart timer every time the mouse is moved
# This is needed to correctly handle hover hints with a delay
self._timer_mouse_moving.start()
pos = event.pos()
self._last_point = pos
alt = event.modifiers() & Qt.AltModifier
ctrl = event.modifiers() & Qt.ControlModifier
if alt:
self.sig_alt_mouse_moved.emit(event)
event.accept()
return
if ctrl:
if self._handle_goto_uri_event(pos):
event.accept()
return
if self.has_selected_text():
TextEditBaseWidget.mouseMoveEvent(self, event)
return
if self.go_to_definition_enabled and ctrl:
if self._handle_goto_definition_event(pos):
event.accept()
return
if self.__cursor_changed:
self._restore_editor_cursor_and_selections()
else:
if (not self._should_display_hover(pos)
and not self.is_completion_widget_visible()):
self.hide_tooltip()
TextEditBaseWidget.mouseMoveEvent(self, event)
def setPlainText(self, txt):
"""
Extends setPlainText to emit the new_text_set signal.
:param txt: The new text to set.
"""
super(CodeEditor, self).setPlainText(txt)
self.new_text_set.emit()
def focusOutEvent(self, event):
"""Extend Qt method"""
self.sig_focus_changed.emit()
self._restore_editor_cursor_and_selections()
super(CodeEditor, self).focusOutEvent(event)
def focusInEvent(self, event):
formatting_enabled = getattr(self, 'formatting_enabled', False)
self.sig_refresh_formatting.emit(formatting_enabled)
super(CodeEditor, self).focusInEvent(event)
def leaveEvent(self, event):
"""Extend Qt method"""
self.sig_leave_out.emit()
self._restore_editor_cursor_and_selections()
TextEditBaseWidget.leaveEvent(self, event)
def mousePressEvent(self, event):
"""Override Qt method."""
self.hide_tooltip()
self.kite_call_to_action.handle_mouse_press(event)
ctrl = event.modifiers() & Qt.ControlModifier
alt = event.modifiers() & Qt.AltModifier
pos = event.pos()
self._mouse_left_button_pressed = event.button() == Qt.LeftButton
if event.button() == Qt.LeftButton and ctrl:
TextEditBaseWidget.mousePressEvent(self, event)
cursor = self.cursorForPosition(pos)
uri = self._last_hover_pattern_text
if uri:
self.go_to_uri_from_cursor(uri)
else:
self.go_to_definition_from_cursor(cursor)
elif event.button() == Qt.LeftButton and alt:
self.sig_alt_left_mouse_pressed.emit(event)
else:
TextEditBaseWidget.mousePressEvent(self, event)
def mouseReleaseEvent(self, event):
"""Override Qt method."""
if event.button() == Qt.LeftButton:
self._mouse_left_button_pressed = False
self.request_cursor_event()
TextEditBaseWidget.mouseReleaseEvent(self, event)
def contextMenuEvent(self, event):
"""Reimplement Qt method"""
nonempty_selection = self.has_selected_text()
self.copy_action.setEnabled(nonempty_selection)
self.cut_action.setEnabled(nonempty_selection)
self.clear_all_output_action.setVisible(self.is_json() and
nbformat is not None)
self.ipynb_convert_action.setVisible(self.is_json() and
nbformat is not None)
self.run_cell_action.setVisible(self.is_python_or_ipython())
self.run_cell_and_advance_action.setVisible(self.is_python_or_ipython())
self.run_selection_action.setVisible(self.is_python_or_ipython())
self.re_run_last_cell_action.setVisible(self.is_python_or_ipython())
self.gotodef_action.setVisible(self.go_to_definition_enabled)
formatter = CONF.get(
'completions',
('provider_configuration', 'lsp', 'values', 'formatting'),
''
)
self.format_action.setText(_(
'Format file or selection with {0}').format(
formatter.capitalize()))
# Check if a docstring is writable
writer = self.writer_docstring
writer.line_number_cursor = self.get_line_number_at(event.pos())
result = writer.get_function_definition_from_first_line()
if result:
self.docstring_action.setEnabled(True)
else:
self.docstring_action.setEnabled(False)
# Code duplication go_to_definition_from_cursor and mouse_move_event
cursor = self.textCursor()
text = to_text_string(cursor.selectedText())
if len(text) == 0:
cursor.select(QTextCursor.WordUnderCursor)
text = to_text_string(cursor.selectedText())
self.undo_action.setEnabled(self.document().isUndoAvailable())
self.redo_action.setEnabled(self.document().isRedoAvailable())
menu = self.menu
if self.isReadOnly():
menu = self.readonly_menu
menu.popup(event.globalPos())
event.accept()
def _restore_editor_cursor_and_selections(self):
"""Restore the cursor and extra selections of this code editor."""
if self.__cursor_changed:
self.__cursor_changed = False
QApplication.restoreOverrideCursor()
self.clear_extra_selections('ctrl_click')
self._last_hover_pattern_key = None
self._last_hover_pattern_text = None
#------ Drag and drop
def dragEnterEvent(self, event):
"""
Reimplemented Qt method.
Inform Qt about the types of data that the widget accepts.
"""
logger.debug("dragEnterEvent was received")
all_urls = mimedata2url(event.mimeData())
if all_urls:
# Let the parent widget handle this
logger.debug("Let the parent widget handle this dragEnterEvent")
event.ignore()
else:
logger.debug("Call TextEditBaseWidget dragEnterEvent method")
TextEditBaseWidget.dragEnterEvent(self, event)
def dropEvent(self, event):
"""
Reimplemented Qt method.
Unpack dropped data and handle it.
"""
logger.debug("dropEvent was received")
if mimedata2url(event.mimeData()):
logger.debug("Let the parent widget handle this")
event.ignore()
else:
logger.debug("Call TextEditBaseWidget dropEvent method")
TextEditBaseWidget.dropEvent(self, event)
self.document_did_change()
#------ Paint event
def paintEvent(self, event):
"""Overrides paint event to update the list of visible blocks"""
self.update_visible_blocks(event)
TextEditBaseWidget.paintEvent(self, event)
self.painted.emit(event)
def update_visible_blocks(self, event):
"""Update the list of visible blocks/lines position"""
self.__visible_blocks[:] = []
block = self.firstVisibleBlock()
blockNumber = block.blockNumber()
top = int(self.blockBoundingGeometry(block).translated(
self.contentOffset()).top())
bottom = top + int(self.blockBoundingRect(block).height())
ebottom_bottom = self.height()
while block.isValid():
visible = bottom <= ebottom_bottom
if not visible:
break
if block.isVisible():
self.__visible_blocks.append((top, blockNumber+1, block))
block = block.next()
top = bottom
bottom = top + int(self.blockBoundingRect(block).height())
blockNumber = block.blockNumber()
def _draw_editor_cell_divider(self):
"""Draw a line on top of a define cell"""
if self.supported_cell_language:
cell_line_color = self.comment_color
painter = QPainter(self.viewport())
pen = painter.pen()
pen.setStyle(Qt.SolidLine)
pen.setBrush(cell_line_color)
painter.setPen(pen)
for top, line_number, block in self.visible_blocks:
if is_cell_header(block):
painter.drawLine(4, top, self.width(), top)
@property
def visible_blocks(self):
"""
Returns the list of visible blocks.
Each element in the list is a tuple made up of the line top position,
the line number (already 1 based), and the QTextBlock itself.
:return: A list of tuple(top position, line number, block)
:rtype: List of tuple(int, int, QtGui.QTextBlock)
"""
return self.__visible_blocks
def is_editor(self):
return True
def popup_docstring(self, prev_text, prev_pos):
"""Show the menu for generating docstring."""
line_text = self.textCursor().block().text()
if line_text != prev_text:
return
if prev_pos != self.textCursor().position():
return
writer = self.writer_docstring
if writer.get_function_definition_from_below_last_line():
point = self.cursorRect().bottomRight()
point = self.calculate_real_position(point)
point = self.mapToGlobal(point)
self.menu_docstring = QMenuOnlyForEnter(self)
self.docstring_action = create_action(
self, _("Generate docstring"), icon=ima.icon('TextFileIcon'),
triggered=writer.write_docstring)
self.menu_docstring.addAction(self.docstring_action)
self.menu_docstring.setActiveAction(self.docstring_action)
self.menu_docstring.popup(point)
def delayed_popup_docstring(self):
"""Show context menu for docstring.
        This method is called after typing '''. It then waits 300 ms and, if
        no further input arrives during that time, shows the context menu.
"""
line_text = self.textCursor().block().text()
pos = self.textCursor().position()
timer = QTimer()
timer.singleShot(300, lambda: self.popup_docstring(line_text, pos))
def set_current_project_path(self, root_path=None):
"""
Set the current active project root path.
Parameters
----------
root_path: str or None, optional
Path to current project root path. Default is None.
"""
self.current_project_path = root_path
def count_leading_empty_lines(self, cell):
"""Count the number of leading empty cells."""
lines = cell.splitlines(keepends=True)
if not lines:
return 0
for i, line in enumerate(lines):
if line and not line.isspace():
return i
return len(lines)
def ipython_to_python(self, code):
"""Transform IPython code to python code."""
tm = TransformerManager()
number_empty_lines = self.count_leading_empty_lines(code)
try:
code = tm.transform_cell(code)
except SyntaxError:
return code
return '\n' * number_empty_lines + code
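    # Illustrative example (assumption: IPython's TransformerManager behaves
    # as in recent IPython releases): a cell such as "%matplotlib inline" is
    # rewritten to the equivalent get_ipython().run_line_magic(...) call, and
    # the leading blank lines counted above are re-added so line numbers
    # still match the original cell.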
def is_letter_or_number(self, char):
"""
Returns whether the specified unicode character is a letter or a
number.
"""
cat = category(char)
return cat.startswith('L') or cat.startswith('N')
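    # Illustrative example (not part of the original source):
    # category('a') == 'Ll' and category('7') == 'Nd', so both count as word
    # characters here, whereas '_' falls in category 'Pc' and returns False.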
# =============================================================================
# Editor + Class browser test
# =============================================================================
class TestWidget(QSplitter):
def __init__(self, parent):
QSplitter.__init__(self, parent)
self.editor = CodeEditor(self)
self.editor.setup_editor(linenumbers=True, markers=True, tab_mode=False,
font=QFont("Courier New", 10),
show_blanks=True, color_scheme='Zenburn')
self.addWidget(self.editor)
self.setWindowIcon(ima.icon('spyder'))
def load(self, filename):
self.editor.set_text_from_file(filename)
self.setWindowTitle("%s - %s (%s)" % (_("Editor"),
osp.basename(filename),
osp.dirname(filename)))
self.editor.hide_tooltip()
def test(fname):
from spyder.utils.qthelpers import qapplication
app = qapplication(test_time=5)
win = TestWidget(None)
win.show()
win.load(fname)
win.resize(900, 700)
sys.exit(app.exec_())
if __name__ == '__main__':
if len(sys.argv) > 1:
fname = sys.argv[1]
else:
fname = __file__
test(fname)
| 41.233145
| 91
| 0.580793
|
17bcbf8ffe4bb48c0b31afd03cda79acf0cde04e
| 1,223
|
py
|
Python
|
petstagram/accounts/managers.py
|
ivanoff-ivan/petstagram
|
6ad2fa038843c6e2011fd9242e3edec857f48707
|
[
"MIT"
] | null | null | null |
petstagram/accounts/managers.py
|
ivanoff-ivan/petstagram
|
6ad2fa038843c6e2011fd9242e3edec857f48707
|
[
"MIT"
] | null | null | null |
petstagram/accounts/managers.py
|
ivanoff-ivan/petstagram
|
6ad2fa038843c6e2011fd9242e3edec857f48707
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import base_user as auth_base
from django.contrib.auth.hashers import make_password
class PetstagramUserManager(auth_base.BaseUserManager):
def _create_user(self, username, password, **extra_fields):
if not username:
raise ValueError('The given username must be set')
user = self.model(username=username, **extra_fields)
user.password = make_password(password)
user.save(using=self._db)
return user
def create_user(self, username, password=None, **extra_fields):
extra_fields.setdefault('is_staff', False)
extra_fields.setdefault('is_superuser', False)
return self._create_user(username, password, **extra_fields)
def create_superuser(self, username, password=None, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True.')
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(username, password, **extra_fields)
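# Illustrative usage (not part of the original file; assumes a custom user
# model whose default manager is PetstagramUserManager):
#     UserModel.objects.create_user('ivan', password='secret')
#     UserModel.objects.create_superuser('admin', password='secret')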
| 42.172414
| 72
| 0.705642
|
3ae3bdc6fdc89bc015a61bcf9d7874c9b7628332
| 33,498
|
py
|
Python
|
viper/modules/misp.py
|
Mario-Kart-Felix/mal-scrap
|
bc396a15ea5b144eb1c0f05759d1f9419d6671df
|
[
"BSD-3-Clause"
] | 2
|
2015-12-17T20:25:09.000Z
|
2017-10-08T19:14:57.000Z
|
viper/modules/misp.py
|
Mario-Kart-Felix/mal-scrap
|
bc396a15ea5b144eb1c0f05759d1f9419d6671df
|
[
"BSD-3-Clause"
] | 1
|
2015-01-05T18:07:13.000Z
|
2015-01-07T21:43:57.000Z
|
viper/modules/misp.py
|
Mario-Kart-Felix/mal-scrap
|
bc396a15ea5b144eb1c0f05759d1f9419d6671df
|
[
"BSD-3-Clause"
] | 3
|
2017-10-18T00:56:53.000Z
|
2020-05-24T09:38:54.000Z
|
# -*- coding: utf-8 -*-
# This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
import argparse
import textwrap
import os
import json
import logging
try:
from pymisp import PyMISP, PyMISPError, MISPEvent
try:
from pymisp import MISPEncode
except ImportError:
from pymisp import EncodeFull as MISPEncode
HAVE_PYMISP = True
except:
HAVE_PYMISP = False
try:
import requests
HAVE_REQUESTS = True
except:
HAVE_REQUESTS = False
from viper.common.abstracts import Module
from viper.core.database import Database
from viper.core.session import __sessions__
from viper.core.project import __project__
from viper.core.storage import get_sample_path
from viper.common.objects import MispEvent
from viper.common.constants import VIPER_ROOT
from viper.core.config import __config__
log = logging.getLogger('viper')
cfg = __config__
cfg.parse_http_client(cfg.misp)
class MISP(Module):
cmd = 'misp'
description = 'Upload and query IOCs to/from a MISP instance'
authors = ['Raphaël Vinot']
from .misp_methods import admin # noqa
from .misp_methods import create_event # noqa
from .misp_methods import download # noqa
from .misp_methods import check_hashes, _prepare_attributes, _populate # noqa
from .misp_methods import store, _get_local_events # noqa
from .misp_methods import tag # noqa
from .misp_methods import galaxies # noqa
from .misp_methods import version # noqa
from .misp_methods import open_samples, _load_tmp_samples, _display_tmp_files, _clean_tmp_samples # noqa
from .misp_methods import add, add_hashes, _check_add, _change_event # noqa
def __init__(self):
super(MISP, self).__init__()
self.cur_path = __project__.get_path()
self.parser.add_argument("--url", help='URL of the MISP instance')
self.parser.add_argument("--off", action='store_true', help='Use offline (can only work on pre-downloaded events)')
self.parser.add_argument("--on", action='store_true', help='Switch to online mode')
self.parser.add_argument("-k", "--key", help='Your key on the MISP instance')
self.parser.add_argument("-v", "--verify", action='store_false', help='Disable certificate verification (for self-signed)')
subparsers = self.parser.add_subparsers(dest='subname')
# ##### Upload sample to MISP #####
parser_up = subparsers.add_parser('upload', help='Send malware sample to MISP.',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''
Distribution levels:
* 0: Your organisation only
* 1: This community only
* 2: Connected communities
* 3: All communities
* 5: Inherit
Sample categories:
* 0: Payload delivery
* 1: Artifacts dropped
* 2: Payload installation
* 3: External analysis
Analysis levels:
* 0: Initial
* 1: Ongoing
* 2: Completed
Threat levels:
* 0: High
* 1: Medium
* 2: Low
* 3: Undefined
'''))
parser_up.add_argument("-e", "--event", type=int, help="Event ID to update. If None, and you're not connected to a MISP event a new one is created.")
parser_up.add_argument("-d", "--distrib", type=int, choices=[0, 1, 2, 3, 5], help="Distribution of the attributes for the new event.")
parser_up.add_argument("-s", "--sharing", type=int, help="Sharing group ID when distribution is set to 4.")
parser_up.add_argument("-ids", action='store_true', help="Is eligible for automatically creating IDS signatures.")
parser_up.add_argument("-c", "--categ", type=int, choices=[0, 1, 2, 3], default=1, help="Category of the samples.")
parser_up.add_argument("-i", "--info", nargs='+', help="Event info field of a new event.")
parser_up.add_argument("-o", "--comment", nargs='+', help="Comment associated to the sample.")
parser_up.add_argument("-a", "--analysis", type=int, choices=[0, 1, 2], help="Analysis level a new event.")
parser_up.add_argument("-t", "--threat", type=int, choices=[0, 1, 2, 3], help="Threat level of a new event.")
# ##### Download samples from event #####
parser_down = subparsers.add_parser('download', help='Download malware samples from MISP.')
group = parser_down.add_mutually_exclusive_group()
group.add_argument("-e", "--event", type=int, help="Download all the samples related to this event ID.")
group.add_argument("-l", "--list", nargs='*', help="Download all the samples related to a list of events. Empty list to download all the samples of all the events stored in the current project.") # noqa
group.add_argument("--hash", help="Download the sample related to this hash (only MD5).")
# ##### Search in MISP #####
parser_search = subparsers.add_parser('search', help='Search in all the attributes.')
parser_search.add_argument("query", nargs='*', help="String to search (if empty, search the hashes of the current file).")
# ##### Check hashes on VT #####
parser_checkhashes = subparsers.add_parser('check_hashes', help='Crosscheck hashes on VT.')
parser_checkhashes.add_argument("event", nargs='?', default=None, type=int, help="Lookup all the hashes of an event on VT.")
parser_checkhashes.add_argument("-p", "--populate", action='store_true', help="Automatically populate event with hashes found on VT.")
# ##### Download Yara rules #####
parser_checkhashes = subparsers.add_parser('yara', help='Get YARA rules of an event.')
parser_checkhashes.add_argument("event", nargs='?', default=None, type=int, help="Download the yara rules of that event.")
# ##### Get Events #####
parser_pull = subparsers.add_parser('pull', help='Initialize the session with an existing MISP event.')
parser_pull.add_argument("event", nargs='+', type=int, help="(List of) Event(s) ID.")
# ##### Create an Event #####
parser_create_event = subparsers.add_parser('create_event', help='Create a new event on MISP and initialize the session with it.',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''
Distribution levels:
* 0: Your organisation only
* 1: This community only
* 2: Connected communities
* 3: All communities
* 4: Sharing group
Sharing Group:
* #: ID of sharing group
Analysis levels:
* 0: Initial
* 1: Ongoing
* 2: Completed
Threat levels:
* 0: High
* 1: Medium
* 2: Low
* 3: Undefined
'''))
parser_create_event.add_argument("-d", "--distrib", type=int, choices=[0, 1, 2, 3, 4], help="Distribution of the attributes for the new event.")
parser_create_event.add_argument("-s", "--sharing", type=int, help="Sharing group ID when distribution is set to 4.")
parser_create_event.add_argument("-t", "--threat", type=int, choices=[0, 1, 2, 3], help="Threat level of a new event.")
parser_create_event.add_argument("-a", "--analysis", type=int, choices=[0, 1, 2], help="Analysis level a new event.")
parser_create_event.add_argument("-i", "--info", required=True, nargs='+', help="Event info field of a new event.")
parser_create_event.add_argument("--date", help="Date of the event. (Default: today).")
# ##### Add Hashes #####
h = subparsers.add_parser("add_hashes", help="If no parameters, add all the hashes of the current session.")
h.add_argument("-f", "--filename", help="Filename")
h.add_argument("-m", "--md5", help="MD5")
h.add_argument("-s", "--sha1", help="SHA1")
h.add_argument("-a", "--sha256", help="SHA256")
# ##### Add attributes #####
parser_add = subparsers.add_parser('add', help='Add attributes to an existing MISP event.')
subparsers_add = parser_add.add_subparsers(dest='add')
# Hashes
# Generic add
temp_me = MISPEvent()
if hasattr(temp_me, "types"):
known_types = temp_me.types
else:
# New API
known_types = temp_me.get_known_types()
for t in known_types:
sp = subparsers_add.add_parser(t, help="Add {} to the event.".format(t))
sp.add_argument(t, nargs='+')
# ##### Show attributes #####
subparsers.add_parser('show', help='Show attributes of an existing MISP event.')
# ##### Open file #####
o = subparsers.add_parser('open', help='Open a sample from the temp directory.')
ox = o.add_mutually_exclusive_group(required=True)
ox.add_argument("-l", "--list", action='store_true', help="List available files")
ox.add_argument("-d", "--delete", help="Delete temporary files (use 'all' to remove all the local samples or an Event ID to only remove the associated samples)")
ox.add_argument("sid", nargs='?', type=int, help='Sample ID to open (from the list option).')
# ##### Publish an event #####
subparsers.add_parser('publish', help='Publish an existing MISP event.')
# ##### Show version #####
subparsers.add_parser('version', help='Returns the version of the MISP instance.')
# Store
s = subparsers.add_parser('store', help='Store the current MISP event in the current project.')
s.add_argument("-l", "--list", action='store_true', help="List stored MISP events")
s.add_argument("-u", "--update", action='store_true', help="Update all stored MISP events")
s.add_argument("-s", "--sync", action='store_true', help="Sync all MISP Events with the remote MISP instance")
s.add_argument("-d", "--delete", type=int, help="Delete a stored MISP event")
s.add_argument("-o", "--open", help="Open a stored MISP event")
# Tags
s = subparsers.add_parser('tag', help='Tag management using MISP taxonomies.')
s.add_argument("-l", "--list", action='store_true', help="List Existing taxonomies.")
s.add_argument("-d", "--details", help="Display all values of a taxonomy.")
s.add_argument("-s", "--search", help="Search all tags matching a value.")
s.add_argument("-e", "--event", help="Add tag to the current event.")
s.add_argument("-a", "--attribute", nargs='+', help="Add tag to an attribute of the current event. Syntax: <identifier for the attribute> <machinetag>")
# Galaxies
s = subparsers.add_parser('galaxies', help='Use misp-galaxy with PyMISPGalaxies.')
s.add_argument("-l", "--list", action='store_true', help="List existing galaxies.")
s.add_argument("-d", "--details", help="Display all values of a galaxy.")
s.add_argument("-v", "--cluster-value", nargs='+', help="Display all details of a cluster value.")
s.add_argument("-s", "--search", nargs='+', help="Search all galaxies matching a value.")
# Admin
s = subparsers.add_parser('admin', help='Administration options.')
admin_parser = s.add_subparsers(dest='admin')
# Organisation
org = admin_parser.add_parser('org', help="Organisation management.")
subparsers_org = org.add_subparsers(dest='org')
# Get
display = subparsers_org.add_parser('display', help="Display an organisation.")
display.add_argument('id', help='ID of the organisation to display. Use "local" to display all local organisations, "external" for all remote organisations, and "all", for both.')
# Search
search = subparsers_org.add_parser('search', help="Search an organisation by name.")
search.add_argument('name', help='(Partial) name of the organisation.')
search.add_argument('-t', '--type', default='local', choices=['local', 'external', 'all'],
help='Use "local" to search in all local organisations, "external" for remote organisations, and "all", for both.')
# Add
add_org = subparsers_org.add_parser('add', help="Add an organisation.")
add_org.add_argument('name', help='Organisation name.')
add_org.add_argument('-u', '--uuid', default=None, help='UUID of the organisation.')
add_org.add_argument('-d', '--description', default=[], nargs='+', help='Description of the organisation.')
add_org.add_argument('-t', '--type', default=[], nargs='+', help='Type of the organisation.')
add_org.add_argument('-n', '--nationality', default=None, help='Nationality of the organisation.')
add_org.add_argument('-s', '--sector', default=[], nargs='+', help='Sector of the organisation.')
add_org.add_argument('-c', '--contacts', default=[], nargs='+', help='Contact point(s) in the organisation.')
add_org.add_argument('--not-local', default=True, action='store_false', help='**Not** a local organisation.')
# Delete
delete = subparsers_org.add_parser('delete', help="Delete an organisation.")
delete.add_argument('id', help='ID of the organisation to delete.')
# Edit
edit = subparsers_org.add_parser('edit', help="Edit an organisation.")
edit.add_argument('id', help='ID of the organisation to edit.')
edit.add_argument('-n', '--name', help='Organisation name.')
edit.add_argument('-u', '--uuid', help='UUID of the organisation.')
edit.add_argument('-d', '--description', default=[], nargs='+', help='Description of the organisation.')
edit.add_argument('-t', '--type', default=[], nargs='+', help='Type of the organisation.')
edit.add_argument('--nationality', help='Nationality of the organisation.')
edit.add_argument('-s', '--sector', default=[], nargs='+', help='Sector of the organisation.')
edit.add_argument('-c', '--contacts', default=[], nargs='+', help='Contact point(s) in the organisation.')
edit.add_argument('--not-local', default=True, action='store_false', help='**Not** a local organisation.')
# User
user = admin_parser.add_parser('user', help="User management.")
subparsers_user = user.add_subparsers(dest='user')
# Get
display = subparsers_user.add_parser('display', help="Display a user.")
display.add_argument('id', help='ID of the user to display. Use "all" to display all users.')
# Search
search = subparsers_user.add_parser('search', help="Search a user by email.")
search.add_argument('name', help='(Partial) email of the user.')
# Add
add_usr = subparsers_user.add_parser('add', help="Add a user.")
add_usr.add_argument('email', help='User email address.')
add_usr.add_argument('-o', '--org-id', default=None, help='Organisation ID of the user.')
add_usr.add_argument('-r', '--role-id', default=None, help='Role of the user')
add_usr.add_argument('-g', '--gpgkey', default=None, help='Path to the GPG public key export')
add_usr.add_argument('-c', '--change-pw', default=None, action='store_true', help='Force changing the password after next login')
add_usr.add_argument('-t', '--termsaccepted', default=None, action='store_true', help='Set the TOC to accepted')
add_usr.add_argument('-p', '--password', default=None, help='Set a new password')
add_usr.add_argument('-d', '--disabled', default=None, action='store_true', help='Disable the account')
# Delete
delete = subparsers_user.add_parser('delete', help="Delete a user.")
delete.add_argument('id', help='ID of the user to delete.')
# Edit
edit = subparsers_user.add_parser('edit', help="Edit a user.")
edit.add_argument('id', help='ID of the user to edit.')
edit.add_argument('-e', '--email', help='User email address.')
edit.add_argument('-o', '--org-id', default=None, help='Organisation ID of the user.')
edit.add_argument('-r', '--role-id', default=None, help='Role of the user')
edit.add_argument('-g', '--gpgkey', default=None, help='Path to the GPG public key export')
edit.add_argument('-c', '--change-pw', default=None, action='store_true', help='Force changing the password after next login')
edit.add_argument('-t', '--termsaccepted', default=None, action='store_true', help='Set the TOC to accepted')
edit.add_argument('-p', '--password', default=None, help='Set a new password')
edit.add_argument('-d', '--disabled', default=None, action='store_true', help='Disable the account')
# Role
role = admin_parser.add_parser('role', help="Role management.")
subparsers_role = role.add_subparsers(dest='role')
# Get
display = subparsers_role.add_parser('display', help="Display all the roles.")
# Search
search = subparsers_role.add_parser('search', help="Search a role by name.")
search.add_argument('name', help='(Partial) name of the role.')
# Tags
t = admin_parser.add_parser('tag', help="Tag management.")
subparsers_tag = t.add_subparsers(dest='tag')
# Get
display = subparsers_tag.add_parser('display', help="Display all the tags.")
# Search
search = subparsers_tag.add_parser('search', help="Search a tag by name.")
search.add_argument('name', help='(Partial) name of the tag.')
self.categories = {0: 'Payload delivery', 1: 'Artifacts dropped', 2: 'Payload installation', 3: 'External analysis'}
# ####### Generic Helpers ########
def _get_eventid(self, quiet=False):
if vars(self.args).get('event'):
return self.args.event
else:
# Get current event ID if possible
if not __sessions__.is_attached_misp(quiet):
return None
return __sessions__.current.misp_event.event.id
def _has_error_message(self, result):
if result.get('errors'):
for message in result['errors']:
self.log('error', message)
return True
elif result.get('error'):
self.log('error', result.get('error'))
return True
return False
def _search_local_hashes(self, event, open_session=True):
local = []
samples_count = 0
if isinstance(event, MISPEvent):
misp_event = event
elif event.get('Event') is None:
self.log('error', event)
return
else:
misp_event = MISPEvent()
misp_event.load(event)
for a in misp_event.attributes:
row = None
if a.type == 'malware-sample':
samples_count += 1
if a.type in ('md5', 'sha1', 'sha256'):
row = Database().find(key=a.type, value=a.value)
elif a.type in ('filename|md5', 'filename|sha1', 'filename|sha256'):
row = Database().find(key=a.type.split('|')[1], value=a.value.split('|')[1])
elif a.type == 'malware-sample':
row = Database().find(key='md5', value=a.value.split('|')[1])
if row:
local.append(row[0])
self.log('info', 'Event {} contains {} samples.'.format(misp_event.id, samples_count))
if not open_session:
return
shas = set([l.sha256 for l in local])
if len(shas) == 1:
__sessions__.new(get_sample_path(shas.pop()), MispEvent(misp_event, self.offline_mode))
elif len(shas) > 1:
self.log('success', 'The following samples are in this viper instance:')
__sessions__.new(misp_event=MispEvent(misp_event, self.offline_mode))
for s in shas:
self.log('item', s)
else:
__sessions__.new(misp_event=MispEvent(misp_event, self.offline_mode))
self.log('info', 'No known (in Viper) samples in that event.')
def _find_related_id(self, event):
if not event.RelatedEvent:
return []
related = [(_event.id, _event.info) for _event in event.RelatedEvent]
to_return = list(set(related))
to_return.sort(key=lambda tup: tup[0])
return to_return
def _dump(self, event=None):
event_path = os.path.join(self.cur_path, 'misp_events')
if not os.path.exists(event_path):
os.makedirs(event_path)
if not event:
to_dump = __sessions__.current.misp_event.event
elif isinstance(event, MISPEvent):
to_dump = event
else:
to_dump = MISPEvent()
to_dump.load(event)
if to_dump.id:
filename = str(to_dump.id)
elif (__sessions__.is_attached_misp(True) and
__sessions__.current.misp_event.current_dump_file):
filename = __sessions__.current.misp_event.current_dump_file
else:
i = 1
while True:
filename = 'new_event_{}.json'.format(i)
if not os.path.exists(os.path.join(event_path, filename)):
break
i += 1
path = os.path.join(event_path, filename)
with open(path, 'w') as f:
json.dump(to_dump, f, cls=MISPEncode)
self.log('success', '{} stored successfully.'.format(filename.rstrip('.json')))
return filename
# ##########################################
def yara(self):
if self.offline_mode:
self.log('error', 'Offline mode, unable to get yara rules')
return
ok = False
data = None
event_id = self._get_eventid()
if event_id is None:
return
ok, data = self.misp.get_yara(event_id)
if not ok:
self.log('error', data)
return
rule_path = os.path.join(VIPER_ROOT, 'data/yara', str(event_id) + '.yara')
if os.path.exists(rule_path):
self.log('error', 'File {} already exists.'.format(rule_path))
return
with open(rule_path, 'wb') as f:
f.write(data.encode('utf-8'))
self.log('success', 'The yara rules of event {} have been downloaded: {}'.format(event_id, rule_path))
def upload(self):
if self.offline_mode:
self.log('error', 'Offline mode, unable to upload a sample')
return
categ = self.categories.get(self.args.categ)
if self.args.info is not None:
info = ' '.join(self.args.info)
else:
info = None
if self.args.comment is not None:
comment = ' '.join(self.args.comment)
else:
comment = None
# No need to check the output: if the event_id is None, we create a new one.
event_id = self._get_eventid(True)
try:
result = self.misp.upload_sample(__sessions__.current.file.name, __sessions__.current.file.path,
event_id, self.args.distrib, self.args.ids, categ, info, comment,
self.args.analysis, self.args.threat)
except Exception as e:
self.log('error', e)
return
if not self._has_error_message(result):
self.log('success', "File uploaded successfully")
if event_id is None:
event_id = result['id']
full_event = self.misp.get(event_id)
if not self._has_error_message(full_event):
return __sessions__.new(misp_event=MispEvent(full_event, self.offline_mode))
def searchall(self):
if self.args.query:
self._search(' '.join(self.args.query))
else:
if not __sessions__.is_attached_file(True):
self.log('error', "Not attached to a file, nothing to search for.")
return False
to_search = [__sessions__.current.file.md5, __sessions__.current.file.sha1, __sessions__.current.file.sha256]
for q in to_search:
self._search(q)
def _search(self, query):
if self.offline_mode:
self.log('error', 'Offline mode, unable to search')
return
result = self.misp.search_all(query)
if self._has_error_message(result):
return
self.log('success', '{} matches on the following events:'.format(query))
for e in result['response']:
nb_samples = 0
nb_hashes = 0
me = MISPEvent()
me.load(e)
for a in me.attributes:
if a.type == 'malware-sample':
nb_samples += 1
if a.type in ('md5', 'sha1', 'sha256', 'filename|md5', 'filename|sha1', 'filename|sha256'):
nb_hashes += 1
self.log('item', '{} ({} samples, {} hashes) - {}{}{}'.format(me.info, nb_samples, nb_hashes, self.url, '/events/view/', me.id))
def pull(self):
if self.offline_mode:
self.log('error', 'Offline mode, unable to pull a remote event')
return
open_session = len(self.args.event) == 1
for e in self.args.event:
event = self.misp.get(e)
if not self._has_error_message(event):
self._search_local_hashes(event, open_session)
self._dump(event)
def publish(self):
__sessions__.current.misp_event.event.publish()
if self.offline_mode:
self._dump()
else:
event = self.misp.update(__sessions__.current.misp_event.event._json())
if not self._has_error_message(event):
self.log('success', 'Event {} published.'.format(event['Event']['id']))
__sessions__.new(misp_event=MispEvent(event, self.offline_mode))
def show(self):
current_event = __sessions__.current.misp_event.event
related = self._find_related_id(current_event)
if len(related) > 0:
self.log('info', 'Related events:')
for r, title in related:
self.log('item', '{}/events/view/{} - {}'.format(self.url.rstrip('/'), r, title))
header = ['type', 'value', 'comment', 'related']
rows = []
for a in current_event.attributes:
# FIXME: this has been removed upstream: https://github.com/MISP/MISP/issues/1793
# Keeping it like that for now, until we decide how to re-enable it
idlist = []
if a.RelatedAttribute:
for r in a.RelatedAttribute:
# idlist.append(r.id)
pass
rows.append([a.type, a.value, '\n'.join(textwrap.wrap(a.comment, 30)), '\n'.join(textwrap.wrap(' '.join(idlist), 15))])
self.log('table', dict(header=header, rows=rows))
if current_event.published:
self.log('info', 'This event has been published')
else:
self.log('info', 'This event has not been published')
if __sessions__.current.misp_event.event.id:
self.log('info', 'Link to Event: {}/events/view/{}'.format(self.url.rstrip('/'), __sessions__.current.misp_event.event.id))
def run(self):
super(MISP, self).run()
if self.args is None:
return
if not HAVE_PYMISP:
self.log('error', "Missing dependency, install pymisp (`pip install pymisp`)")
return
self.offline_mode = False
if self.args.on:
self.offline_mode = False
if __sessions__.is_attached_misp(True):
__sessions__.current.misp_event.off = False
elif self.args.off or (__sessions__.is_attached_misp(True) and
__sessions__.current.misp_event.off):
self.offline_mode = True
if __sessions__.is_attached_misp(True):
__sessions__.current.misp_event.off = True
self.url = self.args.url
if self.url is None:
self.url = cfg.misp.misp_url
if self.url is None:
self.log('error', "This command requires the URL of the MISP instance you want to query.")
return
self.key = self.args.key
if self.key is None:
self.key = cfg.misp.misp_key
if self.key is None:
self.log('error', "This command requires a MISP private API key.")
return
if not self.args.verify:
verify = False
else:
verify = cfg.misp.tls_verify
# Capture default distribution and sharing group settings. Backwards compatibility and empty string check
self.distribution = cfg.misp.get("misp_distribution", None)
self.distribution = None if self.distribution == "" else self.distribution
if type(self.distribution) not in (type(None), int):
self.distribution = None
self.log('info', "The distribution stored in viper config is not an integer, setting to None")
self.sharinggroup = cfg.misp.get("misp_sharinggroup", None)
self.sharinggroup = None if self.sharinggroup == "" else self.sharinggroup
if type(self.sharinggroup) not in (type(None), int):
self.sharinggroup = None
self.log('info', "The sharing group stored in viper config is not an integer, setting to None")
if not self.offline_mode:
try:
self.misp = PyMISP(self.url, self.key, ssl=verify, proxies=cfg.misp.proxies, cert=cfg.misp.cert)
except PyMISPError as e:
self.log('error', e.message)
return
# Require an open MISP session
if self.args.subname in ['add_hashes', 'add', 'show', 'publish'] and not __sessions__.is_attached_misp():
return
# Require an open file session
if self.args.subname in ['upload'] and not __sessions__.is_attached_file():
return
try:
if self.args.subname == 'upload':
self.upload()
elif self.args.subname == 'search':
self.searchall()
elif self.args.subname == 'download':
self.download()
elif self.args.subname == 'check_hashes':
self.check_hashes()
elif self.args.subname == 'yara':
self.yara()
elif self.args.subname == 'pull':
self.pull()
elif self.args.subname == 'create_event':
self.create_event()
elif self.args.subname == 'add':
self.add()
elif self.args.subname == 'add_hashes':
self.add_hashes()
elif self.args.subname == 'show':
self.show()
elif self.args.subname == 'open':
self.open_samples()
elif self.args.subname == 'publish':
self.publish()
elif self.args.subname == 'version':
self.version()
elif self.args.subname == 'store':
self.store()
elif self.args.subname == 'tag':
self.tag()
elif self.args.subname == 'galaxies':
self.galaxies()
elif self.args.subname == 'admin':
self.admin()
else:
self.log('error', "No calls defined for this command.")
except requests.exceptions.HTTPError as e:
self.log('error', e)
| 51.064024
| 211
| 0.570989
|
1051ff4c639fa7e4cf7b18f3439326df08db862c
| 31,350
|
py
|
Python
|
d3rlpy/base.py
|
Mohan-Zhang-u/d3rlpy
|
3ab3c0bbd6f86e73c171a6084f3130d60be85b5f
|
[
"MIT"
] | 565
|
2020-08-01T02:44:28.000Z
|
2022-03-30T15:00:54.000Z
|
d3rlpy/base.py
|
Mohan-Zhang-u/d3rlpy
|
3ab3c0bbd6f86e73c171a6084f3130d60be85b5f
|
[
"MIT"
] | 144
|
2020-08-01T03:45:10.000Z
|
2022-03-30T14:51:16.000Z
|
d3rlpy/base.py
|
Mohan-Zhang-u/d3rlpy
|
3ab3c0bbd6f86e73c171a6084f3130d60be85b5f
|
[
"MIT"
] | 103
|
2020-08-26T13:27:34.000Z
|
2022-03-31T12:24:27.000Z
|
import copy
import json
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from typing import (
Any,
Callable,
DefaultDict,
Dict,
Generator,
List,
Optional,
Sequence,
Tuple,
Union,
)
import gym
import numpy as np
from tqdm.auto import tqdm
from .argument_utility import (
ActionScalerArg,
RewardScalerArg,
ScalerArg,
UseGPUArg,
check_action_scaler,
check_reward_scaler,
check_scaler,
)
from .constants import (
CONTINUOUS_ACTION_SPACE_MISMATCH_ERROR,
DISCRETE_ACTION_SPACE_MISMATCH_ERROR,
IMPL_NOT_INITIALIZED_ERROR,
ActionSpace,
)
from .context import disable_parallel
from .dataset import Episode, MDPDataset, Transition, TransitionMiniBatch
from .decorators import pretty_repr
from .gpu import Device
from .iterators import RandomIterator, RoundIterator, TransitionIterator
from .logger import LOG, D3RLPyLogger
from .models.encoders import EncoderFactory, create_encoder_factory
from .models.optimizers import OptimizerFactory
from .models.q_functions import QFunctionFactory, create_q_func_factory
from .online.utility import get_action_size_from_env
from .preprocessing import (
ActionScaler,
RewardScaler,
Scaler,
create_action_scaler,
create_reward_scaler,
create_scaler,
)
class ImplBase(metaclass=ABCMeta):
@abstractmethod
def save_model(self, fname: str) -> None:
pass
@abstractmethod
def load_model(self, fname: str) -> None:
pass
@property
@abstractmethod
def observation_shape(self) -> Sequence[int]:
pass
@property
@abstractmethod
def action_size(self) -> int:
pass
def _serialize_params(params: Dict[str, Any]) -> Dict[str, Any]:
for key, value in params.items():
if isinstance(value, Device):
params[key] = value.get_id()
elif isinstance(
value,
(
Scaler,
ActionScaler,
RewardScaler,
EncoderFactory,
QFunctionFactory,
),
):
params[key] = {
"type": value.get_type(),
"params": value.get_params(),
}
elif isinstance(value, OptimizerFactory):
params[key] = value.get_params()
return params
def _deseriealize_params(params: Dict[str, Any]) -> Dict[str, Any]:
for key, value in params.items():
if key == "scaler" and params["scaler"]:
scaler_type = params["scaler"]["type"]
scaler_params = params["scaler"]["params"]
scaler = create_scaler(scaler_type, **scaler_params)
params[key] = scaler
elif key == "action_scaler" and params["action_scaler"]:
scaler_type = params["action_scaler"]["type"]
scaler_params = params["action_scaler"]["params"]
action_scaler = create_action_scaler(scaler_type, **scaler_params)
params[key] = action_scaler
elif key == "reward_scaler" and params["reward_scaler"]:
scaler_type = params["reward_scaler"]["type"]
scaler_params = params["reward_scaler"]["params"]
reward_scaler = create_reward_scaler(scaler_type, **scaler_params)
params[key] = reward_scaler
elif "optim_factory" in key:
params[key] = OptimizerFactory(**value)
elif "encoder_factory" in key:
params[key] = create_encoder_factory(
value["type"], **value["params"]
)
elif key == "q_func_factory":
params[key] = create_q_func_factory(
value["type"], **value["params"]
)
return params
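# A hypothetical round-trip sketch (names below are illustrative, not taken
# from the original module): _serialize_params turns scaler/factory objects
# into {"type": ..., "params": ...} dicts, and _deseriealize_params rebuilds
# equivalent objects from those dicts, so that
#
#   restored = _deseriealize_params(_serialize_params({"scaler": some_scaler}))
#
# yields {"scaler": <Scaler instance>} again. This mirroring is what
# from_json relies on when it reads params.json back in.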
@pretty_repr
class LearnableBase:
_batch_size: int
_n_frames: int
_n_steps: int
_gamma: float
_scaler: Optional[Scaler]
_action_scaler: Optional[ActionScaler]
_reward_scaler: Optional[RewardScaler]
_real_ratio: float
_generated_maxlen: int
_impl: Optional[ImplBase]
_eval_results: DefaultDict[str, List[float]]
_loss_history: DefaultDict[str, List[float]]
_active_logger: Optional[D3RLPyLogger]
_grad_step: int
def __init__(
self,
batch_size: int,
n_frames: int,
n_steps: int,
gamma: float,
scaler: ScalerArg = None,
action_scaler: ActionScalerArg = None,
reward_scaler: RewardScalerArg = None,
real_ratio: float = 1.0,
generated_maxlen: int = 100000,
kwargs: Optional[Dict[str, Any]] = None,
):
self._batch_size = batch_size
self._n_frames = n_frames
self._n_steps = n_steps
self._gamma = gamma
self._scaler = check_scaler(scaler)
self._action_scaler = check_action_scaler(action_scaler)
self._reward_scaler = check_reward_scaler(reward_scaler)
self._real_ratio = real_ratio
self._generated_maxlen = generated_maxlen
self._impl = None
self._eval_results = defaultdict(list)
self._loss_history = defaultdict(list)
self._active_logger = None
self._grad_step = 0
if kwargs and len(kwargs.keys()) > 0:
LOG.warning("Unused arguments are passed.", **kwargs)
def __setattr__(self, name: str, value: Any) -> None:
super().__setattr__(name, value)
# propagate property updates to implementation object
if hasattr(self, "_impl") and self._impl and hasattr(self._impl, name):
setattr(self._impl, name, value)
@classmethod
def from_json(
cls, fname: str, use_gpu: UseGPUArg = False
) -> "LearnableBase":
"""Returns algorithm configured with json file.
The JSON file should be the one saved during fitting.
.. code-block:: python
from d3rlpy.algos import Algo
# create algorithm with saved configuration
algo = Algo.from_json('d3rlpy_logs/<path-to-json>/params.json')
# ready to load
algo.load_model('d3rlpy_logs/<path-to-model>/model_100.pt')
# ready to predict
algo.predict(...)
Args:
fname: file path to `params.json`.
use_gpu: flag to use GPU, device ID or device.
Returns:
algorithm.
"""
with open(fname, "r") as f:
params = json.load(f)
observation_shape = tuple(params["observation_shape"])
action_size = params["action_size"]
del params["observation_shape"]
del params["action_size"]
# reconstruct objects from json
params = _deseriealize_params(params)
# overwrite use_gpu flag
params["use_gpu"] = use_gpu
algo = cls(**params)
algo.create_impl(observation_shape, action_size)
return algo
def set_params(self, **params: Any) -> "LearnableBase":
"""Sets the given arguments to the attributes if they exist.
This method sets the given values to the attributes including ones in
subclasses. If the values that don't exist as attributes are
passed, they are ignored.
Some of scikit-learn utilities will use this method.
.. code-block:: python
algo.set_params(batch_size=100)
Args:
params: arbitrary inputs to set as attributes.
Returns:
itself.
"""
for key, val in params.items():
if hasattr(self, key):
try:
setattr(self, key, val)
except AttributeError:
# try passing to protected keys
assert hasattr(self, "_" + key), f"{key} does not exist."
setattr(self, "_" + key, val)
else:
assert hasattr(self, "_" + key), f"{key} does not exist."
setattr(self, "_" + key, val)
return self
def get_params(self, deep: bool = True) -> Dict[str, Any]:
"""Returns the all attributes.
This method returns the all attributes including ones in subclasses.
Some of scikit-learn utilities will use this method.
.. code-block:: python
params = algo.get_params(deep=True)
# the returned values can be used to instantiate the new object.
algo2 = AlgoBase(**params)
Args:
deep: flag to deeply copy objects such as `impl`.
Returns:
attribute values in dictionary.
"""
rets = {}
for key in dir(self):
# remove magic properties
if key[:2] == "__":
continue
# remove specific keys
if key in [
"_eval_results",
"_loss_history",
"_active_logger",
"_grad_step",
"active_logger",
"grad_step",
"observation_shape",
"action_size",
]:
continue
value = getattr(self, key)
# remove underscore
if key[0] == "_":
key = key[1:]
# pick scalar parameters
if np.isscalar(value):
rets[key] = value
elif isinstance(value, object) and not callable(value):
if deep:
rets[key] = copy.deepcopy(value)
else:
rets[key] = value
return rets
def save_model(self, fname: str) -> None:
"""Saves neural network parameters.
.. code-block:: python
algo.save_model('model.pt')
Args:
fname: destination file path.
"""
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
self._impl.save_model(fname)
def load_model(self, fname: str) -> None:
"""Load neural network parameters.
.. code-block:: python
algo.load_model('model.pt')
Args:
fname: source file path.
"""
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
self._impl.load_model(fname)
def fit(
self,
dataset: Union[List[Episode], MDPDataset],
n_epochs: Optional[int] = None,
n_steps: Optional[int] = None,
n_steps_per_epoch: int = 10000,
save_metrics: bool = True,
experiment_name: Optional[str] = None,
with_timestamp: bool = True,
logdir: str = "d3rlpy_logs",
verbose: bool = True,
show_progress: bool = True,
tensorboard_dir: Optional[str] = None,
eval_episodes: Optional[List[Episode]] = None,
save_interval: int = 1,
scorers: Optional[
Dict[str, Callable[[Any, List[Episode]], float]]
] = None,
shuffle: bool = True,
callback: Optional[Callable[["LearnableBase", int, int], None]] = None,
) -> List[Tuple[int, Dict[str, float]]]:
"""Trains with the given dataset.
.. code-block:: python
algo.fit(episodes, n_steps=1000000)
Args:
dataset: list of episodes to train.
n_epochs: the number of epochs to train.
n_steps: the number of steps to train.
n_steps_per_epoch: the number of steps per epoch. This value will
be ignored when ``n_steps`` is ``None``.
save_metrics: flag to record metrics in files. If False,
the log directory is not created and the model parameters are
not saved during training.
experiment_name: experiment name for logging. If not passed,
the directory name will be `{class name}_{timestamp}`.
with_timestamp: flag to add timestamp string to the last of
directory name.
logdir: root directory name to save logs.
verbose: flag to show logged information on stdout.
show_progress: flag to show progress bar for iterations.
tensorboard_dir: directory to save logged information in
tensorboard (additional to the csv data). if ``None``, the
directory will not be created.
eval_episodes: list of episodes to test.
save_interval: interval to save parameters.
scorers: list of scorer functions used with `eval_episodes`.
shuffle: flag to shuffle transitions on each epoch.
callback: callable function that takes ``(algo, epoch, total_step)``
, which is called every step.
Returns:
list of result tuples (epoch, metrics) per epoch.
"""
results = list(
self.fitter(
dataset,
n_epochs,
n_steps,
n_steps_per_epoch,
save_metrics,
experiment_name,
with_timestamp,
logdir,
verbose,
show_progress,
tensorboard_dir,
eval_episodes,
save_interval,
scorers,
shuffle,
callback,
)
)
return results
def fitter(
self,
dataset: Union[List[Episode], MDPDataset],
n_epochs: Optional[int] = None,
n_steps: Optional[int] = None,
n_steps_per_epoch: int = 10000,
save_metrics: bool = True,
experiment_name: Optional[str] = None,
with_timestamp: bool = True,
logdir: str = "d3rlpy_logs",
verbose: bool = True,
show_progress: bool = True,
tensorboard_dir: Optional[str] = None,
eval_episodes: Optional[List[Episode]] = None,
save_interval: int = 1,
scorers: Optional[
Dict[str, Callable[[Any, List[Episode]], float]]
] = None,
shuffle: bool = True,
callback: Optional[Callable[["LearnableBase", int, int], None]] = None,
) -> Generator[Tuple[int, Dict[str, float]], None, None]:
"""Iterate over epochs steps to train with the given dataset. At each
iteration algo methods and properties can be changed or queried.
.. code-block:: python
for epoch, metrics in algo.fitter(episodes):
my_plot(metrics)
algo.save_model(my_path)
Args:
dataset: list of episodes to train.
n_epochs: the number of epochs to train.
n_steps: the number of steps to train.
n_steps_per_epoch: the number of steps per epoch. This value will
be ignored when ``n_steps`` is ``None``.
save_metrics: flag to record metrics in files. If False,
the log directory is not created and the model parameters are
not saved during training.
experiment_name: experiment name for logging. If not passed,
the directory name will be `{class name}_{timestamp}`.
with_timestamp: flag to add timestamp string to the last of
directory name.
logdir: root directory name to save logs.
verbose: flag to show logged information on stdout.
show_progress: flag to show progress bar for iterations.
tensorboard_dir: directory to save logged information in
tensorboard (additional to the csv data). if ``None``, the
directory will not be created.
eval_episodes: list of episodes to test.
save_interval: interval to save parameters.
scorers: list of scorer functions used with `eval_episodes`.
shuffle: flag to shuffle transitions on each epoch.
callback: callable function that takes ``(algo, epoch, total_step)``
, which is called every step.
Returns:
iterator yielding current epoch and metrics dict.
"""
if isinstance(dataset, MDPDataset):
episodes = dataset.episodes
else:
episodes = dataset
# check action space
if self.get_action_type() == ActionSpace.BOTH:
pass
elif len(episodes[0].actions.shape) > 1:
assert (
self.get_action_type() == ActionSpace.CONTINUOUS
), CONTINUOUS_ACTION_SPACE_MISMATCH_ERROR
else:
assert (
self.get_action_type() == ActionSpace.DISCRETE
), DISCRETE_ACTION_SPACE_MISMATCH_ERROR
iterator: TransitionIterator
if n_epochs is None and n_steps is not None:
assert n_steps >= n_steps_per_epoch
n_epochs = n_steps // n_steps_per_epoch
iterator = RandomIterator(
episodes,
n_steps_per_epoch,
batch_size=self._batch_size,
n_steps=self._n_steps,
gamma=self._gamma,
n_frames=self._n_frames,
real_ratio=self._real_ratio,
generated_maxlen=self._generated_maxlen,
)
LOG.debug("RandomIterator is selected.")
elif n_epochs is not None and n_steps is None:
iterator = RoundIterator(
episodes,
batch_size=self._batch_size,
n_steps=self._n_steps,
gamma=self._gamma,
n_frames=self._n_frames,
real_ratio=self._real_ratio,
generated_maxlen=self._generated_maxlen,
shuffle=shuffle,
)
LOG.debug("RoundIterator is selected.")
else:
raise ValueError("Either of n_epochs or n_steps must be given.")
# setup logger
logger = self._prepare_logger(
save_metrics,
experiment_name,
with_timestamp,
logdir,
verbose,
tensorboard_dir,
)
# add reference to active logger to algo class during fit
self._active_logger = logger
# initialize scaler
if self._scaler:
LOG.debug("Fitting scaler...", scaler=self._scaler.get_type())
self._scaler.fit(episodes)
# initialize action scaler
if self._action_scaler:
LOG.debug(
"Fitting action scaler...",
action_scaler=self._action_scaler.get_type(),
)
self._action_scaler.fit(episodes)
# initialize reward scaler
if self._reward_scaler:
LOG.debug(
"Fitting reward scaler...",
reward_scaler=self._reward_scaler.get_type(),
)
self._reward_scaler.fit(episodes)
# instantiate implementation
if self._impl is None:
LOG.debug("Building models...")
transition = iterator.transitions[0]
action_size = transition.get_action_size()
observation_shape = tuple(transition.get_observation_shape())
self.create_impl(
self._process_observation_shape(observation_shape), action_size
)
LOG.debug("Models have been built.")
else:
LOG.warning("Skip building models since they're already built.")
# save hyperparameters
self.save_params(logger)
# refresh evaluation metrics
self._eval_results = defaultdict(list)
# refresh loss history
self._loss_history = defaultdict(list)
# training loop
total_step = 0
for epoch in range(1, n_epochs + 1):
# dict to add incremental mean losses to epoch
epoch_loss = defaultdict(list)
range_gen = tqdm(
range(len(iterator)),
disable=not show_progress,
desc=f"Epoch {int(epoch)}/{n_epochs}",
)
iterator.reset()
for itr in range_gen:
# generate new transitions with dynamics models
new_transitions = self.generate_new_data(
transitions=iterator.transitions,
)
if new_transitions:
iterator.add_generated_transitions(new_transitions)
LOG.debug(
f"{len(new_transitions)} transitions are generated.",
real_transitions=len(iterator.transitions),
fake_transitions=len(iterator.generated_transitions),
)
with logger.measure_time("step"):
# pick transitions
with logger.measure_time("sample_batch"):
batch = next(iterator)
# update parameters
with logger.measure_time("algorithm_update"):
loss = self.update(batch)
# record metrics
for name, val in loss.items():
logger.add_metric(name, val)
epoch_loss[name].append(val)
# update progress postfix with losses
if itr % 10 == 0:
mean_loss = {
k: np.mean(v) for k, v in epoch_loss.items()
}
range_gen.set_postfix(mean_loss)
total_step += 1
# call callback if given
if callback:
callback(self, epoch, total_step)
# save loss to loss history dict
self._loss_history["epoch"].append(epoch)
self._loss_history["step"].append(total_step)
for name, vals in epoch_loss.items():
if vals:
self._loss_history[name].append(np.mean(vals))
if scorers and eval_episodes:
self._evaluate(eval_episodes, scorers, logger)
# save metrics
metrics = logger.commit(epoch, total_step)
# save model parameters
if epoch % save_interval == 0:
logger.save_model(total_step, self)
yield epoch, metrics
# drop reference to active logger since out of fit there is no active
# logger
self._active_logger = None
def create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
"""Instantiate implementation objects with the dataset shapes.
This method will be used internally when `fit` method is called.
Args:
observation_shape: observation shape.
action_size: dimension of action-space.
"""
if self._impl:
LOG.warn("Parameters will be reinitialized.")
self._create_impl(observation_shape, action_size)
def _create_impl(
self, observation_shape: Sequence[int], action_size: int
) -> None:
raise NotImplementedError
def build_with_dataset(self, dataset: MDPDataset) -> None:
"""Instantiate implementation object with MDPDataset object.
Args:
dataset: dataset.
"""
observation_shape = dataset.get_observation_shape()
self.create_impl(
self._process_observation_shape(observation_shape),
dataset.get_action_size(),
)
def build_with_env(self, env: gym.Env) -> None:
"""Instantiate implementation object with OpenAI Gym object.
Args:
env: gym-like environment.
"""
observation_shape = env.observation_space.shape
self.create_impl(
self._process_observation_shape(observation_shape),
get_action_size_from_env(env),
)
def _process_observation_shape(
self, observation_shape: Sequence[int]
) -> Sequence[int]:
if len(observation_shape) == 3:
n_channels = observation_shape[0]
image_size = observation_shape[1:]
# frame stacking for image observation
observation_shape = (self._n_frames * n_channels, *image_size)
return observation_shape
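# Worked example (illustrative, not part of d3rlpy) of the frame-stacking
# arithmetic above for an image observation:
#
#   observation_shape = (3, 84, 84), self._n_frames = 4
#   -> returned shape  = (4 * 3, 84, 84) = (12, 84, 84)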
def update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
"""Update parameters with mini-batch of data.
Args:
batch: mini-batch data.
Returns:
dictionary of metrics.
"""
loss = self._update(batch)
self._grad_step += 1
return loss
def _update(self, batch: TransitionMiniBatch) -> Dict[str, float]:
raise NotImplementedError
def generate_new_data(
self, transitions: List[Transition]
) -> Optional[List[Transition]]:
"""Returns generated transitions for data augmentation.
This method is for model-based RL algorithms.
Args:
transitions: list of transitions.
Returns:
list of new transitions.
"""
return None
def _prepare_logger(
self,
save_metrics: bool,
experiment_name: Optional[str],
with_timestamp: bool,
logdir: str,
verbose: bool,
tensorboard_dir: Optional[str],
) -> D3RLPyLogger:
if experiment_name is None:
experiment_name = self.__class__.__name__
logger = D3RLPyLogger(
experiment_name,
save_metrics=save_metrics,
root_dir=logdir,
verbose=verbose,
tensorboard_dir=tensorboard_dir,
with_timestamp=with_timestamp,
)
return logger
def _evaluate(
self,
episodes: List[Episode],
scorers: Dict[str, Callable[[Any, List[Episode]], float]],
logger: D3RLPyLogger,
) -> None:
for name, scorer in scorers.items():
# evaluation with test data
test_score = scorer(self, episodes)
# logging metrics
logger.add_metric(name, test_score)
# store metric locally
if test_score is not None:
self._eval_results[name].append(test_score)
def save_params(self, logger: D3RLPyLogger) -> None:
"""Saves configurations as params.json.
Args:
logger: logger object.
"""
assert self._impl is not None, IMPL_NOT_INITIALIZED_ERROR
# get hyperparameters without impl
params = {}
with disable_parallel():
for k, v in self.get_params(deep=False).items():
if isinstance(v, (ImplBase, LearnableBase)):
continue
params[k] = v
# save algorithm name
params["algorithm"] = self.__class__.__name__
# save shapes
params["observation_shape"] = self._impl.observation_shape
params["action_size"] = self._impl.action_size
# serialize objects
params = _serialize_params(params)
logger.add_params(params)
def get_action_type(self) -> ActionSpace:
"""Returns action type (continuous or discrete).
Returns:
action type.
"""
raise NotImplementedError
@property
def batch_size(self) -> int:
"""Batch size to train.
Returns:
int: batch size.
"""
return self._batch_size
@batch_size.setter
def batch_size(self, batch_size: int) -> None:
self._batch_size = batch_size
@property
def n_frames(self) -> int:
"""Number of frames to stack.
This is only for image observation.
Returns:
int: number of frames to stack.
"""
return self._n_frames
@n_frames.setter
def n_frames(self, n_frames: int) -> None:
self._n_frames = n_frames
@property
def n_steps(self) -> int:
"""N-step TD backup.
Returns:
int: N-step TD backup.
"""
return self._n_steps
@n_steps.setter
def n_steps(self, n_steps: int) -> None:
self._n_steps = n_steps
@property
def gamma(self) -> float:
"""Discount factor.
Returns:
float: discount factor.
"""
return self._gamma
@gamma.setter
def gamma(self, gamma: float) -> None:
self._gamma = gamma
@property
def scaler(self) -> Optional[Scaler]:
"""Preprocessing scaler.
Returns:
Optional[Scaler]: preprocessing scaler.
"""
return self._scaler
@scaler.setter
def scaler(self, scaler: Scaler) -> None:
self._scaler = scaler
@property
def action_scaler(self) -> Optional[ActionScaler]:
"""Preprocessing action scaler.
Returns:
Optional[ActionScaler]: preprocessing action scaler.
"""
return self._action_scaler
@action_scaler.setter
def action_scaler(self, action_scaler: ActionScaler) -> None:
self._action_scaler = action_scaler
@property
def reward_scaler(self) -> Optional[RewardScaler]:
"""Preprocessing reward scaler.
Returns:
Optional[RewardScaler]: preprocessing reward scaler.
"""
return self._reward_scaler
@reward_scaler.setter
def reward_scaler(self, reward_scaler: RewardScaler) -> None:
self._reward_scaler = reward_scaler
@property
def impl(self) -> Optional[ImplBase]:
"""Implementation object.
Returns:
Optional[ImplBase]: implementation object.
"""
return self._impl
@impl.setter
def impl(self, impl: ImplBase) -> None:
self._impl = impl
@property
def observation_shape(self) -> Optional[Sequence[int]]:
"""Observation shape.
Returns:
Optional[Sequence[int]]: observation shape.
"""
if self._impl:
return self._impl.observation_shape
return None
@property
def action_size(self) -> Optional[int]:
"""Action size.
Returns:
Optional[int]: action size.
"""
if self._impl:
return self._impl.action_size
return None
@property
def active_logger(self) -> Optional[D3RLPyLogger]:
"""Active D3RLPyLogger object.
This will be only available during training.
Returns:
logger object.
"""
return self._active_logger
def set_active_logger(self, logger: D3RLPyLogger) -> None:
"""Set active D3RLPyLogger object
Args:
logger: logger object.
"""
self._active_logger = logger
@property
def grad_step(self) -> int:
"""Total gradient step counter.
This value will keep counting after ``fit`` and ``fit_online``
methods finish.
Returns:
total gradient step counter.
"""
return self._grad_step
def set_grad_step(self, grad_step: int) -> None:
"""Set total gradient step counter.
This method can be used to resume training from the middle with an
arbitrary gradient step counter, which affects periodic
functions such as the target update.
Args:
grad_step: total gradient step counter.
"""
self._grad_step = grad_step
| 30.675147
| 80
| 0.576651
|
a68807a204cd4378180400afc4a8d7373c7a168f
| 139
|
py
|
Python
|
src/main/python/601/design_lab_2.1.py
|
Fiegellan/Data-Engineer-Preperation
|
9178ea2381a1225d65c0b1761f9c38d2d8272f8a
|
[
"MIT"
] | null | null | null |
src/main/python/601/design_lab_2.1.py
|
Fiegellan/Data-Engineer-Preperation
|
9178ea2381a1225d65c0b1761f9c38d2d8272f8a
|
[
"MIT"
] | null | null | null |
src/main/python/601/design_lab_2.1.py
|
Fiegellan/Data-Engineer-Preperation
|
9178ea2381a1225d65c0b1761f9c38d2d8272f8a
|
[
"MIT"
] | 1
|
2019-03-17T02:02:24.000Z
|
2019-03-17T02:02:24.000Z
|
def fib(x):
if x == 0:
return 0
elif x == 1:
return 1
else:
return fib(x-1) + fib(x-2)
print(fib(10))
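# The recursive version above recomputes subproblems exponentially. A hedged
# alternative sketch (not part of the original exercise) using memoization:
from functools import lru_cache

@lru_cache(maxsize=None)
def fib_memo(x):
    # same recurrence, but each value is computed only once
    if x < 2:
        return x
    return fib_memo(x - 1) + fib_memo(x - 2)

print(fib_memo(10))  # 55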
| 12.636364
| 34
| 0.438849
|
38eac4d497cbf5b539d211a1231955f44b02ee97
| 2,596
|
py
|
Python
|
osf/migrations/0126_update_social_data_format.py
|
birdbrained/osf.io
|
ca70cf9fdacc2f3771038c8e5bc1c19e7126fd50
|
[
"Apache-2.0"
] | 1
|
2019-12-23T04:30:20.000Z
|
2019-12-23T04:30:20.000Z
|
osf/migrations/0126_update_social_data_format.py
|
birdbrained/osf.io
|
ca70cf9fdacc2f3771038c8e5bc1c19e7126fd50
|
[
"Apache-2.0"
] | 16
|
2020-03-24T16:30:32.000Z
|
2022-03-03T22:39:45.000Z
|
osf/migrations/0126_update_social_data_format.py
|
birdbrained/osf.io
|
ca70cf9fdacc2f3771038c8e5bc1c19e7126fd50
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-25 20:39
from __future__ import unicode_literals
import logging
import progressbar
from django.db import migrations, connection
logger = logging.getLogger(__name__)
FIELDS_TO_MIGRATE = [
'github',
'linkedIn',
'twitter'
]
class Migration(migrations.Migration):
def update_social_fields(state, schema):
for field in FIELDS_TO_MIGRATE:
sql = """
UPDATE osf_osfuser
SET social = social || json_build_object(
'{0}', CASE WHEN (osf_osfuser.social ->> '{0}') = '' THEN '[]'
WHEN (osf_osfuser.social ->> '{0}') IS NOT NULL
AND json_typeof(osf_osfuser.social::json -> '{0}') != 'array'
THEN json_build_array(osf_osfuser.social ->> '{0}')
ELSE (osf_osfuser.social -> '{0}')::json
END
)::jsonb
WHERE osf_osfuser.social ? '{0}';
""".format(field)
with connection.cursor() as cursor:
logger.info('Setting social fields for {}...'.format(field))
cursor.execute(sql)
def reset_social_fields(state, schema):
OSFUser = state.get_model('osf', 'osfuser')
users_with_social = OSFUser.objects.filter(social__has_any_keys=FIELDS_TO_MIGRATE)
users_to_update = users_with_social.count()
logger.info('Updating social fields for {} users'.format(users_to_update))
progress_bar = progressbar.ProgressBar(maxval=users_to_update or 100).start()
users_updated = 0
for user in users_with_social:
old_social = {}
for key, value in user.social.items():
if key in FIELDS_TO_MIGRATE:
if len(value) > 1:
raise ValueError('Current social list field has more than one value, cannot reset to just one value.')
old_social[key] = value[0]
else:
old_social[key] = value
user.social = old_social
user.save()
users_updated += 1
progress_bar.update(users_updated)
progress_bar.finish()
logger.info('Updated social field for {} users'.format(users_updated))
dependencies = [
('osf', '0125_merge_20180824_1856'),
]
operations = [
migrations.RunPython(update_social_fields, reset_social_fields)
]
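# Illustrative sketch (not part of the migration) of the data-shape change the
# forward SQL performs on osf_osfuser.social for each key in FIELDS_TO_MIGRATE:
#
#   before: {"github": "jane", "twitter": ""}
#   after:  {"github": ["jane"], "twitter": []}
#
# reset_social_fields reverses this by unwrapping single-element lists and
# raises if a migrated list holds more than one value.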
| 35.561644
| 126
| 0.557396
|
0d79f9915228fde49bce9066b70dc46b1b3ffad3
| 17,499
|
py
|
Python
|
scripts/dual_IDG.py
|
ssykira/optic-nerve
|
0457df5c3cd58416361b107be3282745b6d4acbd
|
[
"MIT"
] | 2
|
2019-07-29T09:36:10.000Z
|
2019-08-26T08:30:37.000Z
|
scripts/dual_IDG.py
|
ssykira/optic-nerve
|
0457df5c3cd58416361b107be3282745b6d4acbd
|
[
"MIT"
] | null | null | null |
scripts/dual_IDG.py
|
ssykira/optic-nerve
|
0457df5c3cd58416361b107be3282745b6d4acbd
|
[
"MIT"
] | 1
|
2019-12-18T17:30:14.000Z
|
2019-12-18T17:30:14.000Z
|
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
Based on Keras code
Modified by He Xie 08/2016, Artem Sevastopolsky 10/2016
For image segmentation problem data augmentation.
Transform train img data and mask img data simultaneously and in the same fashion.
Omit flow from directory function.
"""
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
# from six.moves import range
import os
import threading
from keras import backend as K
def random_channel_shift(x, intensity, channel_index=0):
x = np.rollaxis(x, channel_index, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x, max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_index+1)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x, transform_matrix, channel_index=0, fill_mode='nearest', cval=0.):
x = np.rollaxis(x, channel_index, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(x_channel, final_affine_matrix,
final_offset, order=0, mode=fill_mode, cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_index+1)
return x
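# A minimal usage sketch (not from the original module) combining the two
# helpers above: build a rotation matrix, center it on the image, and apply
# it to a channel-first array. Uncommented, it only needs the numpy and
# scipy imports already present at the top of this file.
#
#   theta = np.deg2rad(15.)
#   rotation = np.array([[np.cos(theta), -np.sin(theta), 0],
#                        [np.sin(theta), np.cos(theta), 0],
#                        [0, 0, 1]])
#   img = np.random.rand(1, 64, 64)  # (channels, rows, cols)
#   matrix = transform_matrix_offset_center(rotation, 64, 64)
#   rotated = apply_transform(img, matrix, channel_index=0)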
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, dim_ordering='default', scale=True):
from PIL import Image
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
if dim_ordering == 'th':
x = x.transpose(1, 2, 0)
if scale:
x += max(-np.min(x), 0)
x /= np.max(x)
x *= 255
if x.shape[2] == 3:
# RGB
return Image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return Image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise Exception('Unsupported channel number: ', x.shape[2])
def img_to_array(img, dim_ordering='default'):
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
if dim_ordering not in ['th', 'tf']:
raise Exception('Unknown dim_ordering: ', dim_ordering)
# image has dim_ordering (height, width, channel)
x = np.asarray(img, dtype='float32')
if len(x.shape) == 3:
if dim_ordering == 'th':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if dim_ordering == 'th':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise Exception('Unsupported image shape: ', x.shape)
return x
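# Hypothetical round-trip sketch (not from the original module), assuming a
# PIL image `some_pil_image` and the Theano-style 'th' ordering used
# elsewhere in this file:
#
#   arr = img_to_array(some_pil_image, dim_ordering='th')        # (channels, rows, cols)
#   pil_again = array_to_img(arr, dim_ordering='th', scale=True)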
class DualImageDataGenerator(object):
'''Generate minibatches with
real-time data augmentation.
Assume X is train img, Y is train label (same size as X with only 0 and 255 for values)
# Arguments
featurewise_center: set input mean to 0 over the dataset. Only to X
samplewise_center: set each sample mean to 0. Only to X
featurewise_std_normalization: divide inputs by std of the dataset. Only to X
samplewise_std_normalization: divide each input by its std. Only to X
zca_whitening: apply ZCA whitening. Only to X
rotation_range: degrees (0 to 180). To X and Y
width_shift_range: fraction of total width. To X and Y
height_shift_range: fraction of total height. To X and Y
shear_range: shear intensity (shear angle in radians). To X and Y
zoom_range: amount of zoom. if scalar z, zoom will be randomly picked
in the range [1-z, 1+z]. A sequence of two can be passed instead
to select this range. To X and Y
channel_shift_range: shift range for each channels. Only to X
fill_mode: points outside the boundaries are filled according to the
given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default
is 'nearest'. For Y, always fill with constant 0
cval: value used for points outside the boundaries when fill_mode is
'constant'. Default is 0.
horizontal_flip: whether to randomly flip images horizontally. To X and Y
vertical_flip: whether to randomly flip images vertically. To X and Y
rescale: rescaling factor. If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided (before applying
any other transformation). Only to X
dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
(the depth) is at index 1, in 'tf' mode it is at index 3.
It defaults to the `image_dim_ordering` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "th".
'''
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
dim_ordering='default'):
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
self.__dict__.update(locals())
self.mean = None
self.std = None
self.principal_components = None
self.rescale = rescale
if dim_ordering not in {'tf', 'th'}:
raise Exception('dim_ordering should be "tf" (channel after row and '
'column) or "th" (channel before row and column). '
'Received arg: ', dim_ordering)
self.dim_ordering = dim_ordering
if dim_ordering == 'th':
self.channel_index = 1
self.row_index = 2
self.col_index = 3
if dim_ordering == 'tf':
self.channel_index = 3
self.row_index = 1
self.col_index = 2
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise Exception('zoom_range should be a float or '
'a tuple or list of two floats. '
'Received arg: ', zoom_range)
def flow(self, X, y=None, batch_size=32, shuffle=True, seed=None,
save_to_dir=None, save_prefix='', save_format='jpeg'):
return NumpyArrayIterator(
X, y, self,
batch_size=batch_size, shuffle=shuffle, seed=seed,
dim_ordering=self.dim_ordering,
save_to_dir=save_to_dir, save_prefix=save_prefix, save_format=save_format)
def standardize(self, x):
# Only applied to X
if self.rescale:
x *= self.rescale
# x is a single image, so it doesn't have image number at index 0
img_channel_index = self.channel_index - 1
if self.samplewise_center:
x -= np.mean(x, axis=img_channel_index, keepdims=True)
if self.samplewise_std_normalization:
x /= (np.std(x, axis=img_channel_index, keepdims=True) + 1e-7)
if self.featurewise_center:
x -= self.mean
if self.featurewise_std_normalization:
x /= (self.std + 1e-7)
if self.zca_whitening:
flatx = np.reshape(x, (x.size))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, (x.shape[0], x.shape[1], x.shape[2]))
return x
def random_transform(self, x, y):
# Need to modify to transform both X and Y ---- to do
# x is a single image, so it doesn't have image number at index 0
img_row_index = self.row_index - 1
img_col_index = self.col_index - 1
img_channel_index = self.channel_index - 1
# use composition of homographies to generate final transform that needs to be applied
if self.rotation_range:
theta = np.pi / 180 * np.random.uniform(-self.rotation_range, self.rotation_range)
else:
theta = 0
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
if self.height_shift_range:
tx = np.random.uniform(-self.height_shift_range, self.height_shift_range) * x.shape[img_row_index]
else:
tx = 0
if self.width_shift_range:
ty = np.random.uniform(-self.width_shift_range, self.width_shift_range) * x.shape[img_col_index]
else:
ty = 0
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
if self.shear_range:
shear = np.random.uniform(-self.shear_range, self.shear_range)
else:
shear = 0
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = np.dot(np.dot(np.dot(rotation_matrix, translation_matrix), shear_matrix), zoom_matrix)
h, w = x.shape[img_row_index], x.shape[img_col_index]
transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_index,
fill_mode=self.fill_mode, cval=self.cval)
# For y, mask data, fill mode constant, cval = 0
y = apply_transform(y, transform_matrix, img_channel_index,
fill_mode="constant", cval=0)
if self.channel_shift_range != 0:
x = random_channel_shift(x, self.channel_shift_range, img_channel_index)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_index)
y = flip_axis(y, img_col_index)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_index)
y = flip_axis(y, img_row_index)
# TODO:
# channel-wise normalization
# barrel/fisheye
return x, y
def fit(self, X,
augment=False,
rounds=1,
seed=None):
'''Required for featurewise_center, featurewise_std_normalization
and zca_whitening.
# Arguments
X: Numpy array, the data to fit on.
augment: whether to fit on randomly augmented samples
rounds: if `augment`,
how many augmentation passes to do over the data
seed: random seed.
# Only applied to X
'''
X = np.copy(X)
if augment:
aX = np.zeros(tuple([rounds * X.shape[0]] + list(X.shape)[1:]))
for r in range(rounds):
for i in range(X.shape[0]):
                    aX[i + r * X.shape[0]] = self.random_transform(X[i], X[i])[0]  # random_transform expects (x, y); pass x as a dummy mask and keep only the image
X = aX
if self.featurewise_center:
self.mean = np.mean(X, axis=0)
X -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(X, axis=0)
X /= (self.std + 1e-7)
if self.zca_whitening:
flatX = np.reshape(X, (X.shape[0], X.shape[1] * X.shape[2] * X.shape[3]))
sigma = np.dot(flatX.T, flatX) / flatX.shape[1]
U, S, V = linalg.svd(sigma)
self.principal_components = np.dot(np.dot(U, np.diag(1. / np.sqrt(S + 10e-7))), U.T)
class Iterator(object):
def __init__(self, N, batch_size, shuffle, seed):
self.N = N
self.batch_size = batch_size
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_generator = self._flow_index(N, batch_size, shuffle, seed)
def reset(self):
self.batch_index = 0
def _flow_index(self, N, batch_size=32, shuffle=False, seed=None):
# ensure self.batch_index is 0
self.reset()
while 1:
if self.batch_index == 0:
index_array = np.arange(N)
if shuffle:
if seed is not None:
np.random.seed(seed + self.total_batches_seen)
index_array = np.random.permutation(N)
current_index = (self.batch_index * batch_size) % N
if N >= current_index + batch_size:
current_batch_size = batch_size
self.batch_index += 1
else:
current_batch_size = N - current_index
self.batch_index = 0
self.total_batches_seen += 1
yield (index_array[current_index: current_index + current_batch_size],
current_index, current_batch_size)
def __iter__(self):
# needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
# ?
return self.next(*args, **kwargs)
class NumpyArrayIterator(Iterator):
def __init__(self, X, y, image_data_generator,
batch_size=32, shuffle=False, seed=None,
dim_ordering='default',
save_to_dir=None, save_prefix='', save_format='jpeg'):
if len(X) != len(y):
raise Exception('X (images tensor) and y (labels) '
'should have the same length. '
'Found: X.shape = %s, y.shape = %s' % (np.asarray(X).shape, np.asarray(y).shape))
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
self.X = X
self.y = y
self.image_data_generator = image_data_generator
self.dim_ordering = dim_ordering
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(X.shape[0], batch_size, shuffle, seed)
def next(self):
# for python 2.x.
# Keeps under lock only the mechanism which advances
# the indexing of each batch
# see http://anandology.com/blog/using-iterators-and-generators/
with self.lock:
index_array, current_index, current_batch_size = next(self.index_generator)
# The transformation of images is not under thread lock so it can be done in parallel
batch_x = np.zeros(tuple([current_batch_size] + list(self.X.shape)[1:]))
batch_y = np.zeros(tuple([current_batch_size] + list(self.y.shape)[1:]))
for i, j in enumerate(index_array):
x = self.X[j]
label = self.y[j]
x, label = self.image_data_generator.random_transform(x.astype('float32'), label.astype("float32"))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
batch_y[i] = label
if self.save_to_dir:
for i in range(current_batch_size):
img = array_to_img(batch_x[i], self.dim_ordering, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,
index=current_index + i,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
mask = array_to_img(batch_y[i], self.dim_ordering, scale=True)
fname = '{prefix}_{index}_{hash}_mask.{format}'.format(prefix=self.save_prefix,
index=current_index + i,
hash=np.random.randint(1e4),
format=self.save_format)
mask.save(os.path.join(self.save_to_dir, fname))
return batch_x, batch_y
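# Hedged usage sketch (assumes 'th' dim ordering and toy numpy arrays; not part of the original file):
#
#     import numpy as np
#     X = np.random.rand(8, 1, 64, 64).astype('float32')                 # images
#     Y = (np.random.rand(8, 1, 64, 64) > 0.5).astype('float32') * 255   # masks
#     gen = DualImageDataGenerator(rotation_range=15., horizontal_flip=True, dim_ordering='th')
#     for batch_x, batch_y in gen.flow(X, Y, batch_size=4, shuffle=True):
#         break  # batch_x and batch_y are augmented with the same random transform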
| 40.885514
| 113
| 0.573233
|
f58e4c879a6a95a925d5063390cfe2ee4bf5423e
| 780
|
py
|
Python
|
12.0-flask/src/simple_dev_sample.py
|
zehemz/clases-python-101
|
633cb5f0cbc85e64e242514f0394754a5bed0513
|
[
"Apache-2.0"
] | null | null | null |
12.0-flask/src/simple_dev_sample.py
|
zehemz/clases-python-101
|
633cb5f0cbc85e64e242514f0394754a5bed0513
|
[
"Apache-2.0"
] | null | null | null |
12.0-flask/src/simple_dev_sample.py
|
zehemz/clases-python-101
|
633cb5f0cbc85e64e242514f0394754a5bed0513
|
[
"Apache-2.0"
] | null | null | null |
'''
A minimal Flask example. The module runs in the interpreter's top-level scope
(the otherwise anonymous environment in which commands read from standard input,
a script file, or an interactive prompt execute), and it is in this environment
that the idiomatic "conditional script" stanza below starts the server.
Installing Flask:
$ pip install Flask
Running the example:
$ python simple_dev_sample.py
This gives us a running program instance listening locally on port 5000:
* Running on http://localhost:5000/
'''
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
    return "Hello World!"
if __name__ == "__main__":
    # Main script
    print("Running flask example ...")
    # app is a module-level (global) variable; run() starts the development server.
    app.run()
| 26
| 296
| 0.739744
|
e9ab4cd22c3d93750a4dcea194de1d9bd906138d
| 7,299
|
py
|
Python
|
src/converter.py
|
xryuseix/proofreader
|
b4c523d05324cf771acee688d51cfea8d6d6d114
|
[
"MIT"
] | null | null | null |
src/converter.py
|
xryuseix/proofreader
|
b4c523d05324cf771acee688d51cfea8d6d6d114
|
[
"MIT"
] | 5
|
2020-04-28T18:13:26.000Z
|
2020-05-17T19:09:42.000Z
|
src/converter.py
|
xryuseix/Proofreader
|
b4c523d05324cf771acee688d51cfea8d6d6d114
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os, sys, re
import read_file as File
# Convert ',' and '.' to '、' and '。'
def dot_to_comma(text):
    replaced_text = re.sub(",", r"、", text)
    return re.sub(r"\.", r"。", replaced_text)
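# Hedged example (illustrative only): dot_to_comma("a,b.") returns "a、b。".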
# Emit warnings based on word_list
def word_to_word(text, file, search, root):
if not os.path.isfile("%sword_list.csv" % (root)):
return text
word_list = File.readFile("%sword_list.csv" % (root), True)
    # Open find_list
if search:
find_list_path = "%sfind_list.txt" % (root)
if not os.path.isfile(find_list_path):
find_list = []
else:
find_list = File.readFile(find_list_path).split("\n")
text_arr = text.splitlines()
warning_list = []
find_out_list = []
for i, text in enumerate(text_arr):
        for word in word_list:  # warn on matching strings
re_obj = re.search(word[0], text)
if re_obj:
warning_list.append([i + 1, re_obj.start(), re_obj.group(), word[1]])
        if search:  # search for the requested strings
for word in find_list:
re_obj = re.search(word, text)
if re_obj:
find_out_list.append([i + 1, re_obj.start(), word])
if not search:
for c in warning_list:
print(
"\033[33mWARNING\033[0m: %s:%s:%s: (%s) => (%s)"
% (file, c[0], c[1], c[2], c[3])
)
if search:
for c in find_out_list:
print("\033[36mFOUND!!\033[0m: %s:%s:%s: (%s)" % (file, c[0], c[1], c[2]))
return "\n".join(text_arr)
# Insert a comma every three digits of a number
class DigitComma:
    def __init__(self, text: str):
        self.text = text
    # Insert a comma every three digits
def __digit_comma(self, num: str):
num = num.group()
integer_decimal = num.split(".")
        commad_num = re.sub(
            r"(\d)(?=(\d\d\d)+(?!\d))", r"\1,", integer_decimal[0]
        )  # integer part
        if len(integer_decimal) > 1:
            commad_num += "." + integer_decimal[1]  # fractional part
return commad_num
    # Cut out only the numeric parts of the text
    def cut_out(self):
        # Extract the numbers and insert commas
        return re.sub(r"\d+[.,\d]*\d+", self.__digit_comma, self.text)
# Insert spaces around numbers and at the start of lines
class SpaceConvert:
def __init__(self, text: str, special_noun_list: list):
self.text = text
self.special_noun_list = special_noun_list
    # Insert spaces around numbers and at the start of lines
def __add_space(self, text: str):
        # space before numbers
text = re.sub(r"([^\n\d, \.])((?:\d+\.?\d*|\.\d+))", r"\1 \2", text)
        # space after numbers
text = re.sub(r"([\+\-]?(?:\d+\.?\d*|\.\d+))([^\n\d, \.])", r"\1 \2", text)
        # spaces around operator symbols
op = r"\+\-\*"
text = re.sub(r"([^%s\n ])([%s]+)" % (op, op), r"\1 \2", text)
text = re.sub(r"([%s]+)([^%s ])" % (op, op), r"\1 \2", text)
        # space after alphabetic tokens
symbol = r'_\.\^,:\/%<>"\'=\[\]\(\)'
word = r"a-zA-Z\d" + symbol
text = re.sub(r"([a-zA-Z][%s]*)([^\n%s ])" % (word, word), r"\1 \2", text)
        # space before alphabetic tokens (except at the start of a line)
text = re.sub(r"([^\n%s ])([a-zA-Z][%s]*)" % (word, word), r"\1 \2", text)
# "[日本語][スペース]?[演算子][スペース][数値]"を"[日本語][スペース][演算子][数値]"にする (あ+ 1 → あ +1)
ja = r"亜-熙ぁ-んァ-ヶ"
text = re.sub(r"([%s]) ?([%s]+) (\d)" % (ja, op), r"\1 \2\3", text)
# "[改行][スペース]?[演算子][スペース][数値]"を"[改行][演算子][数値]"にする (+ 1 → +1)
text = re.sub(r"\n ?([%s]+) (\d)" % (op), r"\n\1\2", text)
return text
    # Remove spaces where they must not appear
    def __erase_invalid_spaces(self, text: str):
        # Exponentiation symbol: remove the surrounding spaces (the ^ is not touched when it is an xor symbol)
        text = text.replace(r" ^ ", r"^")
        # Underscore: remove the space on either side (or both)
        text = text.replace("_ ", "_").replace(" _", "_")
        # Turn "Python 3" into "Python3"
text = re.sub(r"([A-Za-z]) (\d)", r"\1\2", text)
        # Fix words from special_noun_list that contain unwanted internal spaces
for noun in self.special_noun_list:
            # escape the input for use in a regex
need_escape = r"\\\*\+\.\?\(\)\{\}\[\]\^\$\|"
noun_invalid_reg: str = re.sub("([%s])" % (need_escape), r"\\\1", noun)
noun_invalid_reg = re.sub(r"(?<!\\)(.)", r" ?\1", noun_invalid_reg)[2:]
            # replace
text = re.sub(noun_invalid_reg, noun, text)
return text
    # Remove unnecessary spaces around tags
    def __erase_invalid_before_patterns_spaces(self, text: str):
        text = re.sub(r" +<", r" <", text)  # before a tag
        text = re.sub(r"> +", r"> ", text)  # after a tag
return text
    # Split the string on the exclusion patterns
    def split_text(self):
        converted_text = ""
        # Patterns whose contents are excluded from conversion
        rm_patterns_range = [r"</?pre>", r"</?code>", "```"]
        # Split the text on the exclusion patterns
text_arr = re.split("|".join(rm_patterns_range), self.text)
        # Which pattern each chunk of text_arr sits inside
ptns_in_text = [
m.group() for m in re.finditer("|".join(rm_patterns_range), self.text)
]
        # Tags we are currently enclosed by
        ptn_state = []
        for doc, ptn in zip(text_arr, ptns_in_text):
            # Treat closing and opening tags as the same tag
            ptn_prot = ptn.replace("/", "")
            # While not enclosed by an exclusion pattern
if not ptn_state:
                # Insert spaces around numbers
                doc = self.__add_space(doc)
                # Insert commas into numbers
dc = DigitComma(doc)
doc = dc.cut_out()
doc = self.__erase_invalid_spaces(doc)
converted_text += doc
            # Start of an exclusion pattern
if not ptn_prot in ptn_state:
ptn_state.append(ptn_prot)
                # Insert a space before <code>
if ptn_prot == "<code>" and converted_text[-1] != "\n":
converted_text += " "
                # Append the tag to the formatted output
converted_text += ptn
            # End of an exclusion pattern
else:
ptn_state.remove(ptn_prot)
                # Append the tag to the formatted output
converted_text += ptn
                # Insert a space after <code>
if ptn_prot == "<code>":
converted_text += " "
else:
            # When the number of text blocks and the number of exclusion patterns differ
if len(text_arr) > len(ptns_in_text) and not ptn_state:
                # Insert spaces around numbers
doc = self.__add_space(text_arr[-1])
                # Insert commas into numbers
dc = DigitComma(doc)
doc = dc.cut_out()
converted_text += self.__erase_invalid_spaces(doc)
        # Remove unnecessary spaces around tags
converted_text = self.__erase_invalid_before_patterns_spaces(converted_text)
return converted_text
def converter(file, search):
text = File.readFile(file)
root = sys.argv[0][:-14]
if not os.path.isfile("%sspecial_noun_list.txt" % (root)):
special_noun_list = []
else:
special_noun_list = File.readFile("%sspecial_noun_list.txt" % (root)).split(
"\n"
)
if not search:
        # Insert spaces around numbers and after English words at the start of a line
sc = SpaceConvert(text, special_noun_list)
text = sc.split_text()
        # Change ',' to '、'
text = dot_to_comma(text)
    # Emit a WARNING for the specified words
text = word_to_word(text, file, search, root)
with open(file, mode="w") as f:
f.write(text)
if __name__ == "__main__":
# s = "A12 ^ 12AA<pre>Z_ 1Z 1 _ 23 - 456 Z</pre>CC- 1234C+ 12```ZZZ```AAA"
s = "abc1d_aaa貼り付けco11111deでき入1123.456888 力 <code>る </code> よ\n<code></code>あっっ"
print(s)
    sc = SpaceConvert(s, [])  # SpaceConvert requires a special-noun list; pass an empty one here
print(sc.split_text())
| 32.730942
| 86
| 0.511029
|
c09fe77938eb67c406590e65bee0751046c56ae5
| 17,762
|
py
|
Python
|
elementpath/xpath2/xpath2_constructors.py
|
felixonmars/elementpath
|
62584c7335a8188ebc7eecdcbf0cee52daebe301
|
[
"MIT"
] | null | null | null |
elementpath/xpath2/xpath2_constructors.py
|
felixonmars/elementpath
|
62584c7335a8188ebc7eecdcbf0cee52daebe301
|
[
"MIT"
] | null | null | null |
elementpath/xpath2/xpath2_constructors.py
|
felixonmars/elementpath
|
62584c7335a8188ebc7eecdcbf0cee52daebe301
|
[
"MIT"
] | null | null | null |
#
# Copyright (c), 2018-2021, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
"""
XPath 2.0 implementation - part 3 (XSD constructors and multi-role tokens)
"""
from ..exceptions import ElementPathError, ElementPathSyntaxError
from ..namespaces import XSD_NAMESPACE
from ..datatypes import xsd10_atomic_types, xsd11_atomic_types, GregorianDay, \
GregorianMonth, GregorianMonthDay, GregorianYear10, GregorianYear, \
GregorianYearMonth10, GregorianYearMonth, Duration, DayTimeDuration, \
YearMonthDuration, Date10, Date, DateTime10, DateTime, DateTimeStamp, \
Time, UntypedAtomic, QName, HexBinary, Base64Binary, BooleanProxy
from ..xpath_token import XPathToken
from .xpath2_functions import XPath2Parser
register = XPath2Parser.register
unregister = XPath2Parser.unregister
method = XPath2Parser.method
constructor = XPath2Parser.constructor
###
# Constructors for string-based XSD types
@constructor('normalizedString')
@constructor('token')
@constructor('language')
@constructor('NMTOKEN')
@constructor('Name')
@constructor('NCName')
@constructor('ID')
@constructor('IDREF')
@constructor('ENTITY')
@constructor('anyURI')
def cast(self, value):
try:
return xsd10_atomic_types[self.symbol](value)
except ValueError as err:
raise self.error('FORG0001', err)
###
# Constructors for numeric XSD types
@constructor('decimal')
@constructor('double')
@constructor('float')
def cast(self, value):
try:
if self.parser.xsd_version == '1.0':
return xsd10_atomic_types[self.symbol](value)
return xsd11_atomic_types[self.symbol](value)
except ValueError as err:
if isinstance(value, (str, UntypedAtomic)):
raise self.error('FORG0001', err)
raise self.error('FOCA0002', err)
@constructor('integer')
@constructor('nonNegativeInteger')
@constructor('positiveInteger')
@constructor('nonPositiveInteger')
@constructor('negativeInteger')
@constructor('long')
@constructor('int')
@constructor('short')
@constructor('byte')
@constructor('unsignedLong')
@constructor('unsignedInt')
@constructor('unsignedShort')
@constructor('unsignedByte')
def cast(self, value):
try:
return xsd10_atomic_types[self.symbol](value)
except ValueError:
msg = 'could not convert {!r} to xs:{}'.format(value, self.symbol)
if isinstance(value, (str, bytes, UntypedAtomic, bool)):
raise self.error('FORG0001', msg) from None
raise self.error('FOCA0002', msg) from None
except OverflowError as err:
raise self.error('FOCA0002', err) from None
###
# Constructors for datetime XSD types
@constructor('date')
def cast(self, value):
cls = Date if self.parser.xsd_version == '1.1' else Date10
if isinstance(value, cls):
return value
try:
if isinstance(value, UntypedAtomic):
return cls.fromstring(value.value)
elif isinstance(value, DateTime10):
return cls(value.year, value.month, value.day, value.tzinfo)
return cls.fromstring(value)
except OverflowError as err:
raise self.error('FODT0001', err) from None
except ValueError as err:
raise self.error('FORG0001', err)
@constructor('gDay')
def cast(self, value):
if isinstance(value, GregorianDay):
return value
try:
if isinstance(value, UntypedAtomic):
return GregorianDay.fromstring(value.value)
elif isinstance(value, (Date10, DateTime10)):
return GregorianDay(value.day, value.tzinfo)
return GregorianDay.fromstring(value)
except ValueError as err:
raise self.error('FORG0001', err)
@constructor('gMonth')
def cast(self, value):
if isinstance(value, GregorianMonth):
return value
try:
if isinstance(value, UntypedAtomic):
return GregorianMonth.fromstring(value.value)
elif isinstance(value, (Date10, DateTime10)):
return GregorianMonth(value.month, value.tzinfo)
return GregorianMonth.fromstring(value)
except ValueError as err:
raise self.error('FORG0001', err)
@constructor('gMonthDay')
def cast(self, value):
if isinstance(value, GregorianMonthDay):
return value
try:
if isinstance(value, UntypedAtomic):
return GregorianMonthDay.fromstring(value.value)
elif isinstance(value, (Date10, DateTime10)):
return GregorianMonthDay(value.month, value.day, value.tzinfo)
return GregorianMonthDay.fromstring(value)
except ValueError as err:
raise self.error('FORG0001', err)
@constructor('gYear')
def cast(self, value):
cls = GregorianYear if self.parser.xsd_version == '1.1' else GregorianYear10
if isinstance(value, cls):
return value
try:
if isinstance(value, UntypedAtomic):
return cls.fromstring(value.value)
elif isinstance(value, (Date10, DateTime10)):
return cls(value.year, value.tzinfo)
return cls.fromstring(value)
except OverflowError as err:
raise self.error('FODT0001', err) from None
except ValueError as err:
raise self.error('FORG0001', err)
@constructor('gYearMonth')
def cast(self, value):
cls = GregorianYearMonth \
if self.parser.xsd_version == '1.1' else GregorianYearMonth10
if isinstance(value, cls):
return value
try:
if isinstance(value, UntypedAtomic):
return cls.fromstring(value.value)
elif isinstance(value, (Date10, DateTime10)):
return cls(value.year, value.month, value.tzinfo)
return cls.fromstring(value)
except OverflowError as err:
raise self.error('FODT0001', err) from None
except ValueError as err:
raise self.error('FORG0001', err)
@constructor('time')
def cast(self, value):
if isinstance(value, Time):
return value
try:
if isinstance(value, UntypedAtomic):
return Time.fromstring(value.value)
elif isinstance(value, DateTime10):
return Time(value.hour, value.minute, value.second,
value.microsecond, value.tzinfo)
return Time.fromstring(value)
except ValueError as err:
raise self.error('FORG0001', err)
@method('date')
@method('gDay')
@method('gMonth')
@method('gMonthDay')
@method('gYear')
@method('gYearMonth')
@method('time')
def evaluate(self, context=None):
arg = self.data_value(self.get_argument(context))
if arg is None:
return []
try:
return self.cast(arg)
except TypeError as err:
raise self.error('FORG0006', err) from None
except OverflowError as err:
raise self.error('FODT0001', err) from None
###
# Constructors for time durations XSD types
@constructor('duration')
def cast(self, value):
if isinstance(value, Duration):
return value
try:
if isinstance(value, UntypedAtomic):
return Duration.fromstring(value.value)
return Duration.fromstring(value)
except OverflowError as err:
raise self.error('FODT0002', err) from None
except ValueError as err:
raise self.error('FORG0001', err)
@constructor('yearMonthDuration')
def cast(self, value):
if isinstance(value, YearMonthDuration):
return value
elif isinstance(value, Duration):
return YearMonthDuration(months=value.months)
try:
if isinstance(value, UntypedAtomic):
return YearMonthDuration.fromstring(value.value)
return YearMonthDuration.fromstring(value)
except OverflowError as err:
raise self.error('FODT0002', err) from None
except ValueError as err:
raise self.error('FORG0001', err)
@constructor('dayTimeDuration')
def cast(self, value):
if isinstance(value, DayTimeDuration):
return value
elif isinstance(value, Duration):
return DayTimeDuration(seconds=value.seconds)
try:
if isinstance(value, UntypedAtomic):
return DayTimeDuration.fromstring(value.value)
return DayTimeDuration.fromstring(value)
except OverflowError as err:
raise self.error('FODT0002', err) from None
except ValueError as err:
raise self.error('FORG0001', err) from None
@constructor('dateTimeStamp')
def cast(self, value):
if isinstance(value, DateTimeStamp):
return value
elif isinstance(value, DateTime10):
value = str(value)
try:
if isinstance(value, UntypedAtomic):
return DateTimeStamp.fromstring(value.value)
return DateTimeStamp.fromstring(value)
except ValueError as err:
raise self.error('FORG0001', err) from None
@method('dateTimeStamp')
def evaluate(self, context=None):
arg = self.data_value(self.get_argument(context))
if arg is None:
return []
if isinstance(arg, UntypedAtomic):
return self.cast(arg.value)
return self.cast(str(arg))
@method('dateTimeStamp')
def nud(self):
if self.parser.xsd_version == '1.0':
raise self.wrong_syntax("xs:dateTimeStamp is not recognized unless XSD 1.1 is enabled")
try:
self.parser.advance('(')
self[0:] = self.parser.expression(5),
if self.parser.next_token.symbol == ',':
raise self.wrong_nargs('Too many arguments: expected at most 1 argument')
self.parser.advance(')')
self.value = None
except SyntaxError:
raise self.error('XPST0017') from None
return self
###
# Constructors for binary XSD types
@constructor('base64Binary')
def cast(self, value):
try:
return Base64Binary(value)
except ValueError as err:
raise self.error('FORG0001', err) from None
except TypeError as err:
raise self.error('XPTY0004', err) from None
@constructor('hexBinary')
def cast(self, value):
try:
return HexBinary(value)
except ValueError as err:
raise self.error('FORG0001', err) from None
except TypeError as err:
raise self.error('XPTY0004', err) from None
@method('base64Binary')
@method('hexBinary')
def evaluate(self, context=None):
arg = self.data_value(self.get_argument(context))
if arg is None:
return []
try:
return self.cast(arg)
except ElementPathError as err:
err.token = self
raise
@constructor('NOTATION')
def cast(self, value):
raise NotImplementedError("No value is castable to xs:NOTATION")
@method('NOTATION')
def nud(self):
self.parser.advance('(')
if self.parser.next_token.symbol == ')':
raise self.error('XPST0017', 'expected exactly one argument')
self[0:] = self.parser.expression(5),
if self.parser.next_token.symbol != ')':
raise self.error('XPST0017', 'expected exactly one argument')
self.parser.advance()
self.value = None
raise self.error('XPST0017', "no constructor function exists for xs:NOTATION")
###
# Multi role-tokens constructors (function or constructor)
#
# Case 1: In XPath 2.0 the 'boolean' keyword is used both for boolean() function and
# for boolean() constructor.
unregister('boolean')
@constructor('boolean', bp=90, label=('function', 'constructor function'))
def cast(self, value):
try:
return BooleanProxy(value)
except ValueError as err:
raise self.error('FORG0001', err) from None
except TypeError as err:
raise self.error('XPTY0004', err) from None
@method('boolean')
def nud(self):
self.parser.advance('(')
if self.parser.next_token.symbol == ')':
raise self.wrong_nargs('Too few arguments: expected at least 1 argument')
self[0:] = self.parser.expression(5),
if self.parser.next_token.symbol == ',':
raise self.wrong_nargs('Too many arguments: expected at most 1 argument')
self.parser.advance(')')
self.value = None
return self
@method('boolean')
def evaluate(self, context=None):
if self.label == 'function':
return self.boolean_value([x for x in self[0].select(context)])
# xs:boolean constructor
arg = self.data_value(self.get_argument(context))
if arg is None:
return []
try:
return self.cast(arg)
except ElementPathError as err:
err.token = self
raise
###
# Case 2: In XPath 2.0 the 'string' keyword is used both for fn:string() and xs:string().
unregister('string')
register('string', lbp=90, rbp=90, label=('function', 'constructor function'), # pragma: no cover
pattern=r'\bstring(?=\s*\(|\s*\(\:.*\:\)\()', cast=XPathToken.string_value)
@method('string')
def nud(self):
try:
self.parser.advance('(')
if self.label != 'function' or self.parser.next_token.symbol != ')':
self[0:] = self.parser.expression(5),
self.parser.advance(')')
except ElementPathSyntaxError as err:
err.code = self.error_code('XPST0017')
raise
self.value = None
return self
@method('string')
def evaluate(self, context=None):
if self.label == 'function':
if not self:
if context is None:
raise self.missing_context()
return self.string_value(context.item)
return self.string_value(self.get_argument(context))
else:
item = self.get_argument(context)
return [] if item is None else self.string_value(item)
# Case 3 and 4: In XPath 2.0 the XSD 'QName' and 'dateTime' types have special
# constructor functions so the 'QName' keyword is used both for fn:QName() and
# xs:QName(), the same for 'dateTime' keyword.
#
# In those cases the label at parse time is set by the nud method, in dependence
# of the number of args.
#
@constructor('QName', bp=90, label=('function', 'constructor function'))
def cast(self, value):
if isinstance(value, QName):
return value
elif isinstance(value, UntypedAtomic):
return self.cast_to_qname(value.value)
elif isinstance(value, str):
return self.cast_to_qname(value)
else:
raise self.error('XPTY0004', 'the argument has an invalid type %r' % type(value))
@constructor('dateTime', bp=90, label=('function', 'constructor function'))
def cast(self, value):
cls = DateTime if self.parser.xsd_version == '1.1' else DateTime10
if isinstance(value, cls):
return value
try:
if isinstance(value, UntypedAtomic):
return cls.fromstring(value.value)
elif isinstance(value, Date10):
return cls(value.year, value.month, value.day, tzinfo=value.tzinfo)
return cls.fromstring(value)
except OverflowError as err:
raise self.error('FODT0001', err) from None
except ValueError as err:
raise self.error('FORG0001', err) from None
@method('QName')
@method('dateTime')
def nud(self):
try:
self.parser.advance('(')
self[0:] = self.parser.expression(5),
if self.parser.next_token.symbol == ',':
if self.label != 'function':
raise self.error('XPST0017', 'unexpected 2nd argument')
self.label = 'function'
self.parser.advance(',')
self[1:] = self.parser.expression(5),
elif self.label != 'constructor function' or self.namespace != XSD_NAMESPACE:
raise self.error('XPST0017', '2nd argument missing')
else:
self.label = 'constructor function'
self.parser.advance(')')
except SyntaxError:
raise self.error('XPST0017') from None
self.value = None
return self
@method('QName')
def evaluate(self, context=None):
if self.label == 'constructor function':
arg = self.data_value(self.get_argument(context))
return [] if arg is None else self.cast(arg)
else:
uri = self.get_argument(context)
qname = self.get_argument(context, index=1)
try:
return QName(uri, qname)
except TypeError as err:
raise self.error('XPTY0004', err)
except ValueError as err:
raise self.error('FOCA0002', err)
@method('dateTime')
def evaluate(self, context=None):
if self.label == 'constructor function':
arg = self.data_value(self.get_argument(context))
if arg is None:
return []
try:
return self.cast(arg)
except ValueError as err:
raise self.error('FORG0001', err) from None
except TypeError as err:
raise self.error('FORG0006', err) from None
else:
dt = self.get_argument(context, cls=Date10)
tm = self.get_argument(context, 1, cls=Time)
if dt is None or tm is None:
return []
elif dt.tzinfo == tm.tzinfo or tm.tzinfo is None:
tzinfo = dt.tzinfo
elif dt.tzinfo is None:
tzinfo = tm.tzinfo
else:
raise self.error('FORG0008')
if self.parser.xsd_version == '1.1':
return DateTime(dt.year, dt.month, dt.day, tm.hour, tm.minute,
tm.second, tm.microsecond, tzinfo)
return DateTime10(dt.year, dt.month, dt.day, tm.hour, tm.minute,
tm.second, tm.microsecond, tzinfo)
@constructor('untypedAtomic')
def cast(self, value):
return UntypedAtomic(value)
@method('untypedAtomic')
def evaluate(self, context=None):
arg = self.data_value(self.get_argument(context))
if arg is None:
return []
elif isinstance(arg, UntypedAtomic):
return arg
else:
return self.cast(arg)
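# Hedged usage sketch (assumes the public XPath2Parser API exposed by elementpath;
# not part of this module):
#
#     parser = XPath2Parser()
#     assert parser.parse('xs:integer("42")').evaluate() == 42
#     assert str(parser.parse('xs:date("2021-01-01")').evaluate()) == '2021-01-01'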
| 30.5189
| 98
| 0.656627
|
1a4b1fa2a88c6d3f4f11f66beb64bdb562d95994
| 18,819
|
py
|
Python
|
c2rust-refactor/doc/literate/diff.py
|
marcograss/c2rust
|
b432c3836d8cf3cb5866c0d98d93c06c900abafb
|
[
"BSD-3-Clause"
] | 2,337
|
2018-04-20T00:34:36.000Z
|
2022-03-30T21:08:36.000Z
|
c2rust-refactor/doc/literate/diff.py
|
marcograss/c2rust
|
b432c3836d8cf3cb5866c0d98d93c06c900abafb
|
[
"BSD-3-Clause"
] | 324
|
2018-06-20T04:14:12.000Z
|
2022-03-31T16:45:17.000Z
|
c2rust-refactor/doc/literate/diff.py
|
marcograss/c2rust
|
b432c3836d8cf3cb5866c0d98d93c06c900abafb
|
[
"BSD-3-Clause"
] | 146
|
2018-06-22T20:16:11.000Z
|
2022-03-16T18:04:30.000Z
|
from collections import namedtuple, deque
import difflib
import pygments.formatters
import pygments.lexers
import pygments.token
import re
from typing import List, Tuple, Optional, Iterator, Iterable
from literate.annot import Span, Annot, SpanMerger, \
cut_annot, merge_annot, sub_annot, fill_annot
from literate.file import File, Line, Diff, DiffBlock, Hunk, OutputLine
from literate.points import Point, cut_annot_at_points
# Regex for finding runs of identical non-space characters
RUN_RE = re.compile(r'([^ \n])\1*')
def parse_intra_annot(s: str) -> Annot[str]:
'''Parse an `ndiff` detail (`?`) line and convert it to an annotation
indicating intraline edits in the text of the preceding line. The
annotation labels inserted, deleted, and changed characters with `'ins'`,
`'del'`, and `'chg'` respectively.'''
spans = []
for m in RUN_RE.finditer(s):
c = m.group(1)
# Map the symbols used by `ndiff` to something more meaningful.
label = {
'+': 'ins',
'-': 'del',
'^': 'chg',
}[c]
spans.append(Span(m.start(), m.end(), label))
return spans
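# Hedged example (illustrative only): for the detail string '    ^^  ++' (the part of an
# ndiff '?' line after the two-character prefix), parse_intra_annot returns
# [Span(4, 6, 'chg'), Span(8, 10, 'ins')].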
DiffLine = Tuple[bool, bool, Optional[Annot[str]], Optional[Annot[str]]]
def diff_lines(old_lines: List[str], new_lines: List[str]) -> Iterator[DiffLine]:
'''Compute a diff of `old` and `new`, and yield a sequence of (old_line,
new_line, old_detail, new_detail). Each `line` is a boolean indicating
whether there is a line present in the old/new file, and each `detail` is
an intraline edit annotation (see `parse_intra_annot`).
Possible outputs:
- (True, True, None, None): Unmodified/context line
- (True, False, None, None): Deletion of a line from the old text.
- (False, True, None, None): Insertion of a line in the new text.
- (True, True, [...], [...]): Changed line, modified via the indicated
intraline insertions and deletions.
'''
# We buffer up to two previous result tuples. This lets us handle
# intraline change markers, and in particular, the nasty '-+?' case, where
# we don't find out that we're in an intraline change ('?') until we've
# seen both the '-' and '+' lines.
buf = deque()
for dl in difflib.ndiff(old_lines, new_lines):
prefix = dl[0:2]
        if prefix == '  ':
# Context line. Flush the whole buffer.
while buf:
yield buf.popleft()
yield (True, True, None, None)
elif prefix == '- ':
while buf:
yield buf.popleft()
buf.append((True, False, None, None))
elif prefix == '+ ':
# Try to fold into a previous intraline edit quad, if one exists.
if len(buf) > 0:
old_line, new_line, old_detail, new_detail = buf[-1]
if not new_line and old_detail is not None:
# Previously saw a '-' and a '?'. Fold in this '+'.
assert not new_line
buf[-1] = (old_line, True, old_detail, None)
continue
# If there's no old_detail ('?'), then we aren't in an
# intraline edit. If there's a new_line, then the intraline
# edit is already finished. In either case, we want to do the
# default action of just adding the '+' on its own.
while len(buf) > 2:
yield buf.popleft()
buf.append((False, True, None, None))
elif prefix == '? ':
detail = parse_intra_annot(dl[2:])
# Add this detail to the previous buffered line. We may also need
# to merge a pair of previous '-' and '+' lines, if we didn't
# previously know that they were part of an intraline change quad.
assert len(buf) > 0
old_line, new_line, old_detail, new_detail = buf.pop()
if new_line:
if old_line:
# The previous line is a rollup of a '-' and a '+'.
# (Context lines are not included in the buffer.)
assert old_detail is not None
buf.append((True, True, old_detail, detail))
else:
# The previous line is just a '+'. There must be a '-'
# before it, so roll up both of those together with the new
# detail.
old_line2, new_line2, old_detail2, new_detail2 = buf.pop()
assert old_line2
assert not new_line2
assert old_detail2 is None
assert new_detail2 is None
buf.append((True, True, None, detail))
else:
# The previous line is just a '-'. Roll this detail into it.
# Next we should see a '+', which will get rolled in, so this
# bogus (True, False, [...], None) entry will never be yielded.
buf.append((True, False, detail, None))
# Flush any remaining buffered entries.
while buf:
yield buf.popleft()
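# Hedged example (illustrative only): diff_lines(['x = 1\n'], ['x = 2\n']) yields a single
# changed-line tuple roughly of the form (True, True, [Span(4, 5, 'chg')], [Span(4, 5, 'chg')]),
# while diff_lines(['a\n'], ['a\n']) yields just the context tuple (True, True, None, None).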
def adjust_closing_brace(old_lines: List[str], new_lines: List[str],
diff: Iterable[DiffLine]) -> Iterator[DiffLine]:
'''Adjust the output of `diff_lines` to turn this:
fn f() {
...
+}
+fn g() {
+ ...
}
into this:
fn f() {
...
}
+fn g() {
+ ...
+}
'''
# Specifically: at the end of every run of insertions or deletions, if the
# first context line after the run consists of solely a '}' character (with
# whitespace), then we scan from the top of the run for an identical
# inserted line. If found, we change the earlier line from an insertion to
# context, and change the context line to an insertion.
mode = None
buf = []
buf_start = None
old_i = -1
new_i = -1
for dl in diff:
old_line, new_line, old_detail, new_detail = dl
if old_line and not new_line:
new_mode = 'del'
old_i += 1
elif not old_line and new_line:
new_mode = 'ins'
new_i += 1
else:
new_mode = None
old_i += 1
new_i += 1
if new_mode != mode:
if new_mode is None:
# Switching from ins or del mode to context mode. If the
# current line is a '}', we try to do the block adjustment.
check_lines = new_lines if mode == 'ins' else old_lines
i = new_i if mode == 'ins' else old_i
if check_lines[i].strip() == '}':
# Yield everything from buf, while scanning for an earlier
# matching line.
found_dl = None
for j, buf_dl in enumerate(buf):
if check_lines[buf_start + j] == check_lines[i]:
found_dl = buf_dl
yield (True, True, None, None)
# We're stopping early, so yield the remaining
# elements.
yield from buf[j + 1:]
break
else:
yield buf_dl
if found_dl:
yield found_dl
else:
yield (True, True, None, None)
else:
yield from buf
yield dl
mode = None
buf = []
buf_start = None
# We already yielded the correct info, so don't fall through to
# the default logic.
continue
else:
if mode is not None:
yield from buf
mode = new_mode
buf = []
buf_start = new_i if mode == 'ins' else old_i
if mode is None:
yield dl
else:
buf.append(dl)
# There are no more lines, so there can't be a `}` line following `buf` to
# trigger our heuristic. That means we can blindly dump everything in
# `buf`.
yield from buf
WORD_BREAK_RE = re.compile(r'\b')
def token_annot(line: Line) -> Annot[None]:
'''Annotate the tokens of `l`. Each token (and some sub-token strings)
gets a separate span. This is a helper function for
`calc_tokenized_intra`.'''
annot = fill_annot(line.highlight, len(line.text))
# Special cases: treat word boundaries inside strings and comments as token
# breaks. This essentially gives us the behavior of `git`'s `--word-diff`
# feature.
extra_cuts = []
for span in annot:
# We don't handle String subtypes (only String itself) because we don't
# want to break up `\x00` and similar escapes.
if span.label == pygments.token.String or \
span.label in pygments.token.Comment:
text = line.text[span.start : span.end]
for m in WORD_BREAK_RE.finditer(text):
extra_cuts.append(Point(span.start + m.start()))
return cut_annot_at_points(annot, extra_cuts)
def calc_tokenized_intra(l1: Line, l2: Line) -> Tuple[Annot[str], Annot[str]]:
'''Calculate token-based intraline edit annotations for `l1` and `l2`.
`difflib.ndiff` does a pretty good job of matching up similar lines, but it
computes intraline changes character-by-character, which often produces bad
results. For example, it might turn `unsafe` into `malloc` by replacing
`uns` -> `m` and `fe` -> `lloc`, instead of doing `unsafe` -> `malloc` in
one go.
Here we calculate some intraline edits that are easier to read, using the
tokenization provided by `pygments` to align edit boundaries to the
boundaries of source tokens.'''
annot1 = token_annot(l1)
annot2 = token_annot(l2)
tokens1 = [l1.text[s.start : s.end] for s in annot1]
tokens2 = [l2.text[s.start : s.end] for s in annot2]
intra1 = []
intra2 = []
sm = difflib.SequenceMatcher(a=tokens1, b=tokens2)
for tag, i1, i2, j1, j2 in sm.get_opcodes():
if tag == 'equal':
continue
while i1 < i2 and tokens1[i1].isspace():
i1 += 1
while i2 > i1 and tokens1[i2 - 1].isspace():
i2 -= 1
while j1 < j2 and tokens2[j1].isspace():
j1 += 1
while j2 > j1 and tokens2[j2 - 1].isspace():
j2 -= 1
if i1 != i2:
intra1.append(Span(annot1[i1].start, annot1[i2 - 1].end,
'chg' if tag == 'replace' else 'del'))
if j1 != j2:
intra2.append(Span(annot2[j1].start, annot2[j2 - 1].end,
'chg' if tag == 'replace' else 'ins'))
return (intra1, intra2)
def diff_files(f1: File, f2: File) -> Diff:
'''Diff two files, returning a `Diff` between them and also setting the
`intra` annotation on the lines of both files.'''
dls = diff_lines(f1.line_text, f2.line_text)
dls = adjust_closing_brace(f1.line_text, f2.line_text, dls)
# Accumulator for diff blocks.
diff_blocks = []
# Start and current position of the current block.
old_start = 0
old_cur = 0
new_start = 0
new_cur = 0
# Is the current block a change? (If not, it's context.)
changed = True
def flush():
nonlocal old_start, new_start
# This check means we can blindly call `flush()` without worrying about
# cluttering the output with zero-length blocks.
if old_cur - old_start > 0 or new_cur - new_start > 0:
diff_blocks.append(DiffBlock(changed,
Span(old_start, old_cur),
Span(new_start, new_cur)))
old_start = old_cur
new_start = new_cur
for old_line, new_line, old_detail, new_detail in dls:
next_changed = not (old_line and new_line and
old_detail is None and new_detail is None)
has_intra = old_detail is not None or new_detail is not None
if next_changed != changed:
flush()
if has_intra:
# Emit each `intra` line as its own block, to ensure they're
# aligned in the output.
flush()
intra1, intra2 = calc_tokenized_intra(
f1.lines[old_cur], f2.lines[new_cur])
if len(intra1) > 0:
f1.lines[old_cur].set_intra(intra1)
if len(intra2) > 0:
f2.lines[new_cur].set_intra(intra2)
flush()
if old_line:
old_cur += 1
if new_line:
new_cur += 1
changed = next_changed
flush()
return Diff(f1, f2, diff_blocks)
def context_annot(blocks: List[DiffBlock], new: bool, context_lines: int) -> Annot[None]:
'''Generate an annotation of the old or new file's lines, indicating which
lines are changes or context for changes (within `context_lines`
distance).'''
result = SpanMerger()
for (changed, old_span, new_span) in blocks:
if not changed:
continue
span = new_span if new else old_span
result.add(Span(
span.start - context_lines,
span.end + context_lines))
return result.finish()
def split_hunks(blocks: List[DiffBlock]) -> List[Hunk]:
'''Split the output of `filter_unchanged` into hunks, anywhere there's a
gap in the old or new line numbers.'''
last_old = 0
last_new = 0
cur = []
hunks = []
def flush():
nonlocal cur
if len(cur) > 0:
hunks.append(Hunk(cur))
cur = []
for b in blocks:
changed, old_span, new_span = b
if old_span.start != last_old or new_span.start != last_new:
flush()
cur.append(b)
last_old = old_span.end
last_new = new_span.end
flush()
return hunks
def annotate_blocks(blocks: List[DiffBlock]) \
-> Tuple[Annot[Span[None]], Annot[Span[None]]]:
'''Return annotations on the old and new files, labeling each line with the
block that contains it.'''
old = []
new = []
for b in blocks:
old.append(Span(b.old_span.start, b.old_span.end, b))
new.append(Span(b.new_span.start, b.new_span.end, b))
return old, new
def build_diff_hunks(d: Diff, context_diff: bool=True):
'''Build a list of output hunks, and assign it to `d.hunks`.
If `d.old_file` or `d.new_file` has a `keep_mark_lines` annotation, all
annotated lines will be kept as additional context.'''
# Find the set of lines each file wants to keep.
def calc_file_keep(f, is_new):
if context_diff:
keep = context_annot(d.blocks, is_new, 5)
if f.keep_mark_lines is not None:
keep = merge_annot(keep, f.keep_mark_lines)
else:
if len(f.line_annot) > 0:
keep = [Span(0, f.line_annot[-1].end)]
else:
keep = []
if f.drop_irrelevant_lines is not None:
keep = sub_annot(keep, f.drop_irrelevant_lines)
return keep
keep_old = calc_file_keep(d.old_file, False)
keep_new = calc_file_keep(d.new_file, True)
# In unchanged blocks, add each file's keep lines to the other file's set.
# This works because unchanged blocks have the same number of lines on each
# side.
old_blocks, new_blocks = annotate_blocks(d.blocks)
extra_keep_old = []
extra_keep_new = []
for block_span, keep_spans in cut_annot(keep_old, old_blocks):
if block_span.label.changed:
continue
base = block_span.label.new_span.start
extra_keep_new.extend(s + base for s in keep_spans)
for block_span, keep_spans in cut_annot(keep_new, new_blocks):
if block_span.label.changed:
continue
base = block_span.label.old_span.start
extra_keep_old.extend(s + base for s in keep_spans)
keep_old = merge_annot(keep_old, extra_keep_old)
keep_new = merge_annot(keep_new, extra_keep_new)
# For changed blocks, we can't match up lines from different files, so we
# just hope for the best. (Normally all changed lines are kept, so there's
# no need to match - the only exception is when the `irrelevant_*_regex`
# options are set.)
# Build the filtered list of blocks. There can be different numbers of
# blocks on the old and new sides. We use a fairly naive strategy to match
# them up, but it generally seems to work okay.
blocks = []
for (old_block, old_keeps), (new_block, new_keeps) in zip(
cut_annot(keep_old, old_blocks),
cut_annot(keep_new, new_blocks)):
# `old_blocks` and `new_blocks` have corresponding entries (based on
# the same block) at corresponding positions.
assert old_block.label is new_block.label
block = old_block.label
# Match up `old_keeps` and `new_keeps` entries by position. In most
# cases, the two lists will have the same length.
for old_keep, new_keep in zip(old_keeps, new_keeps):
blocks.append(DiffBlock(block.changed,
old_keep + block.old_span.start,
new_keep + block.new_span.start))
for old_keep in old_keeps[len(new_keeps):]:
blocks.append(DiffBlock(block.changed,
old_keep + block.old_span.start,
Span(block.new_span.end, block.new_span.end)))
for new_keep in new_keeps[len(old_keeps):]:
blocks.append(DiffBlock(block.changed,
Span(block.old_span.end, block.old_span.end),
new_keep + block.new_span.start))
# Split the new blocks into hunks, and save them in the `Diff`.
hunks = split_hunks(blocks)
d.set_hunks(hunks)
def hunk_output_lines(h: Hunk) -> List[OutputLine]:
result = []
for changed, old_span, new_span in h.blocks:
common_lines = min(len(old_span), len(new_span))
for i in range(0, common_lines):
result.append(OutputLine(changed, old_span.start + i, new_span.start + i))
for i in range(common_lines, len(old_span)):
result.append(OutputLine(changed, old_span.start + i, None))
for i in range(common_lines, len(new_span)):
result.append(OutputLine(changed, None, new_span.start + i))
return result
def build_output_lines(d: Diff):
'''Build a list of two-column output lines for each hunk of `d`, and set
the `Hunk.output_lines` fields.'''
for h in d.hunks:
output_lines = hunk_output_lines(h)
h.set_output_lines(output_lines)
| 37.638
| 89
| 0.580955
|
2e81de17de3944a25d618607675c46a2ec850257
| 971
|
py
|
Python
|
sunless_web/migrations/0022_auto_20180526_1619.py
|
bluedisk/SunlessSeaKo
|
1e6d498ff7e735b8d272dd0bca6c17741a2faedb
|
[
"MIT"
] | 2
|
2019-02-19T11:53:29.000Z
|
2021-02-18T23:57:20.000Z
|
sunless_web/migrations/0022_auto_20180526_1619.py
|
bluedisk/SunlessSeaKo
|
1e6d498ff7e735b8d272dd0bca6c17741a2faedb
|
[
"MIT"
] | 4
|
2018-05-26T13:18:27.000Z
|
2018-05-26T13:19:50.000Z
|
sunless_web/migrations/0022_auto_20180526_1619.py
|
bluedisk/SunlessSeaKo
|
1e6d498ff7e735b8d272dd0bca6c17741a2faedb
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.5 on 2018-05-26 07:19
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sunless_web', '0021_auto_20180526_1607'),
]
operations = [
migrations.RenameModel(
old_name='EntityFile',
new_name='EntityCate',
),
migrations.RenameField(
model_name='entitycate',
old_name='filename',
new_name='name',
),
migrations.RemoveField(
model_name='entity',
name='file',
),
migrations.AddField(
model_name='entity',
name='cate',
field=models.ForeignKey(default='events', on_delete=django.db.models.deletion.CASCADE,
related_name='entities', to='sunless_web.EntityCate', verbose_name='소속 파일'),
preserve_default=False,
),
]
| 28.558824
| 112
| 0.566426
|
010a03d5dde055ea5698c4114064a6fa6aa0f3a0
| 24,708
|
py
|
Python
|
openbb_terminal/cryptocurrency/crypto_controller.py
|
tehcoderer/GamestonkTerminal
|
54a1b6f545a0016c576e9e00eef5c003d229dacf
|
[
"MIT"
] | null | null | null |
openbb_terminal/cryptocurrency/crypto_controller.py
|
tehcoderer/GamestonkTerminal
|
54a1b6f545a0016c576e9e00eef5c003d229dacf
|
[
"MIT"
] | null | null | null |
openbb_terminal/cryptocurrency/crypto_controller.py
|
tehcoderer/GamestonkTerminal
|
54a1b6f545a0016c576e9e00eef5c003d229dacf
|
[
"MIT"
] | null | null | null |
"""Cryptocurrency Context Controller"""
__docformat__ = "numpy"
# pylint: disable=R0904, C0302, R1710, W0622, C0201, C0301
import argparse
import logging
import os
from typing import List
from binance.client import Client
from prompt_toolkit.completion import NestedCompleter
import openbb_terminal.config_terminal as cfg
from openbb_terminal import feature_flags as obbff
from openbb_terminal.cryptocurrency.cryptocurrency_helpers import (
FIND_KEYS,
display_all_coins,
find,
plot_chart,
)
from openbb_terminal.cryptocurrency.due_diligence import (
binance_model,
binance_view,
coinbase_model,
coinpaprika_view,
finbrain_crypto_view,
pycoingecko_model,
pycoingecko_view,
)
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
EXPORT_BOTH_RAW_DATA_AND_FIGURES,
EXPORT_ONLY_RAW_DATA_ALLOWED,
check_positive,
parse_known_args_and_warn,
)
from openbb_terminal.menu import session
from openbb_terminal.parent_classes import CryptoBaseController
from openbb_terminal.rich_config import console
# pylint: disable=import-outside-toplevel
logger = logging.getLogger(__name__)
CRYPTO_SOURCES = {
"bin": "Binance",
"cg": "CoinGecko",
"cp": "CoinPaprika",
"cb": "Coinbase",
"yf": "YahooFinance",
}
class CryptoController(CryptoBaseController):
"""Crypto Controller"""
CHOICES_COMMANDS = [
"headlines",
"chart",
"load",
"find",
"prt",
"resources",
]
CHOICES_MENUS = [
"ta",
"dd",
"ov",
"disc",
"onchain",
"defi",
"tools",
"nft",
"pred",
"qa",
]
DD_VIEWS_MAPPING = {
"cg": pycoingecko_view,
"cp": coinpaprika_view,
"bin": binance_view,
}
PATH = "/crypto/"
FILE_PATH = os.path.join(os.path.dirname(__file__), "README.md")
def __init__(self, queue: List[str] = None):
"""Constructor"""
super().__init__(queue)
if session and obbff.USE_PROMPT_TOOLKIT:
choices: dict = {c: {} for c in self.controller_choices}
choices["load"]["--source"] = {c: {} for c in CRYPTO_SOURCES.keys()}
choices["find"]["--source"] = {c: {} for c in CRYPTO_SOURCES.keys()}
choices["find"]["-k"] = {c: {} for c in FIND_KEYS}
choices["headlines"] = {c: {} for c in finbrain_crypto_view.COINS}
# choices["prt"]["--vs"] = {c: {} for c in coingecko_coin_ids} # list is huge. makes typing buggy
choices["support"] = self.SUPPORT_CHOICES
self.completer = NestedCompleter.from_nested_dict(choices)
def print_help(self):
"""Print help"""
source_txt = CRYPTO_SOURCES.get(self.source, "?") if self.source != "" else ""
has_ticker_start = "" if self.coin else "[unvl]"
has_ticker_end = "" if self.coin else "[/unvl]"
help_text = f"""[cmds]
load load a specific cryptocurrency for analysis
find find coins[/cmds]
[param]Coin: [/param]{self.coin}
[param]Source: [/param]{source_txt}
[cmds]
headlines crypto sentiment from 15+ major news headlines [src][Finbrain][/src]{has_ticker_start}
chart view a candle chart for a specific cryptocurrency
prt potential returns tool - check how much upside if ETH reaches BTC market cap{has_ticker_end}
[/cmds][menu]
> disc discover trending cryptocurrencies, e.g.: top gainers, losers, top sentiment
> ov overview of the cryptocurrencies, e.g.: market cap, DeFi, latest news, top exchanges, stables
> onchain information on different blockchains, e.g.: eth gas fees, whale alerts, DEXes info
> defi decentralized finance information, e.g.: dpi, llama, tvl, lending, borrow, funding
> tools explore different tools e.g.: apytoapr, il
> nft non-fungible tokens, e.g.: today drops{has_ticker_start}
> dd due-diligence for loaded coin, e.g.: coin information, social media, market stats
> ta technical analysis for loaded coin, e.g.: ema, macd, rsi, adx, bbands, obv
> pred prediction techniques, e.g.: regression, arima, rnn, lstm, conv1d, monte carlo
> qa quantitative analysis e.g.: decompose, cusum, residuals analysis[/menu]
{has_ticker_end}
"""
console.print(text=help_text, menu="Cryptocurrency")
@log_start_end(log=logger)
def call_prt(self, other_args):
"""Process prt command"""
if self.coin:
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="prt",
description="Potential Returns Tool"
"Tool to check returns if loaded coin reaches provided price or other crypto market cap"
"Uses CoinGecko to grab coin data (price and market cap).",
)
parser.add_argument(
"--vs",
help="Coin to compare with",
dest="vs",
type=str,
required="-h" not in other_args,
)
parser.add_argument(
"-p",
"--price",
help="Desired price",
dest="price",
type=int,
default=None,
)
parser.add_argument(
"-t",
"--top",
help="Compare with top N coins",
dest="top",
type=int,
default=None,
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "--vs")
ns_parser = parse_known_args_and_warn(
parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
if ns_parser.vs:
coin_found = pycoingecko_model.check_coin(ns_parser.vs)
if not coin_found:
console.print(
f"VS Coin '{ns_parser.vs}' not found in CoinGecko\n"
)
return
pycoingecko_view.display_coin_potential_returns(
self.coin_map_df["CoinGecko"],
coin_found,
ns_parser.top,
ns_parser.price,
)
else:
console.print(
"No coin selected. Use 'load' to load the coin you want to look at.\n"
)
@log_start_end(log=logger)
def call_chart(self, other_args):
"""Process chart command"""
if self.coin:
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="chart",
description="""Display chart for loaded coin. You can specify currency vs which you want
to show chart and also number of days to get data for.""",
)
if self.source == "cp":
parser.add_argument(
"--vs",
default="usd",
dest="vs",
help="Currency to display vs coin",
choices=["usd", "btc", "BTC", "USD"],
type=str,
)
parser.add_argument(
"-d",
"--days",
default=365,
dest="days",
help="Number of days to get data for",
type=check_positive,
)
if self.source == "cg":
parser.add_argument(
"--vs", default="usd", dest="vs", help="Currency to display vs coin"
)
parser.add_argument(
"-d",
"--days",
default=30,
dest="days",
help="Number of days to get data for",
)
if self.source == "bin":
client = Client(cfg.API_BINANCE_KEY, cfg.API_BINANCE_SECRET)
interval_map = {
"1day": client.KLINE_INTERVAL_1DAY,
"3day": client.KLINE_INTERVAL_3DAY,
"1hour": client.KLINE_INTERVAL_1HOUR,
"2hour": client.KLINE_INTERVAL_2HOUR,
"4hour": client.KLINE_INTERVAL_4HOUR,
"6hour": client.KLINE_INTERVAL_6HOUR,
"8hour": client.KLINE_INTERVAL_8HOUR,
"12hour": client.KLINE_INTERVAL_12HOUR,
"1week": client.KLINE_INTERVAL_1WEEK,
"1min": client.KLINE_INTERVAL_1MINUTE,
"3min": client.KLINE_INTERVAL_3MINUTE,
"5min": client.KLINE_INTERVAL_5MINUTE,
"15min": client.KLINE_INTERVAL_15MINUTE,
"30min": client.KLINE_INTERVAL_30MINUTE,
"1month": client.KLINE_INTERVAL_1MONTH,
}
_, quotes = binance_model.show_available_pairs_for_given_symbol(
self.coin
)
parser.add_argument(
"--vs",
help="Quote currency (what to view coin vs)",
dest="vs",
type=str,
default="USDT",
choices=quotes,
)
parser.add_argument(
"-i",
"--interval",
help="Interval to get data",
choices=list(interval_map.keys()),
dest="interval",
default="1day",
type=str,
)
parser.add_argument(
"-l",
"--limit",
dest="limit",
default=100,
help="Number to get",
type=check_positive,
)
if self.source == "cb":
interval_map = {
"1min": 60,
"5min": 300,
"15min": 900,
"1hour": 3600,
"6hour": 21600,
"24hour": 86400,
"1day": 86400,
}
_, quotes = coinbase_model.show_available_pairs_for_given_symbol(
self.coin
)
if len(quotes) == 0:
console.print(
f"Couldn't find any quoted coins for provided symbol {self.coin}"
)
return
parser.add_argument(
"--vs",
help="Quote currency (what to view coin vs)",
dest="vs",
type=str,
default="USDT" if "USDT" in quotes else quotes[0],
choices=quotes,
)
parser.add_argument(
"-i",
"--interval",
help="Interval to get data",
choices=list(interval_map.keys()),
dest="interval",
default="1day",
type=str,
)
parser.add_argument(
"-l",
"--limit",
dest="limit",
default=100,
help="Number to get",
type=check_positive,
)
if self.source == "yf":
interval_map = {
"1min": "1m",
"2min": "2m",
"5min": "5m",
"15min": "15m",
"30min": "30m",
"60min": "60m",
"90min": "90m",
"1hour": "1h",
"1day": "1d",
"5day": "5d",
"1week": "1wk",
"1month": "1mo",
"3month": "3mo",
}
parser.add_argument(
"--vs",
default="USD",
dest="vs",
help="Currency to display vs coin",
choices=[
"CAD",
"CNY",
"ETH",
"EUR",
"GBP",
"INR",
"JPY",
"KRW",
"RUB",
"USD",
"AUD",
"BTC",
],
type=str,
)
parser.add_argument(
"-i",
"--interval",
help="Interval to get data",
choices=list(interval_map.keys()),
dest="interval",
default="1day",
type=str,
)
parser.add_argument(
"-l",
"--limit",
dest="limit",
default=100,
help="Number to get",
type=check_positive,
)
parser.add_argument(
"-d",
"--days",
default=30,
dest="days",
help="Number of days to get data for",
)
ns_parser = parse_known_args_and_warn(
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
if self.source in ["bin", "cb"]:
limit = ns_parser.limit
interval = ns_parser.interval
days = 0
elif self.source == "yf":
limit = ns_parser.limit
interval = interval_map[ns_parser.interval]
days = ns_parser.days
else:
limit = 0
interval = "1day"
days = ns_parser.days
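# Normalise limit/interval/days across the different sources before delegating to the shared chart helper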
plot_chart(
coin_map_df=self.coin_map_df,
limit=limit,
interval=interval,
days=days,
currency=ns_parser.vs,
source=self.source,
)
@log_start_end(log=logger)
def call_ta(self, _):
"""Process ta command"""
from openbb_terminal.cryptocurrency.technical_analysis.ta_controller import (
TechnicalAnalysisController,
)
# TODO: Play with this to get correct usage
if self.coin:
if self.current_currency != "" and not self.current_df.empty:
self.queue = self.load_class(
TechnicalAnalysisController,
stock=self.current_df,
coin=self.symbol,
start=self.current_df.index[0],
interval="",
queue=self.queue,
)
else:
console.print("No coin selected. Use 'load' to load a coin.\n")
@log_start_end(log=logger)
def call_tools(self, _):
"""Process tools command"""
from openbb_terminal.cryptocurrency.tools.tools_controller import (
ToolsController,
)
self.queue = self.load_class(ToolsController, self.queue)
@log_start_end(log=logger)
def call_disc(self, _):
"""Process disc command"""
from openbb_terminal.cryptocurrency.discovery.discovery_controller import (
DiscoveryController,
)
self.queue = self.load_class(DiscoveryController, self.queue)
@log_start_end(log=logger)
def call_ov(self, _):
"""Process ov command"""
from openbb_terminal.cryptocurrency.overview.overview_controller import (
OverviewController,
)
self.queue = self.load_class(OverviewController, self.queue)
@log_start_end(log=logger)
def call_defi(self, _):
"""Process defi command"""
from openbb_terminal.cryptocurrency.defi.defi_controller import DefiController
self.queue = self.load_class(DefiController, self.queue)
@log_start_end(log=logger)
def call_headlines(self, other_args):
"""Process sentiment command"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="headlines",
description="""Display sentiment analysis from FinBrain for chosen Cryptocurrencies""",
)
parser.add_argument(
"-c",
"--coin",
default="BTC",
type=str,
dest="coin",
help="Symbol of coin to load data for, ~100 symbols are available",
choices=finbrain_crypto_view.COINS,
)
if other_args and "-" not in other_args[0][0]:
other_args.insert(0, "-c")
ns_parser = parse_known_args_and_warn(
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
finbrain_crypto_view.display_crypto_sentiment_analysis(
coin=ns_parser.coin, export=ns_parser.export
)
@log_start_end(log=logger)
def call_dd(self, _):
"""Process dd command"""
if self.coin:
from openbb_terminal.cryptocurrency.due_diligence.dd_controller import (
DueDiligenceController,
)
self.queue = self.load_class(
DueDiligenceController,
self.coin,
self.source,
self.symbol,
self.coin_map_df,
queue=self.queue,
)
else:
console.print("No coin selected. Use 'load' to load a coin.\n")
@log_start_end(log=logger)
def call_qa(self, _):
"""Process pred command"""
if self.coin:
from openbb_terminal.cryptocurrency.quantitative_analysis import (
qa_controller,
)
if self.current_interval != "1day":
console.print("Only interval `1day` is possible for now.\n")
else:
self.queue = self.load_class(
qa_controller.QaController,
self.coin,
self.current_df,
self.queue,
)
@log_start_end(log=logger)
def call_pred(self, _):
"""Process pred command"""
if obbff.ENABLE_PREDICT:
if self.coin:
try:
from openbb_terminal.cryptocurrency.prediction_techniques import (
pred_controller,
)
if self.current_interval != "1day":
console.print("Only interval `1day` is possible for now.\n")
else:
self.queue = self.load_class(
pred_controller.PredictionTechniquesController,
self.coin,
self.current_df,
self.queue,
)
except ImportError:
logger.exception("Tensorflow not available")
console.print("[red]Run pip install tensorflow to continue[/red]\n")
else:
console.print(
"No coin selected. Use 'load' to load the coin you want to look at.\n"
)
else:
console.print(
"Predict is disabled. Check ENABLE_PREDICT flag on feature_flags.py",
"\n",
)
@log_start_end(log=logger)
def call_onchain(self, _):
"""Process onchain command"""
from openbb_terminal.cryptocurrency.onchain.onchain_controller import (
OnchainController,
)
self.queue = self.load_class(OnchainController, self.queue)
@log_start_end(log=logger)
def call_nft(self, _):
"""Process nft command"""
from openbb_terminal.cryptocurrency.nft.nft_controller import NFTController
self.queue = self.load_class(NFTController, self.queue)
# TODO: merge the two views that this command calls. (find + previously called coins)
@log_start_end(log=logger)
def call_find(self, other_args):
"""Process find command"""
parser = argparse.ArgumentParser(
prog="find",
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="""
Find similar coin by name, symbol, or id. If you don't remember exact name or id of the Coin at CoinGecko,
Binance, Coinbase or CoinPaprika you can use this command to display coins with similar name, symbol or id
to your search query.
Example of usage: coin name is something like "polka". So I can try: find -c polka -k name -l 25
It will search for coin that has similar name to polka and display top 25 matches.
-c, --coin stands for coin - you provide here your search query
-k, --key it's a searching key. You can search by symbol, id or name of coin
-l, --limit it displays top N number of records.
coins: Shows list of coins available on CoinGecko, CoinPaprika and Binance. If you provide name of
coin then in result you will see ids of coins with best match for all mentioned services.
If you provide ALL keyword in your search query, then all coins will be displayed. To move over coins you
can use pagination mechanism with skip, limit params. E.g. coins ALL --skip 100 --limit 30 then all coins
from 100 to 130 will be displayed. By default skip = 0, limit = 10.
If you don't provide the source of the data, everything will be displayed (CoinGecko, CoinPaprika, Binance).
If you want to search only in given source then use --source flag. E.g. if you want to find coin with name
uniswap on CoinPaprika then use: coins uniswap --source cp --limit 10
""",
)
parser.add_argument(
"-c",
"--coin",
help="Symbol Name or Id of Coin",
dest="coin",
required="-h" not in other_args,
type=str,
)
parser.add_argument(
"-k",
"--key",
dest="key",
help="Specify by which column you would like to search: symbol, name, id",
type=str,
choices=FIND_KEYS,
default="symbol",
)
parser.add_argument(
"-l",
"--limit",
default=10,
dest="limit",
help="Number of records to display",
type=check_positive,
)
parser.add_argument(
"--source",
dest="source",
choices=CRYPTO_SOURCES.keys(),
default="cg",
help="Source of data.",
type=str,
)
parser.add_argument(
"-s",
"--skip",
default=0,
dest="skip",
help="Skip n of records",
type=check_positive,
)
if other_args and not other_args[0][0] == "-":
other_args.insert(0, "-c")
ns_parser = parse_known_args_and_warn(
parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
)
# TODO: merge find + display_all_coins
if ns_parser:
find(
coin=ns_parser.coin,
source=ns_parser.source,
key=ns_parser.key,
top=ns_parser.limit,
export=ns_parser.export,
)
display_all_coins(
coin=ns_parser.coin,
source=ns_parser.source,
top=ns_parser.limit,
skip=ns_parser.skip,
show_all=bool("ALL" in other_args),
export=ns_parser.export,
)
| 35.449067
| 118
| 0.496236
|
732afe7bb5014bc6299202562ff7bd0adc01d8c7
| 2,414
|
py
|
Python
|
test/azure/version-tolerant/Expected/AcceptanceTests/CustomUrlPagingVersionTolerant/custombaseurlpagingversiontolerant/_configuration.py
|
changlong-liu/autorest.python
|
1f03e4c6a11934d385fab050dc44041f1e91e9ff
|
[
"MIT"
] | null | null | null |
test/azure/version-tolerant/Expected/AcceptanceTests/CustomUrlPagingVersionTolerant/custombaseurlpagingversiontolerant/_configuration.py
|
changlong-liu/autorest.python
|
1f03e4c6a11934d385fab050dc44041f1e91e9ff
|
[
"MIT"
] | null | null | null |
test/azure/version-tolerant/Expected/AcceptanceTests/CustomUrlPagingVersionTolerant/custombaseurlpagingversiontolerant/_configuration.py
|
changlong-liu/autorest.python
|
1f03e4c6a11934d385fab050dc44041f1e91e9ff
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from ._version import VERSION
class AutoRestParameterizedHostTestPagingClientConfiguration(
Configuration
): # pylint: disable=too-many-instance-attributes
"""Configuration for AutoRestParameterizedHostTestPagingClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param host: A string value that is used as a global part of the parameterized host.
:type host: str
"""
def __init__(self, host: str = "host", **kwargs: Any) -> None:
super(AutoRestParameterizedHostTestPagingClientConfiguration, self).__init__(**kwargs)
if host is None:
raise ValueError("Parameter 'host' must not be None.")
self.host = host
kwargs.setdefault("sdk_moniker", "autorestparameterizedhosttestpagingclient/{}".format(VERSION))
self._configure(**kwargs)
def _configure(
self, **kwargs # type: Any
):
# type: (...) -> None
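# Each pipeline policy can be supplied via kwargs; otherwise a default azure-core policy is created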
self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get("authentication_policy")
| 47.333333
| 108
| 0.686827
|
cc7b1f0eac367bcd0aba12f3acc8c89875634cf6
| 10,655
|
py
|
Python
|
madrona/kmlapp/views.py
|
movermeyer/madrona
|
fcdced0a03408754b88a3d88f416e04d500c32d4
|
[
"BSD-3-Clause"
] | 9
|
2015-03-09T11:04:21.000Z
|
2022-01-16T09:45:36.000Z
|
madrona/kmlapp/views.py
|
movermeyer/madrona
|
fcdced0a03408754b88a3d88f416e04d500c32d4
|
[
"BSD-3-Clause"
] | 1
|
2020-04-24T14:38:43.000Z
|
2020-04-24T14:38:43.000Z
|
madrona/kmlapp/views.py
|
movermeyer/madrona
|
fcdced0a03408754b88a3d88f416e04d500c32d4
|
[
"BSD-3-Clause"
] | 2
|
2016-12-06T15:31:35.000Z
|
2018-03-04T20:04:44.000Z
|
from django.shortcuts import render_to_response
from django.contrib.auth.models import *
from django.template.loader import get_template
from django.template import Context
from django.http import HttpResponse, Http404
from madrona.common import default_mimetypes as mimetypes
from madrona.common import utils
from django.http import Http404
from madrona.common.utils import load_session, get_logger
from django.contrib.gis.db import models
from django.core.exceptions import FieldError
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from madrona.features import get_feature_models, get_collection_models, get_feature_by_uid
from madrona.features.models import FeatureCollection, Feature
try:
set
except NameError:
from sets import Set as set
log = get_logger()
def get_styles(features, collections, links=True):
"""
Based on which features and collection are provided,
the styles for all features are determined here
"""
models = []
models.extend([f.kml_style for f in features])
models.extend([c.kml_style for c in collections])
if not links:
# Collections will be represented by Folders, not NetworkLinks
# So every feature in the entire tree will be in this KML Doc
# We need to recurse down to determine what's in there
for c in collections:
children = c.feature_set(recurse=True)
models.extend([child.kml_style for child in children])
unique_set = set(models)
return list(unique_set)
def get_user_data(user):
"""
Organizes user's Features and FeatureCollections.
Only returns objects owned by user, not shared
Returns only the features/collections at the top level,
nested child features will be handled later through
recursive calls to feature_set.
"""
toplevel_features = []
toplevel_collections = []
for fmodel in get_feature_models():
unattached = list(fmodel.objects.filter(user=user, content_type=None, object_id=None))
toplevel_features.extend(unattached)
for cmodel in get_collection_models():
collections_top = list(cmodel.objects.filter(user=user, content_type=None, object_id=None))
toplevel_collections.extend(collections_top)
return toplevel_features, toplevel_collections
def get_data_for_feature(user, uid):
try:
f = get_feature_by_uid(uid)
except:
return False, HttpResponse("Feature %s does not exist" % uid, status=404)
viewable, response = f.is_viewable(user)
if not viewable:
return viewable, response
features = []
collections = []
if isinstance(f, FeatureCollection):
obj_id = f.pk
ct = ContentType.objects.get_for_model(f.__class__)
for fmodel in get_feature_models():
unattached = list(fmodel.objects.filter(content_type=ct,object_id=obj_id))
features.extend(unattached)
for cmodel in get_collection_models():
collections_top = list(cmodel.objects.filter(content_type=ct,object_id=obj_id))
collections.extend(collections_top)
elif isinstance(f, Feature):
features.append(f)
return features, collections
def get_public_data():
"""
No login necessary, everyone sees these
Public groups defined in settings.SHARING_TO_PUBLIC_GROUPS
"""
from django.conf import settings
public_groups = Group.objects.filter(name__in=settings.SHARING_TO_PUBLIC_GROUPS)
features = []
collections = []
for fmodel in get_feature_models():
unattached = list(fmodel.objects.filter(sharing_groups__in=public_groups))
features.extend(unattached)
for cmodel in get_collection_models():
collections_top = list(cmodel.objects.filter(sharing_groups__in=public_groups))
collections.extend(collections_top)
return features, collections
def get_shared_data(shareuser, sharegroup, user):
sg = Group.objects.get(pk=sharegroup)
su = User.objects.get(pk=shareuser)
features = []
collections = []
for fmodel in get_feature_models():
# Find top level features shared with user
# top-level == not belonging to any collections
# have to use content_type and object_id fields to determine
unattached = list(
fmodel.objects.shared_with_user(user,filter_groups=[sg])
.filter(user=su, content_type=None,object_id=None)
)
features.extend(unattached)
for cmodel in get_collection_models():
collections_top = list(
cmodel.objects.shared_with_user(user,filter_groups=[sg])
.filter(user=su, content_type=None,object_id=None)
)
collections.extend(collections_top)
return features, collections
def create_kmz(kml, zippath):
"""
Given a KML string and a "/" seperated path like "FOLDERNAME/doc.kml",
creates a zipped KMZ archive buffer that can be written directly to a
django response object
"""
import tempfile
from cStringIO import StringIO
import zipfile
# write out the kml to tempfile
#The Problem: for Windows, we need to close the file before we can access it again below (via zipout.write)
# this caused a Permissions Error when running from the local dev server (on Windows)
# as Windows considered the unclosed file to already be in use (and therefore inaccessible)
#The Solution: adding 'delete=False' to tempfile.NamedTemporaryFiles for developing environments using Python 2.6(sf 2-16-10)
# this will only happen if the user is using Python 2.6, previous versions of Python will treat the code as it was
# (this delete parameter isn't available until python 2.6)
#if the development environment is using 2.5 or earlier, then the temp file will still be closed via kmlfile.close()
#if the development environment is using 2.6 then the temporary file is deleted manually via os.unlink(kmlfile.name) (see below)
#This was reported (and perhaps more fully explained) in Issue 263
python26 = True
try:
kmlfile = tempfile.NamedTemporaryFile(delete=False)
except:
kmlfile = tempfile.NamedTemporaryFile()
python26 = False
kmlfile.write(kml.encode('utf-8'))
kmlfile.flush()
if python26:
kmlfile.close()
# zip it up into a kmz
kmzbuffer = StringIO()
zipout = zipfile.ZipFile(kmzbuffer,'w',zipfile.ZIP_DEFLATED)
zipout.write(kmlfile.name, zippath.encode('ascii'))
zipout.close()
# close out the tempfile
if python26:
import os
os.unlink(kmlfile.name)
else:
kmlfile.close()
# grab the content of the stringIO buffer
kmz = kmzbuffer.getvalue()
# close out the stringIO buffer
kmzbuffer.close()
return kmz
from django.views.decorators.cache import cache_control
@cache_control(no_cache=True)
def create_kml(request, input_username=None, input_uid=None,
input_shareuser=None, input_sharegroup=None, links=False, kmz=False,
session_key='0'):
"""
Returns a KML/KMZ containing Features/FeatureCollections owned by user
"""
load_session(request, session_key)
user = request.user
if input_username and user.username != input_username:
log.warn("Input username from URL is %r but request.user.username is %r" % (input_username, user.username))
return HttpResponse('Access denied', status=401)
if input_username:
features, collections = get_user_data(user)
elif input_uid:
features, collections = get_data_for_feature(user, input_uid)
elif input_shareuser and input_sharegroup:
features, collections = get_shared_data(input_shareuser, input_sharegroup, user)
else:
raise Http404
if not features and isinstance(collections, HttpResponse):
return collections # We got an http error going on
styles = get_styles(features,collections,links)
t = get_template('kmlapp/myshapes.kml')
context = Context({
'user': user,
'features': features,
'collections': collections,
'use_network_links': links,
'request_path': request.path,
'styles': styles,
'session_key': session_key,
'shareuser': input_shareuser,
'sharegroup': input_sharegroup,
'feature_id': input_uid,
})
kml = t.render(context)
mime = mimetypes.KML
if kmz:
mime = mimetypes.KMZ
kml = create_kmz(kml, 'mm/doc.kml')
response = HttpResponse(kml, mimetype=mime)
response['Content-Disposition'] = 'attachment'
return response
@cache_control(no_cache=True)
def create_shared_kml(request, input_username, kmz=False, session_key='0'):
"""
Returns a KML/KMZ containing shared MPAs (organized into folders by groups and users who have shared them)
"""
load_session(request, session_key)
user = request.user
if input_username and user.username != input_username:
return HttpResponse('Access denied', status=401)
from madrona.features import groups_users_sharing_with
sharing_with = groups_users_sharing_with(user)
t = get_template('kmlapp/shared.kml')
kml = t.render(Context({'user': request.user, 'groups_users': sharing_with, 'request_path': request.path, 'session_key': session_key}))
mime = mimetypes.KML
if kmz:
mime = mimetypes.KMZ
kml = create_kmz(kml, 'mm/doc.kml')
response = HttpResponse(kml, mimetype=mime)
response['Content-Disposition'] = 'attachment'
return response
def shared_public(request, kmz=False, session_key='0'):
"""
Shows all publicly shared arrays
Must be shared with a special set of public groups
defined in settings.SHARING_TO_PUBLIC_GROUPS
"""
load_session(request, session_key)
user = request.user
features, collections = get_public_data()
styles = get_styles(features,collections)
# determine content types for sharing
t = get_template('kmlapp/public.kml')
kml = t.render(Context({'loggedin_user': request.user, 'user': request.user,
'features': features, 'collections': collections, 'styles': styles,
'use_network_links': True, 'request_path': request.path,
'session_key': session_key}))
mime = mimetypes.KML
if kmz:
mime = mimetypes.KMZ
kml = create_kmz(kml, 'mm/doc.kml')
response = HttpResponse(kml, mimetype=mime)
response['Content-Disposition'] = 'attachment'
return response
| 37.125436
| 139
| 0.694322
|
c3da59bec46bde629a6b2bc0722b66276cc27a3f
| 22,405
|
py
|
Python
|
UMLRT2Kiltera_MM/transformation_reduced/Himesis/HState2HProcDef.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 3
|
2017-06-02T19:26:27.000Z
|
2021-06-14T04:25:45.000Z
|
UMLRT2Kiltera_MM/transformation_reduced/Himesis/HState2HProcDef.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 8
|
2016-08-24T07:04:07.000Z
|
2017-05-26T16:22:47.000Z
|
UMLRT2Kiltera_MM/transformation_reduced/Himesis/HState2HProcDef.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 1
|
2019-10-31T06:00:23.000Z
|
2019-10-31T06:00:23.000Z
|
from core.himesis import Himesis
import cPickle as pickle
from uuid import UUID
class HState2HProcDef(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HState2HProcDef.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HState2HProcDef, self).__init__(name='HState2HProcDef', num_nodes=120, edges=[])
# Add the edges
self.add_edges([(1, 92), (92, 8), (8, 93), (93, 18), (8, 94), (94, 19), (8, 95), (95, 20), (8, 96), (96, 11), (11, 97), (97, 15), (15, 98), (98, 7), (11, 99), (99, 16), (16, 100), (100, 9), (9, 101), (101, 13), (9, 102), (102, 12), (12, 103), (103, 17), (17, 104), (104, 14), (44, 32), (32, 80), (45, 33), (33, 81), (46, 34), (34, 82), (47, 35), (35, 83), (48, 36), (36, 84), (49, 37), (37, 85), (50, 38), (38, 86), (51, 39), (39, 87), (52, 40), (40, 88), (53, 41), (41, 89), (54, 42), (42, 90), (55, 43), (43, 91), (8, 21), (21, 57), (18, 22), (22, 58), (19, 23), (23, 59), (20, 24), (24, 60), (15, 25), (25, 61), (16, 26), (26, 62), (13, 27), (27, 63), (17, 28), (28, 64), (14, 29), (29, 65), (1, 30), (30, 66), (11, 31), (31, 67), (5, 0), (0, 105), (0, 106), (0, 107), (0, 108), (0, 109), (0, 110), (0, 111), (0, 112), (0, 113), (0, 114), (0, 115), (0, 116), (0, 117), (0, 118), (0, 119), (105, 1), (110, 1), (1, 10), (6, 2), (2, 4), (4, 3), (3, 56), (10, 4), (6, 5), (113, 11), (117, 12), (116, 13), (119, 14), (44, 68), (45, 69), (46, 70), (47, 71), (48, 72), (49, 73), (50, 74), (51, 75), (52, 76), (53, 77), (54, 78), (55, 79), (111, 7), (106, 8), (107, 18), (108, 19), (109, 20), (112, 15), (114, 16), (115, 9), (118, 17), (68, 56), (69, 57), (70, 58), (71, 59), (72, 60), (73, 61), (74, 62), (75, 63), (76, 64), (77, 65), (78, 66), (79, 67)])
# Set the graph attributes
self["mm__"] = pickle.loads("""(lp1
S'UMLRT2Kiltera_MM'
p2
a.""")
self["name"] = """State2HProcDef"""
self["GUID__"] = UUID('c9b98a9f-3ced-495d-abed-18098af469ce')
# Set the node attributes
self.vs[0]["mm__"] = """ApplyModel"""
self.vs[0]["GUID__"] = UUID('c4ebe780-4dda-4193-abfe-1cf60931cf95')
self.vs[1]["name"] = """localdef1"""
self.vs[1]["classtype"] = """LocalDef"""
self.vs[1]["mm__"] = """LocalDef"""
self.vs[1]["cardinality"] = """1"""
self.vs[1]["GUID__"] = UUID('957094bb-7101-47b7-8704-df11182d4402')
self.vs[2]["mm__"] = """match_contains"""
self.vs[2]["GUID__"] = UUID('f614a131-19c7-4f60-9764-94c946e58207')
self.vs[3]["mm__"] = """hasAttribute_S"""
self.vs[3]["GUID__"] = UUID('b8863e77-6af6-4271-83cf-f837da316564')
self.vs[4]["name"] = """state1"""
self.vs[4]["classtype"] = """State"""
self.vs[4]["mm__"] = """State"""
self.vs[4]["cardinality"] = """+"""
self.vs[4]["GUID__"] = UUID('264c4013-8001-4c3b-87e6-e9e679dd7843')
self.vs[5]["mm__"] = """paired_with"""
self.vs[5]["GUID__"] = UUID('527cf59b-09cc-4ab5-8c6e-c91f1ec6c212')
self.vs[6]["mm__"] = """MatchModel"""
self.vs[6]["GUID__"] = UUID('c565c79c-1e9a-4b86-b3c1-d25278fbd0c4')
self.vs[7]["name"] = """null1"""
self.vs[7]["classtype"] = """Null"""
self.vs[7]["mm__"] = """Null"""
self.vs[7]["cardinality"] = """1"""
self.vs[7]["GUID__"] = UUID('88e3915a-a880-4a3d-ad55-c9b5e288cd07')
self.vs[8]["name"] = """procdef1"""
self.vs[8]["classtype"] = """ProcDef"""
self.vs[8]["mm__"] = """ProcDef"""
self.vs[8]["cardinality"] = """1"""
self.vs[8]["GUID__"] = UUID('7e6e52ca-30de-4478-81e9-7a13d925b974')
self.vs[9]["name"] = """seq1"""
self.vs[9]["classtype"] = """Seq"""
self.vs[9]["mm__"] = """Seq"""
self.vs[9]["cardinality"] = """1"""
self.vs[9]["GUID__"] = UUID('45ca5c92-26e5-4b34-a335-80845327541d')
self.vs[10]["type"] = """ruleDef"""
self.vs[10]["mm__"] = """backward_link"""
self.vs[10]["GUID__"] = UUID('1aca5e8b-b626-4a91-beed-410f6fee460a')
self.vs[11]["name"] = """listen1"""
self.vs[11]["classtype"] = """Listen"""
self.vs[11]["mm__"] = """Listen"""
self.vs[11]["cardinality"] = """1"""
self.vs[11]["GUID__"] = UUID('cee60ea3-e4d6-4411-b496-1a33aa9835a5')
self.vs[12]["name"] = """listen2"""
self.vs[12]["classtype"] = """Listen"""
self.vs[12]["mm__"] = """Listen"""
self.vs[12]["cardinality"] = """1"""
self.vs[12]["GUID__"] = UUID('acda3dff-f44d-4a8d-b6dc-b1ff936a1089')
self.vs[13]["name"] = """triggerT1"""
self.vs[13]["classtype"] = """Trigger_T"""
self.vs[13]["mm__"] = """Trigger_T"""
self.vs[13]["cardinality"] = """1"""
self.vs[13]["GUID__"] = UUID('eb493f3e-981f-4fb5-9ad3-b1a1fb5782b3')
self.vs[14]["name"] = """triggerT2"""
self.vs[14]["classtype"] = """Trigger_T"""
self.vs[14]["mm__"] = """Trigger_T"""
self.vs[14]["cardinality"] = """1"""
self.vs[14]["GUID__"] = UUID('f8618612-2d1d-48f4-baaf-d0aa71e0b095')
self.vs[15]["name"] = """listenbranch1"""
self.vs[15]["classtype"] = """ListenBranch"""
self.vs[15]["mm__"] = """ListenBranch"""
self.vs[15]["cardinality"] = """1"""
self.vs[15]["GUID__"] = UUID('c1c067f2-5eb4-4ee5-96be-49a363368dc5')
self.vs[16]["name"] = """listenbranch2"""
self.vs[16]["classtype"] = """ListenBranch"""
self.vs[16]["mm__"] = """ListenBranch"""
self.vs[16]["cardinality"] = """1"""
self.vs[16]["GUID__"] = UUID('4754c4f4-fdeb-49d0-b521-6ef5f2ae3f4c')
self.vs[17]["name"] = """listenbranch3"""
self.vs[17]["classtype"] = """ListenBranch"""
self.vs[17]["mm__"] = """ListenBranch"""
self.vs[17]["cardinality"] = """1"""
self.vs[17]["GUID__"] = UUID('07ec92c0-7650-4a8e-98f8-1dfa5f3ce055')
self.vs[18]["name"] = """name1"""
self.vs[18]["classtype"] = """Name"""
self.vs[18]["mm__"] = """Name"""
self.vs[18]["cardinality"] = """1"""
self.vs[18]["GUID__"] = UUID('1d7083d4-f529-4d56-9f67-072c911be12d')
self.vs[19]["name"] = """name2"""
self.vs[19]["classtype"] = """Name"""
self.vs[19]["mm__"] = """Name"""
self.vs[19]["cardinality"] = """1"""
self.vs[19]["GUID__"] = UUID('9165afaa-5baa-4ed4-8cf8-c719572bdbfe')
self.vs[20]["name"] = """name3"""
self.vs[20]["classtype"] = """Name"""
self.vs[20]["mm__"] = """Name"""
self.vs[20]["cardinality"] = """1"""
self.vs[20]["GUID__"] = UUID('213bf0fe-e6fc-49ef-bc93-ca2d477eb185')
self.vs[21]["mm__"] = """hasAttribute_T"""
self.vs[21]["GUID__"] = UUID('648df2ee-ee8b-4a4e-a23e-d5f99c018d51')
self.vs[22]["mm__"] = """hasAttribute_T"""
self.vs[22]["GUID__"] = UUID('f9a93cd2-8c30-4446-971f-651081f12dab')
self.vs[23]["mm__"] = """hasAttribute_T"""
self.vs[23]["GUID__"] = UUID('bcfe2e43-743d-43be-a9ce-6958972c8bb2')
self.vs[24]["mm__"] = """hasAttribute_T"""
self.vs[24]["GUID__"] = UUID('d40d572c-fd11-4838-991f-1fb2e35206c3')
self.vs[25]["mm__"] = """hasAttribute_T"""
self.vs[25]["GUID__"] = UUID('77897619-f415-4fe7-a98c-ff1e3360555e')
self.vs[26]["mm__"] = """hasAttribute_T"""
self.vs[26]["GUID__"] = UUID('dc74e4de-f6ff-4571-970b-9756e71f0a06')
self.vs[27]["mm__"] = """hasAttribute_T"""
self.vs[27]["GUID__"] = UUID('991b204c-ef20-49b1-bdad-c7339a3ff8b4')
self.vs[28]["mm__"] = """hasAttribute_T"""
self.vs[28]["GUID__"] = UUID('0cbb3783-fa62-4301-9e9b-aa21e95ac550')
self.vs[29]["mm__"] = """hasAttribute_T"""
self.vs[29]["GUID__"] = UUID('97a1e512-cc60-4871-8049-3fee09bda745')
self.vs[30]["mm__"] = """hasAttribute_T"""
self.vs[30]["GUID__"] = UUID('567e7d49-da49-412e-9f47-5d41023f858f')
self.vs[31]["mm__"] = """hasAttribute_T"""
self.vs[31]["GUID__"] = UUID('a9f55267-1d6b-4e31-a58b-22f0ffb4c15f')
self.vs[32]["mm__"] = """rightExpr"""
self.vs[32]["GUID__"] = UUID('7d2944db-08b2-4f20-9a99-71456f7aff90')
self.vs[33]["mm__"] = """rightExpr"""
self.vs[33]["GUID__"] = UUID('9183c4ab-dc4f-4aec-8df1-ddf1f15bbec2')
self.vs[34]["mm__"] = """rightExpr"""
self.vs[34]["GUID__"] = UUID('75d020a5-7185-4d4f-bfa1-71b33680fcc3')
self.vs[35]["mm__"] = """rightExpr"""
self.vs[35]["GUID__"] = UUID('98e4ca91-0063-4cc8-8755-1ab8d1da8469')
self.vs[36]["mm__"] = """rightExpr"""
self.vs[36]["GUID__"] = UUID('17956f1f-e2a6-4765-bd63-e6202c8315aa')
self.vs[37]["mm__"] = """rightExpr"""
self.vs[37]["GUID__"] = UUID('146e89a9-bf39-4133-bac1-5a4b4f53e452')
self.vs[38]["mm__"] = """rightExpr"""
self.vs[38]["GUID__"] = UUID('a670605a-a946-4662-8d10-97034bc6e312')
self.vs[39]["mm__"] = """rightExpr"""
self.vs[39]["GUID__"] = UUID('ec62d436-db5a-43f1-bd06-1c077b66fb3b')
self.vs[40]["mm__"] = """rightExpr"""
self.vs[40]["GUID__"] = UUID('3cd35e10-48b2-4bf7-9b8c-414d07a790e0')
self.vs[41]["mm__"] = """rightExpr"""
self.vs[41]["GUID__"] = UUID('13d6bce3-e399-4fb9-a40a-be6bdf18e0dd')
self.vs[42]["mm__"] = """rightExpr"""
self.vs[42]["GUID__"] = UUID('46c0f6d2-0745-44fb-aee5-c45a15d2b332')
self.vs[43]["mm__"] = """rightExpr"""
self.vs[43]["GUID__"] = UUID('621229c7-3603-42d8-9469-feefecd00066')
self.vs[44]["name"] = """eq1"""
self.vs[44]["mm__"] = """Equation"""
self.vs[44]["GUID__"] = UUID('ef6f863f-d1f8-4c63-b68e-77dcdb16afe0')
self.vs[45]["name"] = """eq2"""
self.vs[45]["mm__"] = """Equation"""
self.vs[45]["GUID__"] = UUID('5bdc5b24-b1f4-4698-8032-00de143e9b8e')
self.vs[46]["name"] = """eq3"""
self.vs[46]["mm__"] = """Equation"""
self.vs[46]["GUID__"] = UUID('343fdda6-5b2b-4ac7-89ac-48ecbf57a213')
self.vs[47]["name"] = """eq4"""
self.vs[47]["mm__"] = """Equation"""
self.vs[47]["GUID__"] = UUID('87c48d09-99c9-4c82-8f08-2c013ba1c411')
self.vs[48]["name"] = """eq5"""
self.vs[48]["mm__"] = """Equation"""
self.vs[48]["GUID__"] = UUID('713657b3-9801-42f5-ab0e-f80eda9bac1f')
self.vs[49]["name"] = """eq6"""
self.vs[49]["mm__"] = """Equation"""
self.vs[49]["GUID__"] = UUID('c51e8c57-4222-4e36-abcd-ce1730f50c97')
self.vs[50]["name"] = """eq7"""
self.vs[50]["mm__"] = """Equation"""
self.vs[50]["GUID__"] = UUID('282b9799-7ff0-44f1-ab31-26a681b4a18f')
self.vs[51]["name"] = """eq8"""
self.vs[51]["mm__"] = """Equation"""
self.vs[51]["GUID__"] = UUID('d9c9e3a3-7d1b-4344-8213-462a9642f55c')
self.vs[52]["name"] = """eq9"""
self.vs[52]["mm__"] = """Equation"""
self.vs[52]["GUID__"] = UUID('af60f366-fc92-430a-a3f8-783660f63559')
self.vs[53]["name"] = """eq10"""
self.vs[53]["mm__"] = """Equation"""
self.vs[53]["GUID__"] = UUID('c2d09128-0554-4a30-85ef-dfba8835a797')
self.vs[54]["name"] = """eq11"""
self.vs[54]["mm__"] = """Equation"""
self.vs[54]["GUID__"] = UUID('f6e28d1b-728d-4d9b-b0eb-6f3f8461765a')
self.vs[55]["name"] = """eq12"""
self.vs[55]["mm__"] = """Equation"""
self.vs[55]["GUID__"] = UUID('a99fd112-d7e2-412d-9756-9d2a0a540a4c')
self.vs[56]["name"] = """isComposite"""
self.vs[56]["mm__"] = """Attribute"""
self.vs[56]["Type"] = """'Bool'"""
self.vs[56]["GUID__"] = UUID('d4ce77e3-1757-4ab4-8e69-1d61cbbd2e1c')
self.vs[57]["name"] = """name"""
self.vs[57]["mm__"] = """Attribute"""
self.vs[57]["Type"] = """'String'"""
self.vs[57]["GUID__"] = UUID('875bd734-fa6d-4393-a55d-75672f36e41a')
self.vs[58]["name"] = """literal"""
self.vs[58]["mm__"] = """Attribute"""
self.vs[58]["Type"] = """'String'"""
self.vs[58]["GUID__"] = UUID('3212c20d-117f-4f0a-a881-90948fa722c5')
self.vs[59]["name"] = """literal"""
self.vs[59]["mm__"] = """Attribute"""
self.vs[59]["Type"] = """'String'"""
self.vs[59]["GUID__"] = UUID('3c0037c7-34a2-419d-98fd-57d2af95dbf0')
self.vs[60]["name"] = """literal"""
self.vs[60]["mm__"] = """Attribute"""
self.vs[60]["Type"] = """'String'"""
self.vs[60]["GUID__"] = UUID('6cf1ad01-09b8-4800-abc0-93675732f2ea')
self.vs[61]["name"] = """channel"""
self.vs[61]["mm__"] = """Attribute"""
self.vs[61]["Type"] = """'String'"""
self.vs[61]["GUID__"] = UUID('cdf2170c-3fb1-4eec-9850-e0b4d02c7f49')
self.vs[62]["name"] = """channel"""
self.vs[62]["mm__"] = """Attribute"""
self.vs[62]["Type"] = """'String'"""
self.vs[62]["GUID__"] = UUID('7c6d1eef-888e-4a0b-bd7f-be84a81a9f5b')
self.vs[63]["name"] = """channel"""
self.vs[63]["mm__"] = """Attribute"""
self.vs[63]["Type"] = """'String'"""
self.vs[63]["GUID__"] = UUID('daf9618a-f9d4-4b03-a116-eee67554619d')
self.vs[64]["name"] = """channel"""
self.vs[64]["mm__"] = """Attribute"""
self.vs[64]["Type"] = """'String'"""
self.vs[64]["GUID__"] = UUID('0f4f6497-dab1-4daa-b2aa-2b516f11782d')
self.vs[65]["name"] = """channel"""
self.vs[65]["mm__"] = """Attribute"""
self.vs[65]["Type"] = """'String'"""
self.vs[65]["GUID__"] = UUID('59d0854e-046b-4eed-82f0-3e01c41d7364')
self.vs[66]["name"] = """pivot"""
self.vs[66]["mm__"] = """Attribute"""
self.vs[66]["Type"] = """'String'"""
self.vs[66]["GUID__"] = UUID('d35e7fa8-4dca-474b-a604-3d0a6bf475c4')
self.vs[67]["name"] = """pivot"""
self.vs[67]["mm__"] = """Attribute"""
self.vs[67]["Type"] = """'String'"""
self.vs[67]["GUID__"] = UUID('e1cecb58-0ba5-4e92-847d-35bd83e506fb')
self.vs[68]["mm__"] = """leftExpr"""
self.vs[68]["GUID__"] = UUID('490c6edb-3532-4611-8e13-ffdd400a1171')
self.vs[69]["mm__"] = """leftExpr"""
self.vs[69]["GUID__"] = UUID('99ea6c88-3e7c-4e1a-a145-1d803265c563')
self.vs[70]["mm__"] = """leftExpr"""
self.vs[70]["GUID__"] = UUID('6ad9a534-e0d0-4918-83eb-fbb1fb42aab7')
self.vs[71]["mm__"] = """leftExpr"""
self.vs[71]["GUID__"] = UUID('65fabf71-cf72-43a5-967a-6c65ba98bf11')
self.vs[72]["mm__"] = """leftExpr"""
self.vs[72]["GUID__"] = UUID('acb4af4d-f61e-4ef6-b07e-7c73eb612d13')
self.vs[73]["mm__"] = """leftExpr"""
self.vs[73]["GUID__"] = UUID('77356899-8723-4d36-b4c6-204c0fd53ec2')
self.vs[74]["mm__"] = """leftExpr"""
self.vs[74]["GUID__"] = UUID('12a6616c-08fc-4302-97fe-f0bab6232ef3')
self.vs[75]["mm__"] = """leftExpr"""
self.vs[75]["GUID__"] = UUID('84144e3e-d49d-4610-ac70-fc3ba7a78017')
self.vs[76]["mm__"] = """leftExpr"""
self.vs[76]["GUID__"] = UUID('65a2f57d-f505-4aed-b901-daa4cb8c858e')
self.vs[77]["mm__"] = """leftExpr"""
self.vs[77]["GUID__"] = UUID('f2fdf0c4-158f-48e9-8217-400a0443fc3f')
self.vs[78]["mm__"] = """leftExpr"""
self.vs[78]["GUID__"] = UUID('73acaa3b-8499-4e53-9136-0a9f9e84ca79')
self.vs[79]["mm__"] = """leftExpr"""
self.vs[79]["GUID__"] = UUID('b6639c41-1c47-495c-997e-f0ec5c99544d')
self.vs[80]["name"] = """true"""
self.vs[80]["mm__"] = """Constant"""
self.vs[80]["Type"] = """'Bool'"""
self.vs[80]["GUID__"] = UUID('4bf7f87d-6220-4da2-aa93-10e1fa0b17eb')
self.vs[81]["name"] = """H"""
self.vs[81]["mm__"] = """Constant"""
self.vs[81]["Type"] = """'String'"""
self.vs[81]["GUID__"] = UUID('37964822-e8ba-4636-939e-16adab6e5dc5')
self.vs[82]["name"] = """exit_in"""
self.vs[82]["mm__"] = """Constant"""
self.vs[82]["Type"] = """'String'"""
self.vs[82]["GUID__"] = UUID('8a925018-5dc0-4bae-a7b5-c4e62a582197')
self.vs[83]["name"] = """exack_in"""
self.vs[83]["mm__"] = """Constant"""
self.vs[83]["Type"] = """'String'"""
self.vs[83]["GUID__"] = UUID('0224dbc3-96e1-4fd2-88d6-c451fd4d4d04')
self.vs[84]["name"] = """sh_in"""
self.vs[84]["mm__"] = """Constant"""
self.vs[84]["Type"] = """'String'"""
self.vs[84]["GUID__"] = UUID('2e131930-44c0-4b7a-80c7-490f08b98d24')
self.vs[85]["name"] = """sh_in"""
self.vs[85]["mm__"] = """Constant"""
self.vs[85]["Type"] = """'String'"""
self.vs[85]["GUID__"] = UUID('a020e333-8f08-4182-8e98-ff846a411f4d')
self.vs[86]["name"] = """exit"""
self.vs[86]["mm__"] = """Constant"""
self.vs[86]["Type"] = """'String'"""
self.vs[86]["GUID__"] = UUID('6bce0315-1b57-4513-8af3-8ecb89bdf5ef')
self.vs[87]["name"] = """exit_in"""
self.vs[87]["mm__"] = """Constant"""
self.vs[87]["Type"] = """'String'"""
self.vs[87]["GUID__"] = UUID('c12271ea-5c52-42af-944c-adcedf70278a')
self.vs[88]["name"] = """exack_in"""
self.vs[88]["mm__"] = """Constant"""
self.vs[88]["Type"] = """'String'"""
self.vs[88]["GUID__"] = UUID('4b7e1e82-2013-412f-927f-6b95f987c4b2')
self.vs[89]["name"] = """exack"""
self.vs[89]["mm__"] = """Constant"""
self.vs[89]["Type"] = """'String'"""
self.vs[89]["GUID__"] = UUID('42db34d0-e993-4400-aa6e-e3524fcdc93f')
self.vs[90]["name"] = """localdefcompstate"""
self.vs[90]["mm__"] = """Constant"""
self.vs[90]["Type"] = """'String'"""
self.vs[90]["GUID__"] = UUID('c0c414d3-71fc-4a6a-a96a-8ea77ff5c227')
self.vs[91]["name"] = """listenhproc"""
self.vs[91]["mm__"] = """Constant"""
self.vs[91]["Type"] = """'String'"""
self.vs[91]["GUID__"] = UUID('9c323a46-1015-4470-868d-ad6998f961e8')
self.vs[92]["associationType"] = """def"""
self.vs[92]["mm__"] = """directLink_T"""
self.vs[92]["GUID__"] = UUID('b42713fe-166a-4615-bf49-c2d847cbcc66')
self.vs[93]["associationType"] = """channelNames"""
self.vs[93]["mm__"] = """directLink_T"""
self.vs[93]["GUID__"] = UUID('1d36c993-397f-42dc-a20f-90823be162b4')
self.vs[94]["associationType"] = """channelNames"""
self.vs[94]["mm__"] = """directLink_T"""
self.vs[94]["GUID__"] = UUID('8959bbfa-4c92-4cd3-a6f3-d2358a5c878c')
self.vs[95]["associationType"] = """channelNames"""
self.vs[95]["mm__"] = """directLink_T"""
self.vs[95]["GUID__"] = UUID('1e0cb1f5-1a2a-45c4-ae55-bf431298d2f0')
self.vs[96]["associationType"] = """p"""
self.vs[96]["mm__"] = """directLink_T"""
self.vs[96]["GUID__"] = UUID('92c264f5-453e-437e-8b88-c84115951f9e')
self.vs[97]["associationType"] = """branches"""
self.vs[97]["mm__"] = """directLink_T"""
self.vs[97]["GUID__"] = UUID('46247179-13f3-4f56-938e-efad4ef8d0ce')
self.vs[98]["associationType"] = """p"""
self.vs[98]["mm__"] = """directLink_T"""
self.vs[98]["GUID__"] = UUID('cf0e65c2-05ff-4e90-b6be-64e4a2a6bca4')
self.vs[99]["associationType"] = """branches"""
self.vs[99]["mm__"] = """directLink_T"""
self.vs[99]["GUID__"] = UUID('2c67bb2b-2dc1-4941-9c6b-c8734694d0f8')
self.vs[100]["associationType"] = """p"""
self.vs[100]["mm__"] = """directLink_T"""
self.vs[100]["GUID__"] = UUID('2df1aebd-c4fa-4934-95c6-81ef80096c37')
self.vs[101]["associationType"] = """p"""
self.vs[101]["mm__"] = """directLink_T"""
self.vs[101]["GUID__"] = UUID('35a0a975-0ed3-4d31-8b24-6f5270e567f8')
self.vs[102]["associationType"] = """p"""
self.vs[102]["mm__"] = """directLink_T"""
self.vs[102]["GUID__"] = UUID('b8a78e5b-1aaf-416c-be5d-67e07f4a3187')
self.vs[103]["associationType"] = """branches"""
self.vs[103]["mm__"] = """directLink_T"""
self.vs[103]["GUID__"] = UUID('ca8630fd-f565-4e8c-a804-f9175932584f')
self.vs[104]["associationType"] = """p"""
self.vs[104]["mm__"] = """directLink_T"""
self.vs[104]["GUID__"] = UUID('75b6dc51-42e4-4f0e-884f-fefe2d32a5b4')
self.vs[105]["mm__"] = """apply_contains"""
self.vs[105]["GUID__"] = UUID('fbee6762-5ede-4156-9562-1ee0e1ce4b7a')
self.vs[106]["mm__"] = """apply_contains"""
self.vs[106]["GUID__"] = UUID('4816109f-f72c-49dc-8596-5c423175ceb9')
self.vs[107]["mm__"] = """apply_contains"""
self.vs[107]["GUID__"] = UUID('a6277216-5bf1-47a1-9622-8c410bcac5d2')
self.vs[108]["mm__"] = """apply_contains"""
self.vs[108]["GUID__"] = UUID('7a7e4288-0836-4f12-9f13-a95cb48e9d04')
self.vs[109]["mm__"] = """apply_contains"""
self.vs[109]["GUID__"] = UUID('c64c3258-54aa-4718-9bf3-760c9fd7344a')
self.vs[110]["mm__"] = """apply_contains"""
self.vs[110]["GUID__"] = UUID('093b3597-0204-43f6-b05a-afaf7369748a')
self.vs[111]["mm__"] = """apply_contains"""
self.vs[111]["GUID__"] = UUID('433eaa35-d916-4230-ae95-4682955c1ed8')
self.vs[112]["mm__"] = """apply_contains"""
self.vs[112]["GUID__"] = UUID('3562a3ae-36a1-4aaa-8125-63b61fa43697')
self.vs[113]["mm__"] = """apply_contains"""
self.vs[113]["GUID__"] = UUID('aaefe157-c161-4519-a618-7d3e56d4aff8')
self.vs[114]["mm__"] = """apply_contains"""
self.vs[114]["GUID__"] = UUID('ef075067-1e3f-491c-91e7-308075a1a390')
self.vs[115]["mm__"] = """apply_contains"""
self.vs[115]["GUID__"] = UUID('4334ab4a-0df1-4295-a5b7-ee804e906f5e')
self.vs[116]["mm__"] = """apply_contains"""
self.vs[116]["GUID__"] = UUID('27ece724-de6d-4186-9d6b-2c8fb9d71089')
self.vs[117]["mm__"] = """apply_contains"""
self.vs[117]["GUID__"] = UUID('67c0e949-6687-4181-b5b6-2525a3d2f20c')
self.vs[118]["mm__"] = """apply_contains"""
self.vs[118]["GUID__"] = UUID('209d41f5-5e71-4992-9b2b-9459bade7646')
self.vs[119]["mm__"] = """apply_contains"""
self.vs[119]["GUID__"] = UUID('93a7ddd9-b11d-4bd2-8785-eec038f2ef86')
| 57.744845
| 1,359
| 0.537826
|
93238639fd2963f368d9ec70b1b0dcaf69edac4a
| 1,736
|
py
|
Python
|
const.py
|
youssefhoummad/tetris
|
beda6947bbf734672da453c2404e6946f44759b9
|
[
"MIT"
] | null | null | null |
const.py
|
youssefhoummad/tetris
|
beda6947bbf734672da453c2404e6946f44759b9
|
[
"MIT"
] | null | null | null |
const.py
|
youssefhoummad/tetris
|
beda6947bbf734672da453c2404e6946f44759b9
|
[
"MIT"
] | null | null | null |
B = BLOCK_SIZE = 30
SURFACE_WIDTH = 300
SURFACE_HEIGHT = 600
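# Each tetromino is a list of rotations; a rotation is a 5x5 grid where '0' marks a filled cell and '.' an empty one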
S = [['.....',
'.....',
'..00.',
'.00..',
'.....'],
['.....',
'..0..',
'..00.',
'...0.',
'.....' ]]
Z = [['.....',
'.....',
'.00..',
'..00.',
'.....'],
['.....',
'...0.',
'..00.',
'..0..',
'.....' ]]
I = [['.....',
'..0..',
'..0..',
'..0..',
'.....'],
['.....',
'.....',
'.000.',
'.....',
'.....' ]]
O = [['.....',
'.....',
'.00..',
'.00..',
'.....']]
J = [['.....',
'.....',
'.0...',
'.000.',
'.....'],
['.....',
'..00.',
'..0..',
'..0..',
'.....' ],
['.....',
'.000.',
'...0.',
'.....',
'.....' ],
['.....',
'..0..',
'..0..',
'.00..',
'.....' ],]
L = [['.....',
'...0.',
'.000.',
'.....',
'.....'],
['.....',
'..0..',
'..0..',
'..00.',
'.....'],
['.....',
'.....',
'.000.',
'.0...',
'.....'],
['.....',
'.00..',
'..0..',
'..0..',
'.....']]
T = [['.....',
'..0..',
'.000.',
'.....',
'.....'],
['.....',
'..0..',
'..00.',
'..0..',
'.....'],
['.....',
'.....',
'.000.',
'..0..',
'.....'],
['.....',
'..0..',
'.00..',
'..0..',
'.....']]
SHAPES = [S, Z, I, O, L, T]
# red green blue pink gray orange
SHAPES_FILL = ['#f44336', '#4CAF50', '#2196F3', '#F50057', '#607D8B', '#FF9800']
| 16.533333
| 80
| 0.12212
|
41aa3c7403731d9854e1166bf116715b2fe4c459
| 10,268
|
py
|
Python
|
generate.py
|
gaoxiao/handwriting-generation
|
5e684c1adee7fb83be309f9d9c001b559b567666
|
[
"MIT"
] | null | null | null |
generate.py
|
gaoxiao/handwriting-generation
|
5e684c1adee7fb83be309f9d9c001b559b567666
|
[
"MIT"
] | null | null | null |
generate.py
|
gaoxiao/handwriting-generation
|
5e684c1adee7fb83be309f9d9c001b559b567666
|
[
"MIT"
] | null | null | null |
import os
import pickle
import argparse
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.mlab as mlab
from matplotlib import animation
import seaborn
from collections import namedtuple
parser = argparse.ArgumentParser()
parser.add_argument('--model', dest='model_path', type=str, default=os.path.join('pretrained', 'model-29'))
parser.add_argument('--text', dest='text', type=str, default=None)
parser.add_argument('--style', dest='style', type=int, default=None)
parser.add_argument('--bias', dest='bias', type=float, default=1.)
parser.add_argument('--force', dest='force', action='store_true', default=False)
parser.add_argument('--animation', dest='animation', action='store_true', default=False)
parser.add_argument('--noinfo', dest='info', action='store_false', default=True)
parser.add_argument('--save', dest='save', type=str, default=None)
args = parser.parse_args()
def sample(e, mu1, mu2, std1, std2, rho):
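# Sample the next pen offset from the given bivariate Gaussian and an end-of-stroke flag from a Bernoulli(e)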
cov = np.array([[std1 * std1, std1 * std2 * rho],
[std1 * std2 * rho, std2 * std2]])
mean = np.array([mu1, mu2])
x, y = np.random.multivariate_normal(mean, cov)
end = np.random.binomial(1, e)
return np.array([x, y, end])
def split_strokes(points):
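# Split the point sequence into individual strokes wherever the end-of-stroke flag (third column) is set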
points = np.array(points)
strokes = []
b = 0
for e in range(len(points)):
if points[e, 2] == 1.:
strokes += [points[b: e + 1, :2].copy()]
b = e + 1
return strokes
def cumsum(points):
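# Convert relative pen offsets to absolute coordinates, leaving the end-of-stroke column unchanged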
sums = np.cumsum(points[:, :2], axis=0)
return np.concatenate([sums, points[:, 2:]], axis=1)
def sample_text(sess, args_text, translation, style=None):
fields = ['coordinates', 'sequence', 'bias', 'e', 'pi', 'mu1', 'mu2', 'std1', 'std2',
'rho', 'window', 'kappa', 'phi', 'finish', 'zero_states']
vs = namedtuple('Params', fields)(
*[tf.get_collection(name)[0] for name in fields]
)
text = np.array([translation.get(c, 0) for c in args_text])
coord = np.array([0., 0., 1.])
coords = [coord]
# Prime the model with the author style if requested
prime_len, style_len = 0, 0
if style is not None:
# Priming consists of prepending a real pen-position and character sequence to the synthetic sequence to generate,
# and setting the synthetic pen-positions to a null vector (the positions are sampled from the MDN)
style_coords, style_text = style
prime_len = len(style_coords)
style_len = len(style_text)
prime_coords = list(style_coords)
coord = prime_coords[0] # Set the first pen stroke as the first element to process
text = np.r_[style_text, text] # concatenate on 1 axis the prime text + synthesis character sequence
sequence_prime = np.eye(len(translation), dtype=np.float32)[style_text]
sequence_prime = np.expand_dims(np.concatenate([sequence_prime, np.zeros((1, len(translation)))]), axis=0)
sequence = np.eye(len(translation), dtype=np.float32)[text]
sequence = np.expand_dims(np.concatenate([sequence, np.zeros((1, len(translation)))]), axis=0)
phi_data, window_data, kappa_data, stroke_data = [], [], [], []
sess.run(vs.zero_states)
sequence_len = len(args_text) + style_len
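# Sample at most 60 points per character; unless --force is set, stop early once the finish signal exceeds 0.8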
for s in range(1, 60 * sequence_len + 1):
is_priming = s < prime_len
print('\r[{:5d}] sampling... {}'.format(s, 'priming' if is_priming else 'synthesis'), end='')
e, pi, mu1, mu2, std1, std2, rho, \
finish, phi, window, kappa = sess.run([vs.e, vs.pi, vs.mu1, vs.mu2,
vs.std1, vs.std2, vs.rho, vs.finish,
vs.phi, vs.window, vs.kappa],
feed_dict={
vs.coordinates: coord[None, None, ...],
vs.sequence: sequence_prime if is_priming else sequence,
vs.bias: args.bias
})
if is_priming:
# Use the real coordinate if priming
coord = prime_coords[s]
else:
# Synthesis mode
phi_data += [phi[0, :]]
window_data += [window[0, :]]
kappa_data += [kappa[0, :]]
# ---
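# Pick a mixture component according to the MDN weights, then sample the next pen offset from that component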
g = np.random.choice(np.arange(pi.shape[1]), p=pi[0])
coord = sample(e[0, 0], mu1[0, g], mu2[0, g],
std1[0, g], std2[0, g], rho[0, g])
coords += [coord]
stroke_data += [[mu1[0, g], mu2[0, g], std1[0, g], std2[0, g], rho[0, g], coord[2]]]
if not args.force and finish[0, 0] > 0.8:
print('\nFinished sampling!\n')
break
coords = np.array(coords)
coords[-1, 2] = 1.
return phi_data, window_data, kappa_data, stroke_data, coords
def main():
with open(os.path.join('data', 'translation.pkl'), 'rb') as file:
translation = pickle.load(file)
rev_translation = {v: k for k, v in translation.items()}
charset = [rev_translation[i] for i in range(len(rev_translation))]
charset[0] = ''
config = tf.ConfigProto(
device_count={'GPU': 0}
)
with tf.Session(config=config) as sess:
saver = tf.train.import_meta_graph(args.model_path + '.meta')
saver.restore(sess, args.model_path)
while True:
if args.text is not None:
args_text = args.text
else:
args_text = input('What to generate: ')
style = None
if args.style is not None:
style = None
with open(os.path.join('data', 'styles.pkl'), 'rb') as file:
styles = pickle.load(file)
if args.style >= len(styles[0]):
raise ValueError('Requested style is not in style list')
style = [styles[0][args.style], styles[1][args.style]]
phi_data, window_data, kappa_data, stroke_data, coords = sample_text(sess, args_text, translation, style)
strokes = np.array(stroke_data)
epsilon = 1e-8
strokes[:, :2] = np.cumsum(strokes[:, :2], axis=0)
minx, maxx = np.min(strokes[:, 0]), np.max(strokes[:, 0])
miny, maxy = np.min(strokes[:, 1]), np.max(strokes[:, 1])
if args.info:
delta = abs(maxx - minx) / 400.
x = np.arange(minx, maxx, delta)
y = np.arange(miny, maxy, delta)
x_grid, y_grid = np.meshgrid(x, y)
z_grid = np.zeros_like(x_grid)
for i in range(strokes.shape[0]):
gauss = mlab.bivariate_normal(x_grid, y_grid, mux=strokes[i, 0], muy=strokes[i, 1],
sigmax=strokes[i, 2], sigmay=strokes[i, 3],
sigmaxy=0.) # strokes[i, 4]
z_grid += gauss * np.power(strokes[i, 2] + strokes[i, 3], 0.4) / (np.max(gauss) + epsilon)
fig, ax = plt.subplots(2, 2)
ax[0, 0].imshow(z_grid, interpolation='bilinear', aspect='auto', cmap=cm.jet)
ax[0, 0].grid(False)
ax[0, 0].set_title('Densities')
ax[0, 0].set_aspect('equal')
for stroke in split_strokes(cumsum(np.array(coords))):
ax[0, 1].plot(stroke[:, 0], -stroke[:, 1])
ax[0, 1].set_title('Handwriting')
ax[0, 1].set_aspect('equal')
phi_img = np.vstack(phi_data).T[::-1, :]
ax[1, 0].imshow(phi_img, interpolation='nearest', aspect='auto', cmap=cm.jet)
ax[1, 0].set_yticks(np.arange(0, len(args_text) + 1))
ax[1, 0].set_yticklabels(list(' ' + args_text[::-1]), rotation='vertical', fontsize=8)
ax[1, 0].grid(False)
ax[1, 0].set_title('Phi')
window_img = np.vstack(window_data).T
ax[1, 1].imshow(window_img, interpolation='nearest', aspect='auto', cmap=cm.jet)
ax[1, 1].set_yticks(np.arange(0, len(charset)))
ax[1, 1].set_yticklabels(list(charset), rotation='vertical', fontsize=8)
ax[1, 1].grid(False)
ax[1, 1].set_title('Window')
plt.show()
else:
fig, ax = plt.subplots(1, 1)
for stroke in split_strokes(cumsum(np.array(coords))):
plt.plot(stroke[:, 0], -stroke[:, 1], 'r')
ax.set_title('Handwriting')
ax.set_aspect('equal')
plt.show()
if args.animation:
fig, ax = plt.subplots(1, 1, frameon=False, figsize=(2 * (maxx - minx + 2) / (maxy - miny + 1), 2))
ax.set_xlim(minx - 1., maxx + 1.)
ax.set_ylim(-maxy - 0.5, -miny + 0.5)
ax.set_aspect('equal')
ax.axis('off')
# ax.hold(True)
plt.draw()
plt.show(False)
background = fig.canvas.copy_from_bbox(ax.bbox)
sumed = cumsum(coords)
def _update(i):
c1, c2 = sumed[i: i+2]
fig.canvas.restore_region(background)
if c1[2] == 1. and c2[2] == 1.:
line, = ax.plot([c2[0], c2[0]], [-c2[1], -c2[1]])
elif c1[2] != 1.:
line, = ax.plot([c1[0], c2[0]], [-c1[1], -c2[1]])
else:
line, = ax.plot([c1[0], c1[0]], [-c1[1], -c1[1]])
fig.canvas.blit(ax.bbox)
return line,
anim = animation.FuncAnimation(fig, _update, frames=len(sumed) - 2,
interval=16, blit=True, repeat=False)
if args.save is not None:
anim.save(args.save, fps=60, extra_args=['-vcodec', 'libx264'])
plt.show()
if args.text is not None:
break
if __name__ == '__main__':
main()
| 41.57085
| 118
| 0.523763
|
426bfe69a3bf724a1170331e637f3380fcaf80e5
| 35
|
py
|
Python
|
stam_pytorch/__init__.py
|
lab176344/STAM-pytorch
|
1ea4fd218d1567e94d27581e48e3b8ce50f985ed
|
[
"MIT"
] | 100
|
2021-03-28T21:50:30.000Z
|
2022-03-21T06:34:59.000Z
|
stam_pytorch/__init__.py
|
lab176344/STAM-pytorch
|
1ea4fd218d1567e94d27581e48e3b8ce50f985ed
|
[
"MIT"
] | 5
|
2021-03-29T13:24:04.000Z
|
2022-02-01T20:03:00.000Z
|
stam_pytorch/__init__.py
|
lab176344/STAM-pytorch
|
1ea4fd218d1567e94d27581e48e3b8ce50f985ed
|
[
"MIT"
] | 13
|
2021-03-29T01:36:14.000Z
|
2022-03-30T05:59:24.000Z
|
from stam_pytorch.stam import STAM
| 17.5
| 34
| 0.857143
|
efe83e4fba52286da6a14089f9972879eb399e62
| 3,786
|
py
|
Python
|
tests/garage/torch/algos/test_ddpg.py
|
igor-krawczuk/garage
|
aa86ce710c6d01380477d6feddc0e38427b1e3b4
|
[
"MIT"
] | null | null | null |
tests/garage/torch/algos/test_ddpg.py
|
igor-krawczuk/garage
|
aa86ce710c6d01380477d6feddc0e38427b1e3b4
|
[
"MIT"
] | null | null | null |
tests/garage/torch/algos/test_ddpg.py
|
igor-krawczuk/garage
|
aa86ce710c6d01380477d6feddc0e38427b1e3b4
|
[
"MIT"
] | null | null | null |
"""This script creates a test that fails when DDPG performance is too low."""
import gym
import pytest
import torch
from torch.nn import functional as F # NOQA
from garage.envs import normalize
from garage.envs.base import GarageEnv
from garage.experiment import deterministic, LocalRunner
from garage.np.exploration_strategies import OUStrategy
from garage.replay_buffer import SimpleReplayBuffer
from garage.torch.algos import DDPG
from garage.torch.policies import DeterministicMLPPolicy
from garage.torch.q_functions import ContinuousMLPQFunction
from tests.fixtures import snapshot_config
class TestDDPG:
"""Test class for DDPG."""
@pytest.mark.mujoco
@pytest.mark.large
def test_ddpg_double_pendulum(self):
"""Test DDPG with Pendulum environment."""
deterministic.set_seed(0)
runner = LocalRunner(snapshot_config)
env = GarageEnv(gym.make('InvertedDoublePendulum-v2'))
action_noise = OUStrategy(env.spec, sigma=0.2)
policy = DeterministicMLPPolicy(env_spec=env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=F.relu,
output_nonlinearity=torch.tanh)
qf = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=F.relu)
replay_buffer = SimpleReplayBuffer(env_spec=env.spec,
size_in_transitions=int(1e6),
time_horizon=100)
algo = DDPG(env_spec=env.spec,
policy=policy,
qf=qf,
replay_buffer=replay_buffer,
steps_per_epoch=20,
n_train_steps=50,
min_buffer_size=int(1e4),
exploration_strategy=action_noise,
target_update_tau=1e-2,
discount=0.9)
runner.setup(algo, env)
last_avg_ret = runner.train(n_epochs=10, batch_size=100)
assert last_avg_ret > 45
env.close()
@pytest.mark.mujoco
@pytest.mark.large
def test_ddpg_pendulum(self):
"""Test DDPG with Pendulum environment.
This environment has a [-3, 3] action_space bound.
"""
deterministic.set_seed(0)
runner = LocalRunner(snapshot_config)
env = GarageEnv(normalize(gym.make('InvertedPendulum-v2')))
action_noise = OUStrategy(env.spec, sigma=0.2)
policy = DeterministicMLPPolicy(env_spec=env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=F.relu,
output_nonlinearity=torch.tanh)
qf = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=F.relu)
replay_buffer = SimpleReplayBuffer(env_spec=env.spec,
size_in_transitions=int(1e6),
time_horizon=100)
algo = DDPG(env_spec=env.spec,
policy=policy,
qf=qf,
replay_buffer=replay_buffer,
steps_per_epoch=20,
n_train_steps=50,
min_buffer_size=int(1e4),
exploration_strategy=action_noise,
target_update_tau=1e-2,
discount=0.9)
runner.setup(algo, env)
last_avg_ret = runner.train(n_epochs=10, batch_size=100)
assert last_avg_ret > 10
env.close()
| 37.485149
| 77
| 0.559694
|
60b19e6bcf5095763b0c383ac66e3fc7f68041dc
| 23,584
|
py
|
Python
|
training.py
|
davidwfong/ViolinMelodyCNNs
|
79fbfc4f03f081ef4b3145e341d1aec49e934a9e
|
[
"Apache-2.0"
] | 6
|
2018-06-20T10:29:22.000Z
|
2018-06-29T22:56:00.000Z
|
training.py
|
davidwfong/ViolinMelodyCNNs
|
79fbfc4f03f081ef4b3145e341d1aec49e934a9e
|
[
"Apache-2.0"
] | 1
|
2018-09-09T22:08:45.000Z
|
2019-04-17T11:12:16.000Z
|
training.py
|
davidwfong/ViolinMelodyCNNs
|
79fbfc4f03f081ef4b3145e341d1aec49e934a9e
|
[
"Apache-2.0"
] | null | null | null |
#IMPORT RELEVANT MODULES
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import itertools
import preprocessing
from sklearn import model_selection
from keras.models import Model
from keras.utils import np_utils, plot_model
from keras.layers import Flatten, Dropout, Activation, BatchNormalization, Concatenate
from keras.layers import Input, Dense, Conv2D, Conv2DTranspose, AveragePooling2D, MaxPooling2D
from keras.losses import categorical_crossentropy
from keras.models import load_model
from keras import optimizers
from keras import callbacks
from keras.callbacks import ModelCheckpoint
import keras.backend as K
#-------------------------------------------------------------------------------------------------------------------
#FUNCTIONS
def initialiseWithSeed(num):
seed = num
initialisation = np.random.seed(seed)
return initialisation
def getInputShape(rows=192,columns=11,channels=2):
inputShape = (rows,columns,channels)
return inputShape
def splitData(XFull,yFull,valSetSize,outputToStratify):
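    # Note: stratify is passed as None below, so outputToStratify does not affect this split.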
X_Train, X_Val, Y_Train, Y_Val = model_selection.train_test_split(XFull, yFull,
test_size=valSetSize,
stratify=None)
print("Train-test split complete with "+str(valSetSize)+" validation set")
return X_Train, X_Val, Y_Train, Y_Val
def encodeLabels(yTrain, yTest, numClasses):
yTrainEncoded = np_utils.to_categorical(yTrain, numClasses)
yTestEncoded = np_utils.to_categorical(yTest, numClasses)
return yTrainEncoded, yTestEncoded
def encodeLabelsSingle(y, numClasses):
yEncoded = np_utils.to_categorical(y, numClasses)
return yEncoded
def buildSingleMECNN(rows,columns,channels):
inputShape = getInputShape(rows,columns,channels)
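    # Layer hyperparameters, named as used in the Conv2D/pooling calls below:
    # fH/fW = filter (kernel) height/width, nC = number of output channels,
    # sH/sW = strides, pDropout = dropout rate, C = number of output classes.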
fH_l1 = 4
fW_l1 = 3
nC_l1 = 64
sH_l1 = 1
sW_l1 = 1
fH_l2 = 4
fW_l2 = 3
nC_l2 = 64
sH_l2 = 1
sW_l2 = 1
fH_l2C = 1
fW_l2C = 2
sH_l2C = 1
sW_l2C = 2
fH_l2D = 2
fW_l2D = 1
sH_l2D = 2
sW_l2D = 1
fH_l3 = 4
fW_l3 = 3
nC_l3 = 128
sH_l3 = 1
sW_l3 = 1
fH_l3C = 1
fW_l3C = 2
sH_l3C = 1
sW_l3C = 2
fH_l3D = 2
fW_l3D = 1
sH_l3D = 2
sW_l3D = 1
pDropout_l4 = 0.25
fH_l4 = 12
fW_l4 = 3
nC_l4 = 256
sH_l4 = 1
sW_l4 = 1
fH_l4C = 1
fW_l4C = 2
sH_l4C = 1
sW_l4C = 2
pDropout_l5 = 0.50
hiddenUnits_l5 = 392
pDropout_l6 = 0.50
C = 49
input_l0 = Input(shape=inputShape)
Conv_l1 = Conv2D(nC_l1, kernel_size=(fH_l1,fW_l1), strides=(sH_l1,sW_l1),
padding='same', activation='relu',
kernel_initializer='glorot_uniform', bias_initializer='zeros')(input_l0)
BatchNorm_l1B = BatchNormalization(axis=-1)(Conv_l1)
Conv_l2 = Conv2D(nC_l2, kernel_size=(fH_l2,fW_l2), strides=(sH_l2,sW_l2),
padding='same', activation='relu',
kernel_initializer='glorot_uniform', bias_initializer='zeros')(BatchNorm_l1B)
BatchNorm_l2B = BatchNormalization(axis=-1)(Conv_l2)
AP_l2C = AveragePooling2D(pool_size=(fH_l2C,fW_l2C), strides=(sH_l2C,sW_l2C),
padding='valid')(BatchNorm_l2B)
MP_l2D = MaxPooling2D(pool_size=(fH_l2D,fW_l2D), strides=(sH_l2D,sW_l2D),
padding='valid')(AP_l2C)
Conv_l3 = Conv2D(nC_l3, kernel_size=(fH_l3,fW_l3), strides=(sH_l3,sW_l3),
padding='same', activation='relu',
kernel_initializer='glorot_uniform', bias_initializer='zeros')(MP_l2D)
BatchNorm_l3B = BatchNormalization(axis=-1)(Conv_l3)
AP_l3C = AveragePooling2D(pool_size=(fH_l3C,fW_l3C), strides=(sH_l3C,sW_l3C),
padding='valid')(BatchNorm_l3B)
MP_l3D = MaxPooling2D(pool_size=(fH_l3D,fW_l3D), strides=(sH_l3D,sW_l3D),
padding='valid')(AP_l3C)
Dropout_l4A = Dropout(pDropout_l4)(MP_l3D)
Conv_l4 = Conv2D(nC_l4, kernel_size=(fH_l4,fW_l4), strides=(sH_l4,sW_l4),
padding='same', activation='relu',
kernel_initializer='glorot_uniform', bias_initializer='zeros')(Dropout_l4A)
BatchNorm_l4B = BatchNormalization(axis=-1)(Conv_l4)
AP_l4C = AveragePooling2D(pool_size=(fH_l4C,fW_l4C), strides=(sH_l4C,sW_l4C),
padding='valid')(BatchNorm_l4B)
Flatten_l5A = Flatten()(AP_l4C)
Dropout_l5B = Dropout(pDropout_l5)(Flatten_l5A)
FC_l5 = Dense(hiddenUnits_l5, activation=None,
kernel_initializer='glorot_uniform', bias_initializer='zeros')(Dropout_l5B)
Dropout_l6A = Dropout(pDropout_l6)(FC_l5)
FC_l6 = Dense(C, activation='softmax',
kernel_initializer='glorot_uniform', bias_initializer='zeros')(Dropout_l6A)
modelBuilt = Model(inputs=[input_l0], outputs=[FC_l6])
print(modelBuilt.summary())
return modelBuilt
def setHyperparamsPolyMECNN():
E = 25
batchSize = 64
learningRate = 0.001
learningDecay = 1e-06
beta = 0.90
optimiser = optimizers.SGD(lr=learningRate, decay=learningDecay,
momentum=beta, nesterov=True)
"""
beta1 = 0.90
beta2 = 0.999
optimiser = optimizers.Adam(lr=learningRate, decay=learningDecay,
beta_1=beta1, beta_2=beta2)
"""
lossFunction = 'categorical_crossentropy'
return E, batchSize, optimiser, lossFunction
def setHyperparamsMonoMECNN():
E = 50
batchSize = 64
learningRate = 0.001
learningDecay = 1e-06
momentum = 0.90
optimiser = optimizers.SGD(lr=learningRate, decay=learningDecay,
momentum=momentum, nesterov=True)
"""
beta1 = 0.90
beta2 = 0.999
optimiser = optimizers.Adam(lr=learningRate, decay=learningDecay,
beta_1=beta1, beta_2=beta2)
"""
lossFunction = 'categorical_crossentropy'
return E, batchSize, optimiser, lossFunction
def buildMTMECNN(rows,columns,channels):
inputShape = getInputShape(rows,columns,channels)
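    # Same layer-hyperparameter naming as in buildSingleMECNN above (fH/fW, nC, sH/sW, pDropout, C).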
fH_l1 = 4
fW_l1 = 3
nC_l1 = 64
sH_l1 = 1
sW_l1 = 1
fH_l2 = 4
fW_l2 = 3
nC_l2 = 64
sH_l2 = 1
sW_l2 = 1
fH_l2C = 1
fW_l2C = 2
sH_l2C = 1
sW_l2C = 2
fH_l2D = 2
fW_l2D = 1
sH_l2D = 2
sW_l2D = 1
fH_l3 = 4
fW_l3 = 3
nC_l3 = 128
sH_l3 = 1
sW_l3 = 1
fH_l3C = 1
fW_l3C = 2
sH_l3C = 1
sW_l3C = 2
fH_l3D = 2
fW_l3D = 1
sH_l3D = 2
sW_l3D = 1
pDropout_l4 = 0.25
fH_l4 = 12
fW_l4 = 3
nC_l4 = 256
sH_l4 = 1
sW_l4 = 1
fH_l4C = 1
fW_l4C = 2
sH_l4C = 1
sW_l4C = 2
pDropout_l5 = 0.50
hiddenUnits_l5 = 392
pDropout_l6 = 0.50
C = 49
input_l0_Mono = Input(shape=inputShape)
input_l0_Poly = Input(shape=inputShape)
Conv_l1_Mono = Conv2D(nC_l1, kernel_size=(fH_l1,fW_l1), strides=(sH_l1,sW_l1),
padding='same',
activation='relu')(input_l0_Mono)
Conv_l1_Poly = Conv2D(nC_l1, kernel_size=(fH_l1,fW_l1), strides=(sH_l1,sW_l1),
padding='same',
activation='relu')(input_l0_Poly)
BatchNorm_l1B_Mono = BatchNormalization(axis=-1)(Conv_l1_Mono)
BatchNorm_l1B_Poly = BatchNormalization(axis=-1)(Conv_l1_Poly)
Conv_l2_Mono = Conv2D(nC_l2, kernel_size=(fH_l2,fW_l2), strides=(sH_l2,sW_l2),
padding='same', activation='relu')(BatchNorm_l1B_Mono)
Conv_l2_Poly = Conv2D(nC_l2, kernel_size=(fH_l2,fW_l2), strides=(sH_l2,sW_l2),
padding='same', activation='relu')(BatchNorm_l1B_Poly)
BatchNorm_l2B_Mono = BatchNormalization(axis=-1)(Conv_l2_Mono)
BatchNorm_l2B_Poly = BatchNormalization(axis=-1)(Conv_l2_Poly)
AP_l2C_Mono = AveragePooling2D(pool_size=(fH_l2C,fW_l2C), strides=(sH_l2C,sW_l2C),
padding='valid')(BatchNorm_l2B_Mono)
AP_l2C_Poly = AveragePooling2D(pool_size=(fH_l2C,fW_l2C), strides=(sH_l2C,sW_l2C),
padding='valid')(BatchNorm_l2B_Poly)
MP_l2D_Mono = MaxPooling2D(pool_size=(fH_l2D,fW_l2D), strides=(sH_l2D,sW_l2D),
padding='valid')(AP_l2C_Mono)
MP_l2D_Poly = MaxPooling2D(pool_size=(fH_l2D,fW_l2D), strides=(sH_l2D,sW_l2D),
padding='valid')(AP_l2C_Poly)
Conv_l3_Mono = Conv2D(nC_l3, kernel_size=(fH_l3,fW_l3), strides=(sH_l3,sW_l3),
padding='same', activation='relu')(MP_l2D_Mono)
Conv_l3_Poly = Conv2D(nC_l3, kernel_size=(fH_l3,fW_l3), strides=(sH_l3,sW_l3),
padding='same', activation='relu')(MP_l2D_Poly)
BatchNorm_l3B_Mono = BatchNormalization(axis=-1)(Conv_l3_Mono)
BatchNorm_l3B_Poly = BatchNormalization(axis=-1)(Conv_l3_Poly)
AP_l3C_Mono = AveragePooling2D(pool_size=(fH_l3C,fW_l3C), strides=(sH_l3C,sW_l3C),
padding='valid')(BatchNorm_l3B_Mono)
AP_l3C_Poly = AveragePooling2D(pool_size=(fH_l3C,fW_l3C), strides=(sH_l3C,sW_l3C),
padding='valid')(BatchNorm_l3B_Poly)
MP_l3D_Mono = MaxPooling2D(pool_size=(fH_l3D,fW_l3D), strides=(sH_l3D,sW_l3D),
padding='valid')(AP_l3C_Mono)
MP_l3D_Poly = MaxPooling2D(pool_size=(fH_l3D,fW_l3D), strides=(sH_l3D,sW_l3D),
padding='valid')(AP_l3C_Poly)
Dropout_l4A_Mono = Dropout(pDropout_l4)(MP_l3D_Mono)
Dropout_l4A_Poly = Dropout(pDropout_l4)(MP_l3D_Poly)
Conv_l4_Mono = Conv2D(nC_l4, kernel_size=(fH_l4,fW_l4), strides=(sH_l4,sW_l4),
padding='same', activation='relu')(Dropout_l4A_Mono)
Conv_l4_Poly = Conv2D(nC_l4, kernel_size=(fH_l4,fW_l4), strides=(sH_l4,sW_l4),
padding='same', activation='relu')(Dropout_l4A_Poly)
BatchNorm_l4B_Mono = BatchNormalization(axis=-1)(Conv_l4_Mono)
BatchNorm_l4B_Poly = BatchNormalization(axis=-1)(Conv_l4_Poly)
AP_l4C_Mono = AveragePooling2D(pool_size=(fH_l4C,fW_l4C), strides=(sH_l4C,sW_l4C),
padding='valid')(BatchNorm_l4B_Mono)
AP_l4C_Poly = AveragePooling2D(pool_size=(fH_l4C,fW_l4C), strides=(sH_l4C,sW_l4C),
padding='valid')(BatchNorm_l4B_Poly)
Flatten_l5A_Mono = Flatten()(AP_l4C_Mono)
Flatten_l5A_Poly = Flatten()(AP_l4C_Poly)
Dropout_l5B_Mono = Dropout(pDropout_l5)(Flatten_l5A_Mono)
Dropout_l5B_Poly = Dropout(pDropout_l5)(Flatten_l5A_Poly)
FC_l5_Mono = Dense(hiddenUnits_l5, activation=None)(Dropout_l5B_Mono)
FC_l5_Poly = Dense(hiddenUnits_l5, activation=None)(Dropout_l5B_Poly)
Dropout_l6A_Mono = Dropout(pDropout_l6)(FC_l5_Mono)
Dropout_l6A_Poly = Dropout(pDropout_l6)(FC_l5_Poly)
output_l6_Mono = Dense(C, activation='softmax', name='MONOME')(Dropout_l6A_Mono)
output_l6_Poly = Dense(C, activation='softmax', name='POLYME')(Dropout_l6A_Poly)
modelBuilt = Model(inputs=[input_l0_Poly, input_l0_Mono],
outputs=[output_l6_Poly, output_l6_Mono])
print(modelBuilt.summary())
return modelBuilt
def getLossWeights(numModel):
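    # gammaPoly weights the polyphonic output's loss; the monophonic output gets
    # the complementary weight 1 - gammaPoly (see the [gammaPoly, gammaMono] list below).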
if numModel==1:
gammaPoly = 0.5
elif numModel==2:
gammaPoly = 0.6
elif numModel==3:
gammaPoly = 0.7
elif numModel==4:
gammaPoly = 0.8
elif numModel==5:
gammaPoly = 0.9
else:
gammaPoly = 0.5
gammaMono = 1.0 - gammaPoly
lossWeights = [gammaPoly, gammaMono]
return lossWeights
def setHyperparamsMTMECNN():
E = 75
batchSize = 64
learningRate = 0.001
learningDecay = 1e-06
momentum = 0.90
optimiser = optimizers.SGD(lr=learningRate, decay=learningDecay,
momentum=momentum, nesterov=True)
"""
beta1 = 0.90
beta2 = 0.999
optimiser = optimizers.Adam(lr=learningRate, decay=learningDecay,
beta_1=beta1, beta_2=beta2)
"""
lossFunction = ['categorical_crossentropy', 'categorical_crossentropy']
return E, batchSize, optimiser, lossFunction
def compileMultiTaskModel(modelBuilt, lossFunction, lossWeights, optimiser):
modelBuilt.compile(loss=lossFunction,
loss_weights=lossWeights,
optimizer=optimiser,
metrics=['accuracy'])
return modelBuilt
def trainMultiTaskModel(modelCompiled,
trainingX1, trainingY1, trainingX2, trainingY2,
E, batchSize, nameModel, numModel, pathSave,
vsplitfactor=0.1):
filenameModel = nameModel+'_'+str(numModel)+'.h5'
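    # Checkpoint only the best weights seen so far, judged by validation loss.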
checkpointer = ModelCheckpoint(filepath=pathSave+'/'+filenameModel,
monitor='val_loss', verbose=2,
save_best_only=True)
modelHistory = modelCompiled.fit([trainingX1, trainingX2],
[trainingY1, trainingY2],
batch_size=batchSize, epochs=E,
verbose=2, validation_split=vsplitfactor,
callbacks = [checkpointer])
print('Model has been trained and will now be evaluated on test set')
return modelCompiled, modelHistory
def plotMultiTaskModelHistory(modelHistory, nameModel, numModel, E):
print(modelHistory.history.keys())
plt.figure(1, figsize=(22, 11))
plt.rc('axes', axisbelow=True)
plt.plot(modelHistory.history['POLYME_acc'])
plt.plot(modelHistory.history['val_POLYME_acc'])
plt.plot(modelHistory.history['MONOME_acc'])
plt.plot(modelHistory.history['val_MONOME_acc'])
plt.xlabel('Epochs', fontsize = 16)
plt.ylabel('Accuracy', fontsize = 16)
plt.grid(axis='both', zorder=0)
plt.xticks(np.arange(0, E+1, 1), fontsize = 12)
plt.yticks(fontsize=15)
plt.legend(['Training Polyphonic','Validation Polyphonic', 'Training Monophonic', 'Validation Monophonic'], loc='upper left', fontsize=16)
#plt.title('Accuracy of '+nameModel+' over '+str(E)+' training epochs')
plt.savefig(nameModel+'_'+str(numModel)+'_SingleTaskAccuracy'+'.png')
plt.figure(2, figsize=(22, 11))
plt.rc('axes', axisbelow=True)
plt.plot(modelHistory.history['POLYME_loss'])
plt.plot(modelHistory.history['val_POLYME_loss'])
plt.plot(modelHistory.history['MONOME_loss'])
plt.plot(modelHistory.history['val_MONOME_loss'])
plt.xlabel('Epochs', fontsize = 16)
plt.ylabel('Loss', fontsize = 16)
plt.grid(axis='both', zorder=0)
plt.xticks(np.arange(0, E+1, 1), fontsize = 12)
plt.yticks(fontsize=15)
plt.legend(['Training Polyphonic','Validation Polyphonic', 'Training Monophonic', 'Validation Monophonic'], loc='upper left', fontsize=16)
#plt.title('Loss of '+nameModel+' over '+str(E)+' training epochs')
plt.savefig(nameModel+'_'+str(numModel)+'_SingleTaskLoss'+'.png')
plt.figure(3, figsize=(22, 11))
plt.rc('axes', axisbelow=True)
plt.plot(modelHistory.history['loss'])
plt.plot(modelHistory.history['val_loss'])
plt.xlabel('Epochs', fontsize = 16)
plt.ylabel('Loss', fontsize = 16)
plt.grid(axis='both', zorder=0)
plt.xticks(np.arange(0, E+1, 1), fontsize = 12)
plt.yticks(fontsize=15)
plt.legend(['Training Multi-Task','Validation Multi-Task'], loc='upper left', fontsize=16)
#plt.title('Accuracy of '+nameModel+' over '+str(E)+' training epochs')
plt.savefig(nameModel+'_'+str(numModel)+'_MultiTaskLoss'+'.png')
def evaluateMultiTaskModel(modelTrained, batchSize, XTest, yTest):
allMetrics = modelTrained.metrics_names
scores = modelTrained.evaluate([XTest, XTest], [yTest, yTest], batch_size=batchSize, verbose=2)
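    # Keras orders the scores as [total_loss, poly_loss, mono_loss, poly_acc, mono_acc] for this two-output model.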
OverallLoss = scores[0]
TargetLoss = scores[1]
TargetAcc = scores[3]
AuxLoss = scores[2]
AuxAcc = scores[4]
print("Test Loss for Target Task is " + str(TargetLoss))
print("Test Accuracy for Target Task is " + str(TargetAcc))
print("Test Loss for Auxiliary Task is " + str(AuxLoss))
print("Test Accuracy for Auxiliary Task is " + str(AuxAcc))
print("Overall Test Loss is " + str(OverallLoss))
return allMetrics, scores, TargetLoss, TargetAcc
def saveModelDiagram(model, nameModel):
plot_model(model, to_file=nameModel+'.png')
print("Saved Model Graph")
def compileModel(modelBuilt, lossFunction, optimiser):
modelBuilt.compile(loss=lossFunction,
optimizer=optimiser,
metrics=['accuracy'])
return modelBuilt
def trainModel(modelCompiled, trainingX, trainingY,
E, batchSize, nameModel, numModel, pathSave,
vsplit=True, vsplitfactor=0.1,
XVal=None, yVal=None):
filenameModel = nameModel+'_'+str(numModel)+'.h5'
checkpointer = ModelCheckpoint(filepath=pathSave+'/'+filenameModel,
monitor='val_loss', verbose=2,
save_best_only=True)
if vsplit==True:
modelHistory = modelCompiled.fit(trainingX, trainingY,
batch_size=batchSize, epochs=E,
verbose=2, validation_split=vsplitfactor,
callbacks = [checkpointer])
elif vsplit==False:
modelHistory = modelCompiled.fit(trainingX, trainingY,
batch_size=batchSize, epochs=E,
verbose=2, validation_data=(XVal,yVal),
callbacks = [checkpointer])
print('Model has been trained and will now be evaluated on test set')
return modelCompiled, modelHistory
def saveModel(modelTrained, nameModel, numModel, savepath):
filenameModel = nameModel+'_'+str(numModel)+'.h5'
print('Saving model under filename '+filenameModel)
modelTrained.save(savepath+ '/' + filenameModel)
print('Saved model under filename '+filenameModel)
def loadpretrainedmodel(modelfilename):
loadedModel = load_model(modelfilename)
print("Model loaded")
return loadedModel
def plotModelHistory(modelHistory, nameModel, numModel, E):
print(modelHistory.history.keys())
plt.figure(1, figsize=(16, 8))
plt.rc('axes', axisbelow=True)
plt.plot(modelHistory.history['acc'])
plt.plot(modelHistory.history['val_acc'])
plt.xlabel('Epochs', fontsize = 14)
plt.ylabel('Accuracy', fontsize = 14)
plt.grid(axis='both',zorder=0)
plt.xticks(np.arange(0, E+1, 1), fontsize = 12)
plt.legend(['Training','Validation'], loc='upper left', fontsize=14)
#plt.title('Accuracy of '+nameModel+' over '+str(E)+' training epochs')
plt.savefig(nameModel+'_'+str(numModel)+'_Accuracy'+'.png')
plt.figure(2, figsize=(16, 8))
plt.rc('axes', axisbelow=True)
plt.plot(modelHistory.history['loss'])
plt.plot(modelHistory.history['val_loss'])
plt.xlabel('Epochs', fontsize = 14)
plt.ylabel('Loss', fontsize = 14)
plt.grid(axis='both',zorder=0)
plt.xticks(np.arange(0, E+1, 1), fontsize = 12)
plt.legend(['Training','Validation'], loc='upper left', fontsize=14)
#plt.title('Loss of '+nameModel+' over '+str(E)+' training epochs')
plt.savefig(nameModel+'_'+str(numModel)+'_Loss'+'.png')
print("Model has been evaluated on test set")
def evaluateModel(modelTrained, batchSize, XTest, yTest):
score = modelTrained.evaluate(XTest, yTest, batch_size=batchSize, verbose=2)
testLoss = score[0]
testAccuracy = score[1]
print("Test Loss is " + str(testLoss))
print("Test Accuracy is " + str(testAccuracy))
return testLoss, testAccuracy
def getClassNames(y):
f0Labels = np.unique(y)
lowestMIDI = preprocessing.getMIDIfromNote('G3')
classNamesTest = preprocessing.getNotesfromF0Labels(f0Labels, offset=lowestMIDI-1)
return lowestMIDI, f0Labels, classNamesTest
def getAllClassNames():
classnamesAll = ['None', 'G3', 'G#3', 'A3', 'A#3', 'B3', 'C4',
'C#4', 'D4', 'D#4', 'E4', 'F4', 'F#4', 'G4',
'G#4', 'A4', 'A#4', 'B4', 'C5', 'C#5', 'D5',
'D#5', 'E5', 'F5', 'F#5', 'G5', 'G#5', 'A5',
'A#5', 'B5', 'C6', 'C#6', 'D6', 'D#6', 'E6',
'F6', 'F#6', 'G6', 'G#6', 'A6', 'A#6', 'B6',
'C7', 'C#7', 'D7', 'D#7', 'E7', 'F7', 'F#7']
return classnamesAll
def plotConfusionMatrixSingle(model, nameModel, numModel,
XTest, YTest, classNames):
YHat = model.predict(np.array(XTest))
YHat_nonCategory = [np.argmax(t) for t in YHat]
YTest_nonCategory = [np.argmax(t) for t in YTest]
cnf_matrix = confusion_matrix(YTest_nonCategory, YHat_nonCategory)
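    # Normalise each row (ground-truth class) so the cell annotations show per-class proportions.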
cm = cnf_matrix.astype('float') / (cnf_matrix.astype('float').sum(axis=1))[:, np.newaxis]
plt.figure(figsize=(20, 20))
plt.imshow(cnf_matrix, interpolation='nearest', cmap=plt.cm.Blues)
#plt.title('Confusion Matrix')
#plt.colorbar()
tick_marks = np.arange(len(classNames))
plt.xticks(tick_marks, classNames, rotation = 45, fontsize = 17)
plt.yticks(tick_marks, classNames, fontsize = 17)
fmt = '.2f'
thresh = cm.max() * 0.85
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black", fontsize = 17)
plt.ylabel('Ground Truth f0 Label', fontsize = 17)
plt.xlabel('Predicted f0 Label', fontsize = 17)
plt.tight_layout()
plt.savefig(nameModel+'_'+str(numModel)+'_ConfusionMatrix'+'.png')
return cm
def plotConfusionMatrixMT(model, nameModel, numModel,
XTest, YTest, classNames):
YHat = model.predict(XTest)[0]
YTestPoly = YTest[0]
YHat_nonCategory = [np.argmax(t) for t in YHat]
YTest_nonCategory = [np.argmax(t) for t in YTestPoly]
cnf_matrix = confusion_matrix(YTest_nonCategory, YHat_nonCategory)
cm = cnf_matrix.astype('float') / (cnf_matrix.astype('float').sum(axis=1))[:, np.newaxis]
plt.figure(figsize=(20, 20))
plt.imshow(cnf_matrix, interpolation='nearest', cmap=plt.cm.Blues)
#plt.title('Confusion Matrix')
#plt.colorbar()
tick_marks = np.arange(len(classNames))
plt.xticks(tick_marks, classNames, rotation = 45, fontsize = 17)
plt.yticks(tick_marks, classNames, fontsize = 17)
fmt = '.2f'
thresh = cm.max() * 0.85
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black", fontsize = 17)
plt.ylabel('Ground Truth f0 Label', fontsize = 17)
plt.xlabel('Predicted f0 Label', fontsize = 17)
plt.tight_layout()
plt.savefig(nameModel+'_'+str(numModel)+'_ConfusionMatrix'+'.png')
return cm
| 37.55414
| 142
| 0.620378
|
74c187eaaf50834930934613ffd8f6fd08df9468
| 2,768
|
py
|
Python
|
python3/koans/about_string_manipulation.py
|
mahletbogale/python_koans
|
61380577467de08c744c47af0efccb9459497f98
|
[
"MIT"
] | null | null | null |
python3/koans/about_string_manipulation.py
|
mahletbogale/python_koans
|
61380577467de08c744c47af0efccb9459497f98
|
[
"MIT"
] | null | null | null |
python3/koans/about_string_manipulation.py
|
mahletbogale/python_koans
|
61380577467de08c744c47af0efccb9459497f98
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutStringManipulation(Koan):
def test_use_format_to_interpolate_variables(self):
value1 = 'one'
value2 = 2
string = "The values are {0} and {1}".format(value1, value2)
self.assertEqual('The values are one and 2', string)
def test_formatted_values_can_be_shown_in_any_order_or_be_repeated(self):
value1 = 'doh'
value2 = 'DOH'
string = "The values are {1}, {0}, {0} and {1}!".format(value1, value2)
self.assertEqual('The values are DOH, doh, doh and DOH!', string)
def test_any_python_expression_may_be_interpolated(self):
import math # import a standard python module with math functions
decimal_places = 4
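        # The nested field {0:.{1}f} formats the first argument as a float with `decimal_places` digits.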
string = "The square root of 5 is {0:.{1}f}".format(math.sqrt(5),
decimal_places)
self.assertEqual('The square root of 5 is 2.2361', string)
def test_you_can_get_a_substring_from_a_string(self):
string = "Bacon, lettuce and tomato"
self.assertEqual('let', string[7:10])
def test_you_can_get_a_single_character_from_a_string(self):
string = "Bacon, lettuce and tomato"
self.assertEqual("a", string[1])
def test_single_characters_can_be_represented_by_integers(self):
self.assertEqual(97, ord('a'))
self.assertEqual(True, ord('b') == (ord('a') + 1))
def test_strings_can_be_split(self):
string = "Sausage Egg Cheese"
words = string.split()
self.assertListEqual(['Sausage', 'Egg', 'Cheese'], words)
def test_strings_can_be_split_with_different_patterns(self):
import re #import python regular expression library
string = "the,rain;in,spain"
pattern = re.compile(',|;')
words = pattern.split(string)
self.assertListEqual(["the","rain","in","spain"], words)
# Pattern is a Python regular expression pattern which matches ',' or ';'
def test_raw_strings_do_not_interpret_escape_characters(self):
string = r'\n'
self.assertNotEqual('\n', string)
self.assertEqual(r'\n', string)
self.assertEqual(2, len(string))
# Useful in regular expressions, file paths, URLs, etc.
def test_strings_can_be_joined(self):
words = ["Now", "is", "the", "time"]
self.assertEqual('Now is the time', ' '.join(words))
def test_strings_can_change_case(self):
self.assertEqual('Guido', 'guido'.capitalize())
self.assertEqual('GUIDO', 'guido'.upper())
self.assertEqual('timbot', 'TimBot'.lower())
self.assertEqual('Guido Van Rossum', 'guido van rossum'.title())
self.assertEqual('tOtAlLy AwEsOmE', 'ToTaLlY aWeSoMe'.swapcase())
| 36.906667
| 81
| 0.649566
|
92ff7140afb365b2d6bb9a1214ce332a6b8a6e32
| 387
|
py
|
Python
|
blender/arm/logicnode/variable/LN_color.py
|
onelsonic/armory
|
55cfead0844923d419d75bf4bd677ebed714b4b5
|
[
"Zlib"
] | 2,583
|
2016-07-27T08:25:47.000Z
|
2022-03-31T10:42:17.000Z
|
blender/arm/logicnode/variable/LN_color.py
|
N8n5h/armory
|
5b4d24f067a2354bafd3ab417bb8e30ee0c5aff8
|
[
"Zlib"
] | 2,122
|
2016-07-31T14:20:04.000Z
|
2022-03-31T20:44:14.000Z
|
blender/arm/logicnode/variable/LN_color.py
|
N8n5h/armory
|
5b4d24f067a2354bafd3ab417bb8e30ee0c5aff8
|
[
"Zlib"
] | 451
|
2016-08-12T05:52:58.000Z
|
2022-03-31T01:33:07.000Z
|
from arm.logicnode.arm_nodes import *
class ColorNode(ArmLogicTreeNode):
"""Stores the given color as a variable."""
bl_idname = 'LNColorNode'
bl_label = 'Color'
arm_version = 1
def arm_init(self, context):
self.add_input('ArmColorSocket', 'Color In', default_value=[1.0, 1.0, 1.0, 1.0])
self.add_output('ArmColorSocket', 'Color Out', is_var=True)
| 29.769231
| 88
| 0.669251
|
ced541c354c6043ad0d0cecb11d6ce920e80a9ea
| 242,200
|
py
|
Python
|
tensorflow/python/framework/ops.py
|
mckib2/tensorflow-icg
|
fede3707c8700a198c8a972978749e4a69bf9a81
|
[
"Apache-2.0"
] | 1
|
2020-07-29T14:42:30.000Z
|
2020-07-29T14:42:30.000Z
|
tensorflow/python/framework/ops.py
|
mckib2/tensorflow-icg
|
fede3707c8700a198c8a972978749e4a69bf9a81
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/framework/ops.py
|
mckib2/tensorflow-icg
|
fede3707c8700a198c8a972978749e4a69bf9a81
|
[
"Apache-2.0"
] | 1
|
2020-11-10T15:45:53.000Z
|
2020-11-10T15:45:53.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions used to construct graphs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import re
import sys
import threading
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import versions_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import monitoring
from tensorflow.python.eager import tape
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import registry
from tensorflow.python.framework import tensor_conversion_registry
from tensorflow.python.framework import tensor_like
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import traceable_stack
from tensorflow.python.framework import versions
from tensorflow.python.ops import control_flow_util
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import lock_util
from tensorflow.python.util import memory
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_stack
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export
ag_ctx = LazyLoader(
"ag_ctx", globals(),
"tensorflow.python.autograph.core.ag_ctx")
# Temporary global switches determining if we should enable the work-in-progress
# calls to the C API. These will be removed once all functionality is supported.
_USE_C_API = True
_USE_C_SHAPES = True
_api_usage_gauge = monitoring.BoolGauge(
"/tensorflow/api/ops_eager_execution",
"Whether ops.enable_eager_execution() is called.")
# pylint: disable=protected-access
_TensorLike = tensor_like._TensorLike
_DTYPES_INTERN_TABLE = dtypes._INTERN_TABLE
# pylint: enable=protected-access
def tensor_id(tensor):
"""Returns a unique identifier for this Tensor."""
return tensor._id # pylint: disable=protected-access
class _UserDeviceSpec(object):
"""Store user-specified device and provide computation of merged device."""
def __init__(self, device_name_or_function):
self._device_name_or_function = device_name_or_function
self.display_name = str(self._device_name_or_function)
self.function = device_name_or_function
self.raw_string = None
if isinstance(device_name_or_function, pydev.MergeDevice):
self.is_null_merge = device_name_or_function.is_null_merge
elif callable(device_name_or_function):
self.is_null_merge = False
dev_func = self._device_name_or_function
func_name = function_utils.get_func_name(dev_func)
func_code = function_utils.get_func_code(dev_func)
if func_code:
fname = func_code.co_filename
lineno = func_code.co_firstlineno
else:
fname = "unknown"
lineno = -1
self.display_name = "%s<%s, %d>" % (func_name, fname, lineno)
elif device_name_or_function is None:
# NOTE(taylorrobie): This MUST be False. None signals a break in the
# device stack, so `is_null_merge` must be False for such a case to
# allow callers to safely skip over null merges without missing a None.
self.is_null_merge = False
else:
self.raw_string = device_name_or_function
self.function = pydev.merge_device(device_name_or_function)
self.is_null_merge = self.function.is_null_merge
# We perform this check in __init__ because it is of non-trivial cost,
# and self.string_merge is typically called many times.
self.fast_string_merge = isinstance(self.function, pydev.MergeDevice)
def string_merge(self, node_def):
if self.fast_string_merge:
return self.function.shortcut_string_merge(node_def)
return compat.as_str(_device_string(self.function(node_def)))
class NullContextmanager(object):
def __init__(self, *args, **kwargs):
pass
def __enter__(self):
pass
def __exit__(self, type_arg, value_arg, traceback_arg):
return False # False values do not suppress exceptions
def _override_helper(clazz_object, operator, func):
"""Overrides (string) operator on Tensors to call func.
Args:
clazz_object: the class to override for; either Tensor or SparseTensor.
operator: the string name of the operator to override.
func: the function that replaces the overridden operator.
Raises:
ValueError: If operator has already been overwritten,
or if operator is not allowed to be overwritten.
"""
existing = getattr(clazz_object, operator, None)
if existing is not None:
# Check to see if this is a default method-wrapper or slot wrapper which
# will be true for the comparison operators.
if not isinstance(existing, type(object.__lt__)):
raise ValueError("operator %s cannot be overwritten again on class %s." %
(operator, clazz_object))
if operator not in Tensor.OVERLOADABLE_OPERATORS:
raise ValueError("Overriding %s is disallowed" % operator)
setattr(clazz_object, operator, func)
def _as_graph_element(obj):
"""Convert `obj` to a graph element if possible, otherwise return `None`.
Args:
obj: Object to convert.
Returns:
The result of `obj._as_graph_element()` if that method is available;
otherwise `None`.
"""
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return None
_TENSOR_LIKE_TYPES = tuple()
def is_dense_tensor_like(t):
"""EXPERIMENTAL: Returns true if `t` implements the tensor interface.
See `register_dense_tensor_like_type()` for the current definition of a
"tensor-like type".
Args:
t: An object.
Returns:
True iff `t` is an instance of one of the registered "tensor-like" types.
"""
return isinstance(t, _TENSOR_LIKE_TYPES)
def register_dense_tensor_like_type(tensor_type):
"""EXPERIMENTAL: Registers `tensor_type` as implementing the tensor interface.
A "tensor-like type" can represent a single dense tensor, and implements
the `name` and `dtype` properties.
Args:
tensor_type: A type implementing the tensor interface.
Raises:
TypeError: If `tensor_type` does not implement the tensor interface.
"""
try:
if not isinstance(tensor_type.name, property):
raise TypeError("Type %s does not define a `name` property" %
tensor_type.__name__)
except AttributeError:
raise TypeError("Type %s does not define a `name` property" %
tensor_type.__name__)
try:
if not isinstance(tensor_type.dtype, property):
raise TypeError("Type %s does not define a `dtype` property" %
tensor_type.__name__)
except AttributeError:
raise TypeError("Type %s does not define a `dtype` property" %
tensor_type.__name__)
# We expect this list to be small, so choose quadratic complexity
# for registration, so that we have a tuple that can be used for
# more efficient `isinstance` checks later.
global _TENSOR_LIKE_TYPES
_TENSOR_LIKE_TYPES = tuple(list(_TENSOR_LIKE_TYPES) + [tensor_type])
def uid():
"""A unique (within this program execution) integer."""
return c_api.TFE_Py_UID()
def numpy_text(tensor, is_repr=False):
"""Human readable representation of a tensor's numpy value."""
if tensor.dtype.is_numpy_compatible:
# pylint: disable=protected-access
text = repr(tensor._numpy()) if is_repr else str(tensor._numpy())
# pylint: enable=protected-access
else:
text = "<unprintable>"
if "\n" in text:
text = "\n" + text
return text
@tf_export(v1=["enable_tensor_equality"])
def enable_tensor_equality():
"""Compare Tensors with element-wise comparison and thus be unhashable.
  Comparing tensors element-wise allows comparisons such as
  tf.Variable(1.0) == 1.0. Element-wise equality implies that tensors are
  unhashable, so they can no longer be used directly in sets or as keys in
  a dictionary.
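  For example, an illustrative sketch (assuming eager execution is enabled):
  ```python
  tf.compat.v1.enable_tensor_equality()
  tf.constant([1.0, 2.0]) == tf.constant([1.0, 3.0])  # element-wise: [True, False]
  ```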
"""
Tensor._USE_EQUALITY = True # pylint: disable=protected-access
@tf_export(v1=["disable_tensor_equality"])
def disable_tensor_equality():
"""Compare Tensors by their id and be hashable.
This is a legacy behaviour of TensorFlow and is highly discouraged.
"""
Tensor._USE_EQUALITY = False # pylint: disable=protected-access
@tf_export("Tensor")
class Tensor(_TensorLike):
"""Represents one of the outputs of an `Operation`.
A `Tensor` is a symbolic handle to one of the outputs of an
`Operation`. It does not hold the values of that operation's output,
but instead provides a means of computing those values in a
TensorFlow `tf.compat.v1.Session`.
This class has two primary purposes:
1. A `Tensor` can be passed as an input to another `Operation`.
This builds a dataflow connection between operations, which
enables TensorFlow to execute an entire `Graph` that represents a
large, multi-step computation.
2. After the graph has been launched in a session, the value of the
`Tensor` can be computed by passing it to
`tf.Session.run`.
`t.eval()` is a shortcut for calling
`tf.compat.v1.get_default_session().run(t)`.
In the following example, `c`, `d`, and `e` are symbolic `Tensor`
objects, whereas `result` is a numpy array that stores a concrete
value:
```python
# Build a dataflow graph.
c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
d = tf.constant([[1.0, 1.0], [0.0, 1.0]])
e = tf.matmul(c, d)
# Construct a `Session` to execute the graph.
sess = tf.compat.v1.Session()
# Execute the graph and store the value that `e` represents in `result`.
result = sess.run(e)
```
"""
# List of Python operators that we allow to override.
OVERLOADABLE_OPERATORS = {
# Binary.
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__div__",
"__rdiv__",
"__truediv__",
"__rtruediv__",
"__floordiv__",
"__rfloordiv__",
"__mod__",
"__rmod__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__ne__",
"__eq__",
"__and__",
"__rand__",
"__or__",
"__ror__",
"__xor__",
"__rxor__",
"__getitem__",
"__pow__",
"__rpow__",
# Unary.
"__invert__",
"__neg__",
"__abs__",
"__matmul__",
"__rmatmul__"
}
# Whether to allow hashing or numpy-style equality
_USE_EQUALITY = tf2.enabled()
def __init__(self, op, value_index, dtype):
"""Creates a new `Tensor`.
Args:
op: An `Operation`. `Operation` that computes this tensor.
value_index: An `int`. Index of the operation's endpoint that produces
this tensor.
dtype: A `DType`. Type of elements stored in this tensor.
Raises:
TypeError: If the op is not an `Operation`.
"""
if not isinstance(op, Operation):
raise TypeError("op needs to be an Operation: %s" % op)
self._op = op
self._value_index = value_index
self._dtype = dtypes.as_dtype(dtype)
# This will be set by self._as_tf_output().
self._tf_output = None
# This will be set by self.shape().
self._shape_val = None
# List of operations that use this Tensor as input. We maintain this list
# to easily navigate a computation graph.
self._consumers = []
self._id = uid()
self._name = None
@staticmethod
def _create_with_tf_output(op, value_index, dtype, tf_output):
ret = Tensor(op, value_index, dtype)
ret._tf_output = tf_output
return ret
@property
def op(self):
"""The `Operation` that produces this tensor as an output."""
return self._op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self._dtype
@property
def graph(self):
"""The `Graph` that contains this tensor."""
return self._op.graph
@property
def name(self):
"""The string name of this tensor."""
if self._name is None:
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
self._name = "%s:%d" % (self._op.name, self._value_index)
return self._name
@property
def device(self):
"""The name of the device on which this tensor will be produced, or None."""
return self._op.device
@property
def shape(self):
"""Returns the `TensorShape` that represents the shape of this tensor.
The shape is computed using shape inference functions that are
registered in the Op for each `Operation`. See
`tf.TensorShape`
for more details of what a shape represents.
The inferred shape of a tensor is used to provide shape
information without having to launch the graph in a session. This
can be used for debugging, and providing early error messages. For
example:
```python
c = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
print(c.shape)
==> TensorShape([Dimension(2), Dimension(3)])
d = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
print(d.shape)
==> TensorShape([Dimension(4), Dimension(2)])
# Raises a ValueError, because `c` and `d` do not have compatible
# inner dimensions.
e = tf.matmul(c, d)
f = tf.matmul(c, d, transpose_a=True, transpose_b=True)
print(f.shape)
==> TensorShape([Dimension(3), Dimension(4)])
```
In some cases, the inferred shape may have unknown dimensions. If
the caller has additional information about the values of these
dimensions, `Tensor.set_shape()` can be used to augment the
inferred shape.
Returns:
A `TensorShape` representing the shape of this tensor.
"""
if self._shape_val is None:
self._shape_val = self._c_api_shape()
return self._shape_val
def _get_input_ops_without_shapes(self, target_op):
"""Returns ops needing shape inference to compute target_op's shape."""
result = []
stack = [self._op]
visited = set()
while stack:
op = stack.pop()
if op in visited:
continue
result.append(op)
stack.extend(t.op for t in op.inputs if t._shape_val is None)
visited.add(op)
return result
def _c_api_shape(self):
"""Returns the TensorShape of this tensor according to the C API."""
c_graph = self._op._graph._c_graph # pylint: disable=protected-access
shape_vector, unknown_shape = c_api.TF_GraphGetTensorShapeHelper(
c_graph, self._as_tf_output())
if unknown_shape:
return tensor_shape.unknown_shape()
else:
shape_vector = [None if d == -1 else d for d in shape_vector]
return tensor_shape.TensorShape(shape_vector)
@property
def _shape(self):
logging.warning("Tensor._shape is private, use Tensor.shape "
"instead. Tensor._shape will eventually be removed.")
return self.shape
@_shape.setter
def _shape(self, value):
raise ValueError(
"Tensor._shape cannot be assigned, use Tensor.set_shape instead.")
def _disallow_when_autograph_disabled(self, task):
raise errors.OperatorNotAllowedInGraphError(
"{} is not allowed: AutoGraph is disabled in this function."
" Try decorating it directly with @tf.function.".format(task))
def _disallow_when_autograph_enabled(self, task):
raise errors.OperatorNotAllowedInGraphError(
"{} is not allowed: AutoGraph did not convert this function. Try"
" decorating it directly with @tf.function.".format(task))
def _disallow_in_graph_mode(self, task):
raise errors.OperatorNotAllowedInGraphError(
"{} is not allowed in Graph execution. Use Eager execution or decorate"
" this function with @tf.function.".format(task))
def _disallow_bool_casting(self):
if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:
self._disallow_when_autograph_disabled(
"using a `tf.Tensor` as a Python `bool`")
elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED:
self._disallow_when_autograph_enabled(
"using a `tf.Tensor` as a Python `bool`")
else:
# Default: V1-style Graph execution.
self._disallow_in_graph_mode("using a `tf.Tensor` as a Python `bool`")
def _disallow_iteration(self):
if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:
self._disallow_when_autograph_disabled("iterating over `tf.Tensor`")
elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED:
self._disallow_when_autograph_enabled("iterating over `tf.Tensor`")
else:
# Default: V1-style Graph execution.
self._disallow_in_graph_mode("iterating over `tf.Tensor`")
def __iter__(self):
if not context.executing_eagerly():
self._disallow_iteration()
shape = self._shape_tuple()
if shape is None:
raise TypeError("Cannot iterate over a tensor with unknown shape.")
if not shape:
raise TypeError("Cannot iterate over a scalar tensor.")
if shape[0] is None:
raise TypeError(
"Cannot iterate over a tensor with unknown first dimension.")
for i in xrange(shape[0]):
yield self[i]
def _shape_as_list(self):
if self.shape.ndims is not None:
return [dim.value for dim in self.shape.dims]
else:
return None
def _shape_tuple(self):
shape = self._shape_as_list()
if shape is None:
return None
return tuple(shape)
def _rank(self):
"""Integer rank of this Tensor, if known, else None.
Returns:
Integer rank or None
"""
return self.shape.ndims
def get_shape(self):
"""Alias of Tensor.shape."""
return self.shape
def set_shape(self, shape):
"""Updates the shape of this tensor.
This method can be called multiple times, and will merge the given
`shape` with the current shape of this tensor. It can be used to
provide additional information about the shape of this tensor that
cannot be inferred from the graph alone. For example, this can be used
to provide additional information about the shapes of images:
```python
_, image_data = tf.compat.v1.TFRecordReader(...).read(...)
image = tf.image.decode_png(image_data, channels=3)
# The height and width dimensions of `image` are data dependent, and
# cannot be computed without executing the op.
print(image.shape)
==> TensorShape([Dimension(None), Dimension(None), Dimension(3)])
# We know that each image in this dataset is 28 x 28 pixels.
image.set_shape([28, 28, 3])
print(image.shape)
==> TensorShape([Dimension(28), Dimension(28), Dimension(3)])
```
NOTE: This shape is not enforced at runtime. Setting incorrect shapes can
result in inconsistencies between the statically-known graph and the runtime
value of tensors. For runtime validation of the shape, use `tf.ensure_shape`
instead.
Args:
shape: A `TensorShape` representing the shape of this tensor, a
`TensorShapeProto`, a list, a tuple, or None.
Raises:
ValueError: If `shape` is not compatible with the current shape of
this tensor.
"""
# Reset cached shape.
self._shape_val = None
# We want set_shape to be reflected in the C API graph for when we run it.
if not isinstance(shape, tensor_shape.TensorShape):
shape = tensor_shape.TensorShape(shape)
dim_list = []
if shape.dims is None:
unknown_shape = True
else:
unknown_shape = False
for dim in shape.dims:
if dim.value is None:
dim_list.append(-1)
else:
dim_list.append(dim.value)
try:
c_api.TF_GraphSetTensorShape_wrapper(
self._op._graph._c_graph, # pylint: disable=protected-access
self._as_tf_output(),
dim_list,
unknown_shape)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
@property
def value_index(self):
"""The index of this tensor in the outputs of its `Operation`."""
return self._value_index
def consumers(self):
"""Returns a list of `Operation`s that consume this tensor.
Returns:
A list of `Operation`s.
"""
consumer_names = c_api.TF_OperationOutputConsumers_wrapper(
self._as_tf_output())
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(name)
for name in consumer_names
]
# pylint: enable=protected-access
def _as_node_def_input(self):
"""Return a value to use for the NodeDef "input" attribute.
The returned string can be used in a NodeDef "input" attribute
to indicate that the NodeDef uses this Tensor as input.
Raises:
ValueError: if this Tensor's Operation does not have a name.
Returns:
a string.
"""
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
if self._value_index == 0:
return self._op.name
else:
return "%s:%d" % (self._op.name, self._value_index)
def _as_tf_output(self):
# pylint: disable=protected-access
# NOTE: Beyond preventing unnecessary (re-)allocation, the cached object
# also guarantees that a dictionary of tf_output objects will retain a
# deterministic (yet unsorted) order which prevents memory blowup in the
# cache of executor(s) stored for every session.
if self._tf_output is None:
self._tf_output = c_api_util.tf_output(self.op._c_op, self.value_index)
return self._tf_output
# pylint: enable=protected-access
def __str__(self):
return "Tensor(\"%s\"%s%s%s)" % (
self.name,
(", shape=%s" %
self.get_shape()) if self.get_shape().ndims is not None else "",
(", dtype=%s" % self._dtype.name) if self._dtype else "",
(", device=%s" % self.device) if self.device else "")
def __repr__(self):
return "<tf.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.get_shape(),
self._dtype.name)
def __hash__(self):
g = getattr(self, "graph", None)
if (Tensor._USE_EQUALITY and executing_eagerly_outside_functions() and
(g is None or g._building_function)): # pylint: disable=protected-access
raise TypeError("Tensor is unhashable if Tensor equality is enabled. "
"Instead, use tensor.experimental_ref() as the key.")
else:
return id(self)
def __copy__(self):
# TODO(b/77597810): get rid of Tensor copies.
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
# NOTE(mrry): This enables the Tensor's overloaded "right" binary
# operators to run when the left operand is an ndarray, because it
# accords the Tensor class higher priority than an ndarray, or a
# numpy matrix.
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
# mechanism, which allows more control over how Tensors interact
# with ndarrays.
__array_priority__ = 100
def __array__(self):
raise NotImplementedError("Cannot convert a symbolic Tensor ({}) to a numpy"
" array.".format(self.name))
def __len__(self):
raise TypeError("len is not well defined for symbolic Tensors. ({}) "
"Please call `x.shape` rather than `len(x)` for "
"shape information.".format(self.name))
@staticmethod
def _override_operator(operator, func):
_override_helper(Tensor, operator, func)
def __bool__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This overload raises a `TypeError` when the user inadvertently
treats a `Tensor` as a boolean (most commonly in an `if` or `while`
statement), in code that was not converted by AutoGraph. For example:
```python
if tf.constant(True): # Will raise.
# ...
if tf.constant(5) < tf.constant(7): # Will raise.
# ...
```
Raises:
`TypeError`.
"""
self._disallow_bool_casting()
def __nonzero__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This is the Python 2.x counterpart to `__bool__()` above.
Raises:
`TypeError`.
"""
self._disallow_bool_casting()
def eval(self, feed_dict=None, session=None):
"""Evaluates this tensor in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for the operation that produces this
tensor.
*N.B.* Before invoking `Tensor.eval()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values. See
`tf.Session.run` for a description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this tensor. If
none, the default session will be used.
Returns:
A numpy array corresponding to the value of this tensor.
"""
return _eval_using_default_session(self, feed_dict, self.graph, session)
def experimental_ref(self):
# tf.Variable also has the same experimental_ref() API. If you update the
    # documentation here, please update tf.Variable.experimental_ref() as well.
"""Returns a hashable reference object to this Tensor.
Warning: Experimental API that could be changed or removed.
    The primary use case for this API is to put tensors in a set/dictionary.
    We can't put tensors in a set/dictionary as `tensor.__hash__()` is no longer
    available starting with TensorFlow 2.0.
```python
import tensorflow as tf
x = tf.constant(5)
y = tf.constant(10)
z = tf.constant(10)
    # The following will raise an exception starting with 2.0
# TypeError: Tensor is unhashable if Tensor equality is enabled.
tensor_set = {x, y, z}
tensor_dict = {x: 'five', y: 'ten', z: 'ten'}
```
Instead, we can use `tensor.experimental_ref()`.
```python
tensor_set = {x.experimental_ref(),
y.experimental_ref(),
z.experimental_ref()}
print(x.experimental_ref() in tensor_set)
==> True
tensor_dict = {x.experimental_ref(): 'five',
y.experimental_ref(): 'ten',
z.experimental_ref(): 'ten'}
print(tensor_dict[y.experimental_ref()])
==> ten
```
    Also, the reference object provides a `.deref()` function that returns the
original Tensor.
```python
x = tf.constant(5)
print(x.experimental_ref().deref())
==> tf.Tensor(5, shape=(), dtype=int32)
```
"""
return object_identity.Reference(self)
# TODO(agarwal): consider getting rid of this.
class _EagerTensorBase(Tensor):
"""Base class for EagerTensor."""
# __int__, __float__ and __index__ may copy the tensor to CPU and
# only work for scalars; values are cast as per numpy.
def __int__(self):
return int(self._numpy())
def __long__(self):
return long(self._numpy())
def __float__(self):
return float(self._numpy())
def __index__(self):
maybe_arr = self._numpy()
if isinstance(maybe_arr, np.ndarray):
return maybe_arr.__index__()
return int(maybe_arr) # Must be a NumPy scalar.
def __bool__(self):
return bool(self._numpy())
__nonzero__ = __bool__
def __format__(self, format_spec):
return self._numpy().__format__(format_spec)
def __reduce__(self):
return convert_to_tensor, (self._numpy(),)
def __copy__(self):
# Eager Tensors are immutable so it's safe to return themselves as a copy.
return self
def __deepcopy__(self, memo):
# Eager Tensors are immutable so it's safe to return themselves as a copy.
del memo
return self
def __str__(self):
return "tf.Tensor(%s, shape=%s, dtype=%s)" % (numpy_text(self), self.shape,
self.dtype.name)
def __repr__(self):
return "<tf.Tensor: id=%s, shape=%s, dtype=%s, numpy=%s>" % (
self._id, self.shape, self.dtype.name, numpy_text(self, is_repr=True))
def __len__(self):
"""Returns the length of the first dimension in the Tensor."""
if not self.shape.ndims:
raise TypeError("Scalar tensor has no `len()`")
return self._shape_tuple()[0]
def _numpy(self):
raise NotImplementedError()
@property
def dtype(self):
# Note: using the intern table directly here as this is
# performance-sensitive in some models.
return dtypes._INTERN_TABLE[self._datatype_enum()] # pylint: disable=protected-access
def numpy(self):
"""Returns a numpy array or a scalar with the same contents as the Tensor.
TODO(ashankar,agarwal): Perhaps this should NOT reference the underlying
buffer but instead always explicitly copy? Note that currently it may or may
not copy based on whether the numpy data is properly aligned or not.
Returns:
A numpy array or a scalar. Numpy array may share memory with the
Tensor object. Any changes to one may be reflected in the other. A scalar
value is returned when self has rank 0.
Raises:
ValueError: if the type of this Tensor is not representable in numpy.
"""
maybe_arr = self._numpy() # pylint: disable=protected-access
return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr
@property
def backing_device(self):
"""Returns the name of the device holding this tensor's memory.
`.backing_device` is usually the same as `.device`, which returns
the device on which the kernel of the operation that produced this tensor
ran. However, some operations can produce tensors on a different device
(e.g., an operation that executes on the GPU but produces output tensors
in host memory).
"""
raise NotImplementedError()
def _datatype_enum(self):
raise NotImplementedError()
def _shape_tuple(self):
"""The shape of this Tensor, as a tuple.
This is more performant than tuple(shape().as_list()) as it avoids
    two list creations and one object creation. Marked private for now as from an API
perspective, it would be better to have a single performant way of
getting a shape rather than exposing shape() and shape_tuple()
(and heaven forbid, shape_list() etc. as well!). Punting on that for now,
but ideally one would work things out and remove the need for this method.
Returns:
tuple with the shape.
"""
raise NotImplementedError()
def _rank(self):
"""Integer rank of this Tensor.
Unlike regular Tensors, the rank is always known for EagerTensors.
This is more performant than len(self._shape_tuple())
Returns:
Integer rank
"""
raise NotImplementedError()
def _num_elements(self):
"""Number of elements of this Tensor.
Unlike regular Tensors, the number of elements is always known for
EagerTensors.
This is more performant than tensor.shape.num_elements
Returns:
Long - num elements in the tensor
"""
raise NotImplementedError()
def _copy_to_device(self, device_name): # pylint: disable=redefined-outer-name
raise NotImplementedError()
@staticmethod
def _override_operator(name, func):
setattr(_EagerTensorBase, name, func)
def _copy_nograd(self, ctx=None, device_name=None):
"""Copies tensor to dest device, but doesn't record the operation."""
# Creates a new tensor on the dest device.
if ctx is None:
ctx = context.context()
if device_name is None:
device_name = ctx.device_name
# pylint: disable=protected-access
try:
ctx.ensure_initialized()
new_tensor = self._copy_to_device(device_name)
except core._NotOkStatusException as e:
six.raise_from(core._status_to_exception(e.code, e.message), None)
return new_tensor
def _copy(self, ctx=None, device_name=None):
"""Copies tensor to dest device."""
new_tensor = self._copy_nograd(ctx, device_name)
# Record the copy on tape and define backprop copy as well.
if context.executing_eagerly():
self_device = self.device
def grad_fun(dresult):
return [
dresult._copy(device_name=self_device)
if hasattr(dresult, "_copy") else dresult
]
tape.record_operation("_copy", [new_tensor], [self], grad_fun)
return new_tensor
# pylint: enable=protected-access
@property
def shape(self):
if self._tensor_shape is None: # pylint: disable=access-member-before-definition
# `_tensor_shape` is declared and defined in the definition of
# `EagerTensor`, in C.
self._tensor_shape = tensor_shape.TensorShape(self._shape_tuple())
return self._tensor_shape
def get_shape(self):
"""Alias of Tensor.shape."""
return self.shape
def _shape_as_list(self):
"""The shape of the tensor as a list."""
return list(self._shape_tuple())
@property
def ndim(self):
"""Returns the number of Tensor dimensions."""
return self.shape.ndims
@deprecation.deprecated(None, "Use tf.identity instead.")
def cpu(self):
"""A copy of this Tensor with contents backed by host memory."""
return self._copy(context.context(), "CPU:0")
@deprecation.deprecated(None, "Use tf.identity instead.")
def gpu(self, gpu_index=0):
"""A copy of this Tensor with contents backed by memory on the GPU.
Arguments:
      gpu_index: Identifies which GPU to place the contents of the returned
        Tensor on.
Returns:
A GPU-memory backed Tensor object initialized with the same contents
as this Tensor.
"""
return self._copy(context.context(), "GPU:" + str(gpu_index))
def set_shape(self, shape):
if not self.shape.is_compatible_with(shape):
raise ValueError(
"Tensor's shape %s is not compatible with supplied shape %s" %
(self.shape, shape))
# Methods not supported / implemented for Eager Tensors.
@property
def op(self):
raise AttributeError(
"Tensor.op is meaningless when eager execution is enabled.")
@property
def graph(self):
raise AttributeError(
"Tensor.graph is meaningless when eager execution is enabled.")
@property
def name(self):
raise AttributeError(
"Tensor.name is meaningless when eager execution is enabled.")
@property
def value_index(self):
raise AttributeError(
"Tensor.value_index is meaningless when eager execution is enabled.")
def consumers(self):
raise NotImplementedError(
"Tensor.consumers is meaningless when eager execution is enabled.")
def _add_consumer(self, consumer):
raise NotImplementedError(
"_add_consumer not supported when eager execution is enabled.")
def _as_node_def_input(self):
raise NotImplementedError(
"_as_node_def_input not supported when eager execution is enabled.")
def _as_tf_output(self):
raise NotImplementedError(
"_as_tf_output not supported when eager execution is enabled.")
def eval(self, feed_dict=None, session=None):
raise NotImplementedError(
"eval is not supported when eager execution is enabled, "
"is .numpy() what you're looking for?")
# This call creates an EagerTensor class, as a subclass of _EagerTensorBase, and
# registers it with the current module.
EagerTensor = c_api.TFE_Py_InitEagerTensor(_EagerTensorBase)
register_dense_tensor_like_type(Tensor)
@tf_export(v1=["convert_to_tensor"])
def convert_to_tensor(value,
dtype=None,
name=None,
preferred_dtype=None,
dtype_hint=None):
"""Converts the given `value` to a `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars. For example:
```python
import numpy as np
def my_func(arg):
arg = tf.convert_to_tensor(arg, dtype=tf.float32)
return tf.matmul(arg, arg) + arg
# The following calls are equivalent.
value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
```
This function can be useful when composing a new operation in Python
(such as `my_func` in the example above). All standard Python op
constructors apply this function to each of their Tensor-valued
inputs, which allows those ops to accept numpy arrays, Python lists,
and scalars in addition to `Tensor` objects.
Note: This function diverges from default Numpy behavior for `float` and
`string` types when `None` is present in a Python list or scalar. Rather
than silently converting `None` values, an error will be thrown.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the type
is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
preferred_dtype: Optional element type for the returned tensor, used when
dtype is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so preferred_dtype can be used as a soft
preference. If the conversion to `preferred_dtype` is not possible, this
argument has no effect.
dtype_hint: Same meaning as `preferred_dtype`, and overrides it.
Returns:
A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value` to `dtype`.
RuntimeError: If a registered conversion function returns an invalid value.
ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
"""
preferred_dtype = deprecation.deprecated_argument_lookup(
"dtype_hint", dtype_hint, "preferred_dtype", preferred_dtype)
return convert_to_tensor_v2(value, dtype, preferred_dtype, name)
@tf_export("convert_to_tensor", v1=[])
def convert_to_tensor_v2(value, dtype=None, dtype_hint=None, name=None):
"""Converts the given `value` to a `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars. For example:
```python
import numpy as np
def my_func(arg):
arg = tf.convert_to_tensor(arg, dtype=tf.float32)
return tf.matmul(arg, arg) + arg
# The following calls are equivalent.
value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
```
This function can be useful when composing a new operation in Python
(such as `my_func` in the example above). All standard Python op
constructors apply this function to each of their Tensor-valued
inputs, which allows those ops to accept numpy arrays, Python lists,
and scalars in addition to `Tensor` objects.
Note: This function diverges from default Numpy behavior for `float` and
`string` types when `None` is present in a Python list or scalar. Rather
than silently converting `None` values, an error will be thrown.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the type
is inferred from the type of `value`.
dtype_hint: Optional element type for the returned tensor, used when dtype
is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so dtype_hint can be used as a soft preference.
If the conversion to `dtype_hint` is not possible, this argument has no
effect.
name: Optional name to use if a new `Tensor` is created.
Returns:
A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value` to `dtype`.
RuntimeError: If a registered conversion function returns an invalid value.
ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
"""
return internal_convert_to_tensor(
value=value,
dtype=dtype,
name=name,
preferred_dtype=dtype_hint,
as_ref=False)
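# Illustrative sketch (added comment, not part of the original API surface):
# `dtype_hint` is only a soft preference, so it is silently dropped whenever the
# requested conversion is impossible. Assuming eager TF 2.x behavior:
#
#   tf.convert_to_tensor([1, 2, 3], dtype_hint=tf.float32)  # -> float32 tensor
#   tf.convert_to_tensor([1.5, 2.5], dtype_hint=tf.int32)   # -> float32 tensor;
#                                                            #    the hint is ignored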
def _error_prefix(name):
return "" if name is None else "%s: " % name
def internal_convert_to_tensor(value,
dtype=None,
name=None,
as_ref=False,
preferred_dtype=None,
ctx=None,
accepted_result_types=(Tensor,)):
"""Implementation of the public convert_to_tensor."""
if isinstance(value, EagerTensor):
if ctx is None:
ctx = context.context()
if not ctx.executing_eagerly():
graph = get_default_graph()
if not graph.building_function:
raise RuntimeError("Attempting to capture an EagerTensor without "
"building a function.")
return graph.capture(value, name=name)
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
if isinstance(value, Tensor):
if dtype is not None and not dtype.is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
(dtype.name, value.dtype.name, value))
return value
if preferred_dtype is not None:
preferred_dtype = dtypes.as_dtype(preferred_dtype)
for base_type, conversion_func in tensor_conversion_registry.get(type(value)):
# If dtype is None but preferred_dtype is not None, we try to
# cast to preferred_dtype first.
ret = None
if dtype is None and preferred_dtype is not None:
try:
ret = conversion_func(
value, dtype=preferred_dtype, name=name, as_ref=as_ref)
except (TypeError, ValueError):
# Could not coerce the conversion to use the preferred dtype.
pass
else:
if (ret is not NotImplemented and
ret.dtype.base_dtype != preferred_dtype.base_dtype):
raise TypeError("convert_to_tensor did not convert to "
"the preferred dtype: %s vs %s " %
(ret.dtype.base_dtype, preferred_dtype.base_dtype))
if ret is None:
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
if ret is NotImplemented:
continue
if not isinstance(ret, accepted_result_types):
raise RuntimeError(
"%sConversion function %r for type %s returned non-Tensor: %r" %
(_error_prefix(name), conversion_func, base_type, ret))
if dtype and not dtype.is_compatible_with(ret.dtype):
raise RuntimeError(
"%sConversion function %r for type %s returned incompatible "
"dtype: requested = %s, actual = %s" %
(_error_prefix(name), conversion_func, base_type, dtype.name,
ret.dtype.name))
return ret
raise TypeError("%sCannot convert %r with type %s to Tensor: "
"no conversion function registered." %
(_error_prefix(name), value, type(value)))
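# Sketch of the fast paths above (illustrative only; `constant_op` is assumed to
# be available as elsewhere in the framework):
#
#   t = constant_op.constant([1, 2], dtype=dtypes.int32)
#   internal_convert_to_tensor(t) is t                    # True, returned as-is
#   internal_convert_to_tensor(t, dtype=dtypes.float32)   # raises ValueError;
#                                                         # dtypes are never cast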
def internal_convert_n_to_tensor(values,
dtype=None,
name=None,
as_ref=False,
preferred_dtype=None,
ctx=None):
"""Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
preferred_dtype: Optional element type for the returned tensors, used when
dtype is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so preferred_dtype can be used as a soft
preference. If the conversion to `preferred_dtype` is not possible, this
argument has no effect.
ctx: The value of context.context().
Returns:
A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections_abc.Sequence):
raise TypeError("values must be a sequence.")
ret = []
if ctx is None:
ctx = context.context()
for i, value in enumerate(values):
n = None if name is None else "%s_%d" % (name, i)
ret.append(
internal_convert_to_tensor(
value,
dtype=dtype,
name=n,
as_ref=as_ref,
preferred_dtype=preferred_dtype,
ctx=ctx))
return ret
def convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None):
"""Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
preferred_dtype: Optional element type for the returned tensors, used when
dtype is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so preferred_dtype can be used as a soft
preference. If the conversion to `preferred_dtype` is not possible, this
argument has no effect.
Returns:
A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor(
values=values,
dtype=dtype,
name=name,
preferred_dtype=preferred_dtype,
as_ref=False)
def convert_to_tensor_or_composite(value, dtype=None, name=None):
"""Converts the given object to a `Tensor` or `CompositeTensor`.
If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it
is converted to a `Tensor` using `convert_to_tensor()`.
Args:
value: A `CompositeTensor` or an object that can be consumed by
`convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`CompositeTensor`.
name: (Optional.) A name to use if a new `Tensor` is created.
Returns:
A `Tensor` or `CompositeTensor`, based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
return internal_convert_to_tensor_or_composite(
value=value, dtype=dtype, name=name, as_ref=False)
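# Illustrative sketch (not in the original file): composite values such as
# `sparse_tensor.SparseTensor` pass through untouched, while plain Python values
# are converted to dense Tensors:
#
#   sp = sparse_tensor.SparseTensor(indices=[[0, 0]], values=[1.0], dense_shape=[2, 2])
#   convert_to_tensor_or_composite(sp) is sp      # True
#   convert_to_tensor_or_composite([1.0, 2.0])    # -> a dense float32 Tensor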
def internal_convert_to_tensor_or_composite(value,
dtype=None,
name=None,
as_ref=False):
"""Converts the given object to a `Tensor` or `CompositeTensor`.
If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it
is converted to a `Tensor` using `convert_to_tensor()`.
Args:
value: A `CompositeTensor`, or an object that can be consumed by
`convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`CompositeTensor`.
name: (Optional.) A name to use if a new `Tensor` is created.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A `Tensor` or `CompositeTensor`, based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
if isinstance(value, composite_tensor.CompositeTensor):
value_dtype = getattr(value, "dtype", None)
if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value_dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
(dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
return value
else:
return internal_convert_to_tensor(
value,
dtype=dtype,
name=name,
as_ref=as_ref,
accepted_result_types=(Tensor, composite_tensor.CompositeTensor))
def internal_convert_n_to_tensor_or_composite(values,
dtype=None,
name=None,
as_ref=False):
"""Converts `values` to a list of `Tensor` or `CompositeTensor` objects.
Any `CompositeTensor` objects in `values` are returned unmodified.
Args:
values: A list of `None`, `CompositeTensor`, or objects that can be consumed
by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor`s or
`CompositeTensor`s.
name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A list of `Tensor`, `CompositeTensor`, and/or `None` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections_abc.Sequence):
raise TypeError("values must be a sequence.")
ret = []
for i, value in enumerate(values):
if value is None:
ret.append(value)
else:
n = None if name is None else "%s_%d" % (name, i)
ret.append(
internal_convert_to_tensor_or_composite(
value, dtype=dtype, name=n, as_ref=as_ref))
return ret
def convert_n_to_tensor_or_composite(values, dtype=None, name=None):
"""Converts `values` to a list of `Output` or `CompositeTensor` objects.
Any `CompositeTensor` objects in `values` are returned unmodified.
Args:
values: A list of `None`, `CompositeTensor`, or objects that can be
consumed by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor`s or
`CompositeTensor`s.
name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
Returns:
A list of `Tensor` and/or `CompositeTensor` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor_or_composite(
values=values, dtype=dtype, name=name, as_ref=False)
def _device_string(dev_spec):
if pydev.is_device_spec(dev_spec):
return dev_spec.to_string()
else:
return dev_spec
def _NodeDef(op_type, name, device=None, attrs=None): # pylint: disable=redefined-outer-name
"""Create a NodeDef proto.
Args:
op_type: Value for the "op" attribute of the NodeDef proto.
name: Value for the "name" attribute of the NodeDef proto.
device: string, device, or function from NodeDef to string. Value for the
"device" attribute of the NodeDef proto.
attrs: Optional dictionary where the key is the attribute name (a string)
and the value is the respective "attr" attribute of the NodeDef proto (an
AttrValue).
Returns:
A node_def_pb2.NodeDef protocol buffer.
"""
node_def = node_def_pb2.NodeDef()
node_def.op = compat.as_bytes(op_type)
node_def.name = compat.as_bytes(name)
if attrs is not None:
for k, v in six.iteritems(attrs):
node_def.attr[k].CopyFrom(v)
if device is not None:
if callable(device):
node_def.device = device(node_def)
else:
node_def.device = _device_string(device)
return node_def
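# Minimal usage sketch for the helper above (illustrative, not original code):
#
#   nd = _NodeDef("NoOp", "my_noop", device="/device:CPU:0")
#   nd.op == "NoOp" and nd.name == "my_noop" and nd.device == "/device:CPU:0"  # True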
# Copied from core/framework/node_def_util.cc
# TODO(mrry,josh11b): Consolidate this validation in C++ code.
_VALID_OP_NAME_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\-/]*$")
_VALID_SCOPE_NAME_REGEX = re.compile("^[A-Za-z0-9_.\\-/]*$")
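# Illustrative examples of what the op-name pattern above accepts (added comment):
#   valid op names:    "add", "layer_1/MatMul", "a.b-c"
#   rejected op names: "_private" (bad first char), "op:0" (':' not allowed), "" (empty)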
def _create_c_op(graph, node_def, inputs, control_inputs):
"""Creates a TF_Operation.
Args:
graph: a `Graph`.
node_def: `node_def_pb2.NodeDef` for the operation to create.
inputs: A list of `Tensor`s (corresponding to scalar inputs) and lists of
`Tensor`s (corresponding to sequence inputs, e.g. "int64 * N",
"list(int64)"). The length of the list should be equal to the number of
inputs specified by this operation's op def.
control_inputs: A list of `Operation`s to set as control dependencies.
Returns:
A wrapped TF_Operation*.
"""
# pylint: disable=protected-access
op_desc = c_api.TF_NewOperation(graph._c_graph, compat.as_str(node_def.op),
compat.as_str(node_def.name))
if node_def.device:
c_api.TF_SetDevice(op_desc, compat.as_str(node_def.device))
# Add inputs
for op_input in inputs:
if isinstance(op_input, (list, tuple)):
c_api.TF_AddInputList(op_desc, [t._as_tf_output() for t in op_input])
else:
c_api.TF_AddInput(op_desc, op_input._as_tf_output())
# Add control inputs
for control_input in control_inputs:
c_api.TF_AddControlInput(op_desc, control_input._c_op)
# pylint: enable=protected-access
# Add attrs
for name, attr_value in node_def.attr.items():
serialized = attr_value.SerializeToString()
# TODO(skyewm): this creates and deletes a new TF_Status for every attr.
# It might be worth creating a convenient way to re-use the same status.
c_api.TF_SetAttrValueProto(op_desc, compat.as_str(name), serialized)
try:
c_op = c_api.TF_FinishOperation(op_desc)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
return c_op
@tf_export("Operation")
class Operation(object):
"""Represents a graph node that performs computation on tensors.
An `Operation` is a node in a TensorFlow `Graph` that takes zero or
more `Tensor` objects as input, and produces zero or more `Tensor`
objects as output. Objects of type `Operation` are created by
calling a Python op constructor (such as
`tf.matmul`)
or `tf.Graph.create_op`.
For example `c = tf.matmul(a, b)` creates an `Operation` of type
"MatMul" that takes tensors `a` and `b` as input, and produces `c`
as output.
After the graph has been launched in a session, an `Operation` can
be executed by passing it to
`tf.Session.run`.
`op.run()` is a shortcut for calling
`tf.compat.v1.get_default_session().run(op)`.
"""
def __init__(self,
node_def,
g,
inputs=None,
output_types=None,
control_inputs=None,
input_types=None,
original_op=None,
op_def=None):
r"""Creates an `Operation`.
NOTE: This constructor validates the name of the `Operation` (passed
as `node_def.name`). Valid `Operation` names match the following
regular expression:
[A-Za-z0-9.][A-Za-z0-9_.\\-/]*
Args:
node_def: `node_def_pb2.NodeDef`. `NodeDef` for the `Operation`. Used for
attributes of `node_def_pb2.NodeDef`, typically `name`, `op`, and
`device`. The `input` attribute is irrelevant here as it will be
computed when generating the model.
g: `Graph`. The parent graph.
inputs: list of `Tensor` objects. The inputs to this `Operation`.
output_types: list of `DType` objects. List of the types of the `Tensors`
computed by this operation. The length of this list indicates the
number of output endpoints of the `Operation`.
control_inputs: list of operations or tensors from which to have a control
dependency.
input_types: List of `DType` objects representing the types of the tensors
accepted by the `Operation`. By default uses `[x.dtype.base_dtype for x
in inputs]`. Operations that expect reference-typed inputs must specify
these explicitly.
original_op: Optional. Used to associate the new `Operation` with an
existing `Operation` (for example, a replica with the op that was
replicated).
op_def: Optional. The `op_def_pb2.OpDef` proto that describes the op type
that this `Operation` represents.
Raises:
TypeError: if control inputs are not Operations or Tensors,
or if `node_def` is not a `NodeDef`,
or if `g` is not a `Graph`,
or if `inputs` are not tensors,
or if `inputs` and `input_types` are incompatible.
ValueError: if the `node_def` name is not valid.
"""
# For internal use only: `node_def` can be set to a TF_Operation to create
# an Operation for that op. This is useful for creating Operations for ops
# indirectly created by C API methods, e.g. the ops created by
# TF_ImportGraphDef. When `node_def` is a TF_Operation, all optional fields
# should be None.
if isinstance(node_def, node_def_pb2.NodeDef):
if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0:
raise ValueError(
"Cannot create a tensor proto whose content is larger than 2GB.")
if not _VALID_OP_NAME_REGEX.match(node_def.name):
raise ValueError("'%s' is not a valid node name" % node_def.name)
c_op = None
elif type(node_def).__name__ == "SwigPyObject":
assert inputs is None
assert output_types is None
assert control_inputs is None
assert input_types is None
assert original_op is None
assert op_def is None
c_op = node_def
else:
raise TypeError("node_def needs to be a NodeDef: %s" % node_def)
if not isinstance(g, Graph):
raise TypeError("g needs to be a Graph: %s" % g)
self._graph = g
if inputs is None:
inputs = []
elif not isinstance(inputs, list):
raise TypeError("inputs needs to be a list of Tensors: %s" % inputs)
for a in inputs:
if not isinstance(a, Tensor):
raise TypeError("input needs to be a Tensor: %s" % a)
if input_types is None:
input_types = [i.dtype.base_dtype for i in inputs]
else:
if not all(
x.is_compatible_with(i.dtype) for i, x in zip(inputs, input_types)):
raise TypeError("In op '%s', input types (%s) are not compatible "
"with expected types (%s)" %
(node_def.name, [i.dtype for i in inputs], input_types))
# Build the list of control inputs.
control_input_ops = []
if control_inputs:
for c in control_inputs:
control_op = None
if isinstance(c, Operation):
control_op = c
elif isinstance(c, (Tensor, IndexedSlices)):
control_op = c.op
else:
raise TypeError("Control input must be an Operation, "
"a Tensor, or IndexedSlices: %s" % c)
control_input_ops.append(control_op)
# This will be set by self.inputs.
self._inputs_val = None
# pylint: disable=protected-access
self._id_value = self._graph._next_id()
self._original_op = original_op
self._traceback = tf_stack.extract_stack()
# List of _UserDevSpecs holding code locations of device context manager
# invocations and the user's original arguments to them.
self._device_code_locations = None
# Dict mapping op name to file and line information for op colocation
# context managers.
self._colocation_code_locations = None
self._control_flow_context = self.graph._get_control_flow_context()
# Initialize self._c_op.
if c_op:
self._c_op = c_op
op_def = g._get_op_def(c_api.TF_OperationOpType(c_op))
else:
if op_def is None:
op_def = self._graph._get_op_def(node_def.op)
# TODO(skyewm): op_def_library.apply_op() flattens the incoming inputs.
# Refactor so we don't have to do this here.
grouped_inputs = self._reconstruct_sequence_inputs(
op_def, inputs, node_def.attr)
self._c_op = _create_c_op(self._graph, node_def, grouped_inputs,
control_input_ops)
# pylint: enable=protected-access
self._is_stateful = op_def.is_stateful
# Initialize self._outputs.
num_outputs = c_api.TF_OperationNumOutputs(self._c_op)
self._outputs = []
for i in range(num_outputs):
tf_output = c_api_util.tf_output(self._c_op, i)
output_type = c_api.TF_OperationOutputType(tf_output)
tensor = Tensor._create_with_tf_output(self, i, output_type, tf_output) # pylint: disable=protected-access
self._outputs.append(tensor)
self._graph._add_op(self) # pylint: disable=protected-access
if not c_op:
self._control_flow_post_processing()
def _control_flow_post_processing(self):
"""Add this op to its control flow context.
This may add new ops and change this op's inputs. self.inputs must be
available before calling this method.
"""
for input_tensor in self.inputs:
control_flow_util.CheckInputFromValidContext(self, input_tensor.op)
if self._control_flow_context is not None:
self._control_flow_context.AddOp(self)
def _reconstruct_sequence_inputs(self, op_def, inputs, attrs):
"""Regroups a flat list of input tensors into scalar and sequence inputs.
Args:
op_def: The `op_def_pb2.OpDef` (for knowing the input types)
inputs: a list of input `Tensor`s to the op.
attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define
how long each sequence is)
Returns:
A list of `Tensor`s (corresponding to scalar inputs) and lists of
`Tensor`s (corresponding to sequence inputs).
"""
grouped_inputs = []
i = 0
for input_arg in op_def.input_arg:
if input_arg.number_attr:
input_len = attrs[input_arg.number_attr].i
is_sequence = True
elif input_arg.type_list_attr:
input_len = len(attrs[input_arg.type_list_attr].list.type)
is_sequence = True
else:
input_len = 1
is_sequence = False
if is_sequence:
grouped_inputs.append(inputs[i:i + input_len])
else:
grouped_inputs.append(inputs[i])
i += input_len
assert i == len(inputs)
return grouped_inputs
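# Illustrative example (added comment): for an OpDef with input_args
# (x: number_attr "N", y: plain), attrs {"N": 2} and the flat inputs
# [t0, t1, t2], the regrouped result is [[t0, t1], t2].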
def colocation_groups(self):
"""Returns the list of colocation groups of the op."""
default_colocation_group = [compat.as_bytes("loc:@%s" % self.name)]
try:
class_attr = self.get_attr("_class")
except ValueError:
# This op has no explicit colocation group, so it is itself its
# own root of a colocation group.
return default_colocation_group
attr_groups = [
class_name for class_name in class_attr
if class_name.startswith(b"loc:@")
]
# If there are no colocation groups in the explicit _class field,
# return the default colocation group.
return attr_groups if attr_groups else default_colocation_group
def values(self):
"""DEPRECATED: Use outputs."""
return tuple(self.outputs)
def _get_control_flow_context(self):
"""Returns the control flow context of this op.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, ctx):
"""Sets the current control flow context of this op.
Args:
ctx: a context object.
"""
self._control_flow_context = ctx
@property
def name(self):
"""The full name of this operation."""
return c_api.TF_OperationName(self._c_op)
@property
def _id(self):
"""The unique integer id of this operation."""
return self._id_value
@property
def device(self):
"""The name of the device to which this op has been assigned, if any.
Returns:
The string name of the device to which this op has been
assigned, or an empty string if it has not been assigned to a
device.
"""
return c_api.TF_OperationDevice(self._c_op)
@property
def _device_assignments(self):
"""Code locations for device context managers active at op creation.
This property will return a list of traceable_stack.TraceableObject
instances where .obj is a string representing the assigned device
(or information about the function that would be applied to this op
to compute the desired device) and the filename and lineno members
record the location of the relevant device context manager.
For example, suppose file_a contained these lines:
file_a.py:
15: with tf.device('/gpu:0'):
16: node_b = tf.constant(4, name='NODE_B')
Then a TraceableObject t_obj representing the device context manager
would have these member values:
t_obj.obj -> '/gpu:0'
t_obj.filename = 'file_a.py'
t_obj.lineno = 15
and node_b.op._device_assignments would return the list [t_obj].
Returns:
[str: traceable_stack.TraceableObject, ...] as per this method's
description, above.
"""
return self._device_code_locations or []
@property
def _colocation_dict(self):
"""Code locations for colocation context managers active at op creation.
This property will return a dictionary for which the keys are nodes with
which this Operation is colocated, and for which the values are
traceable_stack.TraceableObject instances. The TraceableObject instances
record the location of the relevant colocation context manager but have the
"obj" field set to None to prevent leaking private data.
For example, suppose file_a contained these lines:
file_a.py:
14: node_a = tf.constant(3, name='NODE_A')
15: with tf.compat.v1.colocate_with(node_a):
16: node_b = tf.constant(4, name='NODE_B')
Then a TraceableObject t_obj representing the colocation context manager
would have these member values:
t_obj.obj -> None
t_obj.filename = 'file_a.py'
t_obj.lineno = 15
and node_b.op._colocation_dict would return the dictionary
{ 'NODE_A': t_obj }
Returns:
{str: traceable_stack.TraceableObject} as per this method's description,
above.
"""
locations_dict = self._colocation_code_locations or {}
return locations_dict.copy()
@property
def _output_types(self):
"""List this operation's output types.
Returns:
List of the types of the Tensors computed by this operation.
Each element in the list is an integer whose value is one of
the TF_DataType enums defined in c_api.h
The length of this list indicates the number of output endpoints
of the operation.
"""
num_outputs = c_api.TF_OperationNumOutputs(self._c_op)
output_types = [
c_api.TF_OperationOutputType(self._tf_output(i))
for i in xrange(num_outputs)
]
# In all the tests we have, the output_types passed into Operation.__init__
# are lists of ints (which is illegal according to the docstring), but
# input_types are instances of DType.
# This extra assert is to catch if we ever use DType for output_types.
if output_types:
assert isinstance(output_types[0], int)
return output_types
def _tf_output(self, output_idx):
"""Create and return a new TF_Output for output_idx'th output of this op."""
tf_output = c_api.TF_Output()
tf_output.oper = self._c_op
tf_output.index = output_idx
return tf_output
def _tf_input(self, input_idx):
"""Create and return a new TF_Input for input_idx'th input of this op."""
tf_input = c_api.TF_Input()
tf_input.oper = self._c_op
tf_input.index = input_idx
return tf_input
def _set_device(self, device): # pylint: disable=redefined-outer-name
"""Set the device of this operation.
Args:
device: string or device. The device to set.
"""
self._set_device_from_string(compat.as_str(_device_string(device)))
def _set_device_from_string(self, device_str):
"""Fast path to set device if the type is known to be a string.
This function is called frequently enough during graph construction that
there are non-trivial performance gains if the caller can guarantee that
the specified device is already a string.
Args:
device_str: A string specifying where to place this op.
"""
c_api.SetRequestedDevice(
self._graph._c_graph, # pylint: disable=protected-access
self._c_op, # pylint: disable=protected-access
device_str)
def _update_input(self, index, tensor):
"""Update the input to this operation at the given index.
NOTE: This is for TF internal use only. Please don't use it.
Args:
index: the index of the input to update.
tensor: the Tensor to be used as the input at the given index.
Raises:
TypeError: if tensor is not a Tensor,
or if input tensor type is not convertible to dtype.
ValueError: if the Tensor is from a different graph.
"""
if not isinstance(tensor, Tensor):
raise TypeError("tensor must be a Tensor: %s" % tensor)
_assert_same_graph(self, tensor)
# Reset cached inputs.
self._inputs_val = None
c_api.UpdateEdge(
self._graph._c_graph, # pylint: disable=protected-access
tensor._as_tf_output(), # pylint: disable=protected-access
self._tf_input(index))
def _add_while_inputs(self, tensors):
"""See AddWhileInputHack in python_api.h.
NOTE: This is for TF internal use only. Please don't use it.
Args:
tensors: list of Tensors
Raises:
TypeError: if tensor is not a Tensor,
or if input tensor type is not convertible to dtype.
ValueError: if the Tensor is from a different graph.
"""
for tensor in tensors:
if not isinstance(tensor, Tensor):
raise TypeError("tensor must be a Tensor: %s" % tensor)
_assert_same_graph(self, tensor)
# Reset cached inputs.
self._inputs_val = None
c_api.AddWhileInputHack(
self._graph._c_graph, # pylint: disable=protected-access
tensor._as_tf_output(), # pylint: disable=protected-access
self._c_op)
def _add_control_inputs(self, ops):
"""Add a list of new control inputs to this operation.
Args:
ops: the list of Operations to add as control input.
Raises:
TypeError: if ops is not a list of Operations.
ValueError: if any op in ops is from a different graph.
"""
for op in ops:
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op) # pylint: disable=protected-access
def _add_control_input(self, op):
"""Add a new control input to this operation.
Args:
op: the Operation to add as control input.
Raises:
TypeError: if op is not an Operation.
ValueError: if op is from a different graph.
"""
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op) # pylint: disable=protected-access
def _remove_all_control_inputs(self):
"""Removes any control inputs to this operation."""
c_api.RemoveAllControlInputs(self._graph._c_graph, self._c_op) # pylint: disable=protected-access
def _add_outputs(self, types, shapes):
"""Adds new Tensors to self.outputs.
Note: this is generally unsafe to use. This is used in certain situations in
conjunction with _set_type_list_attr.
Arguments:
types: list of DTypes
shapes: list of TensorShapes
"""
assert len(types) == len(shapes)
orig_num_outputs = len(self.outputs)
for i in range(len(types)):
t = Tensor(self, orig_num_outputs + i, types[i])
self._outputs.append(t)
t.set_shape(shapes[i])
def __str__(self):
return str(self.node_def)
def __repr__(self):
return "<tf.Operation '%s' type=%s>" % (self.name, self.type)
@property
def outputs(self):
"""The list of `Tensor` objects representing the outputs of this op."""
return self._outputs
class _InputList(object):
"""Immutable input list wrapper."""
def __init__(self, inputs):
self._inputs = inputs
def __iter__(self):
return iter(self._inputs)
def __len__(self):
return len(self._inputs)
def __bool__(self):
return bool(self._inputs)
# Python 3 wants __bool__, Python 2.7 wants __nonzero__
__nonzero__ = __bool__
def __getitem__(self, i):
return self._inputs[i]
@property
def inputs(self):
"""The list of `Tensor` objects representing the data inputs of this op."""
if self._inputs_val is None:
tf_outputs = c_api.GetOperationInputs(self._c_op)
# pylint: disable=protected-access
retval = [
self.graph._get_tensor_by_tf_output(tf_output)
for tf_output in tf_outputs
]
# pylint: enable=protected-access
self._inputs_val = Operation._InputList(retval)
return self._inputs_val
@property
def _inputs(self):
logging.warning("Operation._inputs is private, use Operation.inputs "
"instead. Operation._inputs will eventually be removed.")
return self.inputs
@_inputs.setter
def _inputs(self, value):
raise ValueError("Cannot assign _inputs")
@property
def _input_types(self):
num_inputs = c_api.TF_OperationNumInputs(self._c_op)
input_types = [
dtypes.as_dtype(c_api.TF_OperationInputType(self._tf_input(i)))
for i in xrange(num_inputs)
]
return input_types
@_input_types.setter
def _input_types(self, value):
raise ValueError("Cannot assign _input_types")
@property
def control_inputs(self):
"""The `Operation` objects on which this op has a control dependency.
Before this op is executed, TensorFlow will ensure that the
operations in `self.control_inputs` have finished executing. This
mechanism can be used to run ops sequentially for performance
reasons, or to ensure that the side effects of an op are observed
in the correct order.
Returns:
A list of `Operation` objects.
"""
control_c_ops = c_api.TF_OperationGetControlInputs_wrapper(self._c_op)
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(c_api.TF_OperationName(c_op))
for c_op in control_c_ops
]
# pylint: enable=protected-access
@property
def _control_outputs(self):
"""The `Operation` objects which have a control dependency on this op.
Before any of the ops in self._control_outputs can execute, TensorFlow will
ensure that self has finished executing.
Returns:
A list of `Operation` objects.
"""
control_c_ops = c_api.TF_OperationGetControlOutputs_wrapper(self._c_op)
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(c_api.TF_OperationName(c_op))
for c_op in control_c_ops
]
# pylint: enable=protected-access
@property
def _control_inputs(self):
logging.warning("Operation._control_inputs is private, use "
"Operation.control_inputs instead. "
"Operation._control_inputs will eventually be removed.")
return self.control_inputs
@_control_inputs.setter
def _control_inputs(self, value):
logging.warning("Operation._control_inputs is private, use "
"Operation.control_inputs instead. "
"Operation._control_inputs will eventually be removed.")
# Copy value because it may be self._control_inputs_val (in particular if
# this is called from self._control_inputs += ...), and we don't want to
# clear value below.
value = copy.copy(value)
self._remove_all_control_inputs()
self._add_control_inputs(value)
@property
def type(self):
"""The type of the op (e.g. `"MatMul"`)."""
return c_api.TF_OperationOpType(self._c_op)
@property
def graph(self):
"""The `Graph` that contains this operation."""
return self._graph
@property
def node_def(self):
# pylint: disable=line-too-long
"""Returns the `NodeDef` representation of this operation.
Returns:
A
[`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto)
protocol buffer.
"""
# pylint: enable=line-too-long
with c_api_util.tf_buffer() as buf:
c_api.TF_OperationToNodeDef(self._c_op, buf)
data = c_api.TF_GetBuffer(buf)
node_def = node_def_pb2.NodeDef()
node_def.ParseFromString(compat.as_bytes(data))
return node_def
@property
def _node_def(self):
logging.warning("Operation._node_def is private, use Operation.node_def "
"instead. Operation._node_def will eventually be removed.")
return self.node_def
@property
def op_def(self):
# pylint: disable=line-too-long
"""Returns the `OpDef` proto that represents the type of this op.
Returns:
An
[`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto)
protocol buffer.
"""
# pylint: enable=line-too-long
return self._graph._get_op_def(self.type)
@property
def _op_def(self):
logging.warning("Operation._op_def is private, use Operation.op_def "
"instead. Operation._op_def will eventually be removed.")
return self.op_def
@property
def traceback(self):
"""Returns the call stack from when this operation was constructed."""
return tf_stack.convert_stack(self._traceback)
@property
def traceback_with_start_lines(self):
"""Same as traceback but includes start line of function definition.
Returns:
A list of 5-tuples (filename, lineno, name, code, func_start_lineno).
"""
return tf_stack.convert_stack(
self._traceback, include_func_start_lineno=True)
def _set_attr(self, attr_name, attr_value):
"""Private method used to set an attribute in the node_def."""
buf = c_api.TF_NewBufferFromString(
compat.as_bytes(attr_value.SerializeToString()))
try:
# pylint: disable=protected-access
c_api.SetAttr(self._graph._c_graph, self._c_op, attr_name, buf)
# pylint: enable=protected-access
finally:
c_api.TF_DeleteBuffer(buf)
def _set_func_attr(self, attr_name, func_name):
"""Private method used to set a function attribute in the node_def."""
func = attr_value_pb2.NameAttrList(name=func_name)
self._set_attr(attr_name, attr_value_pb2.AttrValue(func=func))
def _set_func_list_attr(self, attr_name, func_names):
"""Private method used to set a list(function) attribute in the node_def."""
funcs = [attr_value_pb2.NameAttrList(name=func_name)
for func_name in func_names]
funcs_list = attr_value_pb2.AttrValue.ListValue(func=funcs)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=funcs_list))
def _set_type_list_attr(self, attr_name, types):
"""Private method used to set a list(type) attribute in the node_def."""
if not types:
return
if isinstance(types[0], dtypes.DType):
types = [dt.as_datatype_enum for dt in types]
types_list = attr_value_pb2.AttrValue.ListValue(type=types)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=types_list))
def _set_shape_list_attr(self, attr_name, shapes):
"""Private method used to set a list(shape) attribute in the node_def."""
shapes = [s.as_proto() for s in shapes]
shapes_list = attr_value_pb2.AttrValue.ListValue(shape=shapes)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=shapes_list))
def _clear_attr(self, attr_name):
"""Private method used to clear an attribute in the node_def."""
# pylint: disable=protected-access
c_api.ClearAttr(self._graph._c_graph, self._c_op, attr_name)
# pylint: enable=protected-access
def get_attr(self, name):
"""Returns the value of the attr of this op with the given `name`.
Args:
name: The name of the attr to fetch.
Returns:
The value of the attr, as a Python object.
Raises:
ValueError: If this op does not have an attr with the given `name`.
"""
fields = ("s", "i", "f", "b", "type", "shape", "tensor", "func")
try:
with c_api_util.tf_buffer() as buf:
c_api.TF_OperationGetAttrValueProto(self._c_op, name, buf)
data = c_api.TF_GetBuffer(buf)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
x = attr_value_pb2.AttrValue()
x.ParseFromString(data)
oneof_value = x.WhichOneof("value")
if oneof_value is None:
return []
if oneof_value == "list":
for f in fields:
if getattr(x.list, f):
if f == "type":
return [dtypes.as_dtype(t) for t in x.list.type]
else:
return list(getattr(x.list, f))
return []
if oneof_value == "type":
return dtypes.as_dtype(x.type)
assert oneof_value in fields, "Unsupported field type in " + str(x)
return getattr(x, oneof_value)
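# Illustrative sketch (added; assumes a graph-mode Const op built elsewhere):
#
#   op = constant_op.constant(1.0).op
#   op.get_attr("dtype")     # -> tf.float32 (returned as a DType)
#   op.get_attr("value")     # -> a TensorProto holding 1.0
#   op.get_attr("missing")   # -> ValueError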
def _get_attr_type(self, name):
"""Returns the value of the attr of this op with the given `name`.
Args:
name: The name of the attr to fetch.
Returns:
The value of the attr, as a Python object.
Raises:
ValueError: If this op does not have an attr with the given `name`.
"""
try:
dtype_enum = c_api.TF_OperationGetAttrType(self._c_op, name)
return _DTYPES_INTERN_TABLE[dtype_enum]
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
def run(self, feed_dict=None, session=None):
"""Runs this operation in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for this operation.
*N.B.* Before invoking `Operation.run()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values. See
`tf.Session.run` for a description of the valid feed values.
session: (Optional.) The `Session` to be used to run this operation. If
none, the default session will be used.
"""
_run_using_default_session(self, feed_dict, self.graph, session)
_gradient_registry = registry.Registry("gradient")
@tf_export("RegisterGradient")
class RegisterGradient(object):
"""A decorator for registering the gradient function for an op type.
This decorator is only used when defining a new op type. For an op
with `m` inputs and `n` outputs, the gradient function is a function
that takes the original `Operation` and `n` `Tensor` objects
(representing the gradients with respect to each output of the op),
and returns `m` `Tensor` objects (representing the partial gradients
with respect to each input of the op).
For example, assuming that operations of type `"Sub"` take two
inputs `x` and `y`, and return a single output `x - y`, the
following gradient function would be registered:
```python
@tf.RegisterGradient("Sub")
def _sub_grad(unused_op, grad):
return grad, tf.negative(grad)
```
The decorator argument `op_type` is the string type of an
operation. This corresponds to the `OpDef.name` field for the proto
that defines the operation.
"""
def __init__(self, op_type):
"""Creates a new decorator with `op_type` as the Operation type.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
Raises:
TypeError: If `op_type` is not string.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
self._op_type = op_type
def __call__(self, f):
"""Registers the function `f` as gradient function for `op_type`."""
_gradient_registry.register(f, self._op_type)
return f
@deprecation.deprecated_endpoints("NotDifferentiable", "NoGradient")
@tf_export("no_gradient", v1=["no_gradient", "NotDifferentiable", "NoGradient"])
def no_gradient(op_type):
"""Specifies that ops of type `op_type` is not differentiable.
This function should *not* be used for operations that have a
well-defined gradient that is not yet implemented.
This function is only used when defining a new op type. It may be
used for ops such as `tf.size()` that are not differentiable. For
example:
```python
tf.no_gradient("Size")
```
The gradient computed for 'op_type' will then propagate zeros.
For ops that have a well-defined gradient but are not yet implemented,
no declaration should be made, and an error *must* be thrown if
an attempt to request its gradient is made.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
Raises:
TypeError: If `op_type` is not a string.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
_gradient_registry.register(None, op_type)
# Aliases for the old names, will be eventually removed.
NoGradient = no_gradient
NotDifferentiable = no_gradient
def get_gradient_function(op):
"""Returns the function that computes gradients for "op"."""
if not op.inputs:
return None
try:
op_type = op.get_attr("_gradient_op_type")
except ValueError:
op_type = op.type
return _gradient_registry.lookup(op_type)
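# Sketch of how the "_gradient_op_type" override used above is typically set
# (illustrative; `array_ops` and the custom gradient registration are assumed):
#
#   @RegisterGradient("CustomId")
#   def _custom_id_grad(op, grad):
#     return [2.0 * grad]
#
#   with get_default_graph().gradient_override_map({"Identity": "CustomId"}):
#     y = array_ops.identity(x)          # node gets _gradient_op_type = "CustomId"
#   get_gradient_function(y.op)          # -> _custom_id_grad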
_shape_registry = registry.Registry("shape functions")
_default_shape_function_registry = registry.Registry("default shape functions")
# These are set to common_shapes.call_cpp_shape_fn by op generated code
# (generated by python_op_gen.cc).
# It is set outside ops.py to avoid a circular dependency.
_call_cpp_shape_fn = None
_call_cpp_shape_fn_and_require_op = None
def _set_call_cpp_shape_fn(call_cpp_shape_fn):
"""Sets default shape fns from passed common_shapes.call_cpp_shape_fn."""
global _call_cpp_shape_fn, _call_cpp_shape_fn_and_require_op
if _call_cpp_shape_fn:
return # already registered
def call_without_requiring(op):
return call_cpp_shape_fn(op, require_shape_fn=False)
_call_cpp_shape_fn = call_without_requiring
def call_with_requiring(op):
return call_cpp_shape_fn(op, require_shape_fn=True)
_call_cpp_shape_fn_and_require_op = call_with_requiring
class RegisterShape(object):
"""No longer used.
Was: A decorator for registering a shape function.
Shape functions must now be registered via the SetShapeFn on the
original Op specification in C++.
"""
def __init__(self, op_type):
"""Saves the `op_type` as the `Operation` type."""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
self._op_type = op_type
def __call__(self, f):
"""Registers "f" as the shape function for "op_type"."""
if f is None:
assert _call_cpp_shape_fn
# None is a special "weak" value that provides a default shape function,
# and can be overridden by a non-None registration.
try:
_default_shape_function_registry.register(_call_cpp_shape_fn,
self._op_type)
except KeyError:
# Ignore duplicate registrations of the weak value. This can
# occur if the op library input to wrapper generation
# inadvertently links in one or more of the standard op
# libraries.
pass
else:
_shape_registry.register(f, self._op_type)
return f
def set_shape_and_handle_data_for_outputs(_):
"""No op. TODO(b/74620627): Remove this."""
pass
class OpStats(object):
"""A holder for statistics about an operator.
This class holds information about the resource requirements for an op,
including the size of its weight parameters on-disk and how many FLOPS it
requires to execute forward inference.
If you define a new operation, you can create a function that will return a
set of information about its usage of the CPU and disk space when serialized.
The function itself takes a Graph object that's been set up so you can call
methods like get_tensor_by_name to help calculate the results, and a NodeDef
argument.
"""
def __init__(self, statistic_type, value=None):
"""Sets up the initial placeholders for the statistics."""
self.statistic_type = statistic_type
self.value = value
@property
def statistic_type(self):
return self._statistic_type
@statistic_type.setter
def statistic_type(self, statistic_type):
self._statistic_type = statistic_type
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def __iadd__(self, other):
if other.statistic_type != self.statistic_type:
raise ValueError("Can't add an OpStat of type %s to one of %s." %
(self.statistic_type, other.statistic_type))
if self.value is None:
self.value = other.value
elif other.value is not None:
self._value += other.value
return self
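# Illustrative accumulation sketch for OpStats (added comment):
#
#   total = OpStats("flops", 0)
#   total += OpStats("flops", 1200)
#   total += OpStats("flops", None)              # None contributions are ignored
#   total.value                                  # -> 1200
#   total += OpStats("weight_parameters", 5)     # raises ValueError (type mismatch)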
_stats_registry = registry.Registry("statistical functions")
class RegisterStatistics(object):
"""A decorator for registering the statistics function for an op type.
This decorator can be defined for an op type so that it gives a
report on the resources used by an instance of an operator, in the
form of an OpStats object.
Well-known types of statistics include these so far:
- flops: When running a graph, the bulk of the computation happens doing
numerical calculations like matrix multiplications. This type allows a node
to return how many floating-point operations it takes to complete. The
total number of FLOPs for a graph is a good guide to its expected latency.
You can add your own statistics just by picking a new type string, registering
functions for the ops you care about, and then calling get_stats_for_node_def.
If a statistic for an op is registered multiple times, a KeyError will be
raised.
Since the statistics are counted on a per-op basis, they are not suitable for
model parameters (capacity), which are expected to be counted only once even
when shared by multiple ops (e.g. the cells of an RNN).
For example, you can define a new metric called doohickey for a Foo operation
by placing this in your code:
```python
@ops.RegisterStatistics("Foo", "doohickey")
def _calc_foo_bojangles(unused_graph, unused_node_def):
return ops.OpStats("doohickey", 20)
```
Then in client code you can retrieve the value by making this call:
```python
doohickey = ops.get_stats_for_node_def(graph, node_def, "doohickey")
```
If the NodeDef is for an op with a registered doohickey function, you'll get
back the calculated amount in doohickey.value, or None if it's not defined.
"""
def __init__(self, op_type, statistic_type):
"""Saves the `op_type` as the `Operation` type."""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string.")
if "," in op_type:
raise TypeError("op_type must not contain a comma.")
self._op_type = op_type
if not isinstance(statistic_type, six.string_types):
raise TypeError("statistic_type must be a string.")
if "," in statistic_type:
raise TypeError("statistic_type must not contain a comma.")
self._statistic_type = statistic_type
def __call__(self, f):
"""Registers "f" as the statistics function for "op_type"."""
_stats_registry.register(f, self._op_type + "," + self._statistic_type)
return f
def get_stats_for_node_def(graph, node, statistic_type):
"""Looks up the node's statistics function in the registry and calls it.
This function takes a Graph object and a NodeDef from a GraphDef, and if
there's an associated statistics method, calls it and returns a result. If no
function has been registered for the particular node type, it returns an empty
statistics object.
Args:
graph: A Graph object that's been set up with the node's graph.
node: A NodeDef describing the operator.
statistic_type: A string identifying the statistic we're interested in.
Returns:
An OpStats object containing information about resource usage.
"""
try:
stats_func = _stats_registry.lookup(node.op + "," + statistic_type)
result = stats_func(graph, node)
except LookupError:
result = OpStats(statistic_type)
return result
def name_from_scope_name(name):
"""Returns the name of an op given the name of its scope.
Args:
name: the name of the scope.
Returns:
the name of the op (equal to scope name minus any trailing slash).
"""
return name[:-1] if (name and name[-1] == "/") else name
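# e.g. (illustrative): name_from_scope_name("foo/bar/") -> "foo/bar";
#      name_from_scope_name("foo/bar") -> "foo/bar"; name_from_scope_name("") -> ""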
_MUTATION_LOCK_GROUP = 0
_SESSION_RUN_LOCK_GROUP = 1
@tf_export("Graph")
class Graph(object):
"""A TensorFlow computation, represented as a dataflow graph.
A `Graph` contains a set of
`tf.Operation` objects,
which represent units of computation; and
`tf.Tensor` objects, which represent
the units of data that flow between operations.
A default `Graph` is always registered, and accessible by calling
`tf.compat.v1.get_default_graph`.
To add an operation to the default graph, simply call one of the functions
that defines a new `Operation`:
```python
c = tf.constant(4.0)
assert c.graph is tf.compat.v1.get_default_graph()
```
Another typical usage involves the
`tf.Graph.as_default`
context manager, which overrides the current default graph for the
lifetime of the context:
```python
g = tf.Graph()
with g.as_default():
# Define operations and tensors in `g`.
c = tf.constant(30.0)
assert c.graph is g
```
Important note: This class *is not* thread-safe for graph construction. All
operations should be created from a single thread, or external
synchronization must be provided. Unless otherwise specified, all methods
are not thread-safe.
A `Graph` instance supports an arbitrary number of "collections"
that are identified by name. For convenience when building a large
graph, collections can store groups of related objects: for
example, the `tf.Variable` uses a collection (named
`tf.GraphKeys.GLOBAL_VARIABLES`) for
all variables that are created during the construction of a graph. The caller
may define additional collections by specifying a new name.
"""
def __init__(self):
"""Creates a new, empty Graph."""
# Protects core state that can be returned via public accessors.
# Thread-safety is provided on a best-effort basis to support buggy
# programs, and is not guaranteed by the public `tf.Graph` API.
#
# NOTE(mrry): This does not protect the various stacks. A warning will
# be reported if these are used from multiple threads
self._lock = threading.RLock()
# The group lock synchronizes Session.run calls with methods that create
# and mutate ops (e.g. Graph.create_op()). This synchronization is
# necessary because it's illegal to modify an operation after it's been run.
# The group lock allows any number of threads to mutate ops at the same time
# but if any modification is going on, all Session.run calls have to wait.
# Similarly, if one or more Session.run calls are going on, all mutate ops
# have to wait until all Session.run calls have finished.
self._group_lock = lock_util.GroupLock(num_groups=2)
self._nodes_by_id = {} # GUARDED_BY(self._lock)
self._next_id_counter = 0 # GUARDED_BY(self._lock)
self._nodes_by_name = {} # GUARDED_BY(self._lock)
self._version = 0 # GUARDED_BY(self._lock)
# Maps a name used in the graph to the next id to use for that name.
self._names_in_use = {}
self._stack_state_is_thread_local = False
self._thread_local = threading.local()
# Functions that will be applied to choose a device if none is specified.
# In TF2.x or after switch_to_thread_local(),
# self._thread_local._device_function_stack is used instead.
self._graph_device_function_stack = traceable_stack.TraceableStack()
# Default original_op applied to new ops.
self._default_original_op = None
# Current control flow context. It could be either CondContext or
# WhileContext defined in ops/control_flow_ops.py
self._control_flow_context = None
# A new node will depend on the union of all of the nodes in the stack.
# In TF2.x or after switch_to_thread_local(),
# self._thread_local._control_dependencies_stack is used instead.
self._graph_control_dependencies_stack = []
# Arbitrary collections of objects.
self._collections = {}
# The graph-level random seed
self._seed = None
# A dictionary of attributes that should be applied to all ops.
self._attr_scope_map = {}
# A map from op type to the kernel label that should be used.
self._op_to_kernel_label_map = {}
# A map from op type to an alternative op type that should be used when
# computing gradients.
self._gradient_override_map = {}
# True if the graph is considered "finalized". In that case no
# new operations can be added.
self._finalized = False
# Functions defined in the graph
self._functions = collections.OrderedDict()
# Default GraphDef versions
self._graph_def_versions = versions_pb2.VersionDef(
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER)
self._building_function = False
# Stack of colocate_with ops. In TF2.x or after switch_to_thread_local(),
# self._thread_local._colocation_stack is used instead.
self._graph_colocation_stack = traceable_stack.TraceableStack()
# Set of tensors that are dangerous to feed!
self._unfeedable_tensors = object_identity.ObjectIdentitySet()
# Set of operations that are dangerous to fetch!
self._unfetchable_ops = set()
# A map of tensor handle placeholder to tensor dtype.
self._handle_feeders = {}
# A map from tensor handle to its read op.
self._handle_readers = {}
# A map from tensor handle to its move op.
self._handle_movers = {}
# A map from tensor handle to its delete op.
self._handle_deleters = {}
# Allow optimizers and other objects to pseudo-uniquely key graphs (this key
# will be shared when defining function graphs, for example, so optimizers
# being called inside function definitions behave as if they were seeing the
# actual outside graph).
self._graph_key = "grap-key-%d/" % (uid(),)
# A string with the last reduction method passed to
# losses.compute_weighted_loss(), or None. This is required only for
# backward compatibility with Estimator and optimizer V1 use cases.
self._last_loss_reduction = None
# Flag that is used to indicate whether loss has been scaled by optimizer.
    # If this flag has been set, then estimator uses it to scale loss back
# before reporting. This is required only for backward compatibility with
# Estimator and optimizer V1 use cases.
self._is_loss_scaled_by_optimizer = False
self._container = ""
self._registered_ops = op_def_registry.get_registered_ops()
# Set to True if this graph is being built in an
# AutomaticControlDependencies context.
self._add_control_dependencies = False
# Cache for OpDef protobufs retrieved via the C API.
self._op_def_cache = {}
# Cache for constant results of `broadcast_gradient_args()`. The keys are
# tuples of fully-defined shapes: (x_shape_tuple, y_shape_tuple), and the
# values are tuples of reduction indices: (rx, ry).
self._bcast_grad_args_cache = {}
# Cache for constant results of `reduced_shape()`. The keys are pairs of
# tuples: (input_shape_tuple, reduction_indices_tuple), and the values
# are pairs of tuples: (output_shape_kept_dims, tile_scaling).
self._reduced_shape_cache = {}
# TODO(skyewm): fold as much of the above as possible into the C
# implementation
self._scoped_c_graph = c_api_util.ScopedTFGraph()
# The C API requires all ops to have shape functions. Disable this
# requirement (many custom ops do not have shape functions, and we don't
# want to break these existing cases).
c_api.SetRequireShapeInferenceFns(self._c_graph, False)
if tf2.enabled():
self.switch_to_thread_local()
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@tf_contextlib.contextmanager
def _variable_creator_scope(self, creator, priority=100):
"""Scope which defines a variable creation function.
Args:
creator: A callable taking `next_creator` and `kwargs`. See the
`tf.variable_creator_scope` docstring.
priority: Creators with a higher `priority` are called first. Within the
same priority, creators are called inner-to-outer.
Yields:
`_variable_creator_scope` is a context manager with a side effect, but
doesn't return a value.
Raises:
RuntimeError: If variable creator scopes are not properly nested.
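    For example, a minimal sketch (assuming the public `tf.variable_creator_scope`
    wrapper, which delegates to this method on the default graph; the creator
    name below is illustrative):
    ```python
    def no_trainable_creator(next_creator, **kwargs):
      # Force every variable created in this scope to be non-trainable.
      kwargs["trainable"] = False
      return next_creator(**kwargs)
    with tf.variable_creator_scope(no_trainable_creator):
      v = tf.Variable(1.0)  # created with trainable=False
    ```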
"""
# This step keeps a reference to the existing stack, and it also initializes
# self._thread_local._variable_creator_stack if it doesn't exist yet.
old = self._variable_creator_stack
new = list(old)
new.append((priority, creator))
# Sorting is stable, so we'll put higher-priority creators later in the list
# but otherwise maintain registration order.
new.sort(key=lambda item: item[0])
self._thread_local._variable_creator_stack = new # pylint: disable=protected-access
try:
yield
finally:
if self._thread_local._variable_creator_stack is not new: # pylint: disable=protected-access
raise RuntimeError(
"Exiting variable_creator_scope without proper nesting.")
self._thread_local._variable_creator_stack = old # pylint: disable=protected-access
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@property
def _variable_creator_stack(self):
if not hasattr(self._thread_local, "_variable_creator_stack"):
self._thread_local._variable_creator_stack = [] # pylint: disable=protected-access
# This previously returned a copy of the stack instead of the stack itself,
# to guard against accidental mutation. Consider, however, code that wants
# to save and restore the variable creator stack:
# def f():
# original_stack = graph._variable_creator_stack
# graph._variable_creator_stack = new_stack
# ... # Some code
# graph._variable_creator_stack = original_stack
#
    # And let's say you have some code that calls this function with some
# variable_creator:
# def g():
# with variable_scope.variable_creator_scope(creator):
# f()
# When exiting the variable creator scope, it would see a different stack
    # object than it expected, leading to an "Exiting variable_creator_scope
# without proper nesting" error.
return self._thread_local._variable_creator_stack # pylint: disable=protected-access
@_variable_creator_stack.setter
def _variable_creator_stack(self, variable_creator_stack):
self._thread_local._variable_creator_stack = variable_creator_stack # pylint: disable=protected-access
def _check_not_finalized(self):
"""Check if the graph is finalized.
Raises:
RuntimeError: If the graph finalized.
"""
if self._finalized:
raise RuntimeError("Graph is finalized and cannot be modified.")
def _add_op(self, op):
"""Adds 'op' to the graph.
Args:
      op: the Operation or Tensor to add.
Raises:
TypeError: if op is not an Operation or Tensor.
ValueError: if the op.name or op._id are already used.
"""
self._check_not_finalized()
if not isinstance(op, (Tensor, Operation)):
raise TypeError("op must be a Tensor or Operation: %s" % op)
with self._lock:
# pylint: disable=protected-access
if op._id in self._nodes_by_id:
raise ValueError("cannot add an op with id %d as it already "
"exists in the graph" % op._id)
if op.name in self._nodes_by_name:
raise ValueError("cannot add op with name %s as that name "
"is already used" % op.name)
self._nodes_by_id[op._id] = op
self._nodes_by_name[op.name] = op
self._version = max(self._version, op._id)
# pylint: enable=protected-access
@property
def _c_graph(self):
if self._scoped_c_graph:
return self._scoped_c_graph.graph
return None
@property
def version(self):
"""Returns a version number that increases as ops are added to the graph.
Note that this is unrelated to the
`tf.Graph.graph_def_versions`.
Returns:
An integer version that increases as ops are added to the graph.
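    For example, a minimal sketch (the exact values are an implementation
    detail; only the increase as ops are added is meaningful):
    ```python
    g = tf.Graph()
    v0 = g.version
    with g.as_default():
      tf.constant(1.0)
    assert g.version > v0
    ```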
"""
if self._finalized:
return self._version
with self._lock:
return self._version
@property
def graph_def_versions(self):
# pylint: disable=line-too-long
"""The GraphDef version information of this graph.
For details on the meaning of each version, see
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto).
Returns:
A `VersionDef`.
"""
# pylint: enable=line-too-long
with c_api_util.tf_buffer() as buf:
c_api.TF_GraphVersions(self._c_graph, buf)
data = c_api.TF_GetBuffer(buf)
version_def = versions_pb2.VersionDef()
version_def.ParseFromString(compat.as_bytes(data))
return version_def
@property
def seed(self):
"""The graph-level random seed of this graph."""
return self._seed
@seed.setter
def seed(self, seed):
self._seed = seed
@property
def finalized(self):
"""True if this graph has been finalized."""
return self._finalized
def finalize(self):
"""Finalizes this graph, making it read-only.
After calling `g.finalize()`, no new operations can be added to
`g`. This method is used to ensure that no operations are added
to a graph when it is shared between multiple threads, for example
when using a `tf.compat.v1.train.QueueRunner`.
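    For example, a minimal sketch assuming graph-mode construction:
    ```python
    g = tf.Graph()
    with g.as_default():
      tf.constant(1.0, name="c")
    g.finalize()
    # Adding any further op to `g` now raises RuntimeError.
    ```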
"""
self._finalized = True
def _unsafe_unfinalize(self):
"""Opposite of `finalize`.
Internal interface.
NOTE: Unfinalizing a graph could have negative impact on performance,
especially in a multi-threaded environment. Unfinalizing a graph
when it is in use by a Session may lead to undefined behavior. Ensure
that all sessions using a graph are closed before calling this method.
"""
self._finalized = False
def _get_control_flow_context(self):
"""Returns the current control flow context.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, ctx):
"""Sets the current control flow context.
Args:
ctx: a context object.
"""
self._control_flow_context = ctx
def _copy_functions_to_graph_def(self, graph_def, starting_bytesize):
"""If this graph contains functions, copy them to `graph_def`."""
bytesize = starting_bytesize
for f in self._functions.values():
bytesize += f.definition.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
graph_def.library.function.extend([f.definition])
if f.grad_func_name:
grad_def = function_pb2.GradientDef()
grad_def.function_name = f.name
grad_def.gradient_func = f.grad_func_name
graph_def.library.gradient.extend([grad_def])
def _as_graph_def(self, from_version=None, add_shapes=False):
# pylint: disable=line-too-long
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using `tf.import_graph_def`) or used with the
[C++ Session API](../../../../api_docs/cc/index.md).
This method is thread-safe.
Args:
from_version: Optional. If this is set, returns a `GraphDef` containing
only the nodes that were added to this graph since its `version`
property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each node with
the inferred shapes of each of its outputs.
Returns:
A tuple containing a
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer, and the version of the graph to which that
`GraphDef` corresponds.
Raises:
ValueError: If the `graph_def` would be too large.
"""
# pylint: enable=line-too-long
with self._lock:
with c_api_util.tf_buffer() as buf:
c_api.TF_GraphToGraphDef(self._c_graph, buf)
data = c_api.TF_GetBuffer(buf)
graph = graph_pb2.GraphDef()
graph.ParseFromString(compat.as_bytes(data))
# Strip the experimental library field iff it's empty.
if not graph.library.function:
graph.ClearField("library")
if add_shapes:
for node in graph.node:
op = self._nodes_by_name[node.name]
if op.outputs:
node.attr["_output_shapes"].list.shape.extend(
[output.get_shape().as_proto() for output in op.outputs])
for function_def in graph.library.function:
defined_function = self._functions[function_def.signature.name]
try:
func_graph = defined_function.graph
except AttributeError:
# _DefinedFunction doesn't have a graph, _EagerDefinedFunction
# does. Both rely on ops.py, so we can't really isinstance check
# them.
continue
input_shapes = function_def.attr["_input_shapes"]
try:
func_graph_inputs = func_graph.inputs
except AttributeError:
continue
for input_tensor in func_graph_inputs:
if input_tensor.dtype == dtypes.resource:
# TODO(allenl): Save and restore handle data, then save the
# resource placeholder's shape. Right now some shape functions get
# confused if we set the shape of the resource placeholder (to a
# scalar of course) and there isn't any handle data.
input_shapes.list.shape.add().CopyFrom(
tensor_shape.TensorShape(None).as_proto())
else:
input_shapes.list.shape.add().CopyFrom(
input_tensor.get_shape().as_proto())
for node in function_def.node_def:
try:
op = func_graph.get_operation_by_name(node.name)
except KeyError:
continue
node.attr["_output_shapes"].list.shape.extend(
[output.get_shape().as_proto() for output in op.outputs])
return graph, self._version
def as_graph_def(self, from_version=None, add_shapes=False):
# pylint: disable=line-too-long
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using `tf.import_graph_def`) or used with the
[C++ Session API](../../api_docs/cc/index.md).
This method is thread-safe.
Args:
from_version: Optional. If this is set, returns a `GraphDef` containing
only the nodes that were added to this graph since its `version`
property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each node with
the inferred shapes of each of its outputs.
Returns:
A
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer.
Raises:
ValueError: If the `graph_def` would be too large.
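    For example, a minimal sketch assuming graph-mode construction:
    ```python
    with tf.Graph().as_default() as g:
      tf.constant(5.0, name="c")
    graph_def = g.as_graph_def()
    print([node.name for node in graph_def.node])  # ["c"]
    ```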
"""
# pylint: enable=line-too-long
result, _ = self._as_graph_def(from_version, add_shapes)
return result
def _is_function(self, name):
"""Tests whether 'name' is registered in this graph's function library.
Args:
name: string op name.
Returns:
bool indicating whether or not 'name' is registered in function library.
"""
return compat.as_str(name) in self._functions
def _get_function(self, name):
"""Returns the function definition for 'name'.
Args:
name: string function name.
Returns:
The function def proto.
"""
return self._functions.get(compat.as_str(name), None)
def _add_function(self, function):
"""Adds a function to the graph.
    After the function has been added, you can call the function by
passing the function name in place of an op name to
`Graph.create_op()`.
Args:
function: A `_DefinedFunction` object.
Raises:
ValueError: if another function is defined with the same name.
"""
name = function.name
# Sanity checks on gradient definition.
if (function.grad_func_name is not None) and (function.python_grad_func is
not None):
raise ValueError("Gradient defined twice for function %s" % name)
# Add function to graph
# pylint: disable=protected-access
gradient = (
function._grad_func._c_func.func if function._grad_func else None)
c_api.TF_GraphCopyFunction(self._c_graph, function._c_func.func, gradient)
# pylint: enable=protected-access
self._functions[compat.as_str(name)] = function
# Need a new-enough consumer to support the functions we add to the graph.
if self._graph_def_versions.min_consumer < 12:
self._graph_def_versions.min_consumer = 12
@property
def building_function(self):
"""Returns True iff this graph represents a function."""
return self._building_function
# Helper functions to create operations.
@deprecated_args(None,
"Shapes are always computed; don't use the compute_shapes "
"as it has no effect.", "compute_shapes")
def create_op(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_shapes=True,
compute_device=True):
"""Creates an `Operation` in this graph.
This is a low-level interface for creating an `Operation`. Most
programs will not call this method directly, and instead use the
Python op constructors, such as `tf.constant()`, which add ops to
the default graph.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: (Optional) A list of `DType` objects that will be the types of the
tensors that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of the
tensors that the operation consumes. By default, uses the base `DType`
of each input in `inputs`. Operations that expect reference-typed inputs
must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
computed).
compute_device: (Optional.) If True, device functions will be executed to
compute the device property of the Operation.
Raises:
TypeError: if any of the inputs is not a `Tensor`.
ValueError: if colocation conflicts with existing device assignment.
Returns:
An `Operation` object.
"""
del compute_shapes
for idx, a in enumerate(inputs):
if not isinstance(a, Tensor):
raise TypeError("Input #%d is not a tensor: %s" % (idx, a))
return self._create_op_internal(op_type, inputs, dtypes, input_types, name,
attrs, op_def, compute_device)
def _create_op_internal(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_device=True):
"""Creates an `Operation` in this graph.
Implements `Graph.create_op()` without the overhead of the deprecation
wrapper.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: (Optional) A list of `DType` objects that will be the types of the
tensors that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of the
tensors that the operation consumes. By default, uses the base `DType`
of each input in `inputs`. Operations that expect reference-typed inputs
must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_device: (Optional.) If True, device functions will be executed to
compute the device property of the Operation.
Raises:
ValueError: if colocation conflicts with existing device assignment.
Returns:
An `Operation` object.
"""
self._check_not_finalized()
if name is None:
name = op_type
    # If a name ends with a '/' it is a "name scope", and we use it as-is,
# after removing the trailing '/'.
if name and name[-1] == "/":
name = name_from_scope_name(name)
else:
name = self.unique_name(name)
node_def = _NodeDef(op_type, name, device=None, attrs=attrs)
input_ops = set([t.op for t in inputs])
control_inputs = self._control_dependencies_for_inputs(input_ops)
# _create_op_helper mutates the new Operation. `_mutation_lock` ensures a
# Session.run call cannot occur between creating and mutating the op.
with self._mutation_lock():
ret = Operation(
node_def,
self,
inputs=inputs,
output_types=dtypes,
control_inputs=control_inputs,
input_types=input_types,
original_op=self._default_original_op,
op_def=op_def)
self._create_op_helper(ret, compute_device=compute_device)
return ret
def _create_op_from_tf_operation(self, c_op, compute_device=True):
"""Creates an `Operation` in this graph from the supplied TF_Operation.
This method is like create_op() except the new Operation is constructed
using `c_op`. The returned Operation will have `c_op` as its _c_op
field. This is used to create Operation objects around TF_Operations created
indirectly by the C API (e.g. by TF_ImportGraphDef, TF_FinishWhile).
This function does not call Operation._control_flow_post_processing or
Graph._control_dependencies_for_inputs (since the inputs may not be
available yet). The caller is responsible for calling these methods.
Args:
c_op: a wrapped TF_Operation
compute_device: (Optional.) If True, device functions will be executed to
compute the device property of the Operation.
Returns:
An `Operation` object.
"""
self._check_not_finalized()
ret = Operation(c_op, self)
# If a name_scope was created with ret.name but no nodes were created in it,
# the name will still appear in _names_in_use even though the name hasn't
# been used. This is ok, just leave _names_in_use as-is in this case.
# TODO(skyewm): make the C API guarantee no name conflicts.
name_key = ret.name.lower()
if name_key not in self._names_in_use:
self._names_in_use[name_key] = 1
self._create_op_helper(ret, compute_device=compute_device)
return ret
def _create_op_helper(self, op, compute_device=True):
"""Common logic for creating an op in this graph."""
# Apply any additional attributes requested. Do not overwrite any existing
# attributes.
for key, value in self._attr_scope_map.items():
try:
op.get_attr(key)
except ValueError:
if callable(value):
value = value(op.node_def)
if not isinstance(value, (type(None), attr_value_pb2.AttrValue)):
raise TypeError(
"Callable for scope map key '%s' must return either None or "
"an AttrValue protocol buffer; but it returned: %s" %
(key, value))
if value:
op._set_attr(key, value) # pylint: disable=protected-access
# Apply a kernel label if one has been specified for this op type.
try:
kernel_label = self._op_to_kernel_label_map[op.type]
op._set_attr("_kernel", # pylint: disable=protected-access
attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label)))
except KeyError:
pass
# Apply the overriding op type for gradients if one has been specified for
# this op type.
try:
mapped_op_type = self._gradient_override_map[op.type]
op._set_attr("_gradient_op_type", # pylint: disable=protected-access
attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type)))
except KeyError:
pass
self._record_op_seen_by_control_dependencies(op)
if compute_device:
self._apply_device_functions(op)
# Snapshot the colocation stack metadata before we might generate error
# messages using it. Note that this snapshot depends on the actual stack
# and is independent of the op's _class attribute.
# pylint: disable=protected-access
op._colocation_code_locations = self._snapshot_colocation_stack_metadata()
# pylint: enable=protected-access
if self._colocation_stack:
all_colocation_groups = []
for colocation_op in self._colocation_stack.peek_objs():
all_colocation_groups.extend(colocation_op.colocation_groups())
if colocation_op.device:
# pylint: disable=protected-access
op._set_device(colocation_op.device)
# pylint: enable=protected-access
all_colocation_groups = sorted(set(all_colocation_groups))
# pylint: disable=protected-access
op._set_attr(
"_class",
attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups)))
# pylint: enable=protected-access
# Sets "container" attribute if
# (1) self._container is not None
# (2) "is_stateful" is set in OpDef
# (3) "container" attribute is in OpDef
# (4) "container" attribute is None
if self._container and op._is_stateful: # pylint: disable=protected-access
try:
container_attr = op.get_attr("container")
except ValueError:
# "container" attribute is not in OpDef
pass
else:
if not container_attr:
op._set_attr("container", attr_value_pb2.AttrValue( # pylint: disable=protected-access
s=compat.as_bytes(self._container)))
def _add_new_tf_operations(self, compute_devices=True):
"""Creates `Operations` in this graph for any new TF_Operations.
This is useful for when TF_Operations are indirectly created by the C API
outside of the Operation constructor (e.g. by TF_ImportGraphDef,
TF_FinishWhile). This ensures there are corresponding Operations for all
TF_Operations in the underlying TF_Graph.
Args:
compute_devices: (Optional.) If True, device functions will be executed to
compute the device properties of each new Operation.
Returns:
A list of the new `Operation` objects.
"""
# Create all Operation objects before accessing their inputs since an op may
# be created before its inputs.
new_ops = [
self._create_op_from_tf_operation(c_op, compute_device=compute_devices)
for c_op in c_api_util.new_tf_operations(self)
]
# pylint: disable=protected-access
for op in new_ops:
new_control_inputs = self._control_dependencies_for_inputs(op.inputs)
op._add_control_inputs(new_control_inputs)
op._control_flow_post_processing()
# pylint: enable=protected-access
return new_ops
def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):
"""Returns the object referred to by `obj`, as an `Operation` or `Tensor`.
This function validates that `obj` represents an element of this
graph, and gives an informative error message if it is not.
This function is the canonical way to get/validate an object of
one of the allowed types from an external argument reference in the
Session API.
This method may be called concurrently from multiple threads.
Args:
obj: A `Tensor`, an `Operation`, or the name of a tensor or operation. Can
also be any object with an `_as_graph_element()` method that returns a
value of one of these types. Note: `_as_graph_element` will be called
inside the graph's lock and so may not modify the graph.
allow_tensor: If true, `obj` may refer to a `Tensor`.
allow_operation: If true, `obj` may refer to an `Operation`.
Returns:
The `Tensor` or `Operation` in the Graph corresponding to `obj`.
Raises:
      TypeError: If `obj` is not of a type that this method can attempt to
        convert to the allowed types.
ValueError: If `obj` is of an appropriate type but invalid. For
example, an invalid string.
KeyError: If `obj` is not an object in the graph.
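    For example, a minimal sketch assuming graph-mode construction:
    ```python
    with tf.Graph().as_default() as g:
      c = tf.constant(1.0, name="c")
    assert g.as_graph_element("c:0") is c     # tensor by name
    assert g.as_graph_element("c") is c.op    # operation by name
    assert g.as_graph_element(c) is c         # objects pass through unchanged
    ```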
"""
if self._finalized:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
with self._lock:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):
"""See `Graph.as_graph_element()` for details."""
# The vast majority of this function is figuring
# out what an API user might be doing wrong, so
# that we can give helpful error messages.
#
# Ideally, it would be nice to split it up, but we
# need context to generate nice error messages.
if allow_tensor and allow_operation:
types_str = "Tensor or Operation"
elif allow_tensor:
types_str = "Tensor"
elif allow_operation:
types_str = "Operation"
else:
raise ValueError("allow_tensor and allow_operation can't both be False.")
temp_obj = _as_graph_element(obj)
if temp_obj is not None:
obj = temp_obj
# If obj appears to be a name...
if isinstance(obj, compat.bytes_or_text_types):
name = compat.as_str(obj)
if ":" in name and allow_tensor:
# Looks like a Tensor name and can be a Tensor.
try:
op_name, out_n = name.split(":")
out_n = int(out_n)
except:
raise ValueError("The name %s looks a like a Tensor name, but is "
"not a valid one. Tensor names must be of the "
"form \"<op_name>:<output_index>\"." % repr(name))
if op_name in self._nodes_by_name:
op = self._nodes_by_name[op_name]
else:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, does not exist in the "
"graph." % (repr(name), repr(op_name)))
try:
return op.outputs[out_n]
except:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, exists but only has "
"%s outputs." %
(repr(name), repr(op_name), len(op.outputs)))
elif ":" in name and not allow_tensor:
# Looks like a Tensor name but can't be a Tensor.
raise ValueError("Name %s appears to refer to a Tensor, not a %s." %
(repr(name), types_str))
elif ":" not in name and allow_operation:
# Looks like an Operation name and can be an Operation.
if name not in self._nodes_by_name:
raise KeyError("The name %s refers to an Operation not in the "
"graph." % repr(name))
return self._nodes_by_name[name]
elif ":" not in name and not allow_operation:
# Looks like an Operation name but can't be an Operation.
if name in self._nodes_by_name:
# Yep, it's an Operation name
err_msg = ("The name %s refers to an Operation, not a %s." %
(repr(name), types_str))
else:
err_msg = ("The name %s looks like an (invalid) Operation name, "
"not a %s." % (repr(name), types_str))
err_msg += (" Tensor names must be of the form "
"\"<op_name>:<output_index>\".")
raise ValueError(err_msg)
elif isinstance(obj, Tensor) and allow_tensor:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Tensor %s is not an element of this graph." % obj)
return obj
elif isinstance(obj, Operation) and allow_operation:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Operation %s is not an element of this graph." % obj)
return obj
else:
# We give up!
raise TypeError("Can not convert a %s into a %s." %
(type(obj).__name__, types_str))
def get_operations(self):
"""Return the list of operations in the graph.
You can modify the operations in place, but modifications
    to the list, such as inserts or deletes, have no effect on the
list of operations known to the graph.
This method may be called concurrently from multiple threads.
Returns:
A list of Operations.
"""
if self._finalized:
return list(self._nodes_by_id.values())
with self._lock:
return list(self._nodes_by_id.values())
def get_operation_by_name(self, name):
"""Returns the `Operation` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to an operation in this graph.
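    For example, a minimal sketch assuming graph-mode construction:
    ```python
    with tf.Graph().as_default() as g:
      c = tf.constant(1.0, name="c")
    assert g.get_operation_by_name("c") is c.op
    ```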
"""
if not isinstance(name, six.string_types):
raise TypeError("Operation names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=False, allow_operation=True)
def _get_operation_by_name_unsafe(self, name):
"""Returns the `Operation` with the given `name`.
    This is an internal, unsafe version of `get_operation_by_name`. It skips many
    checks and does not have user-friendly error messages but runs considerably
faster. This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
KeyError: If `name` does not correspond to an operation in this graph.
"""
if self._finalized:
return self._nodes_by_name[name]
with self._lock:
return self._nodes_by_name[name]
def _get_operation_by_tf_operation(self, tf_oper):
op_name = c_api.TF_OperationName(tf_oper)
return self._get_operation_by_name_unsafe(op_name)
def get_tensor_by_name(self, name):
"""Returns the `Tensor` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Tensor` to return.
Returns:
The `Tensor` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to a tensor in this graph.
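    For example, a minimal sketch assuming graph-mode construction (the
    ":0" suffix selects the operation's first output):
    ```python
    with tf.Graph().as_default() as g:
      c = tf.constant(1.0, name="c")
    assert g.get_tensor_by_name("c:0") is c
    ```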
"""
# Names should be strings.
if not isinstance(name, six.string_types):
raise TypeError("Tensor names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=True, allow_operation=False)
def _get_tensor_by_tf_output(self, tf_output):
"""Returns the `Tensor` representing `tf_output`.
Note that there is only one such `Tensor`, i.e. multiple calls to this
function with the same TF_Output value will always return the same `Tensor`
object.
Args:
tf_output: A wrapped `TF_Output` (the C API equivalent of `Tensor`).
Returns:
The `Tensor` that represents `tf_output`.
"""
op = self._get_operation_by_tf_operation(tf_output.oper)
return op.outputs[tf_output.index]
def _next_id(self):
"""Id for next Operation instance. Also increments the internal id."""
self._check_not_finalized()
with self._lock:
self._next_id_counter += 1
return self._next_id_counter
@property
def _last_id(self):
return self._next_id_counter
def _get_op_def(self, type): # pylint: disable=redefined-builtin
"""Returns the `OpDef` proto for `type`. `type` is a string."""
# NOTE: No locking is required because the lookup and insertion operations
# on Python dictionaries are atomic.
try:
return self._op_def_cache[type]
except KeyError:
with c_api_util.tf_buffer() as buf:
# pylint: disable=protected-access
c_api.TF_GraphGetOpDef(self._c_graph, compat.as_bytes(type), buf)
# pylint: enable=protected-access
data = c_api.TF_GetBuffer(buf)
op_def = op_def_pb2.OpDef()
op_def.ParseFromString(compat.as_bytes(data))
self._op_def_cache[type] = op_def
return op_def
def as_default(self):
"""Returns a context manager that makes this `Graph` the default graph.
This method should be used if you want to create multiple graphs
in the same process. For convenience, a global default graph is
provided, and all ops will be added to this graph if you do not
create a new graph explicitly.
Use this method with the `with` keyword to specify that ops created within
the scope of a block should be added to this graph. In this case, once
the scope of the `with` is exited, the previous default graph is set again
as default. There is a stack, so it's ok to have multiple nested levels
of `as_default` calls.
The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
The following code examples are equivalent:
```python
# 1. Using Graph.as_default():
g = tf.Graph()
with g.as_default():
c = tf.constant(5.0)
assert c.graph is g
# 2. Constructing and making default:
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
assert c.graph is g
```
If eager execution is enabled ops created under this context manager will be
added to the graph instead of executed eagerly.
Returns:
A context manager for using this graph as the default graph.
"""
return _default_graph_stack.get_controller(self)
@property
def collections(self):
"""Returns the names of the collections known to this graph."""
return list(self._collections)
def add_to_collection(self, name, value):
"""Stores `value` in the collection with the given `name`.
Note that collections are not sets, so it is possible to add a value to
a collection several times.
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
value: The value to add to the collection.
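    For example, a minimal sketch (the collection name is illustrative):
    ```python
    g = tf.Graph()
    with g.as_default():
      c = tf.constant(1.0, name="c")
      g.add_to_collection("my_things", c)
    assert g.get_collection("my_things") == [c]
    ```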
""" # pylint: disable=g-doc-exception
self._check_not_finalized()
with self._lock:
if name not in self._collections:
self._collections[name] = [value]
else:
self._collections[name].append(value)
def add_to_collections(self, names, value):
"""Stores `value` in the collections given by `names`.
Note that collections are not sets, so it is possible to add a value to
a collection several times. This function makes sure that duplicates in
`names` are ignored, but it will not check for pre-existing membership of
`value` in any of the collections in `names`.
`names` can be any iterable, but if `names` is a string, it is treated as a
single collection name.
Args:
names: The keys for the collections to add to. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collections.
"""
# Make sure names are unique, but treat strings as a single collection name
names = (names,) if isinstance(names, six.string_types) else set(names)
for name in names:
self.add_to_collection(name, value)
def get_collection_ref(self, name):
"""Returns a list of values in the collection with the given `name`.
If the collection exists, this returns the list itself, which can
be modified in place to change the collection. If the collection does
not exist, it is created as an empty list and the list is returned.
This is different from `get_collection()` which always returns a copy of
the collection list if it exists and never creates an empty collection.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection.
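    For example, a minimal sketch showing in-place modification (the
    collection name is illustrative):
    ```python
    g = tf.Graph()
    ref = g.get_collection_ref("things")
    ref.append(1.0)
    assert g.get_collection("things") == [1.0]
    ```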
""" # pylint: disable=g-doc-exception
with self._lock:
coll_list = self._collections.get(name, None)
if coll_list is None:
coll_list = []
self._collections[name] = coll_list
return coll_list
def get_collection(self, name, scope=None):
"""Returns a list of values in the collection with the given `name`.
    Unlike `get_collection_ref()`, which returns the actual collection list if
    it exists, this method always returns a new copy of the list each time it
    is called.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
scope: (Optional.) A string. If supplied, the resulting list is filtered
to include only items whose `name` attribute matches `scope` using
`re.match`. Items without a `name` attribute are never returned if a
scope is supplied. The choice of `re.match` means that a `scope` without
special tokens filters by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
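    For example, a minimal sketch of scope filtering (names are illustrative;
    the scope is matched against item names with `re.match`, i.e. as a prefix):
    ```python
    with tf.Graph().as_default() as g:
      with g.name_scope("layer1"):
        a = tf.constant(1.0, name="a")
      g.add_to_collection("acts", a)
    assert g.get_collection("acts", scope="layer1") == [a]
    assert g.get_collection("acts", scope="layer2") == []
    ```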
""" # pylint: disable=g-doc-exception
with self._lock:
collection = self._collections.get(name, None)
if collection is None:
return []
if scope is None:
return list(collection)
else:
c = []
regex = re.compile(scope)
for item in collection:
try:
if regex.match(item.name):
c.append(item)
except AttributeError:
# Collection items with no name are ignored.
pass
return c
def get_all_collection_keys(self):
"""Returns a list of collections used in this graph."""
with self._lock:
return [x for x in self._collections if isinstance(x, six.string_types)]
def clear_collection(self, name):
"""Clears all values in a collection.
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
"""
self._check_not_finalized()
with self._lock:
if name in self._collections:
del self._collections[name]
@tf_contextlib.contextmanager
def _original_op(self, op):
"""Python 'with' handler to help annotate ops with their originator.
An op may have an 'original_op' property that indicates the op on which
it was based. For example a replica op is based on the op that was
replicated and a gradient op is based on the op that was differentiated.
All ops created in the scope of this 'with' handler will have
the given 'op' as their original op.
Args:
op: The Operation that all ops created in this scope will have as their
original op.
Yields:
Nothing.
"""
old_original_op = self._default_original_op
self._default_original_op = op
try:
yield
finally:
self._default_original_op = old_original_op
@property
def _name_stack(self):
# This may be called from a thread where name_stack doesn't yet exist.
if not hasattr(self._thread_local, "_name_stack"):
self._thread_local._name_stack = ""
return self._thread_local._name_stack
@_name_stack.setter
def _name_stack(self, name_stack):
self._thread_local._name_stack = name_stack
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_contextlib.contextmanager
def name_scope(self, name):
"""Returns a context manager that creates hierarchical names for operations.
A graph maintains a stack of name scopes. A `with name_scope(...):`
statement pushes a new name onto the stack for the lifetime of the context.
The `name` argument will be interpreted as follows:
* A string (not ending with '/') will create a new name scope, in which
`name` is appended to the prefix of all operations created in the
context. If `name` has been used before, it will be made unique by
calling `self.unique_name(name)`.
* A scope previously captured from a `with g.name_scope(...) as
scope:` statement will be treated as an "absolute" name scope, which
makes it possible to re-enter existing scopes.
* A value of `None` or the empty string will reset the current name scope
to the top-level (empty) name scope.
For example:
```python
with tf.Graph().as_default() as g:
c = tf.constant(5.0, name="c")
assert c.op.name == "c"
c_1 = tf.constant(6.0, name="c")
assert c_1.op.name == "c_1"
# Creates a scope called "nested"
with g.name_scope("nested") as scope:
nested_c = tf.constant(10.0, name="c")
assert nested_c.op.name == "nested/c"
# Creates a nested scope called "inner".
with g.name_scope("inner"):
nested_inner_c = tf.constant(20.0, name="c")
assert nested_inner_c.op.name == "nested/inner/c"
# Create a nested scope called "inner_1".
with g.name_scope("inner"):
nested_inner_1_c = tf.constant(30.0, name="c")
assert nested_inner_1_c.op.name == "nested/inner_1/c"
# Treats `scope` as an absolute name scope, and
# switches to the "nested/" scope.
with g.name_scope(scope):
nested_d = tf.constant(40.0, name="d")
assert nested_d.op.name == "nested/d"
with g.name_scope(""):
e = tf.constant(50.0, name="e")
assert e.op.name == "e"
```
The name of the scope itself can be captured by `with
g.name_scope(...) as scope:`, which stores the name of the scope
in the variable `scope`. This value can be used to name an
operation that represents the overall result of executing the ops
in a scope. For example:
```python
inputs = tf.constant(...)
with g.name_scope('my_layer') as scope:
weights = tf.Variable(..., name="weights")
biases = tf.Variable(..., name="biases")
affine = tf.matmul(inputs, weights) + biases
output = tf.nn.relu(affine, name=scope)
```
NOTE: This constructor validates the given `name`. Valid scope
names match one of the following regular expressions:
[A-Za-z0-9.][A-Za-z0-9_.\\-/]* (for scopes at the root)
[A-Za-z0-9_.\\-/]* (for other scopes)
Args:
name: A name for the scope.
Returns:
A context manager that installs `name` as a new name scope.
Raises:
ValueError: If `name` is not a valid scope name, according to the rules
above.
"""
if name:
if isinstance(name, compat.bytes_or_text_types):
name = compat.as_str(name)
if self._name_stack:
# Scopes created in a nested scope may have initial characters
# that are illegal as the initial character of an op name
# (viz. '-', '\', '/', and '_').
if not _VALID_SCOPE_NAME_REGEX.match(name):
raise ValueError("'%s' is not a valid scope name" % name)
else:
# Scopes created in the root must match the more restrictive
# op name regex, which constrains the initial character.
if not _VALID_OP_NAME_REGEX.match(name):
raise ValueError("'%s' is not a valid scope name" % name)
old_stack = self._name_stack
if not name: # Both for name=None and name="" we re-set to empty scope.
new_stack = None
elif name[-1] == "/":
new_stack = name_from_scope_name(name)
else:
new_stack = self.unique_name(name)
self._name_stack = new_stack
try:
yield "" if new_stack is None else new_stack + "/"
finally:
self._name_stack = old_stack
# pylint: enable=g-doc-return-or-yield,line-too-long
def unique_name(self, name, mark_as_used=True):
"""Return a unique operation name for `name`.
Note: You rarely need to call `unique_name()` directly. Most of
the time you just need to create `with g.name_scope()` blocks to
generate structured names.
`unique_name` is used to generate structured names, separated by
`"/"`, to help identify operations when debugging a graph.
Operation names are displayed in error messages reported by the
TensorFlow runtime, and in various visualization tools such as
TensorBoard.
If `mark_as_used` is set to `True`, which is the default, a new
unique name is created and marked as in use. If it's set to `False`,
the unique name is returned without actually being marked as used.
This is useful when the caller simply wants to know what the name
to be created will be.
Args:
name: The name for an operation.
mark_as_used: Whether to mark this name as being used.
Returns:
A string to be passed to `create_op()` that will be used
to name the operation being created.
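    For example, a minimal sketch (the suffixing mirrors the behaviour shown in
    the `name_scope` documentation):
    ```python
    g = tf.Graph()
    assert g.unique_name("foo") == "foo"
    assert g.unique_name("foo") == "foo_1"
    assert g.unique_name("foo") == "foo_2"
    ```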
"""
if self._name_stack:
name = self._name_stack + "/" + name
# For the sake of checking for names in use, we treat names as case
# insensitive (e.g. foo = Foo).
name_key = name.lower()
i = self._names_in_use.get(name_key, 0)
# Increment the number for "name_key".
if mark_as_used:
self._names_in_use[name_key] = i + 1
if i > 0:
base_name_key = name_key
# Make sure the composed name key is not already used.
while name_key in self._names_in_use:
name_key = "%s_%d" % (base_name_key, i)
i += 1
# Mark the composed name_key as used in case someone wants
# to call unique_name("name_1").
if mark_as_used:
self._names_in_use[name_key] = 1
# Return the new name with the original capitalization of the given name.
name = "%s_%d" % (name, i - 1)
return name
def get_name_scope(self):
"""Returns the current name scope.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.compat.v1.get_default_graph().get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope.
"""
return self._name_stack
@tf_contextlib.contextmanager
def _colocate_with_for_gradient(self, op, gradient_uid,
ignore_existing=False):
with self.colocate_with(op, ignore_existing):
if gradient_uid is not None and self._control_flow_context is not None:
self._control_flow_context.EnterGradientColocation(op, gradient_uid)
try:
yield
finally:
self._control_flow_context.ExitGradientColocation(op, gradient_uid)
else:
yield
@tf_contextlib.contextmanager
def colocate_with(self, op, ignore_existing=False):
"""Returns a context manager that specifies an op to colocate with.
Note: this function is not for public use, only for internal libraries.
For example:
```python
a = tf.Variable([1.0])
with g.colocate_with(a):
b = tf.constant(1.0)
c = tf.add(a, b)
```
`b` and `c` will always be colocated with `a`, no matter where `a`
is eventually placed.
**NOTE** Using a colocation scope resets any existing device constraints.
If `op` is `None` then `ignore_existing` must be `True` and the new
scope resets all colocation and device constraints.
Args:
op: The op to colocate all created ops with, or `None`.
ignore_existing: If true, only applies colocation of this op within the
context, rather than applying all colocation properties on the stack.
If `op` is `None`, this value must be `True`.
Raises:
ValueError: if op is None but ignore_existing is False.
Yields:
A context manager that specifies the op with which to colocate
newly created ops.
"""
if op is None and not ignore_existing:
raise ValueError("Trying to reset colocation (op is None) but "
"ignore_existing is not True")
op = _op_to_colocate_with(op, self)
# By default, colocate_with resets the device function stack,
# since colocate_with is typically used in specific internal
# library functions where colocation is intended to be "stronger"
# than device functions.
#
# In the future, a caller may specify that device_functions win
# over colocation, in which case we can add support.
device_fn_tmp = self._device_function_stack
self._device_function_stack = traceable_stack.TraceableStack()
if ignore_existing:
current_stack = self._colocation_stack
self._colocation_stack = traceable_stack.TraceableStack()
if op is not None:
# offset refers to the stack frame used for storing code location.
# We use 4, the sum of 1 to use our caller's stack frame and 3
# to jump over layers of context managers above us.
self._colocation_stack.push_obj(op, offset=4)
try:
yield
finally:
# Restore device function stack
self._device_function_stack = device_fn_tmp
if op is not None:
self._colocation_stack.pop_obj()
# Reset the colocation stack if requested.
if ignore_existing:
self._colocation_stack = current_stack
def _add_device_to_stack(self, device_name_or_function, offset=0):
"""Add device to stack manually, separate from a context manager."""
total_offset = 1 + offset
spec = _UserDeviceSpec(device_name_or_function)
self._device_function_stack.push_obj(spec, offset=total_offset)
return spec
@tf_contextlib.contextmanager
def device(self, device_name_or_function):
# pylint: disable=line-too-long
"""Returns a context manager that specifies the default device to use.
The `device_name_or_function` argument may either be a device name
string, a device function, or None:
* If it is a device name string, all operations constructed in
this context will be assigned to the device with that name, unless
overridden by a nested `device()` context.
* If it is a function, it will be treated as a function from
Operation objects to device name strings, and invoked each time
a new Operation is created. The Operation will be assigned to
the device with the returned name.
* If it is None, all `device()` invocations from the enclosing context
will be ignored.
For information about the valid syntax of device name strings, see
the documentation in
[`DeviceNameUtils`](https://www.tensorflow.org/code/tensorflow/core/util/device_name_utils.h).
For example:
```python
with g.device('/device:GPU:0'):
# All operations constructed in this context will be placed
# on GPU 0.
with g.device(None):
# All operations constructed in this context will have no
# assigned device.
# Defines a function from `Operation` to device string.
def matmul_on_gpu(n):
if n.type == "MatMul":
return "/device:GPU:0"
else:
return "/cpu:0"
with g.device(matmul_on_gpu):
# All operations of type "MatMul" constructed in this context
# will be placed on GPU 0; all other operations will be placed
# on CPU 0.
```
**N.B.** The device scope may be overridden by op wrappers or
other library code. For example, a variable assignment op
`v.assign()` must be colocated with the `tf.Variable` `v`, and
incompatible device scopes will be ignored.
Args:
device_name_or_function: The device name or function to use in the
context.
Yields:
A context manager that specifies the default device to use for newly
created ops.
Raises:
RuntimeError: If device scopes are not properly nested.
"""
self._add_device_to_stack(device_name_or_function, offset=2)
old_top_of_stack = self._device_function_stack.peek_top_obj()
try:
yield
finally:
new_top_of_stack = self._device_function_stack.peek_top_obj()
if old_top_of_stack is not new_top_of_stack:
raise RuntimeError("Exiting device scope without proper scope nesting.")
self._device_function_stack.pop_obj()
def _apply_device_functions(self, op):
"""Applies the current device function stack to the given operation."""
# Apply any device functions in LIFO order, so that the most recently
# pushed function has the first chance to apply a device to the op.
# We apply here because the result can depend on the Operation's
# signature, which is computed in the Operation constructor.
# pylint: disable=protected-access
prior_device_string = None
for device_spec in self._device_function_stack.peek_objs():
if device_spec.is_null_merge:
continue
if device_spec.function is None:
break
device_string = device_spec.string_merge(op)
# Take advantage of the fact that None is a singleton and Python interns
# strings, since identity checks are faster than equality checks.
if device_string is not prior_device_string:
op._set_device_from_string(device_string)
prior_device_string = device_string
op._device_code_locations = self._snapshot_device_function_stack_metadata()
# pylint: enable=protected-access
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def container(self, container_name):
"""Returns a context manager that specifies the resource container to use.
Stateful operations, such as variables and queues, can maintain their
states on devices so that they can be shared by multiple processes.
A resource container is a string name under which these stateful
operations are tracked. These resources can be released or cleared
with `tf.Session.reset()`.
For example:
```python
with g.container('experiment0'):
# All stateful Operations constructed in this context will be placed
# in resource container "experiment0".
v1 = tf.Variable([1.0])
v2 = tf.Variable([2.0])
with g.container("experiment1"):
# All stateful Operations constructed in this context will be
# placed in resource container "experiment1".
v3 = tf.Variable([3.0])
q1 = tf.queue.FIFOQueue(10, tf.float32)
      # All stateful Operations constructed in this context will be
      # placed in resource container "experiment0".
v4 = tf.Variable([4.0])
q1 = tf.queue.FIFOQueue(20, tf.float32)
with g.container(""):
        # All stateful Operations constructed in this context will be
        # placed in the default resource container.
v5 = tf.Variable([5.0])
q3 = tf.queue.FIFOQueue(30, tf.float32)
# Resets container "experiment0", after which the state of v1, v2, v4, q1
# will become undefined (such as uninitialized).
tf.Session.reset(target, ["experiment0"])
```
Args:
container_name: container name string.
Returns:
A context manager for defining resource containers for stateful ops,
yields the container name.
"""
original_container = self._container
self._container = container_name
try:
yield self._container
finally:
self._container = original_container
# pylint: enable=g-doc-return-or-yield
class _ControlDependenciesController(object):
"""Context manager for `control_dependencies()`."""
def __init__(self, graph, control_inputs):
"""Create a new `_ControlDependenciesController`.
A `_ControlDependenciesController` is the context manager for
`with tf.control_dependencies()` blocks. These normally nest,
as described in the documentation for `control_dependencies()`.
The `control_inputs` argument list control dependencies that must be
added to the current set of control dependencies. Because of
uniquification the set can be empty even if the caller passed a list of
ops. The special value `None` indicates that we want to start a new
empty set of control dependencies instead of extending the current set.
In that case we also clear the current control flow context, which is an
additional mechanism to add control dependencies.
Args:
graph: The graph that this controller is managing.
control_inputs: List of ops to use as control inputs in addition to the
current control dependencies. None to indicate that the dependencies
should be cleared.
"""
self._graph = graph
if control_inputs is None:
self._control_inputs_val = []
self._new_stack = True
else:
self._control_inputs_val = control_inputs
self._new_stack = False
self._seen_nodes = set()
self._old_stack = None
self._old_control_flow_context = None
# pylint: disable=protected-access
def __enter__(self):
if self._new_stack:
# Clear the control_dependencies graph.
self._old_stack = self._graph._control_dependencies_stack
self._graph._control_dependencies_stack = []
# Clear the control_flow_context too.
self._old_control_flow_context = self._graph._get_control_flow_context()
self._graph._set_control_flow_context(None)
self._graph._push_control_dependencies_controller(self)
def __exit__(self, unused_type, unused_value, unused_traceback):
self._graph._pop_control_dependencies_controller(self)
if self._new_stack:
self._graph._control_dependencies_stack = self._old_stack
self._graph._set_control_flow_context(self._old_control_flow_context)
# pylint: enable=protected-access
@property
def control_inputs(self):
return self._control_inputs_val
def add_op(self, op):
if isinstance(op, Tensor):
op = op.experimental_ref()
self._seen_nodes.add(op)
def op_in_group(self, op):
if isinstance(op, Tensor):
op = op.experimental_ref()
return op in self._seen_nodes
def _push_control_dependencies_controller(self, controller):
self._control_dependencies_stack.append(controller)
def _pop_control_dependencies_controller(self, controller):
assert self._control_dependencies_stack[-1] is controller
self._control_dependencies_stack.pop()
def _current_control_dependencies(self):
ret = set()
for controller in self._control_dependencies_stack:
for op in controller.control_inputs:
ret.add(op)
return ret
def _control_dependencies_for_inputs(self, input_ops):
"""For an op that takes `input_ops` as inputs, compute control inputs.
The returned control dependencies should yield an execution that
is equivalent to adding all control inputs in
self._control_dependencies_stack to a newly created op. However,
this function attempts to prune the returned control dependencies
by observing that nodes created within the same `with
control_dependencies(...):` block may have data dependencies that make
the explicit approach redundant.
Args:
input_ops: The data input ops for an op to be created.
Returns:
A list of control inputs for the op to be created.
"""
ret = []
for controller in self._control_dependencies_stack:
# If any of the input_ops already depends on the inputs from controller,
# we say that the new op is dominated (by that input), and we therefore
# do not need to add control dependencies for this controller's inputs.
dominated = False
for op in input_ops:
if controller.op_in_group(op):
dominated = True
break
if not dominated:
        # Don't add a control input if we already have a data dependency on
        # the input.
# NOTE(mrry): We do not currently track transitive data dependencies,
# so we may add redundant control inputs.
ret.extend([c for c in controller.control_inputs if c not in input_ops])
return ret
def _record_op_seen_by_control_dependencies(self, op):
"""Record that the given op depends on all registered control dependencies.
Args:
op: An Operation.
"""
for controller in self._control_dependencies_stack:
controller.add_op(op)
def control_dependencies(self, control_inputs):
"""Returns a context manager that specifies control dependencies.
Use with the `with` keyword to specify that all operations constructed
within the context should have control dependencies on
`control_inputs`. For example:
```python
with g.control_dependencies([a, b, c]):
# `d` and `e` will only run after `a`, `b`, and `c` have executed.
d = ...
e = ...
```
Multiple calls to `control_dependencies()` can be nested, and in
that case a new `Operation` will have control dependencies on the union
of `control_inputs` from all active contexts.
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `a`, `b`, `c`, and `d`.
```
You can pass None to clear the control dependencies:
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies(None):
# Ops constructed here run normally, not waiting for either `a` or `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `c` and `d`, also not waiting
# for either `a` or `b`.
```
*N.B.* The control dependencies context applies *only* to ops that
are constructed within the context. Merely using an op or tensor
in the context does not add a control dependency. The following
example illustrates this point:
```python
# WRONG
def my_func(pred, tensor):
t = tf.matmul(tensor, tensor)
with tf.control_dependencies([pred]):
# The matmul op is created outside the context, so no control
# dependency will be added.
return t
# RIGHT
def my_func(pred, tensor):
with tf.control_dependencies([pred]):
# The matmul op is created in the context, so a control dependency
# will be added.
return tf.matmul(tensor, tensor)
```
Also note that though execution of ops created under this scope will trigger
execution of the dependencies, the ops created under this scope might still
be pruned from a normal tensorflow graph. For example, in the following
snippet of code the dependencies are never executed:
```python
loss = model.loss()
with tf.control_dependencies(dependencies):
loss = loss + tf.constant(1) # note: dependencies ignored in the
# backward pass
return tf.gradients(loss, model.variables)
```
This is because evaluating the gradient graph does not require evaluating
the constant(1) op created in the forward pass.
Args:
control_inputs: A list of `Operation` or `Tensor` objects which must be
executed or computed before running the operations defined in the
context. Can also be `None` to clear the control dependencies.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
Raises:
TypeError: If `control_inputs` is not a list of `Operation` or
`Tensor` objects.
"""
if control_inputs is None:
return self._ControlDependenciesController(self, None)
# First convert the inputs to ops, and deduplicate them.
# NOTE(mrry): Other than deduplication, we do not currently track direct
# or indirect dependencies between control_inputs, which may result in
# redundant control inputs.
control_ops = []
current = self._current_control_dependencies()
for c in control_inputs:
# The hasattr check on `_handle` is designed to match ResourceVariables. This is so
# control dependencies on a variable or on an unread variable don't
# trigger reads.
if (isinstance(c, IndexedSlices) or
(hasattr(c, "_handle") and hasattr(c, "op"))):
c = c.op
c = self.as_graph_element(c)
if isinstance(c, Tensor):
c = c.op
elif not isinstance(c, Operation):
raise TypeError("Control input must be Operation or Tensor: %s" % c)
if c not in current:
control_ops.append(c)
current.add(c)
return self._ControlDependenciesController(self, control_ops)
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _attr_scope(self, attr_map):
"""EXPERIMENTAL: A context manager for setting attributes on operators.
This context manager can be used to add additional
attributes to operators within the scope of the context.
For example:
with ops.Graph().as_default() as g:
f_1 = Foo() # No extra attributes
with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=False)}):
f_2 = Foo() # Additional attribute _a=False
with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=True)}):
f_3 = Foo()  # Additional attribute _a=True
with g._attr_scope({"_a": None}):
f_4 = Foo() # No additional attributes.
Args:
attr_map: A dictionary mapping attr name strings to AttrValue protocol
buffers or None.
Returns:
A context manager that adds the given attributes to one or more ops
created in that context.
Raises:
TypeError: If attr_map is not a dictionary mapping
strings to AttrValue protobufs.
"""
if not isinstance(attr_map, dict):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers")
# The saved_attrs dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_attrs = {}
# Install the given attribute
for name, attr in attr_map.items():
if not (isinstance(name, six.string_types) and
(isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or
callable(attr))):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers or "
"callables that emit AttrValue protocol buffers")
try:
saved_attrs[name] = self._attr_scope_map[name]
except KeyError:
pass
if attr is None:
del self._attr_scope_map[name]
else:
self._attr_scope_map[name] = attr
try:
yield # The code within the context runs here.
finally:
# Remove the attributes set for this context, and restore any saved
# attributes.
for name, attr in attr_map.items():
try:
self._attr_scope_map[name] = saved_attrs[name]
except KeyError:
del self._attr_scope_map[name]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _kernel_label_map(self, op_to_kernel_label_map):
"""EXPERIMENTAL: A context manager for setting kernel labels.
This context manager can be used to select particular
implementations of kernels within the scope of the context.
For example:
with ops.Graph().as_default() as g:
f_1 = Foo() # Uses the default registered kernel for the Foo op.
with g.kernel_label_map({"Foo": "v_2"}):
f_2 = Foo() # Uses the registered kernel with label "v_2"
# for the Foo op.
with g.kernel_label_map({"Foo": "v_3"}):
f_3 = Foo() # Uses the registered kernel with label "v_3"
# for the Foo op.
with g.kernel_label_map({"Foo": ""}):
f_4 = Foo() # Uses the default registered kernel
# for the Foo op.
Args:
op_to_kernel_label_map: A dictionary mapping op type strings to kernel
label strings.
Returns:
A context manager that sets the kernel label to be used for one or more
ops created in that context.
Raises:
TypeError: If op_to_kernel_label_map is not a dictionary mapping
strings to strings.
"""
if not isinstance(op_to_kernel_label_map, dict):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
# The saved_labels dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_labels = {}
# Install the given label
for op_type, label in op_to_kernel_label_map.items():
if not (isinstance(op_type, six.string_types) and
isinstance(label, six.string_types)):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
try:
saved_labels[op_type] = self._op_to_kernel_label_map[op_type]
except KeyError:
pass
self._op_to_kernel_label_map[op_type] = label
try:
yield # The code within the context runs here.
finally:
# Remove the labels set for this context, and restore any saved labels.
for op_type, label in op_to_kernel_label_map.items():
try:
self._op_to_kernel_label_map[op_type] = saved_labels[op_type]
except KeyError:
del self._op_to_kernel_label_map[op_type]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def gradient_override_map(self, op_type_map):
"""EXPERIMENTAL: A context manager for overriding gradient functions.
This context manager can be used to override the gradient function
that will be used for ops within the scope of the context.
For example:
```python
@tf.RegisterGradient("CustomSquare")
def _custom_square_grad(op, grad):
# ...
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
s_1 = tf.square(c) # Uses the default gradient for tf.square.
with g.gradient_override_map({"Square": "CustomSquare"}):
s_2 = tf.square(c)  # Uses _custom_square_grad to compute the
# gradient of s_2.
```
Args:
op_type_map: A dictionary mapping op type strings to alternative op type
strings.
Returns:
A context manager that sets the alternative op type to be used for one
or more ops created in that context.
Raises:
TypeError: If `op_type_map` is not a dictionary mapping strings to
strings.
"""
if not isinstance(op_type_map, dict):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
# The saved_mappings dictionary stores any currently-set mappings that
# will be overridden by this context manager.
saved_mappings = {}
# Install the given override mapping
for op_type, mapped_op_type in op_type_map.items():
if not (isinstance(op_type, six.string_types) and
isinstance(mapped_op_type, six.string_types)):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
try:
saved_mappings[op_type] = self._gradient_override_map[op_type]
except KeyError:
pass
self._gradient_override_map[op_type] = mapped_op_type
try:
yield # The code within the context runs here.
finally:
# Remove the mappings set for this context, and restore any saved mappings.
for op_type, mapped_op_type in op_type_map.items():
try:
self._gradient_override_map[op_type] = saved_mappings[op_type]
except KeyError:
del self._gradient_override_map[op_type]
# pylint: enable=g-doc-return-or-yield
def prevent_feeding(self, tensor):
"""Marks the given `tensor` as unfeedable in this graph."""
self._unfeedable_tensors.add(tensor)
def is_feedable(self, tensor):
"""Returns `True` if and only if `tensor` is feedable."""
return tensor not in self._unfeedable_tensors
def prevent_fetching(self, op):
"""Marks the given `op` as unfetchable in this graph."""
self._unfetchable_ops.add(op)
def is_fetchable(self, tensor_or_op):
"""Returns `True` if and only if `tensor_or_op` is fetchable."""
if isinstance(tensor_or_op, Tensor):
return tensor_or_op.op not in self._unfetchable_ops
else:
return tensor_or_op not in self._unfetchable_ops
def switch_to_thread_local(self):
"""Make device, colocation and dependencies stacks thread-local.
Device, colocation and dependencies stacks are not thread-local by default.
If multiple threads access them, then the state is shared. This means that
one thread may affect the behavior of another thread.
After this method is called, the stacks become thread-local. If multiple
threads access them, then the state is not shared. Each thread uses its own
value; a thread doesn't affect other threads by mutating such a stack.
The initial value for every thread's stack is set to the current value
of the stack when `switch_to_thread_local()` was first called.
"""
if not self._stack_state_is_thread_local:
self._stack_state_is_thread_local = True
@property
def _device_function_stack(self):
if self._stack_state_is_thread_local:
# This may be called from a thread where device_function_stack doesn't yet
# exist.
# pylint: disable=protected-access
if not hasattr(self._thread_local, "_device_function_stack"):
stack_copy_for_this_thread = self._graph_device_function_stack.copy()
self._thread_local._device_function_stack = stack_copy_for_this_thread
return self._thread_local._device_function_stack
# pylint: enable=protected-access
else:
return self._graph_device_function_stack
@property
def _device_functions_outer_to_inner(self):
user_device_specs = self._device_function_stack.peek_objs()
device_functions = [spec.function for spec in user_device_specs]
device_functions_outer_to_inner = list(reversed(device_functions))
return device_functions_outer_to_inner
def _snapshot_device_function_stack_metadata(self):
"""Return device function stack as a list of TraceableObjects.
Returns:
[traceable_stack.TraceableObject, ...] where each TraceableObject's .obj
member is a displayable name for the user's argument to Graph.device, and
the filename and lineno members point to the code location where
Graph.device was called directly or indirectly by the user.
"""
snapshot = []
for obj in self._device_function_stack.peek_traceable_objs():
obj_copy = obj.copy_metadata()
obj_copy.obj = obj.obj.display_name
snapshot.append(obj_copy)
return snapshot
@_device_function_stack.setter
def _device_function_stack(self, device_function_stack):
if self._stack_state_is_thread_local:
# pylint: disable=protected-access
self._thread_local._device_function_stack = device_function_stack
# pylint: enable=protected-access
else:
self._graph_device_function_stack = device_function_stack
@property
def _colocation_stack(self):
"""Return thread-local copy of colocation stack."""
if self._stack_state_is_thread_local:
# This may be called from a thread where colocation_stack doesn't yet
# exist.
# pylint: disable=protected-access
if not hasattr(self._thread_local, "_colocation_stack"):
stack_copy_for_this_thread = self._graph_colocation_stack.copy()
self._thread_local._colocation_stack = stack_copy_for_this_thread
return self._thread_local._colocation_stack
# pylint: enable=protected-access
else:
return self._graph_colocation_stack
def _snapshot_colocation_stack_metadata(self):
"""Return colocation stack metadata as a dictionary."""
return {
traceable_obj.obj.name: traceable_obj.copy_metadata()
for traceable_obj in self._colocation_stack.peek_traceable_objs()
}
@_colocation_stack.setter
def _colocation_stack(self, colocation_stack):
if self._stack_state_is_thread_local:
# pylint: disable=protected-access
self._thread_local._colocation_stack = colocation_stack
# pylint: enable=protected-access
else:
self._graph_colocation_stack = colocation_stack
@property
def _control_dependencies_stack(self):
if self._stack_state_is_thread_local:
# This may be called from a thread where control_dependencies_stack
# doesn't yet exist.
if not hasattr(self._thread_local, "_control_dependencies_stack"):
self._thread_local._control_dependencies_stack = (
self._graph_control_dependencies_stack[:])
return self._thread_local._control_dependencies_stack
else:
return self._graph_control_dependencies_stack
@_control_dependencies_stack.setter
def _control_dependencies_stack(self, control_dependencies):
if self._stack_state_is_thread_local:
self._thread_local._control_dependencies_stack = control_dependencies
else:
self._graph_control_dependencies_stack = control_dependencies
@property
def _distribution_strategy_stack(self):
"""A stack to maintain distribution strategy context for each thread."""
if not hasattr(self._thread_local, "_distribution_strategy_stack"):
self._thread_local._distribution_strategy_stack = [] # pylint: disable=protected-access
return self._thread_local._distribution_strategy_stack # pylint: disable=protected-access
@_distribution_strategy_stack.setter
def _distribution_strategy_stack(self, _distribution_strategy_stack):
self._thread_local._distribution_strategy_stack = ( # pylint: disable=protected-access
_distribution_strategy_stack)
@property
def _global_distribute_strategy_scope(self):
"""For implementing `tf.distribute.set_strategy()`."""
if not hasattr(self._thread_local, "distribute_strategy_scope"):
self._thread_local.distribute_strategy_scope = None
return self._thread_local.distribute_strategy_scope
@_global_distribute_strategy_scope.setter
def _global_distribute_strategy_scope(self, distribute_strategy_scope):
self._thread_local.distribute_strategy_scope = (distribute_strategy_scope)
@property
def _auto_cast_variable_read_dtype(self):
"""The dtype that instances of `AutoCastVariable` will be casted to.
This is None if `AutoCastVariables` should not be casted.
See `AutoCastVariable` for more information.
Returns:
The dtype that instances of `AutoCastVariable` will be casted to.
"""
if not hasattr(self._thread_local, "_auto_cast_variable_read_dtype"):
self._thread_local._auto_cast_variable_read_dtype = None # pylint: disable=protected-access
return self._thread_local._auto_cast_variable_read_dtype # pylint: disable=protected-access
@_auto_cast_variable_read_dtype.setter
def _auto_cast_variable_read_dtype(self, dtype):
if dtype:
dtype = dtypes.as_dtype(dtype)
self._thread_local._auto_cast_variable_read_dtype = dtype # pylint: disable=protected-access
@tf_contextlib.contextmanager
def _enable_auto_casting_variables(self, dtype):
"""Context manager to automatically cast AutoCastVariables.
If an AutoCastVariable `var` is used under this context manager, it will be
cast to `dtype` before being used.
See `AutoCastVariable` for more information.
Args:
dtype: The dtype that AutoCastVariables should be cast to.
Yields:
Nothing.
"""
prev_read_dtype = self._auto_cast_variable_read_dtype
try:
self._auto_cast_variable_read_dtype = dtype
yield
finally:
self._auto_cast_variable_read_dtype = prev_read_dtype
def _mutation_lock(self):
"""Returns a lock to guard code that creates & mutates ops.
See the comment for self._group_lock for more info.
"""
return self._group_lock.group(_MUTATION_LOCK_GROUP)
def _session_run_lock(self):
"""Returns a lock to guard code for Session.run.
See the comment for self._group_lock for more info.
"""
return self._group_lock.group(_SESSION_RUN_LOCK_GROUP)
# TODO(agarwal): currently device directives in an outer eager scope will not
# apply to inner graph mode code. Fix that.
@tf_export(v1=["device"])
def device(device_name_or_function):
"""Wrapper for `Graph.device()` using the default graph.
See `tf.Graph.device` for more details.
Args:
device_name_or_function: The device name or function to use in the context.
Returns:
A context manager that specifies the default device to use for newly
created ops.
Raises:
RuntimeError: If eager execution is enabled and a function is passed in.
"""
if context.executing_eagerly():
if callable(device_name_or_function):
raise RuntimeError(
"tf.device does not support functions when eager execution "
"is enabled.")
return context.device(device_name_or_function)
elif executing_eagerly_outside_functions():
@tf_contextlib.contextmanager
def combined(device_name_or_function):
with get_default_graph().device(device_name_or_function):
if not callable(device_name_or_function):
with context.device(device_name_or_function):
yield
else:
yield
return combined(device_name_or_function)
else:
return get_default_graph().device(device_name_or_function)
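# A minimal usage sketch (hedged; the names `a` and `b` below are illustrative
# and not part of this module). In graph mode, a string pins new ops to a
# device and a callable acts as a device function applied per op:
#
#   with device("/device:GPU:0"):
#     a = tf.constant(1.0)              # placed on GPU:0
#   with device(lambda op: "/device:CPU:0"):
#     b = tf.constant(2.0)              # device chosen per-op by the callable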
@tf_export("device", v1=[])
def device_v2(device_name):
"""Specifies the device for ops created/executed in this context.
`device_name` can be fully specified, as in "/job:worker/task:1/device:cpu:0",
or partially specified, containing only a subset of the "/"-separated
fields. Any fields which are specified override device annotations from outer
scopes. For example:
```python
with tf.device('/job:foo'):
# ops created here have devices with /job:foo
with tf.device('/job:bar/task:0/device:gpu:2'):
# ops created here have the fully specified device above
with tf.device('/device:gpu:1'):
# ops created here have the device '/job:foo/device:gpu:1'
```
Args:
device_name: The device name to use in the context.
Returns:
A context manager that specifies the default device to use for newly
created ops.
Raises:
RuntimeError: If a function is passed in.
"""
if callable(device_name):
raise RuntimeError("tf.device does not support functions.")
return device(device_name)
@tf_export(v1=["container"])
def container(container_name):
"""Wrapper for `Graph.container()` using the default graph.
Args:
container_name: The container string to use in the context.
Returns:
A context manager that specifies the default container to use for newly
created stateful ops.
"""
return get_default_graph().container(container_name)
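# A minimal usage sketch (hedged; `v` is illustrative). Stateful ops created
# inside the scope are placed in the named resource container:
#
#   with container("experiment0"):
#     v = tf.compat.v1.get_variable("v", shape=[])
#   # Resources in "experiment0" can later be released with
#   # tf.compat.v1.Session.reset(target, containers=["experiment0"]).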
def _colocate_with_for_gradient(op, gradient_uid, ignore_existing=False):
if context.executing_eagerly():
if op is not None:
if not hasattr(op, "device"):
op = internal_convert_to_tensor_or_indexed_slices(op)
return device(op.device)
else:
return NullContextmanager()
else:
default_graph = get_default_graph()
if isinstance(op, EagerTensor):
if default_graph.building_function:
return default_graph.device(op.device)
else:
raise ValueError("Encountered an Eager-defined Tensor during graph "
"construction, but a function was not being built.")
return default_graph._colocate_with_for_gradient(
op, gradient_uid=gradient_uid, ignore_existing=ignore_existing)
# Internal interface to colocate_with. colocate_with has been deprecated from
# public API. There are still a few internal uses of colocate_with. Add internal
# only API for those uses to avoid deprecation warning.
def colocate_with(op, ignore_existing=False):
return _colocate_with_for_gradient(op, None, ignore_existing=ignore_existing)
@deprecation.deprecated(
date=None, instructions="Colocations handled automatically by placer.")
@tf_export(v1=["colocate_with"])
def _colocate_with(op, ignore_existing=False):
return colocate_with(op, ignore_existing)
@tf_export("control_dependencies")
def control_dependencies(control_inputs):
"""Wrapper for `Graph.control_dependencies()` using the default graph.
See `tf.Graph.control_dependencies`
for more details.
When eager execution is enabled, any callable object in the `control_inputs`
list will be called.
Args:
control_inputs: A list of `Operation` or `Tensor` objects which must be
executed or computed before running the operations defined in the context.
Can also be `None` to clear the control dependencies. If eager execution
is enabled, any callable object in the `control_inputs` list will be
called.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
"""
if context.executing_eagerly():
if control_inputs:
# Execute any pending callables.
for control in control_inputs:
if callable(control):
control()
return NullContextmanager()
else:
return get_default_graph().control_dependencies(control_inputs)
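# A minimal usage sketch (hedged; `assign_op` and `some_tensor` are
# illustrative). In graph mode this defers to Graph.control_dependencies;
# with eager execution enabled, callables in `control_inputs` are invoked
# immediately and the block then runs normally:
#
#   with control_dependencies([assign_op]):
#     x = tf.identity(some_tensor)   # runs only after `assign_op` in a session
#
#   with control_dependencies([lambda: print("dependency ran")]):  # eager mode
#     y = tf.constant(1.0)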
class _DefaultStack(threading.local):
"""A thread-local stack of objects for providing implicit defaults."""
def __init__(self):
super(_DefaultStack, self).__init__()
self._enforce_nesting = True
self.stack = []
def get_default(self):
return self.stack[-1] if len(self.stack) >= 1 else None
def reset(self):
self.stack = []
def is_cleared(self):
return not self.stack
@property
def enforce_nesting(self):
return self._enforce_nesting
@enforce_nesting.setter
def enforce_nesting(self, value):
self._enforce_nesting = value
@tf_contextlib.contextmanager
def get_controller(self, default):
"""A context manager for manipulating a default stack."""
self.stack.append(default)
try:
yield default
finally:
# stack may be empty if reset() was called
if self.stack:
if self._enforce_nesting:
if self.stack[-1] is not default:
raise AssertionError(
"Nesting violated for default stack of %s objects" %
type(default))
self.stack.pop()
else:
self.stack.remove(default)
_default_session_stack = _DefaultStack() # pylint: disable=protected-access
def default_session(session):
"""Python "with" handler for defining a default session.
This function provides a means of registering a session for handling
Tensor.eval() and Operation.run() calls. It is primarily intended for use
by session.Session, but can be used with any object that implements
the Session.run() interface.
Use with the "with" keyword to specify that Tensor.eval() and Operation.run()
invocations within the scope of a block should be executed by a particular
session.
The default session applies to the current thread only, so it is always
possible to inspect the call stack and determine the scope of a default
session. If you create a new thread, and wish to use the default session
in that thread, you must explicitly add a "with ops.default_session(sess):"
block in that thread's function.
Example:
The following code examples are equivalent:
# 1. Using the Session object directly:
sess = ...
c = tf.constant(5.0)
sess.run(c)
# 2. Using default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
result = c.eval()
# 3. Overriding default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
with ops.default_session(...):
c.eval(session=sess)
Args:
session: The session to be installed as the default session.
Returns:
A context manager for the default session.
"""
return _default_session_stack.get_controller(session)
@tf_export(v1=["get_default_session"])
def get_default_session():
"""Returns the default session for the current thread.
The returned `Session` will be the innermost session on which a
`Session` or `Session.as_default()` context has been entered.
NOTE: The default session is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
Returns:
The default `Session` being used in the current thread.
"""
return _default_session_stack.get_default()
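# A minimal usage sketch (hedged; graph mode only):
#
#   sess = tf.compat.v1.Session()
#   with sess.as_default():
#     assert get_default_session() is sess
#     # Tensor.eval() and Operation.run() now use `sess` implicitly.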
def _eval_using_default_session(tensors, feed_dict, graph, session=None):
"""Uses the default session to evaluate one or more tensors.
Args:
tensors: A single Tensor, or a list of Tensor objects.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which the tensors are defined.
session: (Optional) A different session to use to evaluate "tensors".
Returns:
Either a single numpy ndarray if "tensors" is a single tensor; or a list
of numpy ndarrays that each correspond to the respective element in
"tensors".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot evaluate tensor using `eval()`: No default "
"session is registered. Use `with "
"sess.as_default()` or pass an explicit session to "
"`eval(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph. Pass an explicit session to "
"`eval(session=sess)`.")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph.")
return session.run(tensors, feed_dict)
def _run_using_default_session(operation, feed_dict, graph, session=None):
"""Uses the default session to run "operation".
Args:
operation: The Operation to be run.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which "operation" is defined.
session: (Optional) A different session to use to run "operation".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot execute operation using `run()`: No default "
"session is registered. Use `with "
"sess.as_default():` or pass an explicit session to "
"`run(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to execute operation: "
"the operation's graph is different from the "
"session's graph. Pass an explicit session to "
"run(session=sess).")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to execute operation: "
"the operation's graph is different from the session's "
"graph.")
session.run(operation, feed_dict)
class _DefaultGraphStack(_DefaultStack): # pylint: disable=protected-access
"""A thread-local stack of objects for providing an implicit default graph."""
def __init__(self):
super(_DefaultGraphStack, self).__init__()
self._global_default_graph = None
def get_default(self):
"""Override that returns a global default if the stack is empty."""
ret = super(_DefaultGraphStack, self).get_default()
if ret is None:
ret = self._GetGlobalDefaultGraph()
return ret
def _GetGlobalDefaultGraph(self):
if self._global_default_graph is None:
# TODO(mrry): Perhaps log that the default graph is being used, or
# provide some other feedback to prevent confusion when a mixture of
# the global default graph and an explicit graph are combined in the
# same process.
self._global_default_graph = Graph()
return self._global_default_graph
def reset(self):
super(_DefaultGraphStack, self).reset()
self._global_default_graph = None
@tf_contextlib.contextmanager
def get_controller(self, default):
context.context().context_switches.push(default.building_function,
default.as_default,
default._device_function_stack)
try:
with super(_DefaultGraphStack,
self).get_controller(default) as g, context.graph_mode():
yield g
finally:
# If an exception is raised here it may be hiding a related exception in
# the try-block (just above).
context.context().context_switches.pop()
_default_graph_stack = _DefaultGraphStack()
# Shared helper used in init_scope and executing_eagerly_outside_functions
# to obtain the outermost context that is not building a function, and the
# innermost non-empty device stack.
def _get_outer_context_and_inner_device_stack():
"""Get the outermost context not building a function."""
default_graph = get_default_graph()
outer_context = None
innermost_nonempty_device_stack = default_graph._device_function_stack # pylint: disable=protected-access
if not _default_graph_stack.stack:
# If the default graph stack is empty, then we cannot be building a
# function. Install the global graph (which, in this case, is also the
# default graph) as the outer context.
if default_graph.building_function:
raise RuntimeError("The global graph is building a function.")
outer_context = default_graph.as_default
else:
# Find a context that is not building a function.
for stack_entry in reversed(context.context().context_switches.stack):
if not innermost_nonempty_device_stack:
innermost_nonempty_device_stack = stack_entry.device_stack
if not stack_entry.is_building_function:
outer_context = stack_entry.enter_context_fn
break
if outer_context is None:
# As a last resort, obtain the global default graph; this graph doesn't
# necessarily live on the graph stack (and hence it doesn't necessarily
# live on the context stack), but it is stored in the graph stack's
# encapsulating object.
outer_context = _default_graph_stack._GetGlobalDefaultGraph().as_default # pylint: disable=protected-access
if outer_context is None:
# Sanity check; this shouldn't be triggered.
raise RuntimeError("All graphs are building functions, and no "
"eager context was previously active.")
return outer_context, innermost_nonempty_device_stack
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_export("init_scope")
@tf_contextlib.contextmanager
def init_scope():
"""A context manager that lifts ops out of control-flow scopes and function-building graphs.
There is often a need to lift variable initialization ops out of control-flow
scopes, function-building graphs, and gradient tapes. Entering an
`init_scope` is a mechanism for satisfying these desiderata. In particular,
entering an `init_scope` has three effects:
(1) All control dependencies are cleared the moment the scope is entered;
this is equivalent to entering the context manager returned from
`control_dependencies(None)`, which has the side-effect of exiting
control-flow scopes like `tf.cond` and `tf.while_loop`.
(2) All operations that are created while the scope is active are lifted
into the lowest context on the `context_stack` that is not building a
graph function. Here, a context is defined as either a graph or an eager
context. Every context switch, i.e., every installation of a graph as
the default graph and every switch into eager mode, is logged in a
thread-local stack called `context_switches`; the log entry for a
context switch is popped from the stack when the context is exited.
Entering an `init_scope` is equivalent to crawling up
`context_switches`, finding the first context that is not building a
graph function, and entering it. A caveat is that if graph mode is
enabled but the default graph stack is empty, then entering an
`init_scope` will simply install a fresh graph as the default one.
(3) The gradient tape is paused while the scope is active.
When eager execution is enabled, code inside an init_scope block runs with
eager execution enabled even when defining graph functions via
tf.contrib.eager.defun. For example:
```python
tf.compat.v1.enable_eager_execution()
@tf.contrib.eager.defun
def func():
# A defun-decorated function constructs TensorFlow graphs,
# it does not execute eagerly.
assert not tf.executing_eagerly()
with tf.init_scope():
# Initialization runs with eager execution enabled
assert tf.executing_eagerly()
```
Raises:
RuntimeError: if graph state is incompatible with this initialization.
"""
# pylint: enable=g-doc-return-or-yield,line-too-long
if context.executing_eagerly():
# Fastpath.
with tape.stop_recording():
yield
else:
# Retrieve the active name scope: entering an `init_scope` preserves
# the name scope of the current context.
scope = get_default_graph().get_name_scope()
if scope and scope[-1] != "/":
# Names that end with trailing slashes are treated by `name_scope` as
# absolute.
scope = scope + "/"
outer_context, innermost_nonempty_device_stack = (
_get_outer_context_and_inner_device_stack())
outer_graph = None
outer_device_stack = None
try:
with outer_context(), name_scope(scope), control_dependencies(
None), tape.stop_recording():
context_manager = NullContextmanager
context_manager_input = None
if not context.executing_eagerly():
# The device stack is preserved when lifting into a graph. Eager
# execution doesn't implement device stacks and in particular it
# doesn't support device functions, so in general it's not possible
# to do the same when lifting into the eager context.
outer_graph = get_default_graph()
outer_device_stack = outer_graph._device_function_stack # pylint: disable=protected-access
outer_graph._device_function_stack = innermost_nonempty_device_stack # pylint: disable=protected-access
elif innermost_nonempty_device_stack is not None:
for device_spec in innermost_nonempty_device_stack.peek_objs():
if device_spec.function is None:
break
if device_spec.raw_string:
context_manager = context.device
context_manager_input = device_spec.raw_string
break
# It is currently not possible to have a device function in V2,
# but in V1 we are unable to apply device functions in eager mode.
# This means that we will silently skip some of the entries on the
# device stack in V1 + eager mode.
with context_manager(context_manager_input):
yield
finally:
# If an exception is raised here it may be hiding a related exception in
# try-block (just above).
if outer_graph is not None:
outer_graph._device_function_stack = outer_device_stack # pylint: disable=protected-access
def executing_eagerly_outside_functions():
"""Returns True if executing eagerly, even if inside a graph function."""
if context.executing_eagerly():
return True
else:
outer_context, _ = _get_outer_context_and_inner_device_stack()
with outer_context():
return context.executing_eagerly()
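# A minimal usage sketch (hedged). Inside a graph function the immediate
# context is not eager, but this helper reports on the surrounding context,
# so under TF2 defaults it returns True even inside tf.function:
#
#   @tf.function
#   def f():
#     assert not context.executing_eagerly()
#     assert executing_eagerly_outside_functions()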
def inside_function():
return get_default_graph().building_function
@tf_export(v1=["enable_eager_execution"])
def enable_eager_execution(config=None, device_policy=None,
execution_mode=None):
"""Enables eager execution for the lifetime of this program.
Eager execution provides an imperative interface to TensorFlow. With eager
execution enabled, TensorFlow functions execute operations immediately (as
opposed to adding to a graph to be executed later in a `tf.compat.v1.Session`)
and
return concrete values (as opposed to symbolic references to a node in a
computational graph).
For example:
```python
tf.compat.v1.enable_eager_execution()
# After eager execution is enabled, operations are executed as they are
# defined and Tensor objects hold concrete values, which can be accessed as
# numpy.ndarray`s through the numpy() method.
assert tf.multiply(6, 7).numpy() == 42
```
Eager execution cannot be enabled after TensorFlow APIs have been used to
create or execute graphs. It is typically recommended to invoke this function
at program startup and not in a library (as most libraries should be usable
both with and without eager execution).
Args:
config: (Optional.) A `tf.compat.v1.ConfigProto` to use to configure the
environment in which operations are executed. Note that
`tf.compat.v1.ConfigProto` is also used to configure graph execution (via
`tf.compat.v1.Session`) and many options within `tf.compat.v1.ConfigProto`
are not implemented (or are irrelevant) when eager execution is enabled.
device_policy: (Optional.) Policy controlling how operations requiring
inputs on a specific device (e.g., GPU 0) handle inputs on a different
device (e.g. GPU 1 or CPU). When set to None, an appropriate value will
be picked automatically. The value picked may change between TensorFlow
releases.
Valid values:
- tf.contrib.eager.DEVICE_PLACEMENT_EXPLICIT: raises an error if the
placement is not correct.
- tf.contrib.eager.DEVICE_PLACEMENT_WARN: copies the tensors which are not
on the right device but logs a warning.
- tf.contrib.eager.DEVICE_PLACEMENT_SILENT: silently copies the tensors.
Note that this may hide performance problems as there is no notification
provided when operations are blocked on the tensor being copied between
devices.
- tf.contrib.eager.DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies
int32 tensors, raising errors on the other ones.
execution_mode: (Optional.) Policy controlling how dispatched operations are
actually executed. When set to None, an appropriate value will be picked
automatically. The value picked may change between TensorFlow releases.
Valid values:
- tf.contrib.eager.SYNC: executes each operation synchronously.
- tf.contrib.eager.ASYNC: executes each operation asynchronously. These
operations may return "non-ready" handles.
Raises:
ValueError: If eager execution is enabled after creating/executing a
TensorFlow graph, or if options provided conflict with a previous call
to this function.
"""
_api_usage_gauge.get_cell().set(True)
if context.default_execution_mode != context.EAGER_MODE:
return enable_eager_execution_internal(
config=config,
device_policy=device_policy,
execution_mode=execution_mode,
server_def=None)
@tf_export(v1=["disable_eager_execution"])
def disable_eager_execution():
"""Disables eager execution.
This function can only be called before any Graphs, Ops, or Tensors have been
created. It can be used at the beginning of the program for complex migration
projects from TensorFlow 1.x to 2.x.
"""
_api_usage_gauge.get_cell().set(False)
context.default_execution_mode = context.GRAPH_MODE
c = context.context_safe()
if c is not None:
c._thread_local_data.is_eager = False # pylint: disable=protected-access
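# A minimal usage sketch (hedged). Call this before any graph state exists,
# typically at the top of the program:
#
#   tf.compat.v1.disable_eager_execution()
#   a = tf.constant(1.0)               # now a symbolic graph tensor
#   with tf.compat.v1.Session() as sess:
#     print(sess.run(a))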
def enable_eager_execution_internal(config=None,
device_policy=None,
execution_mode=None,
server_def=None):
"""Enables eager execution for the lifetime of this program.
Most of the doc string for enable_eager_execution is relevant here as well.
Args:
config: See enable_eager_execution doc string
device_policy: See enable_eager_execution doc string
execution_mode: See enable_eager_execution doc string
server_def: (Optional.) A tensorflow::ServerDef proto. Enables execution on
remote devices. GrpcServers need to be started by creating an identical
server_def to this, and setting the appropriate task_indexes, so that the
servers can communicate. It will then be possible to execute operations on
remote devices.
Raises:
TypeError: If `config` is provided but is not a `tf.compat.v1.ConfigProto`.
ValueError: If `device_policy` or `execution_mode` is invalid, or if eager
execution is enabled after graph state has already been created.
"""
if config is not None and not isinstance(config, config_pb2.ConfigProto):
raise TypeError("config must be a tf.ConfigProto, but got %s" %
type(config))
if device_policy not in (None, context.DEVICE_PLACEMENT_EXPLICIT,
context.DEVICE_PLACEMENT_WARN,
context.DEVICE_PLACEMENT_SILENT,
context.DEVICE_PLACEMENT_SILENT_FOR_INT32):
raise ValueError(
"device_policy must be one of None, tf.contrib.eager.DEVICE_PLACEMENT_*"
)
if execution_mode not in (None, context.SYNC, context.ASYNC):
raise ValueError(
"execution_mode must be one of None, tf.contrib.eager.SYNC, "
"tf.contrib.eager.ASYNC")
if context.default_execution_mode == context.GRAPH_MODE:
graph_mode_has_been_used = (
_default_graph_stack._global_default_graph is not None) # pylint: disable=protected-access
if graph_mode_has_been_used:
raise ValueError(
"tf.enable_eager_execution must be called at program startup.")
context.default_execution_mode = context.EAGER_MODE
# pylint: disable=protected-access
with context._context_lock:
if context._context is None:
context._set_context_locked(context.Context(
config=config,
device_policy=device_policy,
execution_mode=execution_mode,
server_def=server_def))
elif ((config is not None and config is not context._context._config) or
(device_policy is not None and
device_policy is not context._context._device_policy) or
(execution_mode is not None and
execution_mode is not context._context._execution_mode)):
raise ValueError(
"Trying to change the options of an active eager"
" execution. Context config: %s, specified config:"
" %s. Context device policy: %s, specified device"
" policy: %s. Context execution mode: %s, "
" specified execution mode %s." %
(context._context._config, config, context._context._device_policy,
device_policy, context._context._execution_mode, execution_mode))
else:
# We already created everything, so update the thread local data.
context._context._thread_local_data.is_eager = True
# Monkey patch to get rid of an unnecessary conditional since the context is
# now initialized.
context.context = context.context_safe
def eager_run(main=None, argv=None):
"""Runs the program with an optional main function and argv list.
The program will run with eager execution enabled.
Example:
```python
import tensorflow as tf
# Import subject to future changes:
from tensorflow.contrib.eager.python import tfe
def main(_):
u = tf.constant(6.0)
v = tf.constant(7.0)
print(u * v)
if __name__ == "__main__":
tfe.run()
```
Args:
main: the main function to run.
argv: the arguments to pass to it.
"""
enable_eager_execution()
app.run(main, argv)
@tf_export(v1=["reset_default_graph"])
def reset_default_graph():
"""Clears the default graph stack and resets the global default graph.
NOTE: The default graph is a property of the current thread. This
function applies only to the current thread. Calling this function while
a `tf.compat.v1.Session` or `tf.compat.v1.InteractiveSession` is active will
result in undefined
behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects
after calling this function will result in undefined behavior.
Raises:
AssertionError: If this function is called within a nested graph.
"""
if not _default_graph_stack.is_cleared():
raise AssertionError("Do not use tf.reset_default_graph() to clear "
"nested graphs. If you need a cleared graph, "
"exit the nesting and create a new graph.")
_default_graph_stack.reset()
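# A minimal usage sketch (hedged; graph mode, with no active sessions or
# previously created ops still in use):
#
#   _ = tf.constant(1.0)               # adds an op to the global default graph
#   tf.compat.v1.reset_default_graph()
#   assert not tf.compat.v1.get_default_graph().get_operations()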
@tf_export(v1=["get_default_graph"])
def get_default_graph():
"""Returns the default graph for the current thread.
The returned graph will be the innermost graph on which a
`Graph.as_default()` context has been entered, or a global default
graph if none has been explicitly created.
NOTE: The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
Returns:
The default `Graph` being used in the current thread.
"""
return _default_graph_stack.get_default()
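# A minimal usage sketch (hedged):
#
#   g = Graph()
#   with g.as_default():
#     assert get_default_graph() is g
#   # Outside the block, the previous (or global) default graph is restored.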
def has_default_graph():
"""Returns True if there is a default graph."""
return len(_default_graph_stack.stack) >= 1
def get_name_scope():
"""Returns the current name scope in the default_graph.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope.
"""
if context.executing_eagerly():
return context.context().scope_name.rstrip("/")
return get_default_graph().get_name_scope()
def _assert_same_graph(original_item, item):
"""Fail if the 2 items are from different graphs.
Args:
original_item: Original item to check against.
item: Item to check.
Raises:
ValueError: if graphs do not match.
"""
if original_item.graph is not item.graph:
raise ValueError("%s must be from the same graph as %s." %
(item, original_item))
def _get_graph_from_inputs(op_input_list, graph=None):
"""Returns the appropriate graph to use for the given inputs.
This library method provides a consistent algorithm for choosing the graph
in which an Operation should be constructed:
1. If the default graph is being used to construct a function, we
use the default graph.
2. If the "graph" is specified explicitly, we validate that all of the inputs
in "op_input_list" are compatible with that graph.
3. Otherwise, we attempt to select a graph from the first Operation-
or Tensor-valued input in "op_input_list", and validate that all other
such inputs are in the same graph.
4. If the graph was not specified and it could not be inferred from
"op_input_list", we attempt to use the default graph.
Args:
op_input_list: A list of inputs to an operation, which may include `Tensor`,
`Operation`, and other objects that may be converted to a graph element.
graph: (Optional) The explicit graph to use.
Raises:
TypeError: If op_input_list is not a list or tuple, or if graph is not a
Graph.
ValueError: If a graph is explicitly passed and not all inputs are from it,
or if the inputs are from multiple graphs, or we could not find a graph
and there was no default graph.
Returns:
The appropriate graph to use for the given inputs.
"""
current_default_graph = get_default_graph()
if current_default_graph.building_function:
return current_default_graph
op_input_list = tuple(op_input_list) # Handle generators correctly
if graph and not isinstance(graph, Graph):
raise TypeError("Input graph needs to be a Graph: %s" % graph)
# 1. We validate that all of the inputs are from the same graph. This is
# either the supplied graph parameter, or the first one selected from one
# the graph-element-valued inputs. In the latter case, we hold onto
# that input in original_graph_element so we can provide a more
# informative error if a mismatch is found.
original_graph_element = None
for op_input in op_input_list:
# Determine if this is a valid graph_element.
# TODO(josh11b): Note that we exclude subclasses of Tensor. Need to clean this
# up.
graph_element = None
if (isinstance(op_input, (Operation, _TensorLike)) and
((not isinstance(op_input, Tensor)) or type(op_input) == Tensor)): # pylint: disable=unidiomatic-typecheck
graph_element = op_input
else:
graph_element = _as_graph_element(op_input)
if graph_element is not None:
if not graph:
original_graph_element = graph_element
graph = graph_element.graph
elif original_graph_element is not None:
_assert_same_graph(original_graph_element, graph_element)
elif graph_element.graph is not graph:
raise ValueError("%s is not from the passed-in graph." % graph_element)
# 2. If all else fails, we use the default graph, which is always there.
return graph or current_default_graph
@tf_export(v1=["GraphKeys"])
class GraphKeys(object):
"""Standard names to use for graph collections.
The standard library uses various well-known names to collect and
retrieve values associated with a graph. For example, the
`tf.Optimizer` subclasses default to optimizing the variables
collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is
specified, but it is also possible to pass an explicit list of
variables.
The following standard keys are defined:
* `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared
across distributed environment (model variables are subset of these). See
`tf.compat.v1.global_variables`
for more details.
Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,
and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.
* `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each
machine. Usually used for temporary variables, like counters.
Note: use `tf.contrib.framework.local_variable` to add to this collection.
* `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the
model for inference (feed forward). Note: use
`tf.contrib.framework.model_variable` to add to this collection.
* `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will
be trained by an optimizer. See
`tf.compat.v1.trainable_variables`
for more details.
* `SUMMARIES`: the summary `Tensor` objects that have been created in the
graph. See
`tf.compat.v1.summary.merge_all`
for more details.
* `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to
produce input for a computation. See
`tf.compat.v1.train.start_queue_runners`
for more details.
* `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also
keep moving averages. See
`tf.compat.v1.moving_average_variables`
for more details.
* `REGULARIZATION_LOSSES`: regularization losses collected during graph
construction.
The following standard keys are _defined_, but their collections are **not**
automatically populated as many of the others are:
* `WEIGHTS`
* `BIASES`
* `ACTIVATIONS`
"""
# Key to collect Variable objects that are global (shared across machines).
# Default collection for all variables, except local ones.
GLOBAL_VARIABLES = "variables"
# Key to collect local variables that are local to the machine and are not
# saved/restored.
LOCAL_VARIABLES = "local_variables"
# Key to collect local variables which are used to accumulate internal state
# to be used in tf.metrics.*.
METRIC_VARIABLES = "metric_variables"
# Key to collect model variables defined by layers.
MODEL_VARIABLES = "model_variables"
# Key to collect Variable objects that will be trained by the
# optimizers.
TRAINABLE_VARIABLES = "trainable_variables"
# Key to collect summaries.
SUMMARIES = "summaries"
# Key to collect QueueRunners.
QUEUE_RUNNERS = "queue_runners"
# Key to collect table initializers.
TABLE_INITIALIZERS = "table_initializer"
# Key to collect asset filepaths. An asset represents an external resource
# like a vocabulary file.
ASSET_FILEPATHS = "asset_filepaths"
# Key to collect Variable objects that keep moving averages.
MOVING_AVERAGE_VARIABLES = "moving_average_variables"
# Key to collect regularization losses at graph construction.
REGULARIZATION_LOSSES = "regularization_losses"
# Key to collect concatenated sharded variables.
CONCATENATED_VARIABLES = "concatenated_variables"
# Key to collect savers.
SAVERS = "savers"
# Key to collect weights
WEIGHTS = "weights"
# Key to collect biases
BIASES = "biases"
# Key to collect activations
ACTIVATIONS = "activations"
# Key to collect update_ops
UPDATE_OPS = "update_ops"
# Key to collect losses
LOSSES = "losses"
# Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing.
SAVEABLE_OBJECTS = "saveable_objects"
# Key to collect all shared resources used by the graph which need to be
# initialized once per cluster.
RESOURCES = "resources"
# Key to collect all shared resources used in this graph which need to be
# initialized once per session.
LOCAL_RESOURCES = "local_resources"
# Trainable resource-style variables.
TRAINABLE_RESOURCE_VARIABLES = "trainable_resource_variables"
# Key to indicate various ops.
INIT_OP = "init_op"
LOCAL_INIT_OP = "local_init_op"
READY_OP = "ready_op"
READY_FOR_LOCAL_INIT_OP = "ready_for_local_init_op"
SUMMARY_OP = "summary_op"
GLOBAL_STEP = "global_step"
# Used to count the number of evaluations performed during a single evaluation
# run.
EVAL_STEP = "eval_step"
TRAIN_OP = "train_op"
# Key for control flow context.
COND_CONTEXT = "cond_context"
WHILE_CONTEXT = "while_context"
# Used to store v2 summary names.
_SUMMARY_COLLECTION = "_SUMMARY_V2"
# List of all collections that keep track of variables.
_VARIABLE_COLLECTIONS = [
GLOBAL_VARIABLES,
LOCAL_VARIABLES,
METRIC_VARIABLES,
MODEL_VARIABLES,
TRAINABLE_VARIABLES,
MOVING_AVERAGE_VARIABLES,
CONCATENATED_VARIABLES,
TRAINABLE_RESOURCE_VARIABLES,
]
# Key for streaming model ports.
# NOTE(yuanbyu): internal and experimental.
_STREAMING_MODEL_PORTS = "streaming_model_ports"
@decorator_utils.classproperty
@deprecation.deprecated(None, "Use `tf.GraphKeys.GLOBAL_VARIABLES` instead.")
def VARIABLES(cls): # pylint: disable=no-self-argument
return cls.GLOBAL_VARIABLES
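# A minimal usage sketch for the collection keys above (hedged; `loss` is
# illustrative):
#
#   tf.compat.v1.add_to_collection(GraphKeys.LOSSES, loss)
#   losses = tf.compat.v1.get_collection(GraphKeys.LOSSES)
#   train_vars = tf.compat.v1.get_collection(GraphKeys.TRAINABLE_VARIABLES)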
def dismantle_graph(graph):
"""Cleans up reference cycles from a `Graph`.
Helpful for making sure the garbage collector doesn't need to run after a
temporary `Graph` is no longer needed.
Args:
graph: A `Graph` object to destroy. Neither it nor any of its ops are usable
after this function runs.
"""
memory.dismantle_ordered_dict(graph._functions) # pylint: disable=protected-access
# Now clean up Operation<->Graph reference cycles by clearing all of the
# attributes for the Graph and its ops.
graph_operations = graph.get_operations()
for op in graph_operations:
op.__dict__ = {}
graph.__dict__ = {}
@tf_export(v1=["add_to_collection"])
def add_to_collection(name, value):
"""Wrapper for `Graph.add_to_collection()` using the default graph.
See `tf.Graph.add_to_collection`
for more details.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collection. @compatibility(eager)
Collections are only supported in eager when variables are created inside
an EagerVariableStore (e.g. as part of a layer or template).
@end_compatibility
"""
get_default_graph().add_to_collection(name, value)
@tf_export(v1=["add_to_collections"])
def add_to_collections(names, value):
"""Wrapper for `Graph.add_to_collections()` using the default graph.
See `tf.Graph.add_to_collections`
for more details.
Args:
names: The key for the collections. The `GraphKeys` class contains many
standard names for collections.
value: The value to add to the collections. @compatibility(eager)
Collections are only supported in eager when variables are created inside
an EagerVariableStore (e.g. as part of a layer or template).
@end_compatibility
"""
get_default_graph().add_to_collections(names, value)
@tf_export(v1=["get_collection_ref"])
def get_collection_ref(key):
"""Wrapper for `Graph.get_collection_ref()` using the default graph.
See `tf.Graph.get_collection_ref`
for more details.
Args:
key: The key for the collection. For example, the `GraphKeys` class contains
many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection. Note that this returns
the collection list itself, which can be modified in place to change the
collection.
@compatibility(eager)
Collections are not supported when eager execution is enabled.
@end_compatibility
"""
return get_default_graph().get_collection_ref(key)
@tf_export(v1=["get_collection"])
def get_collection(key, scope=None):
"""Wrapper for `Graph.get_collection()` using the default graph.
See `tf.Graph.get_collection`
for more details.
Args:
key: The key for the collection. For example, the `GraphKeys` class contains
many standard names for collections.
scope: (Optional.) If supplied, the resulting list is filtered to include
only items whose `name` attribute matches using `re.match`. Items without
a `name` attribute are never returned if a scope is supplied and the
choice or `re.match` means that a `scope` without special tokens filters
by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
@compatibility(eager)
Collections are not supported when eager execution is enabled.
@end_compatibility
"""
return get_default_graph().get_collection(key, scope)
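# A minimal usage sketch of scope filtering (hedged; the variable names are
# illustrative). Because `scope` is matched with `re.match`, a plain string
# filters by name prefix:
#
#   with tf.compat.v1.variable_scope("encoder"):
#     _ = tf.compat.v1.get_variable("w", shape=[2, 2])
#   encoder_vars = get_collection(GraphKeys.TRAINABLE_VARIABLES, scope="encoder")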
def get_all_collection_keys():
"""Returns a list of collections used in the default graph."""
return get_default_graph().get_all_collection_keys()
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["name_scope"])
class name_scope(object): # pylint: disable=invalid-name
"""A context manager for use when defining a Python op.
This context manager validates that the given `values` are from the
same graph, makes that graph the default graph, and pushes a
name scope in that graph (see
`tf.Graph.name_scope`
for more details on that).
For example, to define a new Python op called `my_op`:
```python
def my_op(a, b, c, name=None):
with tf.name_scope(name, "MyOp", [a, b, c]) as scope:
a = tf.convert_to_tensor(a, name="a")
b = tf.convert_to_tensor(b, name="b")
c = tf.convert_to_tensor(c, name="c")
# Define some computation that uses `a`, `b`, and `c`.
return foo_op(..., name=scope)
```
"""
@property
def name(self):
return self._name
def __init__(self, name, default_name=None, values=None):
"""Initialize the context manager.
Args:
name: The name argument that is passed to the op function.
default_name: The default name to use if the `name` argument is `None`.
values: The list of `Tensor` arguments that are passed to the op function.
Raises:
TypeError: if `default_name` is passed in but not a string.
"""
if not (default_name is None or isinstance(default_name, six.string_types)):
raise TypeError(
"`default_name` type (%s) is not a string type. You likely meant to "
"pass this into the `values` kwarg." % type(default_name))
self._name = default_name if name is None else name
self._default_name = default_name
self._values = values
self._ctx = context.context()
self._in_eager_mode = self._ctx.executing_eagerly()
self._has_symbolic_input_in_eager = False
if self._values and self._in_eager_mode:
# The presence of a graph tensor in `self._values` overrides the context.
for value in self._values:
if hasattr(value, "graph"):
self._has_symbolic_input_in_eager = True
self._name_scope = value.graph.name_scope(self._name)
def __enter__(self):
"""Start the scope block.
Returns:
The scope name.
Raises:
ValueError: if neither `name` nor `default_name` is provided
but `values` are.
"""
if self._has_symbolic_input_in_eager:
return self._name_scope.__enter__()
if self._in_eager_mode:
scope_name, self._old_name = enter_eager_name_scope(self._ctx, self._name)
return scope_name
else:
if self._name is None and self._values is not None:
# We only raise an error if values is not None (provided) because
# currently tf.name_scope(None) (values=None then) is sometimes used as
# an idiom to reset to top scope.
raise ValueError(
"At least one of name (%s) and default_name (%s) must be provided."
% (self._name, self._default_name))
g = get_default_graph()
if self._values and not g.building_function:
# Specialize based on the knowledge that `_get_graph_from_inputs()`
# ignores `inputs` when building a function.
g_from_inputs = _get_graph_from_inputs(self._values)
if g_from_inputs is not g:
g = g_from_inputs
self._g_manager = g.as_default()
self._g_manager.__enter__()
else:
self._g_manager = None
else:
self._g_manager = None
try:
self._name_scope = g.name_scope(self._name)
return self._name_scope.__enter__()
except:
if self._g_manager is not None:
self._g_manager.__exit__(*sys.exc_info())
raise
def __exit__(self, type_arg, value_arg, traceback_arg):
if self._has_symbolic_input_in_eager:
self._name_scope.__exit__(type_arg, value_arg, traceback_arg)
elif self._in_eager_mode:
self._ctx.scope_name = self._old_name
else:
self._name_scope.__exit__(type_arg, value_arg, traceback_arg)
if self._g_manager is not None:
self._g_manager.__exit__(type_arg, value_arg, traceback_arg)
return False # False values do not suppress exceptions
def enter_eager_name_scope(ctx, name):
"""Updates the eager context to enter the given name scope."""
old_name = ctx.scope_name
if not name:
scope_name = ""
else:
if name.endswith("/"):
# A trailing slash breaks out of nested name scopes, indicating a
# fully specified scope name, for compatibility with Graph.name_scope.
scope_name = name
else:
scope_name = name + "/"
if old_name:
scope_name = old_name + scope_name
ctx.scope_name = scope_name
return scope_name, old_name
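# A small sketch of how eager scope names compose (editorial annotation; uses a
# stand-in object with a `scope_name` attribute rather than a real eager context).
def _example_eager_scope_composition():
  class _FakeContext(object):
    scope_name = "outer/"
  ctx = _FakeContext()
  scope_name, old_name = enter_eager_name_scope(ctx, "inner")
  return scope_name, old_name  # ("outer/inner/", "outer/")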
@tf_export("name_scope", v1=[])
class name_scope_v2(name_scope):
"""A context manager for use when defining a Python op.
This context manager pushes a name scope, which will make the name of all
operations added within it have a prefix.
For example, to define a new Python op called `my_op`:
```python
def my_op(a, b, c, name=None):
with tf.name_scope("MyOp") as scope:
a = tf.convert_to_tensor(a, name="a")
b = tf.convert_to_tensor(b, name="b")
c = tf.convert_to_tensor(c, name="c")
# Define some computation that uses `a`, `b`, and `c`.
return foo_op(..., name=scope)
```
When executed, the Tensors `a`, `b`, `c`, will have names `MyOp/a`, `MyOp/b`,
and `MyOp/c`.
If the scope name already exists, the name will be made unique by appending
`_n`. For example, calling `my_op` the second time will generate `MyOp_1/a`,
etc.
"""
def __init__(self, name):
"""Initialize the context manager.
Args:
name: The prefix to use on all names created within the name scope.
Raises:
ValueError: If name is None, or not a string.
"""
if name is None or not isinstance(name, six.string_types):
raise ValueError("name for name_scope must be a string.")
self._name = name
self._exit_fns = []
@property
def name(self):
return self._name
def __enter__(self):
"""Start the scope block.
Returns:
The scope name.
"""
ctx = context.context()
if ctx.executing_eagerly():
scope_name, old_scope_name = enter_eager_name_scope(ctx, self._name)
self._exit_fns.append(
lambda *a: setattr(ctx, "scope_name", old_scope_name))
else:
scope = get_default_graph().name_scope(self._name)
scope_name = scope.__enter__()
self._exit_fns.append(scope.__exit__)
return scope_name
def __exit__(self, type_arg, value_arg, traceback_arg):
exit_fn = self._exit_fns.pop()
exit_fn(type_arg, value_arg, traceback_arg)
return False # False values do not suppress exceptions
def strip_name_scope(name, export_scope):
"""Removes name scope from a name.
Args:
name: A `string` name.
export_scope: Optional `string`. Name scope to remove.
Returns:
Name with name scope removed, or the original name if export_scope
is None.
"""
if export_scope:
if export_scope[-1] == "/":
export_scope = export_scope[:-1]
try:
# Strips export_scope/, export_scope///,
# ^export_scope/, loc:@export_scope/.
str_to_replace = r"([\^]|loc:@|^)" + export_scope + r"[\/]+(.*)"
return re.sub(str_to_replace, r"\1\2", compat.as_str(name), count=1)
except TypeError as e:
# If the name is not of a type we can process, simply return it.
logging.warning(e)
return name
else:
return name
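# Illustrative behaviour of the substitution above (editorial annotation, not
# part of the original file):
def _example_strip_name_scope():
  assert strip_name_scope("export/foo/v", "export") == "foo/v"
  assert strip_name_scope("^export/v", "export") == "^v"
  assert strip_name_scope("other/v", "export") == "other/v"  # scope absent -> unchanged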
def prepend_name_scope(name, import_scope):
"""Prepends name scope to a name.
Args:
name: A `string` name.
import_scope: Optional `string`. Name scope to add.
Returns:
Name with name scope added, or the original name if import_scope
is None.
"""
if import_scope:
if import_scope[-1] == "/":
import_scope = import_scope[:-1]
try:
str_to_replace = r"([\^]|loc:@|^)(.*)"
return re.sub(str_to_replace, r"\1" + import_scope + r"/\2",
compat.as_str(name))
except TypeError as e:
# If the name is not of a type we can process, simply return it.
logging.warning(e)
return name
else:
return name
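# Illustrative behaviour of the substitution above (editorial annotation, not
# part of the original file):
def _example_prepend_name_scope():
  assert prepend_name_scope("v", "import") == "import/v"
  assert prepend_name_scope("^foo/v", "import") == "^import/foo/v"
  assert prepend_name_scope("v", None) == "v"  # no scope -> unchanged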
# pylint: disable=g-doc-return-or-yield
# pylint: disable=not-context-manager
@tf_export(v1=["op_scope"])
@tf_contextlib.contextmanager
def op_scope(values, name, default_name=None):
"""DEPRECATED. Same as name_scope above, just different argument order."""
logging.warn("tf.op_scope(values, name, default_name) is deprecated,"
" use tf.name_scope(name, default_name, values)")
with name_scope(name, default_name=default_name, values=values) as scope:
yield scope
_proto_function_registry = registry.Registry("proto functions")
def register_proto_function(collection_name,
proto_type=None,
to_proto=None,
from_proto=None):
"""Registers `to_proto` and `from_proto` functions for collection_name.
`to_proto` function converts a Python object to the corresponding protocol
buffer, and returns the protocol buffer.
`from_proto` function converts protocol buffer into a Python object, and
  returns the object.
Args:
collection_name: Name of the collection.
proto_type: Protobuf type, such as `saver_pb2.SaverDef`,
      `variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`.
to_proto: Function that implements Python object to protobuf conversion.
from_proto: Function that implements protobuf to Python object conversion.
"""
if to_proto and not callable(to_proto):
raise TypeError("to_proto must be callable.")
if from_proto and not callable(from_proto):
raise TypeError("from_proto must be callable.")
_proto_function_registry.register((proto_type, to_proto, from_proto),
collection_name)
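# A minimal registration sketch (editorial annotation). The stand-in lambdas
# only illustrate the call shape; real callers pass protobuf conversion
# functions.
def _example_register_proto_function():
  register_proto_function(
      "example_collection",
      proto_type=None,
      to_proto=lambda obj, export_scope=None: obj,
      from_proto=lambda proto, import_scope=None: proto)
  return get_to_proto_function("example_collection")  # the to_proto lambda above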
def get_collection_proto_type(collection_name):
"""Returns the proto_type for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[0]
except LookupError:
return None
def get_to_proto_function(collection_name):
"""Returns the to_proto function for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[1]
except LookupError:
return None
def get_from_proto_function(collection_name):
"""Returns the from_proto function for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[2]
except LookupError:
return None
def _operation_conversion_error(op, dtype=None, name=None, as_ref=False):
"""Produce a nice error if someone converts an Operation to a Tensor."""
raise TypeError(("Can't convert Operation '%s' to Tensor "
"(target dtype=%r, name=%r, as_ref=%r)") %
(op.name, dtype, name, as_ref))
def _op_to_colocate_with(v, graph):
"""Operation object corresponding to v to use for colocation constraints."""
if v is None:
return None
if isinstance(v, Operation):
return v
# We always want to colocate with the reference op.
# When 'v' is a ResourceVariable, the reference op is the handle creating op.
#
# What this should be is:
# if isinstance(v, ResourceVariable):
# return v.handle.op
# However, that would require a circular import dependency.
# As of October 2018, there were attempts underway to remove
# colocation constraints altogether. Assuming that will
# happen soon, perhaps this hack to work around the circular
# import dependency is acceptable.
if hasattr(v, "handle") and hasattr(v.handle, "op") and isinstance(
v.handle.op, Operation):
if graph.building_function:
return graph.capture(v.handle).op
else:
return v.handle.op
return internal_convert_to_tensor_or_indexed_slices(v, as_ref=True).op
def _is_keras_symbolic_tensor(x):
return hasattr(x, "graph") and getattr(x.graph, "name", None) == "keras_graph"
tensor_conversion_registry.register_tensor_conversion_function(
Operation, _operation_conversion_error)
# These symbols were originally defined in this module; import them for
# backwards compatibility until all references have been updated to access
# them from the indexed_slices.py module.
IndexedSlices = indexed_slices.IndexedSlices
IndexedSlicesValue = indexed_slices.IndexedSlicesValue
convert_to_tensor_or_indexed_slices = \
indexed_slices.convert_to_tensor_or_indexed_slices
convert_n_to_tensor_or_indexed_slices = \
indexed_slices.convert_n_to_tensor_or_indexed_slices
internal_convert_to_tensor_or_indexed_slices = \
indexed_slices.internal_convert_to_tensor_or_indexed_slices
internal_convert_n_to_tensor_or_indexed_slices = \
indexed_slices.internal_convert_n_to_tensor_or_indexed_slices
register_tensor_conversion_function = \
tensor_conversion_registry.register_tensor_conversion_function
| 36.475904
| 115
| 0.69192
|
7a96b61c583030bfa7d6ec7318df4537be72b4d1
| 1,549
|
py
|
Python
|
tests/records/dumpers/test_pids_dumper.py
|
inveniosoftware/invenio-datacite
|
d25e3670b74f132390fc42e5647765ae5c605ef3
|
[
"MIT"
] | 10
|
2020-01-17T10:13:09.000Z
|
2022-03-17T10:14:41.000Z
|
tests/records/dumpers/test_pids_dumper.py
|
inveniosoftware/invenio-datacite
|
d25e3670b74f132390fc42e5647765ae5c605ef3
|
[
"MIT"
] | 570
|
2019-08-15T16:35:25.000Z
|
2022-03-31T13:46:17.000Z
|
tests/records/dumpers/test_pids_dumper.py
|
inveniosoftware/invenio-datacite
|
d25e3670b74f132390fc42e5647765ae5c605ef3
|
[
"MIT"
] | 57
|
2019-09-04T09:25:29.000Z
|
2022-03-30T19:32:55.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 CERN.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Module tests."""
from invenio_records.dumpers import ElasticsearchDumper
from invenio_rdm_records.records import RDMRecord
from invenio_rdm_records.records.api import RDMParent
from invenio_rdm_records.records.dumpers import PIDsDumperExt
def test_esdumper_with_externalpidsext(app, db, minimal_record, location):
# Create a simple extension that adds a computed field.
dumper = ElasticsearchDumper(
extensions=[PIDsDumperExt()]
)
minimal_record["pids"] = {
"doi": {
"identifier": "10.5281/zenodo.1234",
"provider": "datacite",
"client": "zenodo"
},
"handle": {
"identifier": "9.12314",
"provider": "cern-handle",
"client": "zenodo"
}
}
# Create the record
record = RDMRecord.create(minimal_record, parent=RDMParent.create({}))
db.session.commit()
# Dump it
dump = record.dumps(dumper=dumper)
dumped_pids = dump["pids"]
for dumped_pid in dumped_pids:
pid_attrs = dumped_pid.keys()
assert "scheme" in pid_attrs
assert "identifier" in pid_attrs
assert "provider" in pid_attrs
assert "client" in pid_attrs
# Load it
new_record = RDMRecord.loads(dump, loader=dumper)
assert minimal_record["pids"] == new_record["pids"]
| 28.685185
| 77
| 0.652679
|
413b5d7172b204633ad4a243279055d7e40d1bc1
| 292
|
py
|
Python
|
docs/report/cloudmesh-openapi/tests/add-json/add.py
|
rickotten/cybertraining-dsc.github.io
|
c8ea59be4f09fd543040ba0908af118df5820a70
|
[
"Apache-2.0"
] | 7
|
2020-02-29T14:53:19.000Z
|
2021-01-17T17:08:44.000Z
|
docs/report/cloudmesh-openapi/tests/add-json/add.py
|
rickotten/cybertraining-dsc.github.io
|
c8ea59be4f09fd543040ba0908af118df5820a70
|
[
"Apache-2.0"
] | 27
|
2020-02-29T13:38:11.000Z
|
2020-09-02T20:24:59.000Z
|
docs/report/cloudmesh-openapi/tests/add-json/add.py
|
rickotten/cybertraining-dsc.github.io
|
c8ea59be4f09fd543040ba0908af118df5820a70
|
[
"Apache-2.0"
] | 6
|
2020-03-02T17:09:14.000Z
|
2020-10-30T22:48:01.000Z
|
from flask import jsonify
def add(x: float, y: float) -> str:
"""
    Add two floats and return their sum as a JSON response.
    :param x: x value
    :type x: float
    :param y: y value
    :type y: float
    :return: JSON object of the form {"result": x + y}
    :return type: flask.Response
"""
result = {"result": x + y}
return jsonify(result)
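# A minimal usage sketch (editorial annotation, not part of the original file).
# jsonify() needs an application context, so a throwaway Flask app is created
# here purely to exercise add() outside the generated service.
if __name__ == "__main__":
    from flask import Flask
    _app = Flask(__name__)
    with _app.app_context():
        print(add(1.5, 2.5).get_json())  # {'result': 4.0}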
| 18.25
| 35
| 0.568493
|
cfb9c9893f84ab9eeab000956d10b0d8efbfbb9d
| 8,298
|
py
|
Python
|
tefla/da/iterator.py
|
SiddhantKapil/tefla
|
cff18fbaf5cde4ef55643978593cbcfcb35b3b37
|
[
"MIT"
] | 3
|
2017-02-26T14:35:03.000Z
|
2020-04-26T23:06:36.000Z
|
tefla/da/iterator.py
|
SiddhantKapil/tefla
|
cff18fbaf5cde4ef55643978593cbcfcb35b3b37
|
[
"MIT"
] | null | null | null |
tefla/da/iterator.py
|
SiddhantKapil/tefla
|
cff18fbaf5cde4ef55643978593cbcfcb35b3b37
|
[
"MIT"
] | 1
|
2020-05-19T19:18:08.000Z
|
2020-05-19T19:18:08.000Z
|
from __future__ import division, print_function, absolute_import
import Queue
import SharedArray
import multiprocessing
import os
import threading
from uuid import uuid4
import numpy as np
from tefla.da import data
class BatchIterator(object):
def __init__(self, batch_size, shuffle):
self.batch_size = batch_size
self.shuffle = shuffle
def __call__(self, X, y=None):
if self.shuffle:
index_array = np.random.permutation(len(X))
self.X = X[index_array]
self.y = y[index_array] if y is not None else y
else:
self.X, self.y = X, y
return self
def __iter__(self):
n_samples = self.X.shape[0]
bs = self.batch_size
for i in range((n_samples + bs - 1) // bs):
sl = slice(i * bs, (i + 1) * bs)
Xb = self.X[sl]
if self.y is not None:
yb = self.y[sl]
else:
yb = None
yield self.transform(Xb, yb)
def transform(self, Xb, yb):
return Xb, yb
def __getstate__(self):
state = dict(self.__dict__)
for attr in ('X', 'y',):
if attr in state:
del state[attr]
return state
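# A minimal usage sketch (editorial annotation, not part of the original module):
def _batch_iterator_example():
    X = np.arange(10).reshape(5, 2)
    y = np.arange(5)
    iterator = BatchIterator(batch_size=2, shuffle=False)
    # Two full batches of 2 samples, then a final partial batch of 1 sample.
    return [Xb.shape for Xb, yb in iterator(X, y)]  # [(2, 2), (2, 2), (1, 2)]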
class QueuedMixin(object):
def __iter__(self):
queue = Queue.Queue(maxsize=20)
end_marker = object()
def producer():
for Xb, yb in super(QueuedMixin, self).__iter__():
queue.put((np.array(Xb), np.array(yb)))
queue.put(end_marker)
thread = threading.Thread(target=producer)
thread.daemon = True
thread.start()
item = queue.get()
while item is not end_marker:
yield item
queue.task_done()
item = queue.get()
class QueuedIterator(QueuedMixin, BatchIterator):
pass
class DAIterator(BatchIterator):
def __call__(self, X, y=None, crop_bbox=None, xform=None):
self.crop_bbox = crop_bbox
self.xform = xform
return super(DAIterator, self).__call__(X, y)
def __init__(self, batch_size, shuffle, preprocessor, crop_size, is_training,
aug_params=data.no_augmentation_params, fill_mode='constant', fill_mode_cval=0, standardizer=None,
save_to_dir=None):
self.preprocessor = preprocessor if preprocessor else data.image_no_preprocessing
self.w = crop_size[0]
self.h = crop_size[1]
self.is_training = is_training
self.aug_params = aug_params
self.fill_mode = fill_mode
self.fill_mode_cval = fill_mode_cval
self.standardizer = standardizer
self.save_to_dir = save_to_dir
if save_to_dir and not os.path.exists(save_to_dir):
os.makedirs(save_to_dir)
super(DAIterator, self).__init__(batch_size, shuffle)
def da_args(self):
kwargs = {'preprocessor': self.preprocessor, 'w': self.w, 'h': self.h, 'is_training': self.is_training,
'fill_mode': self.fill_mode, 'fill_mode_cval': self.fill_mode_cval, 'standardizer': self.standardizer,
'save_to_dir': self.save_to_dir}
if self.crop_bbox is not None:
assert not self.is_training, "crop bbox only in validation/prediction mode"
kwargs['bbox'] = self.crop_bbox
elif self.xform is not None:
assert not self.is_training, "transform only in validation/prediction mode"
kwargs['transform'] = self.xform
else:
kwargs['aug_params'] = self.aug_params
return kwargs
def transform(self, Xb, yb):
fnames, labels = Xb, yb
Xb = data.load_augmented_images(fnames, **self.da_args())
return Xb, labels
class QueuedDAIterator(QueuedMixin, DAIterator):
pass
pool_process_seed = None
def load_shared(args):
import os
i, array_name, fname, kwargs = args
array = SharedArray.attach(array_name)
global pool_process_seed
if not pool_process_seed:
pool_process_seed = os.getpid()
# print("random seed: %d in pid %d" % (pool_process_seed, os.getpid()))
np.random.seed(pool_process_seed)
array[i] = data.load_augment(fname, **kwargs)
class ParallelDAIterator(QueuedDAIterator):
def __init__(self, batch_size, shuffle, preprocessor, crop_size, is_training,
aug_params=data.no_augmentation_params, fill_mode='constant', fill_mode_cval=0, standardizer=None,
save_to_dir=None):
self.pool = multiprocessing.Pool()
self.num_image_channels = None
super(ParallelDAIterator, self).__init__(batch_size, shuffle, preprocessor, crop_size, is_training, aug_params,
fill_mode, fill_mode_cval, standardizer, save_to_dir)
def transform(self, Xb, yb):
shared_array_name = str(uuid4())
fnames, labels = Xb, yb
args = []
da_args = self.da_args()
for i, fname in enumerate(fnames):
args.append((i, shared_array_name, fname, da_args))
if self.num_image_channels is None:
test_img = data.load_augment(fnames[0], **da_args)
self.num_image_channels = test_img.shape[-1]
try:
shared_array = SharedArray.create(
shared_array_name, [len(Xb), self.w, self.h, self.num_image_channels], dtype=np.float32)
self.pool.map(load_shared, args)
Xb = np.array(shared_array, dtype=np.float32)
finally:
SharedArray.delete(shared_array_name)
# if labels is not None:
# labels = labels[:, np.newaxis]
return Xb, labels
class BalancingDAIterator(ParallelDAIterator):
def __init__(
self, batch_size, shuffle, preprocessor, crop_size, is_training,
balance_weights, final_balance_weights, balance_ratio, balance_epoch_count=0,
aug_params=data.no_augmentation_params,
fill_mode='constant', fill_mode_cval=0, standardizer=None, save_to_dir=None):
self.count = balance_epoch_count
self.balance_weights = balance_weights
self.final_balance_weights = final_balance_weights
self.balance_ratio = balance_ratio
super(BalancingDAIterator, self).__init__(batch_size, shuffle, preprocessor, crop_size, is_training, aug_params,
fill_mode, fill_mode_cval, standardizer, save_to_dir)
def __call__(self, X, y=None):
if y is not None:
alpha = self.balance_ratio ** self.count
class_weights = self.balance_weights * alpha + self.final_balance_weights * (1 - alpha)
self.count += 1
indices = data.balance_per_class_indices(y, weights=class_weights)
X = X[indices]
y = y[indices]
return super(BalancingDAIterator, self).__call__(X, y)
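# A small numeric sketch of the re-balancing schedule above (editorial
# annotation): alpha decays geometrically with the epoch count, so the
# effective class weights move from `balance_weights` towards
# `final_balance_weights` over training.
def _balance_schedule_example(epoch, start=1.0, final=4.0, ratio=0.9):
    alpha = ratio ** epoch
    # epoch 0 -> 1.0, epoch 1 -> 1.3, epoch 2 -> 1.57, ... approaching 4.0
    return start * alpha + final * (1 - alpha)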
# Todo remove code duplication with BalancingDAIterator (call method)
class BalancingQueuedDAIterator(QueuedDAIterator):
def __init__(
self, batch_size, shuffle, preprocessor, crop_size, is_training,
balance_weights, final_balance_weights, balance_ratio, balance_epoch_count=0,
aug_params=data.no_augmentation_params,
fill_mode='constant', fill_mode_cval=0, standardizer=None, save_to_dir=None):
self.count = balance_epoch_count
self.balance_weights = balance_weights
self.final_balance_weights = final_balance_weights
self.balance_ratio = balance_ratio
super(BalancingQueuedDAIterator, self).__init__(batch_size, shuffle, preprocessor, crop_size, is_training,
aug_params, fill_mode, fill_mode_cval, standardizer,
save_to_dir)
def __call__(self, X, y=None):
if y is not None:
alpha = self.balance_ratio ** self.count
class_weights = self.balance_weights * alpha + self.final_balance_weights * (1 - alpha)
self.count += 1
indices = data.balance_per_class_indices(y, weights=class_weights)
X = X[indices]
y = y[indices]
return super(BalancingQueuedDAIterator, self).__call__(X, y)
| 37.210762
| 120
| 0.627501
|
61f55a73a88c5360e22740ba24577788bcd668b8
| 2,238
|
py
|
Python
|
labml_nn/resnet/experiment.py
|
Aarsh2001/annotated_deep_learning_paper_implementations
|
ff0d5c065da1a46769f5f66fddc252c178f8fa37
|
[
"MIT"
] | 1
|
2022-03-12T11:04:19.000Z
|
2022-03-12T11:04:19.000Z
|
robust StyleGAN 2/labml_nn/resnet/experiment.py
|
sbnietert/robust-OT
|
d1fc1bc0e09975687419149dbe25cce786f81fcc
|
[
"MIT"
] | null | null | null |
robust StyleGAN 2/labml_nn/resnet/experiment.py
|
sbnietert/robust-OT
|
d1fc1bc0e09975687419149dbe25cce786f81fcc
|
[
"MIT"
] | 1
|
2022-02-09T04:11:36.000Z
|
2022-02-09T04:11:36.000Z
|
"""
---
title: Train a ResNet on CIFAR 10
summary: >
Train a ResNet on CIFAR 10
---
# Train a [ResNet](index.html) on CIFAR 10
[](https://app.labml.ai/run/fc5ad600e4af11ebbafd23b8665193c1)
"""
from typing import List, Optional
from torch import nn
from labml import experiment
from labml.configs import option
from labml_nn.experiments.cifar10 import CIFAR10Configs
from labml_nn.resnet import ResNetBase
class Configs(CIFAR10Configs):
"""
## Configurations
We use [`CIFAR10Configs`](../experiments/cifar10.html) which defines all the
dataset related configurations, optimizer, and a training loop.
"""
    # Number of blocks for each feature map size
n_blocks: List[int] = [3, 3, 3]
# Number of channels for each feature map size
n_channels: List[int] = [16, 32, 64]
# Bottleneck sizes
bottlenecks: Optional[List[int]] = None
# Kernel size of the initial convolution layer
first_kernel_size: int = 7
@option(Configs.model)
def _resnet(c: Configs):
"""
### Create model
"""
# [ResNet](index.html)
base = ResNetBase(c.n_blocks, c.n_channels, c.bottlenecks, img_channels=3, first_kernel_size=c.first_kernel_size)
# Linear layer for classification
classification = nn.Linear(c.n_channels[-1], 10)
# Stack them
model = nn.Sequential(base, classification)
# Move the model to the device
return model.to(c.device)
def main():
# Create experiment
experiment.create(name='resnet', comment='cifar10')
# Create configurations
conf = Configs()
# Load configurations
experiment.configs(conf, {
'bottlenecks': [8, 16, 16],
'n_blocks': [6, 6, 6],
'optimizer.optimizer': 'Adam',
'optimizer.learning_rate': 2.5e-4,
'epochs': 500,
'train_batch_size': 256,
'train_dataset': 'cifar10_train_augmented',
'valid_dataset': 'cifar10_valid_no_augment',
})
# Set model for saving/loading
experiment.add_pytorch_models({'model': conf.model})
# Start the experiment and run the training loop
with experiment.start():
conf.run()
#
if __name__ == '__main__':
main()
| 26.329412
| 131
| 0.674263
|
c196a11a122f0464f8219ae11d9726069b0a3976
| 9,789
|
py
|
Python
|
sanic/errorpages.py
|
AggressivelyMeows/sanic
|
04427bb96f2ca756d5620c58cac7e240e313b7af
|
[
"MIT"
] | null | null | null |
sanic/errorpages.py
|
AggressivelyMeows/sanic
|
04427bb96f2ca756d5620c58cac7e240e313b7af
|
[
"MIT"
] | null | null | null |
sanic/errorpages.py
|
AggressivelyMeows/sanic
|
04427bb96f2ca756d5620c58cac7e240e313b7af
|
[
"MIT"
] | null | null | null |
import sys
import typing as t
from functools import partial
from traceback import extract_tb
from sanic.exceptions import InvalidUsage, SanicException
from sanic.helpers import STATUS_CODES
from sanic.request import Request
from sanic.response import HTTPResponse, html, json, text
try:
from ujson import dumps
dumps = partial(dumps, escape_forward_slashes=False)
except ImportError: # noqa
from json import dumps # type: ignore
FALLBACK_TEXT = (
"The server encountered an internal error and "
"cannot complete your request."
)
FALLBACK_STATUS = 500
class BaseRenderer:
def __init__(self, request, exception, debug):
self.request = request
self.exception = exception
self.debug = debug
@property
def headers(self):
if isinstance(self.exception, SanicException):
return getattr(self.exception, "headers", {})
return {}
@property
def status(self):
if isinstance(self.exception, SanicException):
return getattr(self.exception, "status_code", FALLBACK_STATUS)
return FALLBACK_STATUS
@property
def text(self):
if self.debug or isinstance(self.exception, SanicException):
return str(self.exception)
return FALLBACK_TEXT
@property
def title(self):
status_text = STATUS_CODES.get(self.status, b"Error Occurred").decode()
return f"{self.status} — {status_text}"
def render(self):
output = (
self.full
if self.debug and not getattr(self.exception, "quiet", False)
else self.minimal
)
return output()
def minimal(self): # noqa
raise NotImplementedError
def full(self): # noqa
raise NotImplementedError
class HTMLRenderer(BaseRenderer):
TRACEBACK_STYLE = """
html { font-family: sans-serif }
h2 { color: #888; }
.tb-wrapper p { margin: 0 }
.frame-border { margin: 1rem }
.frame-line > * { padding: 0.3rem 0.6rem }
.frame-line { margin-bottom: 0.3rem }
.frame-code { font-size: 16px; padding-left: 4ch }
.tb-wrapper { border: 1px solid #eee }
.tb-header { background: #eee; padding: 0.3rem; font-weight: bold }
.frame-descriptor { background: #e2eafb; font-size: 14px }
"""
TRACEBACK_WRAPPER_HTML = (
"<div class=tb-header>{exc_name}: {exc_value}</div>"
"<div class=tb-wrapper>{frame_html}</div>"
)
TRACEBACK_BORDER = (
"<div class=frame-border>"
"The above exception was the direct cause of the following exception:"
"</div>"
)
TRACEBACK_LINE_HTML = (
"<div class=frame-line>"
"<p class=frame-descriptor>"
"File {0.filename}, line <i>{0.lineno}</i>, "
"in <code><b>{0.name}</b></code>"
"<p class=frame-code><code>{0.line}</code>"
"</div>"
)
OUTPUT_HTML = (
"<!DOCTYPE html><html lang=en>"
"<meta charset=UTF-8><title>{title}</title>\n"
"<style>{style}</style>\n"
"<h1>{title}</h1><p>{text}\n"
"{body}"
)
def full(self):
return html(
self.OUTPUT_HTML.format(
title=self.title,
text=self.text,
style=self.TRACEBACK_STYLE,
body=self._generate_body(),
),
status=self.status,
)
def minimal(self):
return html(
self.OUTPUT_HTML.format(
title=self.title,
text=self.text,
style=self.TRACEBACK_STYLE,
body="",
),
status=self.status,
headers=self.headers,
)
@property
def text(self):
return escape(super().text)
@property
def title(self):
return escape(f"⚠️ {super().title}")
def _generate_body(self):
_, exc_value, __ = sys.exc_info()
exceptions = []
while exc_value:
exceptions.append(self._format_exc(exc_value))
exc_value = exc_value.__cause__
traceback_html = self.TRACEBACK_BORDER.join(reversed(exceptions))
appname = escape(self.request.app.name)
name = escape(self.exception.__class__.__name__)
value = escape(self.exception)
path = escape(self.request.path)
lines = [
f"<h2>Traceback of {appname} (most recent call last):</h2>",
f"{traceback_html}",
"<div class=summary><p>",
f"<b>{name}: {value}</b> while handling path <code>{path}</code>",
"</div>",
]
return "\n".join(lines)
def _format_exc(self, exc):
frames = extract_tb(exc.__traceback__)
frame_html = "".join(
self.TRACEBACK_LINE_HTML.format(frame) for frame in frames
)
return self.TRACEBACK_WRAPPER_HTML.format(
exc_name=escape(exc.__class__.__name__),
exc_value=escape(exc),
frame_html=frame_html,
)
class TextRenderer(BaseRenderer):
OUTPUT_TEXT = "{title}\n{bar}\n{text}\n\n{body}"
SPACER = " "
def full(self):
return text(
self.OUTPUT_TEXT.format(
title=self.title,
text=self.text,
bar=("=" * len(self.title)),
body=self._generate_body(),
),
status=self.status,
)
def minimal(self):
return text(
self.OUTPUT_TEXT.format(
title=self.title,
text=self.text,
bar=("=" * len(self.title)),
body="",
),
status=self.status,
headers=self.headers,
)
@property
def title(self):
return f"⚠️ {super().title}"
def _generate_body(self):
_, exc_value, __ = sys.exc_info()
exceptions = []
lines = [
f"{self.exception.__class__.__name__}: {self.exception} while "
f"handling path {self.request.path}",
f"Traceback of {self.request.app.name} (most recent call last):\n",
]
while exc_value:
exceptions.append(self._format_exc(exc_value))
exc_value = exc_value.__cause__
return "\n".join(lines + exceptions[::-1])
def _format_exc(self, exc):
frames = "\n\n".join(
[
f"{self.SPACER * 2}File {frame.filename}, "
f"line {frame.lineno}, in "
f"{frame.name}\n{self.SPACER * 2}{frame.line}"
for frame in extract_tb(exc.__traceback__)
]
)
return f"{self.SPACER}{exc.__class__.__name__}: {exc}\n{frames}"
class JSONRenderer(BaseRenderer):
def full(self):
output = self._generate_output(full=True)
return json(output, status=self.status, dumps=dumps)
def minimal(self):
output = self._generate_output(full=False)
return json(output, status=self.status, dumps=dumps)
def _generate_output(self, *, full):
output = {
"description": self.title,
"status": self.status,
"message": self.text,
}
if full:
_, exc_value, __ = sys.exc_info()
exceptions = []
while exc_value:
exceptions.append(
{
"type": exc_value.__class__.__name__,
"exception": str(exc_value),
"frames": [
{
"file": frame.filename,
"line": frame.lineno,
"name": frame.name,
"src": frame.line,
}
for frame in extract_tb(exc_value.__traceback__)
],
}
)
exc_value = exc_value.__cause__
output["path"] = self.request.path
output["args"] = self.request.args
output["exceptions"] = exceptions[::-1]
return output
@property
def title(self):
return STATUS_CODES.get(self.status, b"Error Occurred").decode()
def escape(text):
"""Minimal HTML escaping, not for attribute values (unlike html.escape)."""
return f"{text}".replace("&", "&").replace("<", "<")
RENDERERS_BY_CONFIG = {
"html": HTMLRenderer,
"json": JSONRenderer,
"text": TextRenderer,
}
RENDERERS_BY_CONTENT_TYPE = {
"multipart/form-data": HTMLRenderer,
"application/json": JSONRenderer,
"text/plain": TextRenderer,
}
def exception_response(
request: Request,
exception: Exception,
debug: bool,
    renderer: t.Optional[t.Type[BaseRenderer]] = None,
) -> HTTPResponse:
"""Render a response for the default FALLBACK exception handler"""
if not renderer:
renderer = HTMLRenderer
if request:
if request.app.config.FALLBACK_ERROR_FORMAT == "auto":
try:
renderer = JSONRenderer if request.json else HTMLRenderer
except InvalidUsage:
renderer = HTMLRenderer
content_type, *_ = request.headers.get(
"content-type", ""
).split(";")
renderer = RENDERERS_BY_CONTENT_TYPE.get(
content_type, renderer
)
else:
render_format = request.app.config.FALLBACK_ERROR_FORMAT
renderer = RENDERERS_BY_CONFIG.get(render_format, renderer)
renderer = t.cast(t.Type[BaseRenderer], renderer)
return renderer(request, exception, debug).render()
| 29.663636
| 79
| 0.548473
|
b7415d786ff106ac51975dddea19b249d86e65ad
| 763
|
py
|
Python
|
tests/test_alpha_helix.py
|
xingjiepan/alpha_helix_generator
|
2b35691b790e6363d5c4897a72c3efa8556d0143
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_alpha_helix.py
|
xingjiepan/alpha_helix_generator
|
2b35691b790e6363d5c4897a72c3efa8556d0143
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_alpha_helix.py
|
xingjiepan/alpha_helix_generator
|
2b35691b790e6363d5c4897a72c3efa8556d0143
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import pytest
import numpy as np
np.seterr(all='raise')
import ss_generator as ssg
def test_build_alpha_helix():
    print("test build alpha helix.")
res_list = ssg.alpha_helix.build_ideal_straight_alpha_helix(20)
ssg.IO.save_residue_list(res_list, "ideal_straight_helix.pdb")
theta = np.radians(90)
phi = np.radians(-4)
directions = [np.array([0, np.sin(theta), np.cos(theta)])]
for i in range(1, 50):
directions.append(np.dot(ssg.geometry.rotation_matrix_from_axis_and_angle(
np.array([0, 0, 1]), phi), directions[-1]))
res_list = ssg.alpha_helix.build_alpha_helix_from_directions(directions)
ssg.IO.save_residue_list(res_list, "helix_perturbed.pdb")
| 28.259259
| 82
| 0.685452
|
9f00735e14b595c0579f296dd5d2151c6d0235a8
| 100
|
py
|
Python
|
Python/DivisibilityProblem.py
|
Zardosh/code-forces-solutions
|
ea1446b8e4f391f3e9ef63094816c7bdaded1557
|
[
"MIT"
] | null | null | null |
Python/DivisibilityProblem.py
|
Zardosh/code-forces-solutions
|
ea1446b8e4f391f3e9ef63094816c7bdaded1557
|
[
"MIT"
] | null | null | null |
Python/DivisibilityProblem.py
|
Zardosh/code-forces-solutions
|
ea1446b8e4f391f3e9ef63094816c7bdaded1557
|
[
"MIT"
] | null | null | null |
t = int(input())
for _ in range(t):
a, b = map(int, input().split())
print((b - a % b) % b)
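# Worked example (editorial annotation): for a = 10, b = 4 the answer is the
# smallest non-negative x such that (a + x) % b == 0:
#   (b - a % b) % b = (4 - 10 % 4) % 4 = (4 - 2) % 4 = 2
# The outer "% b" handles the case where a is already divisible by b (x = 0).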
| 20
| 36
| 0.48
|
6695e94528d8f1a836ec7968f8855909c72a0fed
| 7,565
|
py
|
Python
|
src/oci/identity_data_plane/models/bad_user_state_authenticate_user_result.py
|
xjuarez/oci-python-sdk
|
3c1604e4e212008fb6718e2f68cdb5ef71fd5793
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 3
|
2020-09-10T22:09:45.000Z
|
2021-12-24T17:00:07.000Z
|
src/oci/identity_data_plane/models/bad_user_state_authenticate_user_result.py
|
xjuarez/oci-python-sdk
|
3c1604e4e212008fb6718e2f68cdb5ef71fd5793
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/identity_data_plane/models/bad_user_state_authenticate_user_result.py
|
xjuarez/oci-python-sdk
|
3c1604e4e212008fb6718e2f68cdb5ef71fd5793
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class BadUserStateAuthenticateUserResult(object):
"""
BadUserStateAuthenticateUserResult model.
"""
#: A constant which can be used with the user_state property of a BadUserStateAuthenticateUserResult.
#: This constant has a value of "USER_BLOCKED"
USER_STATE_USER_BLOCKED = "USER_BLOCKED"
#: A constant which can be used with the user_state property of a BadUserStateAuthenticateUserResult.
#: This constant has a value of "USER_DISABLED"
USER_STATE_USER_DISABLED = "USER_DISABLED"
#: A constant which can be used with the user_state property of a BadUserStateAuthenticateUserResult.
#: This constant has a value of "ONE_TIME_PASSWORD_EXPIRED"
USER_STATE_ONE_TIME_PASSWORD_EXPIRED = "ONE_TIME_PASSWORD_EXPIRED"
#: A constant which can be used with the user_state property of a BadUserStateAuthenticateUserResult.
#: This constant has a value of "PASSWORD_INVALID"
USER_STATE_PASSWORD_INVALID = "PASSWORD_INVALID"
def __init__(self, **kwargs):
"""
Initializes a new BadUserStateAuthenticateUserResult object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param tenant_input:
The value to assign to the tenant_input property of this BadUserStateAuthenticateUserResult.
:type tenant_input: str
:param user_input:
The value to assign to the user_input property of this BadUserStateAuthenticateUserResult.
:type user_input: str
:param resolved_tenant_id:
The value to assign to the resolved_tenant_id property of this BadUserStateAuthenticateUserResult.
:type resolved_tenant_id: str
:param resolved_user_id:
The value to assign to the resolved_user_id property of this BadUserStateAuthenticateUserResult.
:type resolved_user_id: str
:param user_state:
The value to assign to the user_state property of this BadUserStateAuthenticateUserResult.
Allowed values for this property are: "USER_BLOCKED", "USER_DISABLED", "ONE_TIME_PASSWORD_EXPIRED", "PASSWORD_INVALID"
:type user_state: str
"""
self.swagger_types = {
'tenant_input': 'str',
'user_input': 'str',
'resolved_tenant_id': 'str',
'resolved_user_id': 'str',
'user_state': 'str'
}
self.attribute_map = {
'tenant_input': 'tenantInput',
'user_input': 'userInput',
'resolved_tenant_id': 'resolvedTenantId',
'resolved_user_id': 'resolvedUserId',
'user_state': 'userState'
}
self._tenant_input = None
self._user_input = None
self._resolved_tenant_id = None
self._resolved_user_id = None
self._user_state = None
@property
def tenant_input(self):
"""
**[Required]** Gets the tenant_input of this BadUserStateAuthenticateUserResult.
The tenant name.
:return: The tenant_input of this BadUserStateAuthenticateUserResult.
:rtype: str
"""
return self._tenant_input
@tenant_input.setter
def tenant_input(self, tenant_input):
"""
Sets the tenant_input of this BadUserStateAuthenticateUserResult.
The tenant name.
:param tenant_input: The tenant_input of this BadUserStateAuthenticateUserResult.
:type: str
"""
self._tenant_input = tenant_input
@property
def user_input(self):
"""
**[Required]** Gets the user_input of this BadUserStateAuthenticateUserResult.
The user name.
:return: The user_input of this BadUserStateAuthenticateUserResult.
:rtype: str
"""
return self._user_input
@user_input.setter
def user_input(self, user_input):
"""
Sets the user_input of this BadUserStateAuthenticateUserResult.
The user name.
:param user_input: The user_input of this BadUserStateAuthenticateUserResult.
:type: str
"""
self._user_input = user_input
@property
def resolved_tenant_id(self):
"""
**[Required]** Gets the resolved_tenant_id of this BadUserStateAuthenticateUserResult.
The resolved tenant id.
:return: The resolved_tenant_id of this BadUserStateAuthenticateUserResult.
:rtype: str
"""
return self._resolved_tenant_id
@resolved_tenant_id.setter
def resolved_tenant_id(self, resolved_tenant_id):
"""
Sets the resolved_tenant_id of this BadUserStateAuthenticateUserResult.
The resolved tenant id.
:param resolved_tenant_id: The resolved_tenant_id of this BadUserStateAuthenticateUserResult.
:type: str
"""
self._resolved_tenant_id = resolved_tenant_id
@property
def resolved_user_id(self):
"""
**[Required]** Gets the resolved_user_id of this BadUserStateAuthenticateUserResult.
The resolved user id.
:return: The resolved_user_id of this BadUserStateAuthenticateUserResult.
:rtype: str
"""
return self._resolved_user_id
@resolved_user_id.setter
def resolved_user_id(self, resolved_user_id):
"""
Sets the resolved_user_id of this BadUserStateAuthenticateUserResult.
The resolved user id.
:param resolved_user_id: The resolved_user_id of this BadUserStateAuthenticateUserResult.
:type: str
"""
self._resolved_user_id = resolved_user_id
@property
def user_state(self):
"""
**[Required]** Gets the user_state of this BadUserStateAuthenticateUserResult.
The bad user state.
Allowed values for this property are: "USER_BLOCKED", "USER_DISABLED", "ONE_TIME_PASSWORD_EXPIRED", "PASSWORD_INVALID"
:return: The user_state of this BadUserStateAuthenticateUserResult.
:rtype: str
"""
return self._user_state
@user_state.setter
def user_state(self, user_state):
"""
Sets the user_state of this BadUserStateAuthenticateUserResult.
The bad user state.
:param user_state: The user_state of this BadUserStateAuthenticateUserResult.
:type: str
"""
allowed_values = ["USER_BLOCKED", "USER_DISABLED", "ONE_TIME_PASSWORD_EXPIRED", "PASSWORD_INVALID"]
if not value_allowed_none_or_none_sentinel(user_state, allowed_values):
raise ValueError(
"Invalid value for `user_state`, must be None or one of {0}"
.format(allowed_values)
)
self._user_state = user_state
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 34.386364
| 245
| 0.678519
|
989b6c18ecf49debb2a55a4e8ef501e76e102db2
| 1,260
|
py
|
Python
|
bot.py
|
wazaan159/Auto-Filter-Bot-V2
|
e5cfd5c1d91fd049b040ff07388c0e5de7c82b1f
|
[
"MIT"
] | null | null | null |
bot.py
|
wazaan159/Auto-Filter-Bot-V2
|
e5cfd5c1d91fd049b040ff07388c0e5de7c82b1f
|
[
"MIT"
] | null | null | null |
bot.py
|
wazaan159/Auto-Filter-Bot-V2
|
e5cfd5c1d91fd049b040ff07388c0e5de7c82b1f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Cinema_FestivalTG
from pyrogram import (
Client,
__version__
)
from config import (
API_HASH,
APP_ID,
LOGGER,
AUTH_USERS,
TG_BOT_SESSION,
TG_BOT_TOKEN,
TG_BOT_WORKERS
)
from user import User
class Bot(Client):
USER: User = None
USER_ID: int = None
def __init__(self):
super().__init__(
TG_BOT_SESSION,
api_hash=API_HASH,
api_id=APP_ID,
plugins={
"root": "plugins"
},
workers=TG_BOT_WORKERS,
bot_token=TG_BOT_TOKEN
)
self.LOGGER = LOGGER
async def start(self):
await super().start()
usr_bot_me = await self.get_me()
self.set_parse_mode("html")
self.LOGGER(__name__).info(
f"@{usr_bot_me.username} started!\n\n"
f"Add @{usr_bot_me.username} as admin with all rights in your required channels\n\n"
)
AUTH_USERS.add(680815375)
self.USER, self.USER_ID = await User().start()
async def stop(self, *args):
await super().stop()
self.LOGGER(__name__).info("Bot stopped. Bye.")
| 22.5
| 97
| 0.545238
|
a45a879067e597dccbbd02df6d2f9020e5cee639
| 1,826
|
py
|
Python
|
gadget/route/api/project/model.py
|
wvankuipers/gadget-backend
|
ac23c9b991c44ed343784a197ed0805b23b88f02
|
[
"MIT"
] | null | null | null |
gadget/route/api/project/model.py
|
wvankuipers/gadget-backend
|
ac23c9b991c44ed343784a197ed0805b23b88f02
|
[
"MIT"
] | 9
|
2020-09-26T11:25:11.000Z
|
2020-09-30T18:18:26.000Z
|
gadget/route/api/project/model.py
|
wvankuipers/gadget-backend
|
ac23c9b991c44ed343784a197ed0805b23b88f02
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from flask import current_app
from slugify import slugify
from sqlalchemy.ext.hybrid import hybrid_property
from gadget import db
from gadget import utils
class Project(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(250), nullable=False)
_slug = db.Column('slug', db.String(250), nullable=False)
created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
updated = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
suites = db.relationship('Suite', backref='project', cascade='all, delete-orphan')
@hybrid_property
def slug(self):
return self._slug
@slug.setter
def slug(self, slug):
self._slug = slugify(slug)
def create(self):
db.session.add(self)
db.session.commit()
def update(self, name, slug):
self.updated = datetime.utcnow()
self.name = name
        self.slug = slug
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def fixtures():
if current_app.config['DEBUG'] is False:
raise Exception('Inserting fixtures not allowed in production')
return [
Project(name='project1', slug='project1'),
Project(name='project2', slug='project2'),
Project(name='project3', slug='project3'),
]
def serialize(self, recursive=True):
result = {
'id': self.id,
'name': self.name,
'slug': self.slug,
'created': utils.to_local_js_timestamp(self.created),
'updated': utils.to_local_js_timestamp(self.updated)
}
if recursive:
result['suites'] = utils.serialize_list(self.suites)
return result
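# A minimal usage sketch (editorial annotation, not part of the original module):
def _slug_example():
    project = Project(name="My Project")
    project.slug = "My Project"  # stored via slugify() as "my-project"
    return project.slug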
| 28.984127
| 86
| 0.625958
|
d461f2651a0bba4cc4cdea00bb54b62335f87bde
| 2,974
|
py
|
Python
|
extra_apps/xadmin/plugins/details.py
|
ansonsry/Education
|
3c96cc3e951dfdb5ab6b44997a72f61501fbe0f7
|
[
"MIT"
] | null | null | null |
extra_apps/xadmin/plugins/details.py
|
ansonsry/Education
|
3c96cc3e951dfdb5ab6b44997a72f61501fbe0f7
|
[
"MIT"
] | 1
|
2021-02-08T20:31:28.000Z
|
2021-02-08T20:31:28.000Z
|
extra_apps/xadmin/plugins/details.py
|
ansonsry/Freshshop
|
79ab8beb1aa993f6365182c8d3bb478ee4e028f8
|
[
"MIT"
] | null | null | null |
from django.utils.translation import ugettext as _
# from django.core.urlresolvers import reverse, NoReverseMatch
from django.urls import NoReverseMatch, reverse
from django.db import models
from xadmin.sites import site
from xadmin.views import BaseAdminPlugin, ListAdminView
class DetailsPlugin(BaseAdminPlugin):
show_detail_fields = []
show_all_rel_details = True
def result_item(self, item, obj, field_name, row):
if (self.show_all_rel_details or (field_name in self.show_detail_fields)):
rel_obj = None
if hasattr(item.field, 'rel') and isinstance(item.field.rel, models.ManyToOneRel):
rel_obj = getattr(obj, field_name)
elif field_name in self.show_detail_fields:
rel_obj = obj
if rel_obj:
if rel_obj.__class__ in site._registry:
try:
model_admin = site._registry[rel_obj.__class__]
has_view_perm = model_admin(self.admin_view.request).has_view_permission(rel_obj)
has_change_perm = model_admin(self.admin_view.request).has_change_permission(rel_obj)
except:
has_view_perm = self.admin_view.has_model_perm(rel_obj.__class__, 'view')
has_change_perm = self.has_model_perm(rel_obj.__class__, 'change')
else:
has_view_perm = self.admin_view.has_model_perm(rel_obj.__class__, 'view')
has_change_perm = self.has_model_perm(rel_obj.__class__, 'change')
if rel_obj and has_view_perm:
opts = rel_obj._meta
try:
item_res_uri = reverse(
'%s:%s_%s_detail' % (self.admin_site.app_name,
opts.app_label, opts.model_name),
args=(getattr(rel_obj, opts.pk.attname),))
if item_res_uri:
if has_change_perm:
edit_url = reverse(
'%s:%s_%s_change' % (self.admin_site.app_name, opts.app_label, opts.model_name),
args=(getattr(rel_obj, opts.pk.attname),))
else:
edit_url = ''
item.btns.append('<a data-res-uri="%s" data-edit-uri="%s" class="details-handler" rel="tooltip" title="%s"><i class="fa fa-info-circle"></i></a>'
% (item_res_uri, edit_url, _(u'Details of %s') % str(rel_obj)))
except NoReverseMatch:
pass
return item
# Media
def get_media(self, media):
if self.show_all_rel_details or self.show_detail_fields:
media = media + self.vendor('xadmin.plugin.details.js', 'xadmin.form.css')
return media
site.register_plugin(DetailsPlugin, ListAdminView)
| 45.060606
| 169
| 0.569267
|
557100b8218a3096e706d4775effa9bcb4cc1993
| 299
|
py
|
Python
|
source/vectorpaths/__init__.py
|
chrisarridge/fitCurves
|
44e30f480dcacfb8b05494549e6a902aaa384a79
|
[
"MIT"
] | 2
|
2021-09-10T18:54:19.000Z
|
2021-11-26T06:17:19.000Z
|
source/vectorpaths/__init__.py
|
chrisarridge/fitCurves
|
44e30f480dcacfb8b05494549e6a902aaa384a79
|
[
"MIT"
] | null | null | null |
source/vectorpaths/__init__.py
|
chrisarridge/fitCurves
|
44e30f480dcacfb8b05494549e6a902aaa384a79
|
[
"MIT"
] | 1
|
2021-09-10T18:54:54.000Z
|
2021-09-10T18:54:54.000Z
|
"""Path package; mainly for fitting cubic beziers to points.
"""
import numpy as np
import matplotlib.pyplot as plt
import logging
__author__ = 'Chris Arridge'
__version__ = '0.2'
_path_logger = logging.getLogger(__name__)
from .bezier import CubicBezier
from .fitcurves import fit_cubic_bezier
| 19.933333
| 60
| 0.785953
|
5a0a0499ddba5b6288e040a308b7f90def81dff1
| 1,132
|
py
|
Python
|
mysite/ct/tests/test_models.py
|
raccoongang/socraticqs2
|
06201005136ee139846f857dbb2f518736e441de
|
[
"Apache-2.0"
] | 3
|
2015-11-20T07:33:28.000Z
|
2017-01-15T23:33:50.000Z
|
mysite/ct/tests/test_models.py
|
raccoongang/socraticqs2
|
06201005136ee139846f857dbb2f518736e441de
|
[
"Apache-2.0"
] | 28
|
2015-07-14T11:33:24.000Z
|
2017-11-17T15:21:22.000Z
|
mysite/ct/tests/test_models.py
|
raccoongang/socraticqs2
|
06201005136ee139846f857dbb2f518736e441de
|
[
"Apache-2.0"
] | 4
|
2015-04-29T09:04:59.000Z
|
2017-07-19T14:11:16.000Z
|
import pytest
from .models import Lesson
from core.common.utils import get_onboarding_status_with_settings
from core.common.onboarding import CREATE_THREAD
from mysite.helpers import base64_to_file
@pytest.mark.django_db
def test_get_canvas_html(lesson_question_canvas, base64_gif_image):
assert lesson_question_canvas.attachment is not None
attachment = base64_to_file('data:image/gif;base64,{}'.format(base64_gif_image))
lesson_question_canvas.attachment.save('image.gif', attachment, save=True)
assert lesson_question_canvas.attachment.url is not None
assert lesson_question_canvas.get_html().find(lesson_question_canvas.attachment.url) > -1
@pytest.mark.django_db
@pytest.mark.parametrize("kind, updated", [
(Lesson.BASE_EXPLANATION, True),
(Lesson.EXPLANATION, True),
(Lesson.ORCT_QUESTION, False),
(Lesson.ANSWER, True),
])
def test_onboarding_step_5_update(unique_instructor, kind, updated):
Lesson(title='Lesson test', kind=kind, addedBy=unique_instructor).save()
assert get_onboarding_status_with_settings(unique_instructor.id).get(CREATE_THREAD).get('done') == updated
| 35.375
| 110
| 0.793286
|
f82ea255c74a5a9205624c13f9094974ccdffdf3
| 1,073
|
py
|
Python
|
roles/finalise/files/finish.py
|
cbutakoff/slurm-ansible-playbook
|
c957a8b96dae3a9859ab7c3095dc4d3c1619d1c0
|
[
"MIT"
] | null | null | null |
roles/finalise/files/finish.py
|
cbutakoff/slurm-ansible-playbook
|
c957a8b96dae3a9859ab7c3095dc4d3c1619d1c0
|
[
"MIT"
] | null | null | null |
roles/finalise/files/finish.py
|
cbutakoff/slurm-ansible-playbook
|
c957a8b96dae3a9859ab7c3095dc4d3c1619d1c0
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python2
from __future__ import (absolute_import, division, print_function)
import glob
import os
import subprocess
finished_nodes = set(os.path.basename(file) for file in glob.glob('/mnt/shared/finalised/*'))
all_nodes = {'mgmt'}
unfinished_nodes = all_nodes - finished_nodes
if unfinished_nodes:
print('Error: The following nodes have not reported finishing their setup:')
for node in sorted(unfinished_nodes):
print(' ', node)
print('Please allow them to finish before continuing.')
print('For information about why they have not finished, SSH to that machine and check the file /root/ansible-pull.log')
exit(1)
if not os.path.exists('/home/opc/limits.yaml'):
print('Error: Could not find limits.yaml')
print('Please create the file and rerun this script.')
print('See https://cluster-in-the-cloud.readthedocs.io/en/latest/finalise.html#setting-service-limits for details.')
exit(1)
subprocess.call(['sudo', '/usr/local/bin/update_config'])
subprocess.call(['sudo', 'systemctl', 'restart', 'slurmctld'])
| 35.766667
| 124
| 0.72973
|
ff3175b68f512ce5315b2cbf21f687c63d18b7d3
| 11,242
|
py
|
Python
|
flow_collector/netflow_v5.py
|
xyTel/flowanalyzer-kubernetes
|
4a26e973a297cf08976934d5c1aac365014ac59b
|
[
"MIT"
] | 1
|
2020-09-27T17:52:08.000Z
|
2020-09-27T17:52:08.000Z
|
flow_collector/netflow_v5.py
|
xyTel/flowanalyzer-kubernetes
|
4a26e973a297cf08976934d5c1aac365014ac59b
|
[
"MIT"
] | null | null | null |
flow_collector/netflow_v5.py
|
xyTel/flowanalyzer-kubernetes
|
4a26e973a297cf08976934d5c1aac365014ac59b
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2017, Manito Networks, LLC
# All rights reserved.
# Import what we need
import time, datetime, socket, struct, sys, json, socket, logging, logging.handlers, getopt, parser_modules
from struct import *
from socket import inet_ntoa
from elasticsearch import Elasticsearch, helpers
from IPy import IP
# Protocol numbers and types of traffic for comparison
from protocol_numbers import protocol_type
from defined_ports import registered_ports, other_ports
from netflow_options import *
### Get the command line arguments ###
try:
    arguments = getopt.getopt(sys.argv[1:], "hl:", ["help", "log="])
for option_set in arguments:
for opt, arg in option_set:
if opt in ('-l', '--log'): # Log level
arg = arg.upper() # Uppercase for matching and logging.basicConfig() format
if arg in ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"]:
log_level = arg # Use what was passed in arguments
elif opt in ('-h', '--help'):
with open("./help.txt") as help_file:
print help_file.read()
sys.exit()
else:
pass
except Exception:
sys.exit("Unsupported or badly formed options, see -h for available arguments.")
# Set the logging level per https://docs.python.org/2/howto/logging.html
try:
log_level # Check if log level was passed in from command arguments
except NameError:
log_level = "WARNING" # Use default logging level
logging.basicConfig(level = str(log_level)) # Set the logging level
logging.critical('Log level set to ' + str(log_level) + " - OK") # Show the logging level for debug
### DNS Lookups ###
#
# Reverse lookups
try:
if dns is False:
logging.warning("DNS reverse lookups disabled - DISABLED")
elif dns is True:
logging.warning("DNS reverse lookups enabled - OK")
else:
logging.warning("DNS enable option incorrectly set - DISABLING")
dns = False
except:
logging.warning("DNS enable option not set - DISABLING")
dns = False
# RFC-1918 reverse lookups
try:
if lookup_internal is False:
logging.warning("DNS local IP reverse lookups disabled - DISABLED")
elif lookup_internal is True:
logging.warning("DNS local IP reverse lookups enabled - OK")
else:
logging.warning("DNS local IP reverse lookups incorrectly set - DISABLING")
lookup_internal = False
except:
logging.warning("DNS local IP reverse lookups not set - DISABLING")
lookup_internal = False
# Set packet information variables
#
# Netflow v5 packet structure is STATIC - DO NOT MODIFY THESE VALUES
packet_header_size = 24
flow_record_size = 48
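# For reference (editorial annotation, standard NetFlow v5 layout):
#   header, 24 bytes: version(2) count(2) sys_uptime(4) unix_secs(4)
#                     unix_nsecs(4) flow_sequence(4) engine_type(1)
#                     engine_id(1) sampling_interval(2)
#   each flow record: 48 bytes
# The struct.unpack('!HHIIIIBB', ...) call further down reads only the first
# 22 header bytes and ignores the trailing 2-byte sampling-interval field.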
# Check if the Netflow v5 port is specified
try:
netflow_v5_port
except NameError: # Not specified, use default
netflow_v5_port = 2055
logging.warning("Netflow v5 port not set in netflow_options.py, defaulting to " +
str(netflow_v5_port) +
" - OK")
# Set up the socket listener
try:
netflow_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
netflow_sock.bind(('0.0.0.0', netflow_v5_port))
logging.critical("Bound to port " + str(netflow_v5_port) + " - OK")
except Exception as socket_error:
logging.critical("Could not open or bind a socket on port " + str(netflow_v9_port))
logging.critical(str(socket_error))
sys.exit()
# ElasticSearch class
es = Elasticsearch([elasticsearch_host])
# DNS lookup class
name_lookups = parser_modules.name_lookups()
# TCP / UDP identification class
tcp_udp = parser_modules.ports_and_protocols()
### Netflow v5 Collector ###
if __name__ == "__main__":
# Stage the flows for the bulk API index operation
flow_dic = []
# Number of cached records
record_num = 0
# Continually listen for inbound packets
while True:
flow_packet_contents, sensor_address = netflow_sock.recvfrom(65565)
# Unpack the header
try:
logging.info("Unpacking header from " + str(sensor_address[0]))
# Netflow v5 packet fields
packet_keys = [
"netflow_version",
"flow_count",
"sys_uptime",
"unix_secs",
"unix_nsecs",
"flow_seq",
"engine_type",
"engine_id"]
packet_values = struct.unpack('!HHIIIIBB', flow_packet_contents[0:22])
packet_contents = dict(zip(packet_keys, packet_values)) # v5 packet fields and values
logging.info(str(packet_contents))
logging.info("Finished unpacking header from " + str(sensor_address[0]))
# Failed to unpack the header
except Exception as flow_header_error:
logging.warning("Failed unpacking header from " +
str(sensor_address[0]) + " - " + str(flow_header_error))
continue
# Timestamp for flow received
now = datetime.datetime.utcnow()
# Check the Netflow version
if packet_contents["netflow_version"] != 5:
logging.warning("Received a non-v5 Netflow packet - SKIPPING")
continue
# Iterate over flows in packet
for flow_num in range(0, packet_contents["flow_count"]):
logging.info("Parsing flow " + str(flow_num+1))
# Calculate flow starting point
base = packet_header_size + (flow_num * flow_record_size)
# Index for upload
flow_index = {
"_index": str("flow-" + now.strftime("%Y-%m-%d")),
"_type": "Flow",
"_source": {
"Flow Type": "Netflow v5",
"IP Protocol Version": 4,
"Sensor": sensor_address[0],
"Time": now.strftime("%Y-%m-%dT%H:%M:%S") + ".%03d" % (now.microsecond / 1000) + "Z",
"Engine Type": packet_contents["engine_type"],
"Engine ID": packet_contents["engine_id"]
}
}
# Unpack flow data, populate flow_index["_source"]
(
ip_source,
ip_destination,
next_hop,
flow_index["_source"]["Input Interface"],
flow_index["_source"]["Output Interface"],
flow_index["_source"]["Packets In"],
flow_index["_source"]["Bytes In"],
flow_index["_source"]["System Uptime Start"],
flow_index["_source"]["System Uptime Stop"],
flow_index["_source"]["Source Port"],
flow_index["_source"]["Destination Port"],
pad,
flow_index["_source"]["TCP Flags"],
flow_index["_source"]["Protocol Number"],
flow_index["_source"]["Type of Service"],
flow_index["_source"]["Source AS"],
flow_index["_source"]["Destination AS"],
flow_index["_source"]["Source Mask"],
flow_index["_source"]["Destination Mask"]
) = struct.unpack('!4s4s4shhIIIIHHcBBBhhBB', flow_packet_contents[base+0:base+46])
# Final unpack, IP addresses via inet_ntoa()
flow_index["_source"]["IPv4 Source"] = inet_ntoa(ip_source)
flow_index["_source"]["IPv4 Destination"] = inet_ntoa(ip_destination)
flow_index["_source"]["IPv4 Next Hop"] = inet_ntoa(next_hop)
# Protocols
try:
# Protocol name
flow_index["_source"]["Protocol"] = protocol_type[flow_index["_source"]["Protocol Number"]]["Name"]
except Exception as protocol_error:
flow_protocol = "Other" # Should never see this unless undefined protocol in use
logging.warning("Unknown protocol number - " + str(flow_index["_source"]["Protocol Number"]) + ". Please report to the author for inclusion.")
logging.warning(str(protocol_error))
# If the protocol is TCP or UDP try to apply traffic labels
if flow_index["_source"]["Protocol Number"] in ([6, 17, 33, 132]):
traffic_and_category = tcp_udp.port_traffic_classifier(flow_index["_source"]["Source Port"], flow_index["_source"]["Destination Port"])
flow_index["_source"]["Traffic"] = traffic_and_category["Traffic"]
flow_index["_source"]["Traffic Category"] = traffic_and_category["Traffic Category"]
else:
# Protocol category
if "Category" in protocol_type[flow_index["_source"]["Protocol Number"]]:
flow_index["_source"]['Traffic Category'] = protocol_type[flow_index["_source"]["Protocol Number"]]["Category"]
else:
flow_index["_source"]['Traffic Category'] = "Uncategorized"
# Perform DNS lookups if enabled
if dns is True:
# Source DNS
source_lookups = name_lookups.ip_names(4, flow_index["_source"]["IPv4 Source"])
flow_index["_source"]["Source FQDN"] = source_lookups["FQDN"]
flow_index["_source"]["Source Domain"] = source_lookups["Domain"]
# Destination DNS
destination_lookups = name_lookups.ip_names(4, flow_index["_source"]["IPv4 Destination"])
flow_index["_source"]["Destination FQDN"] = destination_lookups["FQDN"]
flow_index["_source"]["Destination Domain"] = destination_lookups["Domain"]
# Content
src_dest_categories = [source_lookups["Content"], destination_lookups["Content"]]
try: # Prefer whichever domain Content value is not "Uncategorized"
unique_content = [category for category in src_dest_categories if category != "Uncategorized"]
flow_index["_source"]["Content"] = unique_content[0]
except IndexError: # Neither side had a categorized domain Content
flow_index["_source"]["Content"] = "Uncategorized"
logging.debug("Current flow data: " + str(flow_index))
logging.info("Finished flow " + str(flow_num+1) + " of " + str(packet_contents["flow_count"]))
# Add the parsed flow to flow_dic for bulk insert
flow_dic.append(flow_index)
# Increment the record counter
record_num += 1
# Elasticsearch bulk insert
if record_num >= bulk_insert_count:
try:
helpers.bulk(es, flow_dic)
logging.info(str(record_num)+" flow(s) uploaded to Elasticsearch - OK")
except ValueError as bulk_index_error:
logging.critical(str(record_num)+" flow(s) DROPPED, unable to index flows - FAIL")
logging.critical(str(bulk_index_error))
# Reset flow_dic
flow_dic = []
# Reset the record counter
record_num = 0
avg_line_length: 41.179487 | max_line_length: 158 | alphanum_fraction: 0.58913

hexsha: f7913cf4d4e61ab4cb4674ea62165f9d2320b83f | size: 4,193 | ext: py | lang: Python
max_stars: underground_box/tests.py | fga-eps-mds/2017.2-SiGI-Op_API | 4532019c15414fd17e06bb3aa78501886e00da1d | ["BSD-3-Clause"] | 6 | 2017-08-24T13:18:21.000Z | 2017-10-03T18:06:13.000Z
max_issues: underground_box/tests.py | fga-gpp-mds/2017.2-Grupo9 | 4532019c15414fd17e06bb3aa78501886e00da1d | ["BSD-3-Clause"] | 173 | 2017-08-31T15:29:01.000Z | 2017-12-14T13:40:13.000Z
max_forks: underground_box/tests.py | fga-gpp-mds/2017.2-SiGI-Op_API | 4532019c15414fd17e06bb3aa78501886e00da1d | ["BSD-3-Clause"] | 2 | 2018-11-19T10:33:00.000Z | 2019-06-19T22:35:43.000Z
from django.test import TestCase
from rest_framework.test import APIRequestFactory
from emendation_box.models import EmendationBox
from emendation_box.models import EmendationBoxType
from emendation_box.models import EmendationBoxStructure
from technical_reserve.models import TechnicalReserve
from .models import UndergroundBox
from .models import UndergroundBoxType
from .views import UndergroundBoxTypeViewSet
from .views import UndergroundBoxViewSet
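# Smoke tests for the UndergroundBox API: each test builds the minimum set of related objects
# (box type, emendation box, technical reserve), then exercises the retrieve actions through
# APIRequestFactory, expecting HTTP 200 for existing records and 404 once they have been deleted.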
class UndergroundBoxTest(TestCase):
def test_type_view_set(self):
request = APIRequestFactory().get("")
undergroundbox_type_detail = UndergroundBoxTypeViewSet.as_view(actions={'get': 'retrieve'})
undergroundbox_type = UndergroundBoxType.objects.create(name="Random undergroundBox")
response = undergroundbox_type_detail(request, pk=undergroundbox_type.pk)
self.assertEqual(response.status_code, 200)
def test_undergroundbox_view_set(self):
request = APIRequestFactory().get("")
undergroundbox_detail = UndergroundBoxViewSet.as_view(actions={'get':'retrieve'})
undergroundbox_type = UndergroundBoxType.objects.create(name="Random undergroundBox")
emendationtype = EmendationBoxType.objects.create(description="IHATEIT")
structure = EmendationBoxStructure.objects.create(description="SERIOUSLY")
emendationbox = EmendationBox.objects.create(
lattitude=42.42,
longitude=42.42,
designNumber=42,
access_box=True,
creation_date="2017-01-01",
extinction_date="2018-01-01",
emendation_type=emendationtype,
emendation_structure=structure,
)
reserve = TechnicalReserve.objects.create(
code=666,
length=8008.5,
lattitude=8001,
longitude=9001
)
undergroundbox = UndergroundBox.objects.create(
box_type=undergroundbox_type,
code=123,
lattitude=123,
cover_type="Cover type",
longitude=321,
emendation_box=emendationbox,
technical_reserve=reserve
)
response = undergroundbox_detail(request, pk=undergroundbox.pk)
self.assertEqual(response.status_code, 200)
def test_wrong_type_view_set(self):
request = APIRequestFactory().get("")
undergroundbox_type_detail = UndergroundBoxTypeViewSet.as_view(actions={'get': 'retrieve'})
undergroundbox_type = UndergroundBoxType.objects.create(name="Fundergroundbox")
primary_key = undergroundbox_type.pk
undergroundbox_type.delete()
response = undergroundbox_type_detail(request, pk=primary_key)
self.assertEqual(response.status_code, 404)
def test_wrong_view_set(self):
request = APIRequestFactory().get("")
undergroundbox_detail = UndergroundBoxViewSet.as_view(actions={'get':'retrieve'})
undergroundbox_type = UndergroundBoxType.objects.create(name="Random undergroundBox")
emendationtype = EmendationBoxType.objects.create(description="IHATEIT")
structure = EmendationBoxStructure.objects.create(description="TOOMUCH")
emendationbox = EmendationBox.objects.create(
lattitude=42.42,
longitude=42.42,
designNumber=42,
emendation_type=emendationtype,
access_box=True,
emendation_structure=structure,
creation_date="2017-01-01",
extinction_date="2018-01-01",
)
reserve = TechnicalReserve.objects.create(
code=666,
length=8008.5,
lattitude=8001,
longitude=9001
)
undergroundbox = UndergroundBox.objects.create(
box_type=undergroundbox_type,
code=123,
lattitude=123,
cover_type="Cover type",
longitude=321,
emendation_box=emendationbox,
technical_reserve=reserve
)
primary_key = undergroundbox.pk
undergroundbox.delete()
response = undergroundbox_detail(request, pk=primary_key)
self.assertEqual(response.status_code, 404)
avg_line_length: 42.353535 | max_line_length: 99 | alphanum_fraction: 0.679227

hexsha: cd3bd96d6a315026d80b4e09d946ba0179403fe0 | size: 4,096 | ext: py | lang: Python
max_stars: app/camera.py | 1297rohit/PigCount | 449a75e24b2f34c856e713cea5e804e858383d71 | ["MIT"] | null | null | null
max_issues: app/camera.py | 1297rohit/PigCount | 449a75e24b2f34c856e713cea5e804e858383d71 | ["MIT"] | null | null | null
max_forks: app/camera.py | 1297rohit/PigCount | 449a75e24b2f34c856e713cea5e804e858383d71 | ["MIT"] | null | null | null
from picamera.array import PiRGBArray # Generates a 3D RGB array
from picamera import PiCamera # Provides a Python interface for the RPi Camera Module
import time # Provides time-related functions
import cv2
import tensorflow as tf
import numpy as np
from azure.iot.device import IoTHubDeviceClient, Message
#azure connection string
CONNECTION_STRING = "HostName=HUBDEVKIT.azure-devices.net;DeviceId=piazuretest;SharedAccessKey=L000kRCO4zda4rzBp6WMJGAn4Wbpanscdj2jChib0WQ="
def iothub_client_init():
# Create an IoT Hub client
client = IoTHubDeviceClient.create_from_connection_string(CONNECTION_STRING)
return client
MSG_TXT = '{{"pigcount": {count}}}'
#model files
model_filename = 'model.pb'
LABELS_FILENAME = 'labels.txt'
labels = None
INPUT_TENSOR_NAME = 'image_tensor:0'
OUTPUT_TENSOR_NAMES = ['detected_boxes:0', 'detected_scores:0', 'detected_classes:0']
graph_def = tf.compat.v1.GraphDef()
with open(model_filename, 'rb') as f:
graph_def.ParseFromString(f.read())
graph = tf.Graph()
with graph.as_default():
tf.import_graph_def(graph_def, name='')
# Get input shape
with tf.compat.v1.Session(graph=graph) as sess:
input_shape = sess.graph.get_tensor_by_name(INPUT_TENSOR_NAME).shape.as_list()[1:3]
with open(LABELS_FILENAME) as f:
labels = [l.strip() for l in f.readlines()]
def predict_image(image):
inputs = np.array(image, dtype=np.float32)[np.newaxis, :, :, :]
with tf.compat.v1.Session(graph=graph) as sess:
output_tensors = [sess.graph.get_tensor_by_name(n) for n in OUTPUT_TENSOR_NAMES]
outputs = sess.run(output_tensors, {INPUT_TENSOR_NAME: inputs})
return outputs
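# predict_image runs one forward pass of the exported detection graph; the three outputs are
# detected_boxes, detected_scores and detected_classes. The boxes appear to be normalised
# [left, top, right, bottom] coordinates, given how get_frame() below scales them by frame width/height.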
class VideoCamera(object):
def __init__(self):
self.camera = PiCamera()
self.camera.resolution = (640, 480)
self.camera.framerate = 32
self.raw_capture = PiRGBArray(self.camera, size=(640, 480))
self.client = iothub_client_init()
time.sleep(0.1)
def __del__(self):
self.raw_capture.truncate(0)
def get_frame(self,min_conf_threshold):
for image in self.camera.capture_continuous(self.raw_capture, format="bgr", use_video_port=True):
frame = image.array
height,width,_ = frame.shape
screen = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
resized = cv2.resize(screen, (tuple(input_shape)), interpolation = cv2.INTER_AREA)
pred_out = predict_image(resized)
predictions = [{'probability': round(float(p[1]), 8),
'tagId': int(p[2]),
'tagName': labels[p[2]],
'boundingBox': {
'left': round(float(p[0][0]), 8),
'top': round(float(p[0][1]), 8),
'width': round(float(p[0][2] - p[0][0]), 8),
'height': round(float(p[0][3] - p[0][1]), 8)
}
} for p in zip(*pred_out)]
count = 0
for i in predictions:
if i["probability"] > min_conf_threshold and i["tagName"] == "pig":
count +=1
x = int(i["boundingBox"]["left"]*width)
y = int(i["boundingBox"]["top"]*height)
w = int(i["boundingBox"]["width"]*width)
h = int(i["boundingBox"]["height"]*height)
# print(x,y,w,h)
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,128), 5)
cv2.putText(frame,str(count),(30,50),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,0),2,cv2.LINE_AA)
msg_txt_formatted = MSG_TXT.format(count=count)
message = Message(msg_txt_formatted)
print( "Sending message: {}".format(message) )
self.client.send_message(message)
print ( "Message successfully sent" )
time.sleep(5)
ret, jpeg = cv2.imencode('.jpg', frame)
self.raw_capture.truncate(0)
return jpeg.tobytes()
avg_line_length: 40.554455 | max_line_length: 140 | alphanum_fraction: 0.598877

hexsha: 0e8817582daacc30a7e5c3b1c68b11a27b807188 | size: 11,890 | ext: py | lang: Python
max_stars: rasa_core/training/dsl.py | ymihay/dialogue_flow | ccb7ba89d1832c23c2bd1c8b7ff2e30f5019c874 | ["Apache-2.0"] | null | null | null
max_issues: rasa_core/training/dsl.py | ymihay/dialogue_flow | ccb7ba89d1832c23c2bd1c8b7ff2e30f5019c874 | ["Apache-2.0"] | null | null | null
max_forks: rasa_core/training/dsl.py | ymihay/dialogue_flow | ccb7ba89d1832c23c2bd1c8b7ff2e30f5019c874 | ["Apache-2.0"] | null | null | null
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import json
import logging
import os
import re
import warnings
from typing import Optional, List, Text, Any, Dict
from rasa_core import utils
from rasa_core.events import (
ActionExecuted, UserUttered, Event)
from rasa_core.interpreter import RegexInterpreter
from rasa_core.training.structures import (
Checkpoint, STORY_END, STORY_START, StoryStep)
logger = logging.getLogger(__name__)
class StoryParseError(Exception):
"""Raised if there is an error while parsing the story file."""
def __init__(self, message):
self.message = message
class StoryStepBuilder(object):
def __init__(self, name):
self.name = name
self.story_steps = []
self.current_steps = []
self.start_checkpoints = []
def add_checkpoint(self, name, conditions):
# Depending on the state of the story part this
# is either a start or an end check point
if not self.current_steps:
self.start_checkpoints.append(Checkpoint(name, conditions))
else:
if conditions:
logger.warn("End or intermediate checkpoints "
"do not support conditions! "
"(checkpoint: {})".format(name))
additional_steps = []
for t in self.current_steps:
if t.end_checkpoints:
tcp = t.create_copy(use_new_id=True)
tcp.end_checkpoints = [Checkpoint(name)]
additional_steps.append(tcp)
else:
t.end_checkpoints = [Checkpoint(name)]
self.current_steps.extend(additional_steps)
def _prev_end_checkpoints(self):
if not self.current_steps:
return self.start_checkpoints
else:
# makes sure we got each end name only once
end_names = {e.name
for s in self.current_steps
for e in s.end_checkpoints}
return [Checkpoint(name) for name in end_names]
def add_user_messages(self, messages):
self.ensure_current_steps()
if len(messages) == 1:
# If there is only one possible intent, we'll keep things simple
for t in self.current_steps:
t.add_user_message(messages[0])
else:
# If there are multiple different intents the
# user can use to express the same thing
# we need to copy the blocks and create one
# copy for each possible message
generated_checkpoint = utils.generate_id("GENERATED_M_")
updated_steps = []
for t in self.current_steps:
for m in messages:
copied = t.create_copy(use_new_id=True)
copied.add_user_message(m)
copied.end_checkpoints = [Checkpoint(generated_checkpoint)]
updated_steps.append(copied)
self.current_steps = updated_steps
def add_event(self, event):
self.ensure_current_steps()
for t in self.current_steps:
t.add_event(event)
def ensure_current_steps(self):
completed = [step
for step in self.current_steps
if step.end_checkpoints]
unfinished = [step
for step in self.current_steps
if not step.end_checkpoints]
self.story_steps.extend(completed)
if unfinished:
self.current_steps = unfinished
else:
self.current_steps = self._next_story_steps()
def flush(self):
if self.current_steps:
self.story_steps.extend(self.current_steps)
self.current_steps = []
def _next_story_steps(self):
start_checkpoints = self._prev_end_checkpoints()
if not start_checkpoints:
start_checkpoints = [Checkpoint(STORY_START)]
current_turns = [StoryStep(block_name=self.name,
start_checkpoints=start_checkpoints)]
return current_turns
class StoryFileReader(object):
"""Helper class to read a story file."""
def __init__(self, domain, interpreter, template_vars=None):
self.story_steps = []
self.current_step_builder = None # type: Optional[StoryStepBuilder]
self.domain = domain
self.interpreter = interpreter
self.template_variables = template_vars if template_vars else {}
@staticmethod
def read_from_file(filename, domain, interpreter=RegexInterpreter(),
template_variables=None):
"""Given a json file reads the contained stories."""
try:
with io.open(filename, "r") as f:
lines = f.readlines()
reader = StoryFileReader(domain, interpreter, template_variables)
return reader.process_lines(lines)
except Exception:
logger.exception("Failed to parse '{}'".format(
os.path.abspath(filename)))
raise ValueError("Invalid story file format.")
@staticmethod
def _parameters_from_json_string(s, line):
# type: (Text, Text) -> Dict[Text, Any]
"""Parse the passed string as json and create a parameter dict."""
if s is None or not s.strip():
# if there is no string there are not going to be any parameters
return {}
try:
parsed_slots = json.loads(s)
if isinstance(parsed_slots, dict):
return parsed_slots
else:
raise Exception("Parsed value isn't a json object "
"(instead parser found '{}')"
".".format(type(parsed_slots)))
except Exception as e:
raise ValueError("Invalid to parse arguments in line "
"'{}'. Failed to decode parameters"
"as a json object. Make sure the event"
"name is followed by a proper json "
"object. Error: {}".format(line, e))
@staticmethod
def _parse_event_line(line):
"""Tries to parse a single line as an event with arguments."""
# the regex matches "slot{"a": 1}"
m = re.search('^([^{]+)([{].+)?', line)
if m is not None:
event_name = m.group(1).strip()
slots_str = m.group(2)
parameters = StoryFileReader._parameters_from_json_string(slots_str,
line)
return event_name, parameters
else:
warnings.warn("Failed to parse action line '{}'. "
"Ignoring this line.".format(line))
return "", {}
def process_lines(self, lines):
# type: (List[Text]) -> List[StoryStep]
for idx, line in enumerate(lines):
line_num = idx + 1
try:
line = self._replace_template_variables(
self._clean_up_line(line))
if line.strip() == "":
continue
elif line.startswith("#"): # reached a new story block
name = line[1:].strip("# ")
self.new_story_part(name)
elif line.startswith(">"): # reached a checkpoint
name, conditions = self._parse_event_line(line[1:].strip())
self.add_checkpoint(name, conditions)
elif line.startswith(
"-"): # reached a slot, event, or executed action
event_name, parameters = self._parse_event_line(line[1:])
self.add_event(event_name, parameters)
elif line.startswith("*"): # reached a user message
user_messages = [el.strip() for el in
line[1:].split(" OR ")]
self.add_user_messages(user_messages, line_num)
else: # reached an unknown type of line
logger.warn("Skipping line {}. No valid command found. "
"Line Content: '{}'".format(line_num, line))
except Exception as e:
msg = "Error in line {}: {}".format(line_num, e.message)
logger.error(msg, exc_info=1)
raise Exception(msg)
self._add_current_stories_to_result()
return self.story_steps
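# The prefixes handled above correspond to the story markdown format, e.g.:
#   ## greet path          <- "#" starts a new story block
#   * greet                <- "*" is a user message / intent
#     - utter_greet        <- "-" is an action, slot or event
#   > goodbye_checkpoint   <- ">" is a checkpoint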
def _replace_template_variables(self, line):
def process_match(matchobject):
varname = matchobject.group(1)
if varname in self.template_variables:
return self.template_variables[varname]
else:
raise ValueError("Unknown variable `{var}` "
"in template line '{line}'".format(var=varname,
line=line))
template_rx = re.compile(r"`([^`]+)`")
return template_rx.sub(process_match, line)
@staticmethod
def _clean_up_line(line):
# type: (Text) -> Text
"""Removes comments and trailing spaces"""
return re.sub(r'<!--.*?-->', '', line).strip()
def _add_current_stories_to_result(self):
if self.current_step_builder:
self.current_step_builder.flush()
self.story_steps.extend(self.current_step_builder.story_steps)
def new_story_part(self, name):
self._add_current_stories_to_result()
self.current_step_builder = StoryStepBuilder(name)
def add_checkpoint(self, name, conditions):
# type: (Text, Dict[Text, Any]) -> None
# Ensure story part already has a name
if not self.current_step_builder:
raise StoryParseError("Checkpoint '{}' is at an invalid location. "
"Expected a story start.".format(name))
self.current_step_builder.add_checkpoint(name, conditions)
def add_user_messages(self, messages, line_num):
if not self.current_step_builder:
raise StoryParseError("User message '{}' at invalid location. "
"Expected story start.".format(messages))
parsed_messages = []
for m in messages:
parse_data = self.interpreter.parse(m)
utterance = UserUttered.from_parse_data(m, parse_data)
if m.startswith("_"):
c = utterance.as_story_string()
logger.warn("Stating user intents with a leading '_' is "
"deprecated. The new format is "
"'* {}'. Please update "
"your example '{}' to the new format.".format(c, m))
intent_name = utterance.intent.get("name")
if intent_name not in self.domain.intents:
logger.warn("Found unknown intent '{}' on line {}. Please, "
"make sure that all intents are listed in your "
"domain yaml.".format(intent_name, line_num))
parsed_messages.append(utterance)
self.current_step_builder.add_user_messages(parsed_messages)
def add_event(self, event_name, parameters):
parsed = Event.from_story_string(event_name, parameters, self.domain,
default=ActionExecuted)
if parsed is None:
raise StoryParseError("Unknown event '{}'. It is Neither an event "
"nor an action).".format(event_name))
self.current_step_builder.add_event(parsed)
avg_line_length: 40.719178 | max_line_length: 80 | alphanum_fraction: 0.566442

hexsha: 140a876ca65171f20b607169e07f0f7d440e20e8 | size: 135 | ext: py | lang: Python
max_stars: harnessed_jobs/ptc_BOT/v0/validator_ptc_BOT.py | duncanwood/EO-analysis-jobs | 26d22e49c0d2e32fbf2759f504048754f66ecc45 | ["BSD-3-Clause-LBNL"] | 2 | 2018-07-26T09:32:46.000Z | 2019-05-28T20:57:43.000Z
max_issues: harnessed_jobs/ptc_BOT/v0/validator_ptc_BOT.py | duncanwood/EO-analysis-jobs | 26d22e49c0d2e32fbf2759f504048754f66ecc45 | ["BSD-3-Clause-LBNL"] | 3 | 2018-03-18T21:55:07.000Z | 2019-04-18T18:26:06.000Z
max_forks: harnessed_jobs/ptc_BOT/v0/validator_ptc_BOT.py | duncanwood/EO-analysis-jobs | 26d22e49c0d2e32fbf2759f504048754f66ecc45 | ["BSD-3-Clause-LBNL"] | 2 | 2020-11-12T19:47:42.000Z | 2022-02-25T21:43:03.000Z
#!/usr/bin/env ipython
"""
Validator script for BOT PTC analysis.
"""
from bot_eo_validators import run_validator
run_validator('ptc')
avg_line_length: 19.285714 | max_line_length: 43 | alphanum_fraction: 0.77037

hexsha: 133e9743309f25a4795ef62e9e686c3567ebd693 | size: 111 | ext: py | lang: Python
max_stars: timSoft/imanager/processing.py | apetcho/timSoft | 90f0084969bb9d4e2d8691583d2fdab4cfd1bb2c | ["BSD-3-Clause"] | null | null | null
max_issues: timSoft/imanager/processing.py | apetcho/timSoft | 90f0084969bb9d4e2d8691583d2fdab4cfd1bb2c | ["BSD-3-Clause"] | null | null | null
max_forks: timSoft/imanager/processing.py | apetcho/timSoft | 90f0084969bb9d4e2d8691583d2fdab4cfd1bb2c | ["BSD-3-Clause"] | null | null | null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 16, 2020 12:00:23 PM
@author: eapetcho
"""
avg_line_length: 18.5 | max_line_length: 39 | alphanum_fraction: 0.612613

hexsha: 36736c564815bea8bff4b54b43aa9b5b3c45403b | size: 6,681 | ext: py | lang: Python
max_stars: reachy_sdk/arm.py | Pandinosaurus/reachy-sdk | c155bc8f56488de305e6fe5bacdb77aad0383295 | ["Apache-2.0"] | 21 | 2021-04-20T15:37:32.000Z | 2022-01-06T15:23:11.000Z
max_issues: reachy_sdk/arm.py | Pandinosaurus/reachy-sdk | c155bc8f56488de305e6fe5bacdb77aad0383295 | ["Apache-2.0"] | 13 | 2021-03-24T13:29:08.000Z | 2022-02-04T11:54:40.000Z
max_forks: reachy_sdk/arm.py | Pandinosaurus/reachy-sdk | c155bc8f56488de305e6fe5bacdb77aad0383295 | ["Apache-2.0"] | 4 | 2021-06-23T07:44:55.000Z | 2021-12-07T16:15:05.000Z
"""Reachy Arm module.
Handles all specific method to an Arm (left and/or right) especially:
- the forward kinematics
- the inverse kinematics
"""
from abc import ABC
from typing import List, Optional, Set
import numpy as np
from reachy_sdk_api.arm_kinematics_pb2_grpc import ArmKinematicsStub
from reachy_sdk_api.arm_kinematics_pb2 import ArmEndEffector, ArmFKRequest, ArmIKRequest, ArmJointPosition, ArmSide
from reachy_sdk_api.kinematics_pb2 import joint__pb2
from reachy_sdk_api.arm_kinematics_pb2 import kinematics__pb2
from .device_holder import DeviceHolder
from .joint import Joint
# Circumvent https://github.com/grpc/grpc/issues/18139
JointId = joint__pb2.JointId
JointPosition = kinematics__pb2.JointPosition
Matrix4x4 = kinematics__pb2.Matrix4x4
class Arm(ABC):
"""Arm abstract class used for both left/right arms.
It exposes the kinematics of the arm:
- you can access the joints actually used in the kinematic chain,
- you can compute the forward and inverse kinematics
"""
def __init__(self, joints: List[Joint], grpc_channel) -> None:
"""Set up the arm with its kinematics."""
self._kin_stub = ArmKinematicsStub(grpc_channel)
found_joints = [j for j in joints if j.name in self._required_joints]
if len(found_joints) != len(self._required_joints):
raise ValueError(f'Required joints not found {self._required_joints}')
self.joints = DeviceHolder(found_joints)
self._setup_joints(found_joints)
self.kinematics_chain = DeviceHolder([j for j in found_joints if j.name in self._kinematics_chain])
def __repr__(self) -> str:
"""Clean representation of an arm state."""
return f'<Arm side="{self._side}" joints={self.joints}>'
@property
def _side(self) -> str:
...
def _setup_joints(self, joints: List[Joint]) -> None:
for j in joints:
if j.name in self._required_joints:
setattr(self, j.name, j)
def forward_kinematics(self, joints_position: Optional[List[float]] = None) -> np.ndarray:
"""Compute the forward kinematics of the arm.
It will return the pose as a 4x4 matrix (as a numpy array) expressed in the Reachy coordinate system.
You can either specify a joints position to use or, if omitted, the current robot position is used.
"""
if joints_position is None:
joints_position = [j.present_position for j in self.kinematics_chain.values()]
if isinstance(joints_position, np.ndarray) and len(joints_position.shape) > 1:
raise ValueError('Vectorized kinematics not supported!')
pos = np.deg2rad(list(joints_position))
if len(pos) != len(self._kinematics_chain):
raise ValueError(
f'joints_position should be length {len(self._kinematics_chain)} (got {len(pos)} instead)!'
)
req = ArmFKRequest(
arm_position=ArmJointPosition(
side=self._arm_side,
positions=self._joint_position_from_pos(pos),
),
)
resp = self._kin_stub.ComputeArmFK(req)
if not resp.success:
raise ValueError(f'No solution found for the given joints ({joints_position})!')
return np.array(resp.end_effector.pose.data).reshape((4, 4))
def inverse_kinematics(self, target: np.ndarray, q0: Optional[List[float]] = None) -> List[float]:
"""Compute the inverse kinematics of the arm.
Given a 4x4 target pose matrix (as a numpy array) expressed in the Reachy coordinate system,
it will try to compute a joint solution to reach this target (or get close).
It will raise a ValueError if no solution is found.
You can also specify a basic joint configuration as a prior for the solution.
"""
if target.shape != (4, 4):
raise ValueError(f'target shape should be (4, 4) (got {target.shape} instead)!')
if q0 is not None and (len(q0) != len(self._kinematics_chain)):
raise ValueError(f'q0 should be length {len(self._kinematics_chain)} (got {len(q0)} instead)!')
if isinstance(q0, np.ndarray) and len(q0.shape) > 1:
raise ValueError('Vectorized kinematics not supported!')
req_params = {
'target': ArmEndEffector(
side=self._arm_side,
pose=Matrix4x4(data=target.flatten().tolist()),
)
}
if q0 is not None:
req_params['q0'] = self._joint_position_from_pos(np.deg2rad(q0))
req = ArmIKRequest(**req_params)
resp = self._kin_stub.ComputeArmIK(req)
if not resp.success:
raise ValueError(f'No solution found for the given target ({target})!')
return np.rad2deg(resp.arm_position.positions.positions).tolist()
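# Usage sketch (assuming a connected ReachySDK instance that exposes this arm, e.g. `reachy.r_arm`):
#   pose = reachy.r_arm.forward_kinematics()                     # 4x4 end-effector pose
#   joints = reachy.r_arm.inverse_kinematics(pose, q0=[0] * 7)   # joint angles in degrees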
@property
def _kinematics_chain(self) -> List[str]:
...
@property
def _required_joints(self) -> Set[str]:
...
@property
def _arm_side(self):
return ArmSide.LEFT if self._side == 'left' else ArmSide.RIGHT
def _joint_position_from_pos(self, joints_position: List[float]) -> ArmJointPosition:
return JointPosition(
ids=[JointId(uid=j.uid) for j in self.kinematics_chain.values()],
positions=joints_position,
)
class LeftArm(Arm):
"""LeftArm class, all the work is actually done by the ABC Arm class.
It exposes the kinematics of the arm:
- you can access the joints actually used in the kinematic chain,
- you can compute the forward and inverse kinematics
"""
_side = 'left'
_kinematics_chain = (
'l_shoulder_pitch', 'l_shoulder_roll', 'l_arm_yaw',
'l_elbow_pitch', 'l_forearm_yaw',
'l_wrist_pitch', 'l_wrist_roll',
)
_required_joints = {
'l_shoulder_pitch', 'l_shoulder_roll', 'l_arm_yaw',
'l_elbow_pitch', 'l_forearm_yaw',
'l_wrist_pitch', 'l_wrist_roll',
'l_gripper',
}
class RightArm(Arm):
"""RightArm class, all the work is actually done by the ABC Arm class.
It exposes the kinematics of the arm:
- you can access the joints actually used in the kinematic chain,
- you can compute the forward and inverse kinematics
"""
_side = 'right'
_kinematics_chain = (
'r_shoulder_pitch', 'r_shoulder_roll', 'r_arm_yaw',
'r_elbow_pitch', 'r_forearm_yaw',
'r_wrist_pitch', 'r_wrist_roll',
)
_required_joints = {
'r_shoulder_pitch', 'r_shoulder_roll', 'r_arm_yaw',
'r_elbow_pitch', 'r_forearm_yaw',
'r_wrist_pitch', 'r_wrist_roll',
'r_gripper',
}
avg_line_length: 35.163158 | max_line_length: 115 | alphanum_fraction: 0.659332

hexsha: 7e325eeb655e505534f1c68a10cfae153b37b697 | size: 2,968 | ext: py | lang: Python
max_stars: MyappV2/credential.py | Khairiazim/Raspinstabot | 4973f9dbea68a669b6c3a86271bd9f017dd9b6e7 | ["Apache-2.0"] | null | null | null
max_issues: MyappV2/credential.py | Khairiazim/Raspinstabot | 4973f9dbea68a669b6c3a86271bd9f017dd9b6e7 | ["Apache-2.0"] | null | null | null
max_forks: MyappV2/credential.py | Khairiazim/Raspinstabot | 4973f9dbea68a669b6c3a86271bd9f017dd9b6e7 | ["Apache-2.0"] | null | null | null
# user enter credential
# check credential
# if true open mainwindow and store pc id
# close license
import sys
import webbrowser
from PyQt5.QtCore import QSharedMemory
from PyQt5 import QtCore, QtGui, QtWidgets, uic
# UI.py 1) import ui
# from ui import
# UI FORMAT
class License_class(QtWidgets.QWidget):
def __init__(self):
QtWidgets.QWidget.__init__(self)
uic.loadUi("ui/License.ui", self)
# UI.py FORMAT 2) inherit ui class at first paramiter
# class Software_class(UI_software.Ui_Dialog,QtWidgets.QDialog):
# def __init__(self):
# #UI.py 3) make a super
# super(UI_software.Ui_Dialog,self).__init__()
# self.setupUi(self)
# QtCore.QCoreApplication.processEvents()
self.pb_apply.clicked.connect(self.check_credential)
def check_credential(self):
email = str(self.le_username.text()).strip()
password = str(self.le_password.text()).strip()
# if email and password == true: todo
# open mainwindow
# else:
# reply = QtWidgets.QMessageBox.information(self, 'Dear User', "You are not register,\n"
# "would you like to register",
# QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
# QtWidgets.QMessageBox.Yes)
# if reply == QtWidgets.QMessageBox.Yes:
# # IF "YES" DOWNLOAD AND EXECUTE FILE WITH PROGRESSBAR/ IMPORT PROGRESS.PY
# webbrowser.open('https://vetogram.com/register', new=2)
# else:
# # IF "NO" CLOSE MESSAGEBOX
# pass
print("email:", email, "password:", password)
def message(self):
QtWidgets.QMessageBox.information(self, "info", "Vetogram application are currently running")
class MemoryCondition:
def __init__(self, key='memory_condition_key'):
self._shm = QSharedMemory(key)
if not self._shm.attach():
if not self._shm.create(1):
raise RuntimeError('error creating shared memory: %s' %
self._shm.errorString())
self.condition = False
def __enter__(self):
self._shm.lock()
if self._shm.data()[0] == b'\x00':
self.condition = True
self._shm.data()[0] = b'\x01'
self._shm.unlock()
return self.condition
def __exit__(self, exc_type, exc_value, traceback):
if self.condition:
self._shm.lock()
self._shm.data()[0] = b'\x00'
self._shm.unlock()
if __name__ == "__main__":
with MemoryCondition() as condition:
app = QtWidgets.QApplication(sys.argv)
License = License_class()
if condition:
License.show()
sys.exit(app.exec())
else:
License.message()
avg_line_length: 32.26087 | max_line_length: 109 | alphanum_fraction: 0.574124

hexsha: 41441423b1340898c062e3dd2e63ea64a5b76a5a | size: 7,837 | ext: py | lang: Python
max_stars: examples/imdb_dropout.py | albietz/stochs | 3c3c4860c91ab59b2d8d5762583c722f73ab8608 | ["MIT"] | 27 | 2017-03-22T15:21:59.000Z | 2020-10-26T22:11:00.000Z
max_issues: examples/imdb_dropout.py | albietz/stochs | 3c3c4860c91ab59b2d8d5762583c722f73ab8608 | ["MIT"] | null | null | null
max_forks: examples/imdb_dropout.py | albietz/stochs | 3c3c4860c91ab59b2d8d5762583c722f73ab8608 | ["MIT"] | 6 | 2017-03-26T23:56:19.000Z | 2018-09-04T03:12:49.000Z
import argparse
import logging
import os
import sys
import time
import numpy as np
import scipy.sparse as sp
import stochs
from concurrent.futures import ThreadPoolExecutor
logging.basicConfig(level=logging.INFO)
params = {
'lmbda': [1e-2],
'delta': [0, 0.01, 0.3],
'algos': [
{'name': 'miso_nonu', 'lr': 1.0},
# {'name': 'miso', 'lr': 10.0},
# {'name': 'sgd_nonu', 'lr': 1.0},
# {'name': 'sgd', 'lr': 10.0},
# {'name': 'saga', 'lr': 10.0},
],
}
start_decay = 2
num_epochs = 301
loss = b'squared_hinge'
eval_delta = 10
eval_mc_samples = 0
seed = None
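# The configuration above defines the experiment grid: squared-hinge models are trained on IMDB
# bag-of-words features with dropout-style feature noise (rate `delta`), comparing uniform and
# non-uniform ("_nonu") sampling variants of MISO/SGD/SAGA; start_decay is the epoch at which
# step-size decay begins for the dropout runs.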
def load_imdb(folder):
def process_imdb(X, y):
return X.astype(stochs.dtype), (y > 5).astype(stochs.dtype)
from sklearn.datasets import load_svmlight_files
Xtrain, ytrain, Xtest, ytest = load_svmlight_files(
(os.path.join(folder, 'train/labeledBow.feat'),
os.path.join(folder, 'test/labeledBow.feat')))
Xtrain, ytrain = process_imdb(Xtrain, ytrain)
Xtest, ytest = process_imdb(Xtest, ytest)
return Xtrain, ytrain, Xtest, ytest
def training(lmbda, dropout_rate, solver, q=None):
ep = []
loss_train = []
loss_test = []
acc_train = []
acc_test = []
random = np.random.RandomState(seed=seed or 43)
# prepare evaluation set
if dropout_rate > 0 and eval_mc_samples > 0:
Xtrain_eval = sp.vstack((Xtrain for _ in range(eval_mc_samples)))
Xtrain_eval.sort_indices()
Xtrain_eval.data *= random.binomial(1, 1 - dropout_rate, size=Xtrain_eval.nnz) / (1 - dropout_rate)
ytrain_eval = np.hstack((ytrain for _ in range(eval_mc_samples)))
else:
Xtrain_eval = Xtrain
ytrain_eval = ytrain
t_start = time.time()
for epoch in range(num_epochs):
if epoch % eval_delta == 0:
ep.append(epoch)
loss_train.append(solver.compute_loss(Xtrain_eval, ytrain_eval) + 0.5 * lmbda * solver.compute_squared_norm())
loss_test.append(solver.compute_loss(Xtest, ytest))
acc_train.append((((2*ytrain - 1) * Xtrain.dot(solver.w)) >= 0).mean())
acc_test.append((((2*ytest - 1) * Xtest.dot(solver.w)) >= 0).mean())
if epoch == start_decay and dropout_rate > 0:
solver.start_decay()
if dropout_rate > 0:
idxs = random.choice(n, n, p=q)
Xtt = Xtrain[idxs]
Xtt.sort_indices()
Xtt.data *= random.binomial(1, 1 - dropout_rate, size=Xtt.nnz) / (1 - dropout_rate)
solver.iterate(Xtt, ytrain[idxs], idxs)
else:
idxs = random.choice(n, n, p=q)
solver.iterate_indexed(Xtrain, ytrain, idxs)
logging.info('elapsed: %f', time.time() - t_start)
# print('lmbda', lmbda, 'delta', dropout_rate,
# '=> train loss:', loss_train[-1], 'test loss:', loss_test[-1],
# 'train acc:', acc_train[-1], 'test acc:', acc_test[-1])
return {
'epochs': ep,
'loss_train': loss_train,
'loss_test': loss_test,
'acc_train': acc_train,
'acc_test': acc_test,
}
def train_sgd(lmbda, dropout_rate, lr):
solver = stochs.SparseSGD(d, lr=lr * (1 - dropout_rate)**2 / Lmax, lmbda=lmbda, loss=loss)
return training(lmbda, dropout_rate, solver)
def train_sgd_nonu(lmbda, dropout_rate, lr):
solver = stochs.SparseSGD(d, lr=lr * (1 - dropout_rate)**2 / Lavg, lmbda=lmbda, loss=loss)
q = np.asarray(Xtrain.power(2).sum(1)).flatten()
q += q.mean()
q /= q.sum()
solver.set_q(q)
return training(lmbda, dropout_rate, solver, q=q)
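# For the *_nonu variants the sampling distribution is q_i proportional to ||x_i||^2 + mean_j ||x_j||^2,
# i.e. examples are drawn non-uniformly in proportion to their smoothness constant plus a uniform
# smoothing term, and the solver is told about q via set_q().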
def train_miso(lmbda, dropout_rate, lr):
solver = stochs.SparseMISO(d, n, lmbda=lmbda, loss=loss)
solver.init(Xtrain)
solver.decay(lr * min(1, n * lmbda * (1 - dropout_rate)**2 / Lmax))
return training(lmbda, dropout_rate, solver)
def train_miso_nonu(lmbda, dropout_rate, lr):
alpha = lr * min(1, n * lmbda * (1 - dropout_rate)**2 / Lavg)
solver = stochs.SparseMISO(d, n, alpha=alpha, lmbda=lmbda, loss=loss)
solver.init(Xtrain)
q = np.asarray(Xtrain.power(2).sum(1)).flatten()
q += q.mean()
q /= q.sum()
solver.set_q(q)
return training(lmbda, dropout_rate, solver, q=q)
def train_saga(lmbda, dropout_rate, lr):
solver = stochs.SparseSAGA(d, n, lr=lr * (1 - dropout_rate)**2 / Lmax, lmbda=lmbda, loss=loss)
solver.init(Xtrain)
return training(lmbda, dropout_rate, solver)
train_fn = {'sgd': train_sgd,
'sgd_nonu': train_sgd_nonu,
'miso': train_miso,
'miso_nonu': train_miso_nonu,
'saga': train_saga}
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='dropout training on imdb')
parser.add_argument('--num-workers', default=1,
type=int, help='number of threads for grid search')
parser.add_argument('--pdf-file', default=None, help='pdf file to save to')
parser.add_argument('--pkl-file', default=None, help='pickle file to save to')
parser.add_argument('--seed', default=None, type=int, help='random seed')
parser.add_argument('--data-folder', default='data/aclImdb', help='imdb dataset folder')
args = parser.parse_args()
seed = args.seed
print('seed:', seed)
logging.info('loading imdb data')
if not os.path.exists(args.data_folder):
logging.error('IMDB dataset folder {} is missing. '.format(args.data_folder) +
'Download at http://ai.stanford.edu/~amaas/data/sentiment/ or specify a different folder.')
sys.exit(0)
Xtrain, ytrain, Xtest, ytest = load_imdb(args.data_folder)
n = Xtrain.shape[0]
d = Xtrain.shape[1]
Lmax = Xtrain.power(2).sum(1).max()
Lavg = Xtrain.power(2).sum(1).mean()
pp = None
if args.pdf_file:
import matplotlib
matplotlib.use('Agg')
from matplotlib.backends.backend_pdf import PdfPages
import curves
pp = PdfPages(args.pdf_file)
pkl = None
if args.pkl_file is not None:
pkl = []
logging.info('training')
futures = []
with ThreadPoolExecutor(max_workers=args.num_workers) as executor:
for lmbda in params['lmbda']:
for delta in params['delta']:
futures.append(((lmbda, delta),
[(alg, executor.submit(train_fn[alg['name']], lmbda, delta, alg['lr']))
for alg in params['algos']]))
for (lmbda, delta), futs in futures:
res = [(alg, f.result()) for alg, f in futs]
print('lmbda', lmbda, 'delta', delta)
for alg, r in res:
print(alg['name'], alg['lr'], 'train loss', r['loss_train'][-1],
'test acc', r['acc_test'][-1])
if pp or pkl is not None:
plot_res = {}
plot_res['params'] = [dict(name=alg['name'], lr=alg['lr'], lmbda=lmbda, loss=loss)
for alg, r in res]
plot_res['epochs'] = res[0][1]['epochs']
def transpose(key):
return list(zip(*(r[key] for alg, r in res)))
plot_res['test_accs'] = transpose('acc_test')
plot_res['train_accs'] = transpose('acc_train')
plot_res['train_losses'] = transpose('loss_train')
plot_res['test_losses'] = transpose('loss_test')
if pkl is not None:
pkl.append(((lmbda, delta), plot_res))
if pp:
curves.plot_loss(plot_res, ty='train', log=True, step=1, last=None,
small=False, legend=True, title=r'imdb, $\delta$ = {:.2f}'.format(delta))
pp.savefig()
if pp:
pp.close()
if pkl:
import pickle
pickle.dump(pkl, open(args.pkl_file, 'wb'))
avg_line_length: 36.451163 | max_line_length: 122 | alphanum_fraction: 0.590277

hexsha: 74095c2ae0f63ee6887469e4af190482f7e204a7 | size: 4,586 | ext: py | lang: Python
max_stars: sqlpython/metadata.py | fortime/sqlpython | 40e6b7d0b24ffeafabac0e3719049c081e26e8b2 | ["MIT"] | null | null | null
max_issues: sqlpython/metadata.py | fortime/sqlpython | 40e6b7d0b24ffeafabac0e3719049c081e26e8b2 | ["MIT"] | null | null | null
max_forks: sqlpython/metadata.py | fortime/sqlpython | 40e6b7d0b24ffeafabac0e3719049c081e26e8b2 | ["MIT"] | null | null | null
try:
from collections import defaultdict # standard library (Python 2.5+)
except ImportError:
from defaultdict import defaultdict # fall back to a local defaultdict module, as in the original import
metaqueries = defaultdict(defaultdict)
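# metaqueries['desc'][dialect][object_type] holds the SQL behind sqlpython's describe command
# (stored as tuples so several statements can run per object), and metaqueries['ls'][dialect]
# holds the object-listing query; the literal "\t" at the end of some Oracle queries is left
# for sqlpython to interpret as a terminator.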
metaqueries['desc']['oracle'] = defaultdict(defaultdict)
metaqueries['desc']['oracle']['TABLE']['long'] = (
"""SELECT atc.column_id "#",
atc.column_name,
CASE atc.nullable WHEN 'Y' THEN 'NULL' ELSE 'NOT NULL' END "Null?",
atc.data_type ||
CASE atc.data_type WHEN 'DATE' THEN ''
ELSE '(' ||
CASE atc.data_type WHEN 'NUMBER' THEN TO_CHAR(atc.data_precision) ||
CASE atc.data_scale WHEN 0 THEN ''
ELSE ',' || TO_CHAR(atc.data_scale) END
ELSE TO_CHAR(atc.data_length) END
END ||
CASE atc.data_type WHEN 'DATE' THEN '' ELSE ')' END
data_type,
acc.comments
FROM all_tab_columns atc
JOIN all_col_comments acc ON (acc.owner = atc.owner AND acc.table_name = atc.table_name AND acc.column_name = atc.column_name)
WHERE atc.table_name = :object_name
AND atc.owner = :owner
ORDER BY atc.column_id;""",)
metaqueries['desc']['oracle']['TABLE']['short'] = (
"""SELECT atc.column_name,
CASE atc.nullable WHEN 'Y' THEN 'NULL' ELSE 'NOT NULL' END "Null?",
atc.data_type ||
CASE atc.data_type WHEN 'DATE' THEN ''
ELSE '(' ||
CASE atc.data_type WHEN 'NUMBER' THEN TO_CHAR(atc.data_precision) ||
CASE atc.data_scale WHEN 0 THEN ''
ELSE ',' || TO_CHAR(atc.data_scale) END
ELSE TO_CHAR(atc.data_length) END
END ||
CASE atc.data_type WHEN 'DATE' THEN '' ELSE ')' END
data_type
FROM all_tab_columns atc
WHERE atc.table_name = :object_name
AND atc.owner = :owner
ORDER BY atc.column_id;""",)
metaqueries['desc']['oracle']['PROCEDURE'] = (
"""SELECT NVL(argument_name, 'Return Value') argument_name,
data_type,
in_out,
default_value
FROM all_arguments
WHERE object_name = :object_name
AND owner = :owner
AND package_name IS NULL
ORDER BY sequence;""",)
metaqueries['desc']['oracle']['PackageObjects'] = (
"""SELECT DISTINCT object_name
FROM all_arguments
WHERE package_name = :package_name
AND owner = :owner""",)
metaqueries['desc']['oracle']['PackageObjArgs'] = (
"""SELECT object_name,
argument_name,
data_type,
in_out,
default_value
FROM all_arguments
WHERE package_name = :package_name
AND object_name = :object_name
AND owner = :owner
AND argument_name IS NOT NULL
ORDER BY sequence;""",)
metaqueries['desc']['oracle']['TRIGGER'] = (
"""SELECT description
FROM all_triggers
WHERE owner = :owner
AND trigger_name = :object_name;
""",
"""SELECT table_owner,
base_object_type,
table_name,
column_name,
when_clause,
status,
action_type,
crossedition
FROM all_triggers
WHERE owner = :owner
AND trigger_name = :object_name
\\t""",)
metaqueries['desc']['oracle']['INDEX'] = (
"""SELECT index_type,
table_owner,
table_name,
table_type,
uniqueness,
compression,
partitioned,
temporary,
generated,
secondary,
dropped,
visibility
FROM all_indexes
WHERE owner = :owner
AND index_name = :object_name\\t""",)
metaqueries['desc']['oracle']['VIEW'] = metaqueries['desc']['oracle']['TABLE']['short']
metaqueries['desc']['oracle']['FUNCTION'] = metaqueries['desc']['oracle']['PROCEDURE']
metaqueries['ls']['oracle'] = """
SELECT owner,
object_name,
object_type,
status,
last_ddl_time,
user as my_own
FROM all_objects"""
metaqueries['ls']['information_schema'] = """
SELECT table_schema as owner,
table_name as object_name,
table_type as object_type,
null as status,
null as last_ddl_time,
%(my_own)s as my_own
FROM information_schema.tables
UNION ALL
SELECT trigger_schema as owner,
trigger_name as object_name,
'TRIGGER' as object_type,
null as status,
created as last_ddl_time,
%(my_own)s as my_own
FROM information_schema.triggers
UNION ALL
SELECT routine_schema as owner,
routine_name as object_name,
routine_type as object_type,
null as status,
last_altered as last_ddl_time,
%(my_own)s as my_own
FROM information_schema.routines
"""
metaqueries['ls']['postgres'] = (metaqueries['ls']['information_schema'] + """UNION ALL
SELECT sequence_schema as owner,
sequence_name as object_name,
'SEQUENCE' as object_type,
null as status,
null as last_ddl_time,
%(my_own)s as my_own
FROM information_schema.sequences""") % {'my_own': "text('public')"}
metaqueries['ls']['mysql'] = metaqueries['ls']['information_schema'] % {'my_own':"database()"}
metaqueries['ls']['sqlite'] = """
SELECT '' as owner,
tbl_name as object_name,
type as object_type,
null as status,
null as last_ddl_time,
'' as current_username
FROM sqlite_master"""
avg_line_length: 27.297619 | max_line_length: 126 | alphanum_fraction: 0.694287

hexsha: dcedf9711a3e04223dfa7aa542c6feea98598e33 | size: 1,775 | ext: py | lang: Python
max_stars: src/encoded/tests/test_upgrade_analysis_step.py | Parul-Kudtarkar/t2dream-portal | ad0514e71da95cd911e874cb04112037e10c5823 | ["MIT"] | null | null | null
max_issues: src/encoded/tests/test_upgrade_analysis_step.py | Parul-Kudtarkar/t2dream-portal | ad0514e71da95cd911e874cb04112037e10c5823 | ["MIT"] | null | null | null
max_forks: src/encoded/tests/test_upgrade_analysis_step.py | Parul-Kudtarkar/t2dream-portal | ad0514e71da95cd911e874cb04112037e10c5823 | ["MIT"] | null | null | null
import pytest
@pytest.fixture
def base_analysis_step(testapp, software_version):
item = {
'name': 'base_analysis_step_v_1',
'title': 'base_analysis_step_v_1 title',
'analysis_step_types': ['alignments'],
'input_file_types': ['reads'],
'software_versions': [
software_version['@id'],
],
}
return item
@pytest.fixture
def analysis_step_1(base_analysis_step):
item = base_analysis_step.copy()
item.update({
'schema_version': '2',
'output_file_types': ['signal of multi-mapped reads']
})
return item
@pytest.fixture
def analysis_step_3(base_analysis_step):
item = base_analysis_step.copy()
item.update({
'schema_version': '3',
'analysis_step_types': ['alignment', 'alignment'],
'input_file_types': ['reads', 'reads'],
'output_file_types': ['transcriptome alignments', 'transcriptome alignments']
})
return item
def test_analysis_step_run_2_3(registry, upgrader, analysis_step_1, threadlocals):
value = upgrader.upgrade('analysis_step', analysis_step_1, current_version='2', target_version='3', registry=registry)
assert 'signal of all reads' in value['output_file_types']
assert 'signal of multi-mapped reads' not in value['output_file_types']
def test_analysis_step_unique_array(upgrader, analysis_step_3):
value = upgrader.upgrade('analysis_step', analysis_step_3, current_version='3', target_version='4')
assert value['schema_version'] == '4'
assert len(value['analysis_step_types']) == len(set(value['analysis_step_types']))
assert len(value['input_file_types']) == len(set(value['input_file_types']))
assert len(value['output_file_types']) == len(set(value['output_file_types']))
avg_line_length: 34.134615 | max_line_length: 122 | alphanum_fraction: 0.689577

hexsha: 05f1f8b61e4058236338b3f9954a40173b8f9fff | size: 3,734 | ext: py | lang: Python
max_stars: Lib/site-packages/validictory/tests/test_disallow_unknown_properties.py | Kronos3/pyexec | c9e76a0302dee047ed137bc38aa669cec04c24cd | ["bzip2-1.0.6"] | 73 | 2015-07-05T12:52:55.000Z | 2019-12-09T16:34:09.000Z
max_issues: Lib/site-packages/validictory/tests/test_disallow_unknown_properties.py | Kronos3/pyexec | c9e76a0302dee047ed137bc38aa669cec04c24cd | ["bzip2-1.0.6"] | 30 | 2015-10-13T21:47:46.000Z | 2017-10-26T17:00:47.000Z
max_forks: Lib/site-packages/validictory/tests/test_disallow_unknown_properties.py | Kronos3/pyexec | c9e76a0302dee047ed137bc38aa669cec04c24cd | ["bzip2-1.0.6"] | 23 | 2015-07-11T05:11:36.000Z | 2022-02-14T05:27:22.000Z
from unittest import TestCase
import validictory
class TestDisallowUnknownProperties(TestCase):
def setUp(self):
self.data_simple = {"name": "john doe", "age": 42}
self.schema_simple = {
"type": "object",
"properties": {
"name": {"type": "string"},
"age": {"type": "integer"}
},
}
self.data_complex = {
"inv_number": "123",
"rows": [
{
"sku": "ab-456",
"desc": "a description",
"price": 100.45
},
{
"sku": "xy-123",
"desc": "another description",
"price": 999.00
}
],
"data": {
"name": "john doe",
"age": 42
}
}
self.schema_complex = {
"type": "object",
"properties": {
"inv_number": {"type": "string"},
"rows": {
"type": "array",
"items": {
"type": "object",
"properties": {
"sku": {"type": "string"},
"desc": {"type": "string"},
"price": {"type": "number"}
}
},
},
"data": {
"type": (
{
"type": "object",
"properties": {
"name": {
"type": "string"
},
"hair": {
"type": "string"
}
}
},
{
"type": "object",
"properties": {
"name": {
"type": "string"
},
"age": {
"type": "number"
}
}
}
)
}
}
}
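# Each test below validates the fixture data against its schema with disallow_unknown_properties=True:
# the *_pass tests expect no exception, while the *_fail tests add an extra key ("sex" / "rice")
# and expect validictory.SchemaError to be raised.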
def test_disallow_unknown_properties_pass(self):
try:
validictory.validate(self.data_simple, self.schema_simple,
disallow_unknown_properties=True)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_disallow_unknown_properties_fail(self):
self.data_simple["sex"] = "male"
self.assertRaises(validictory.SchemaError, validictory.validate,
self.data_simple, self.schema_simple,
disallow_unknown_properties=True)
def test_disallow_unknown_properties_complex_pass(self):
try:
validictory.validate(self.data_complex, self.schema_complex,
disallow_unknown_properties=True)
except ValueError as e:
self.fail("Unexpected failure: %s" % e)
def test_disallow_unknown_properties_complex_fail(self):
newrow = {"sku": "789", "desc": "catch me if you can", "price": 1,
"rice": 666}
self.data_complex["rows"].append(newrow)
self.assertRaises(validictory.SchemaError, validictory.validate,
self.data_complex, self.schema_complex,
disallow_unknown_properties=True)
avg_line_length: 34.574074 | max_line_length: 74 | alphanum_fraction: 0.363417