hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0fa484f7ae1a72b8411ce44ff1a3e31fab4d0ade | 383 | py | Python | exercises/python-1-a/example.py | ee7/exercism-research_experiment_1 | 92f875f53647f0da3dbcc32e13fdf53c77c3c460 | [
"MIT"
] | 8 | 2019-12-04T22:00:36.000Z | 2020-09-27T15:05:12.000Z | exercises/python-1-a/example.py | ee7/exercism-research_experiment_1 | 92f875f53647f0da3dbcc32e13fdf53c77c3c460 | [
"MIT"
] | 92 | 2019-11-29T19:44:06.000Z | 2021-11-09T16:15:48.000Z | exercises/python-1-a/example.py | ee7/exercism-research_experiment_1 | 92f875f53647f0da3dbcc32e13fdf53c77c3c460 | [
"MIT"
] | 27 | 2019-12-03T06:44:44.000Z | 2021-11-09T16:10:29.000Z | """
Perform RLE compression using imperative programming techniques.
"""
def compress(raw: str) -> bytes:
    """
    Compress the raw string to bytes using RLE.

    The string is UTF-8 encoded and each run of identical bytes is stored
    as a (count, byte) pair. Runs longer than 255 bytes are split into
    multiple pairs so the count always fits in a single byte (the original
    code raised ValueError on such runs when incrementing past 255).

    :param raw: text to compress
    :return: packed bytes of alternating count/value pairs
    """
    packed = bytearray()
    for char in raw.encode("utf-8"):
        # Start a new pair on a value change, or when the current run's
        # count would no longer fit in one byte.
        if not packed or char != packed[-1] or packed[-2] == 0xFF:
            packed.extend([1, char])
        else:
            packed[-2] += 1
    return bytes(packed)
| 22.529412 | 64 | 0.577023 |
2ef423744e843b87d375fe109a418a363cb892ba | 3,386 | py | Python | generate/generate_semeval_NLI_M.py | MEHAMMEDAMINE/ABSA-BERT-pair | a5f978574de2e0514b2a09143a3122d2db6df561 | [
"MIT"
] | null | null | null | generate/generate_semeval_NLI_M.py | MEHAMMEDAMINE/ABSA-BERT-pair | a5f978574de2e0514b2a09143a3122d2db6df561 | [
"MIT"
] | null | null | null | generate/generate_semeval_NLI_M.py | MEHAMMEDAMINE/ABSA-BERT-pair | a5f978574de2e0514b2a09143a3122d2db6df561 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 2 10:00:05 2022
@author: MOHAMMED ELAMINE
"""
import os
from random import sample
import random
random.seed(10)
data_dir = '../data/HAAD/'
dir_path = data_dir + 'bert-pair/'
if not os.path.exists(dir_path):
    os.makedirs(dir_path)

# The 15 aspect categories annotated in the HAAD book-review corpus.
d = ['الحبكة', 'المشاعر', 'الاسلوب', 'السياق', 'السلبيات', 'المؤلف', 'التقييم', 'الاماكن', 'الطائفية', 'الخاتمة', 'اللغات', 'الهوامش', 'الوقت', 'الاموال', 'المزايا']


def _generate_nli_m(xml_path, csv_path):
    """Convert one HAAD XML file into a tab-separated NLI-M pair file.

    For every <sentence> element, one output line is written per annotated
    (category, polarity) pair, plus three randomly sampled categories that
    are NOT annotated for the sentence, labelled "none", as negative pairs.
    Output columns: sentence id, polarity, category, sentence text.

    The XML is parsed line by line with string searches, so it assumes the
    formatting of the HAAD distribution (one tag per line).
    """
    with open(csv_path, "w", encoding="utf-8") as g:
        with open(xml_path, "r", encoding="utf-8") as f:
            s = f.readline().strip()
            while s:
                category = []
                polarity = []
                if "<sentence id" in s:
                    # Extract the sentence id from: <sentence id="...">
                    left = s.find("id")
                    right = s.find(">")
                    id = s[left + 4:right - 1]
                    while not "</sentence>" in s:
                        if "<text>" in s:
                            left = s.find("<text>")
                            right = s.find("</text>")
                            text = s[left + 6:right]
                        if "aspectCategory" in s:
                            left = s.find("category=")
                            right = s.find("polarity=")
                            category.append(s[left + 10:right - 2])
                            left = s.find("polarity=")
                            right = s.find("/>")
                            polarity.append(s[left + 10:right - 1])
                        s = f.readline().strip()
                    # Categories NOT annotated for this sentence; three of
                    # them become "none" (negative) auxiliary examples.
                    a = [item for item in d if item not in category]
                    ss = sample(a, 3)
                    for i in d:
                        if i in category:
                            g.write(id + "\t" + polarity[category.index(i)] + "\t" + i + "\t" + text + "\n")
                    for l in ss:
                        g.write(id + "\t" + "none" + "\t" + l + "\t" + text + "\n")
                else:
                    s = f.readline().strip()


# Keep test-before-train order: with random.seed(10) set above, this
# reproduces the exact sequence of sample() draws of the original script.
_generate_nli_m(data_dir + "HAAD_Test_GOLD.xml", dir_path + "test_NLI_M.csv")
_generate_nli_m(data_dir + "HAAD_Train.xml", dir_path + "train_NLI_M.csv")
| 35.642105 | 163 | 0.412581 |
e43f1c0ad8566058e11cafb1849be9504879ad5e | 2,334 | py | Python | liveDetect.py | karencfisher/face_express | 330321088898697ed547061ee1a4ec2c16035a7a | [
"MIT"
] | null | null | null | liveDetect.py | karencfisher/face_express | 330321088898697ed547061ee1a4ec2c16035a7a | [
"MIT"
] | null | null | null | liveDetect.py | karencfisher/face_express | 330321088898697ed547061ee1a4ec2c16035a7a | [
"MIT"
] | null | null | null | import os, time
import cv2
import numpy as np
from tensorflow.keras.models import load_model
from imutils import resize
def detectExpression(roi):
    """Classify the facial expression within a cropped face region.

    Arguments:
        roi - 2-D grayscale pixel array snipped from the full frame.

    Returns:
        A caption string such as "Happy 87 %" combining the predicted
        label and its probability, or '' if the region could not be fed
        through the model.
    """
    # Scale pixels to [0, 1] and reshape to the (1, h, w, 1) batch layout
    # the network expects.
    scaled = resize(roi, width=48, height=48) / 255
    batch = scaled[np.newaxis, ..., np.newaxis]

    # Occasionally the resize yields an unexpected shape; treat that as
    # "no prediction" instead of crashing the video loop.
    try:
        scores = model.predict(batch)
    except ValueError:
        return ''

    # Pick the most probable class and report its probability.
    best = np.argmax(scores[0])
    return f'{classes[best]} {str(round(scores[0][best] * 100))} %'
# Setup face detection and video stream
haar_file = 'haarcascade_frontalface_default.xml'
face_cascade = cv2.CascadeClassifier(haar_file)
webcam = cv2.VideoCapture(0)  # default camera (device 0)

# Load model
model_file = os.path.join('models', 'best_model2.h5')
model = load_model(model_file)
# Label order must match the model's output units.
classes = ['Neutral', 'Happy', 'Surprise', 'Sad', 'Angry', 'Disgust', 'Fear']

# Setup for running average of FPS
start_time = time.time()
frame_count = 0
fps = 0

while True:
    # Get a frame from video stream and convert to gray scale
    (_, im) = webcam.read()
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)

    # Detect faces
    faces = face_cascade.detectMultiScale(gray, 1.3, 4)

    # Predict expression in each face
    for (x, y, w, h) in faces:
        roi = gray[y:y+h, x:x+w]
        caption = detectExpression(roi)

        # Annotate original image
        cv2.rectangle(im, (x, y), (x + w, y + h), (255, 0, 0), 2)
        cv2.putText(im, caption, (x - 10, y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

    # Overlay the measured frame rate in the top-left corner.
    cv2.putText(im, f'{fps} FPS', (20, 20),
                cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 0, 255), 2)

    # Update FPS (running average since program start)
    frame_count += 1
    fps = int(frame_count / (time.time() - start_time))

    # Display image. Escape key to end loop
    cv2.imshow('OpenCV', im)
    key = cv2.waitKey(10)
    if key == 27:  # 27 == ESC
        break
15f24568a9ee38e556b2a143f4cf2364893ce145 | 6,029 | py | Python | mitmproxy/coretypes/multidict.py | fedosgad/mitmproxy | 7eacc41f3b1079e000cf6b6c19c0f337d6e01177 | [
"MIT"
] | null | null | null | mitmproxy/coretypes/multidict.py | fedosgad/mitmproxy | 7eacc41f3b1079e000cf6b6c19c0f337d6e01177 | [
"MIT"
] | null | null | null | mitmproxy/coretypes/multidict.py | fedosgad/mitmproxy | 7eacc41f3b1079e000cf6b6c19c0f337d6e01177 | [
"MIT"
] | null | null | null | from abc import ABCMeta
from abc import abstractmethod
from collections.abc import Iterator, MutableMapping, Sequence
from typing import TypeVar
from mitmproxy.coretypes import serializable
KT = TypeVar("KT")
VT = TypeVar("VT")
class _MultiDict(MutableMapping[KT, VT], metaclass=ABCMeta):
    """
    A MultiDict is a dictionary-like data structure that supports multiple values per key.
    """

    fields: tuple[tuple[KT, VT], ...]
    """The underlying raw datastructure."""

    def __repr__(self):
        fields = (repr(field) for field in self.fields)
        return "{cls}[{fields}]".format(
            cls=type(self).__name__, fields=", ".join(fields)
        )

    @staticmethod
    @abstractmethod
    def _reduce_values(values: Sequence[VT]) -> VT:
        """
        If a user accesses multidict["foo"], this method
        reduces all values for "foo" to a single value that is returned.
        For example, HTTP headers are folded, whereas we will just take
        the first cookie we found with that name.
        """

    @staticmethod
    @abstractmethod
    def _kconv(key: KT) -> KT:
        """
        This method converts a key to its canonical representation.
        For example, HTTP headers are case-insensitive, so this method returns key.lower().
        """

    def __getitem__(self, key: KT) -> VT:
        values = self.get_all(key)
        if not values:
            raise KeyError(key)
        return self._reduce_values(values)

    def __setitem__(self, key: KT, value: VT) -> None:
        self.set_all(key, [value])

    def __delitem__(self, key: KT) -> None:
        if key not in self:
            raise KeyError(key)
        key = self._kconv(key)
        # Remove every field whose canonicalized key matches.
        self.fields = tuple(
            field for field in self.fields if key != self._kconv(field[0])
        )

    def __iter__(self) -> Iterator[KT]:
        # Yield each key once, in first-occurrence order; comparison is on
        # canonicalized keys, but the original spelling is yielded.
        seen = set()
        for key, _ in self.fields:
            key_kconv = self._kconv(key)
            if key_kconv not in seen:
                seen.add(key_kconv)
                yield key

    def __len__(self) -> int:
        # Count distinct canonicalized keys, not raw fields.
        return len({self._kconv(key) for key, _ in self.fields})

    def __eq__(self, other) -> bool:
        # Equality compares the raw fields (order-sensitive, without key
        # canonicalization) and only against other MultiDict instances.
        if isinstance(other, MultiDict):
            return self.fields == other.fields
        return False

    def get_all(self, key: KT) -> list[VT]:
        """
        Return the list of all values for a given key.
        If that key is not in the MultiDict, the return value will be an empty list.
        """
        key = self._kconv(key)
        return [value for k, value in self.fields if self._kconv(k) == key]

    def set_all(self, key: KT, values: list[VT]) -> None:
        """
        Remove the old values for a key and add new ones.
        """
        key_kconv = self._kconv(key)

        new_fields: list[tuple[KT, VT]] = []
        for field in self.fields:
            if self._kconv(field[0]) == key_kconv:
                # Reuse the positions (and original key spelling) of existing
                # entries for as many new values as possible; surplus old
                # entries are dropped.
                if values:
                    new_fields.append((field[0], values.pop(0)))
            else:
                new_fields.append(field)
        # Any new values that did not fit into existing slots go at the end.
        while values:
            new_fields.append((key, values.pop(0)))
        self.fields = tuple(new_fields)

    def add(self, key: KT, value: VT) -> None:
        """
        Add an additional value for the given key at the bottom.
        """
        self.insert(len(self.fields), key, value)

    def insert(self, index: int, key: KT, value: VT) -> None:
        """
        Insert an additional value for the given key at the specified position.
        """
        item = (key, value)
        self.fields = self.fields[:index] + (item,) + self.fields[index:]

    def keys(self, multi: bool = False):
        """
        Get all keys.
        If `multi` is True, one key per value will be returned.
        If `multi` is False, duplicate keys will only be returned once.
        """
        return (k for k, _ in self.items(multi))

    def values(self, multi: bool = False):
        """
        Get all values.
        If `multi` is True, all values will be returned.
        If `multi` is False, only the first value per key will be returned.
        """
        return (v for _, v in self.items(multi))

    def items(self, multi: bool = False):
        """
        Get all (key, value) tuples.
        If `multi` is True, all `(key, value)` pairs will be returned.
        If False, only one tuple per key is returned.
        """
        if multi:
            return self.fields
        else:
            return super().items()
class MultiDict(_MultiDict[KT, VT], serializable.Serializable):
    """A self-contained MultiDict that owns its field storage."""

    def __init__(self, fields=()):
        super().__init__()
        # Normalize to an immutable tuple-of-tuples representation.
        self.fields = tuple(tuple(entry) for entry in fields)

    @staticmethod
    def _reduce_values(values):
        # When a key occurs several times, expose the first value.
        return values[0]

    @staticmethod
    def _kconv(key):
        # Keys are used verbatim; no canonicalization is applied.
        return key

    def get_state(self):
        return self.fields

    def set_state(self, state):
        self.fields = tuple(tuple(entry) for entry in state)

    @classmethod
    def from_state(cls, state):
        return cls(state)
class MultiDictView(_MultiDict[KT, VT]):
    """
    The MultiDictView provides the MultiDict interface over calculated data.
    The view itself contains no state - data is retrieved from the parent on
    request, and stored back to the parent on change.
    """

    def __init__(self, getter, setter):
        # All reads and writes of `fields` are delegated to these callables.
        self._getter = getter
        self._setter = setter
        super().__init__()

    @staticmethod
    def _kconv(key):
        # All request-attributes are case-sensitive.
        return key

    @staticmethod
    def _reduce_values(values):
        # With duplicate keys, only the first value is surfaced.
        return values[0]

    @property  # type: ignore
    def fields(self):
        return self._getter()

    @fields.setter
    def fields(self, value):
        self._setter(value)

    def copy(self) -> "MultiDict[KT,VT]":
        # Snapshot the current parent data into an owning MultiDict.
        return MultiDict(self.fields)
| 29.409756 | 91 | 0.591309 |
eddbd94c014c7d3e14525642baccd84a01276131 | 1,599 | py | Python | packit_service/constants.py | IceWreck/packit-service | ab8a3ae7b7f078f4a5bf3465516c1abc894fe3dc | [
"MIT"
] | null | null | null | packit_service/constants.py | IceWreck/packit-service | ab8a3ae7b7f078f4a5bf3465516c1abc894fe3dc | [
"MIT"
] | null | null | null | packit_service/constants.py | IceWreck/packit-service | ab8a3ae7b7f078f4a5bf3465516c1abc894fe3dc | [
"MIT"
] | null | null | null | from enum import Enum
DOCS_URL = "https://packit.dev/packit-as-a-service/"
FAQ_URL = f"{DOCS_URL}#faq"
FAQ_URL_HOW_TO_RETRIGGER = (
f"{DOCS_URL}#how-to-re-trigger-packit-service-actions-in-your-pull-request"
)
SANDCASTLE_WORK_DIR = "/sandcastle"
SANDCASTLE_IMAGE = "docker.io/usercont/sandcastle"
SANDCASTLE_DEFAULT_PROJECT = "myproject"
SANDCASTLE_PVC = "SANDCASTLE_PVC"
CONFIG_FILE_NAME = "packit-service.yaml"
TESTING_FARM_TRIGGER_URL = (
"https://scheduler-testing-farm.apps.ci.centos.org/v0/trigger"
)
MSG_RETRIGGER = (
"You can re-trigger build by adding a comment (`/packit {build}`) "
"into this pull request."
)
FILE_DOWNLOAD_FAILURE = "Failed to download file from URL"
PERMISSIONS_ERROR_WRITE_OR_ADMIN = (
"Only users with write or admin permissions to the repository "
"can trigger Packit-as-a-Service"
)
COPR_SUCC_STATE = "succeeded"
COPR_FAILED_STATE = "failed"
COPR_API_SUCC_STATE = 1
COPR_API_FAIL_STATE = 2
PG_COPR_BUILD_STATUS_FAILURE = "failure"
PG_COPR_BUILD_STATUS_SUCCESS = "success"
RETRY_LIMIT = 5
WHITELIST_CONSTANTS = {
"approved_automatically": "approved_automatically",
"waiting": "waiting",
"approved_manually": "approved_manually",
}
class KojiBuildState(Enum):
    """
    Koji build states as used in fedmsg payloads.

    Koji sometimes uses the numeric representation instead,
    but we don't need those yet.
    The corresponding numbers are noted in comments in case anyone needs them.
    """

    free = "FREE"  # 0
    open = "OPEN"  # 1
    closed = "CLOSED"  # 2
    canceled = "CANCELED"  # 3
    assigned = "ASSIGNED"  # 4
    failed = "FAILED"  # 5
| 24.984375 | 79 | 0.716698 |
2ef554db7bed5faed756f3e0f90211969e1bf023 | 5,040 | py | Python | mwcp_parsers/Loki.py | CybercentreCanada/assemblyline-service-configextractor | ab456ed6bac2ae60dea56890b0e5d0cc42c7c519 | [
"MIT"
] | 2 | 2021-06-18T14:53:21.000Z | 2021-07-03T11:45:42.000Z | mwcp_parsers/Loki.py | CybercentreCanada/assemblyline-service-configextractor | ab456ed6bac2ae60dea56890b0e5d0cc42c7c519 | [
"MIT"
] | 5 | 2020-11-04T16:06:38.000Z | 2022-01-28T16:17:38.000Z | mwcp_parsers/Loki.py | CybercentreCanada/assemblyline-service-configextractor | ab456ed6bac2ae60dea56890b0e5d0cc42c7c519 | [
"MIT"
] | 2 | 2021-05-30T11:37:25.000Z | 2021-06-24T12:57:35.000Z | # MIT License
#
# Copyright (c) Jason Reaves - @sysopfb
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from mwcp.parser import Parser
import pefile
import sys
import re
import struct
from Crypto.Cipher import DES3
def find_iv(pe):
    """Locate the 8-byte IV referenced by the config-decryption code.

    Accepts either a parsed pefile.PE object or a raw bytes blob and
    returns the IV bytes, or -1 if the push-sequence pattern is absent.
    """
    if type(pe) == pefile.PE:
        image = pe.get_memory_mapped_image()
    else:
        image = pe
    # Match the chain of PUSH imm32 instructions that load the IV address.
    matches = re.findall(br"""\x68...\x00.{1,10}\x68...\x00\x68...\x00\x68...\x00\x03\xc1""", image)
    if not matches:
        return -1
    (va,) = struct.unpack_from("<I", matches[0][1:])
    # Translate the virtual address to a file-image offset
    # (assumes the default image base 0x400000).
    offset = va - 0x400000
    return image[offset : offset + 8]
def try_find_iv(pe):
    """Alternate IV finder anchored on the "push 8 / pop ecx / mov esi" stub.

    Accepts either a parsed pefile.PE object or raw bytes and returns the
    8-byte IV, or -1 if the marker is not present.
    """
    dlen = 8 * 4
    if type(pe) == pefile.PE:
        t = pe.get_memory_mapped_image()
    else:
        t = pe
    off = t.find(b"\x6a\x08\x59\xbe")
    if off == -1:
        return -1
    (addr,) = struct.unpack_from("<I", t[off + 4 :])
    # print(hex(addr))
    addr -= 0x400000
    conf = t[addr : addr + dlen]
    # Go until past next blob to \x00\x00\x00\x00
    off = t[addr + dlen + 4 :].find(b"\x00\x00\x00\x00")
    off += addr + dlen + 4 + 4
    iv = t[off : off + 8]
    # This doesn't work for all samples... still interesting that the data is in close proximity sometimes
    (nul, key3, nul, key2, nul, key1) = struct.unpack_from("<I8sI8sI8s", t[off + 8 :])
    # Fix: the blob header must be a bytes literal; concatenating a str with
    # the bytes key parts raised TypeError on Python 3.
    key = b"\x08\x02\x00\x00\x03\x66\x00\x00\x18\x00\x00\x00" + key1 + key2 + key3
    return iv
def find_conf(pe):
    """Extract the two encrypted config blobs (8 and 10 dwords long).

    Accepts either a parsed pefile.PE object or raw bytes; returns a list
    with one bytes blob per marker pattern.
    """
    if type(pe) == pefile.PE:
        image = pe.get_memory_mapped_image()
    else:
        image = pe
    blobs = []
    # Each marker is "push N / pop ecx / mov esi, imm32"; the imm32 that
    # follows is the virtual address of an N-dword encrypted blob.
    for marker, dword_count in ((b"\x6a\x08\x59\xbe", 8), (b"\x6a\x0a\x59\xbe", 10)):
        off = image.find(marker)
        (va,) = struct.unpack_from("<I", image[off + 4 :])
        start = va - 0x400000
        blobs.append(image[start : start + dword_count * 4])
    return blobs
def find_key(pe):
    """Rebuild the 3DES key blob from the PUSH-instruction sequence.

    Returns a bytes blob whose first 12 bytes are a CryptoAPI-style header
    and the remaining 24 bytes are the key material, or None if the
    pattern is not found.
    """
    ret = None
    if type(pe) == pefile.PE:
        t = pe.get_memory_mapped_image()
    else:
        t = pe
    temp = re.findall(br"""\x68...\x00\x68...\x00\x68...\x00\x03\xc1""", t)
    if temp != []:
        # Fix: on Python 3 these must all be bytes operations — the original
        # used str literals for the header, the split separator and the empty
        # comparison, which raised TypeError / never matched.
        ret = b"\x08\x02\x00\x00\x03\x66\x00\x00\x18\x00\x00\x00"
        # Each PUSH operand (reversed order) is the VA of 8 key bytes.
        temp = temp[0][:-2].split(b"\x68")[::-1]
        for a in temp:
            if a != b"":
                (addr,) = struct.unpack_from("<I", a)
                addr -= 0x400000
                ret += t[addr : addr + 8]
    return ret
def decoder(data):
    """Extract C2 URLs from a Loki sample.

    First scans the raw file and the XOR-0xFF-decoded ".x" section (or the
    whole image when that section is absent) for plaintext URLs; if none
    are found, falls back to decrypting the onboard 3DES config blobs.

    :param data: raw bytes of the sample being analyzed
    :return: list of URL byte strings
    """
    x_sect = None
    urls = re.findall(br"""https?:\/\/[a-zA-Z0-9\/\.:\-_]+""", data)
    pe = None
    try:
        # Fix: parse the bytes we were given — the original parsed
        # sys.argv[1], which is unrelated to the sample when running
        # as an mwcp parser.
        pe = pefile.PE(data=data)
        for sect in pe.sections:
            # Section names are bytes under pefile on Python 3.
            if b".x" in sect.Name:
                x_sect = sect
        img = pe.get_memory_mapped_image()
    except Exception:
        # Not a parseable PE; fall back to scanning the raw bytes.
        img = data
    # The ".x" section (or, failing that, the whole image) is XOR'ed 0xFF.
    if x_sect is not None:
        x = bytearray(img[x_sect.VirtualAddress : x_sect.VirtualAddress + x_sect.SizeOfRawData])
    else:
        x = bytearray(img)
    for i in range(len(x)):
        x[i] ^= 0xFF
    urls += re.findall(br"""https?:\/\/[a-zA-Z0-9\/\.:\-_]+""", x)
    # Drop the decoy/author URL; compare against bytes, not str (the
    # original str comparisons never matched on Python 3).
    urls = [u for u in urls if u != b"http://www.ibsensoftware.com/" and u != b""]
    # Try to decrypt onboard config then
    if urls == []:
        temp = b""
        if pe is None:
            pe = data
        key = find_key(pe)
        # iv = try_find_iv(pe)
        iv = find_iv(pe)
        confs = find_conf(pe)
        if iv not in [b"", -1] and confs != []:
            for conf in confs:
                dec = DES3.new(key[12:], DES3.MODE_CBC, iv)
                temp += dec.decrypt(conf)
            urls += re.findall(br"""[a-zA-Z0-9\/\.:\-_]{6,}""", temp)
    return urls
class Loki(Parser):
    """mwcp parser that reports every URL extracted by ``decoder``."""

    DESCRIPTION = "Loki configuration parser."
    AUTHOR = "sysopfb"

    def run(self):
        # Report each extracted C2 URL as an "address" metadata entry.
        for address in decoder(self.file_object.file_data):
            self.reporter.add_metadata("address", address)
| 28.636364 | 106 | 0.575794 |
14ef0de576d60f385d8cf91370a8d6ad8d143f09 | 967 | py | Python | simple-http-server.py | rmayorgav/simple-http-server | 8898a983f648cecb16e7dcccca73993c65ecb807 | [
"MIT"
] | null | null | null | simple-http-server.py | rmayorgav/simple-http-server | 8898a983f648cecb16e7dcccca73993c65ecb807 | [
"MIT"
] | null | null | null | simple-http-server.py | rmayorgav/simple-http-server | 8898a983f648cecb16e7dcccca73993c65ecb807 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import http.server
import multiprocessing
import socket
# TCP port both the IPv4 and IPv6 servers listen on.
HTTP_PORT = 8888


class HTTPServerIPv4(http.server.HTTPServer):
    """HTTP server bound to an IPv4 (AF_INET) socket."""

    address_family = socket.AF_INET
class HTTPServerIPv6(http.server.HTTPServer):
    """HTTP server bound to an IPv6 (AF_INET6) socket."""

    address_family = socket.AF_INET6
def http_server_ipv4():
    """Serve the current directory over HTTP on an IPv4 socket (blocks forever)."""
    handler = http.server.SimpleHTTPRequestHandler
    server = HTTPServerIPv4(('', HTTP_PORT), handler)
    server.serve_forever()
def http_server_ipv6():
    """Serve the current directory over HTTP on an IPv6 socket (blocks forever)."""
    handler = http.server.SimpleHTTPRequestHandler
    server = HTTPServerIPv6(('', HTTP_PORT), handler)
    server.serve_forever()
if __name__ == '__main__':
    # Required when the script is packaged as a frozen executable on Windows.
    multiprocessing.freeze_support()
    # Run one server per address family in its own process so both IPv4 and
    # IPv6 clients can be served on the same port concurrently.
    server_ipv4 = multiprocessing.Process(target=http_server_ipv4)
    server_ipv6 = multiprocessing.Process(target=http_server_ipv6)
    server_ipv4.start()
    server_ipv6.start()
    print('Listening on port {}'.format(HTTP_PORT))
    print('Press CTRL-C to quit')
    # Block until both server processes exit (normally only on CTRL-C).
    server_ipv4.join()
    server_ipv6.join()
| 20.145833 | 66 | 0.711479 |
8f30eb36dde45d406aa42b9faca0240f289cf77b | 2,110 | py | Python | ooobuild/cssdyn/datatransfer/clipboard/__init__.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/cssdyn/datatransfer/clipboard/__init__.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/cssdyn/datatransfer/clipboard/__init__.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ....dyn.datatransfer.clipboard.clipboard_event import ClipboardEvent as ClipboardEvent
from ....dyn.datatransfer.clipboard.clipboard_manager import ClipboardManager as ClipboardManager
from ....dyn.datatransfer.clipboard.generic_clipboard import GenericClipboard as GenericClipboard
from ....dyn.datatransfer.clipboard.rendering_capabilities import RenderingCapabilities as RenderingCapabilities
from ....dyn.datatransfer.clipboard.rendering_capabilities import RenderingCapabilitiesEnum as RenderingCapabilitiesEnum
from ....dyn.datatransfer.clipboard.system_clipboard import SystemClipboard as SystemClipboard
from ....dyn.datatransfer.clipboard.x_clipboard import XClipboard as XClipboard
from ....dyn.datatransfer.clipboard.x_clipboard_ex import XClipboardEx as XClipboardEx
from ....dyn.datatransfer.clipboard.x_clipboard_factory import XClipboardFactory as XClipboardFactory
from ....dyn.datatransfer.clipboard.x_clipboard_listener import XClipboardListener as XClipboardListener
from ....dyn.datatransfer.clipboard.x_clipboard_manager import XClipboardManager as XClipboardManager
from ....dyn.datatransfer.clipboard.x_clipboard_notifier import XClipboardNotifier as XClipboardNotifier
from ....dyn.datatransfer.clipboard.x_clipboard_owner import XClipboardOwner as XClipboardOwner
from ....dyn.datatransfer.clipboard.x_flushable_clipboard import XFlushableClipboard as XFlushableClipboard
from ....dyn.datatransfer.clipboard.x_system_clipboard import XSystemClipboard as XSystemClipboard
| 65.9375 | 120 | 0.836019 |
d8e87bd9445558df0c9744a1e45f2db3a8c343c7 | 28,441 | py | Python | src/transformers/modeling_electra.py | 12190143/transformers | 6faca88ee0c472de8207e648b0999a1ee01ff127 | [
"Apache-2.0"
] | 480 | 2019-10-14T02:22:34.000Z | 2022-03-29T18:07:00.000Z | src/transformers/modeling_electra.py | hmason/transformers | ab90353f1abfd15f8d21f99395658d060679a08c | [
"Apache-2.0"
] | 20 | 2019-10-15T16:18:05.000Z | 2022-02-27T21:23:55.000Z | src/transformers/modeling_electra.py | hmason/transformers | ab90353f1abfd15f8d21f99395658d060679a08c | [
"Apache-2.0"
] | 42 | 2019-10-14T21:29:59.000Z | 2021-12-28T15:25:58.000Z | import logging
import os
import torch
import torch.nn as nn
from .activations import get_activation
from .configuration_electra import ElectraConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_bert import BertEmbeddings, BertEncoder, BertLayerNorm, BertPreTrainedModel
logger = logging.getLogger(__name__)
ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP = {
"google/electra-small-generator": "https://cdn.huggingface.co/google/electra-small-generator/pytorch_model.bin",
"google/electra-base-generator": "https://cdn.huggingface.co/google/electra-base-generator/pytorch_model.bin",
"google/electra-large-generator": "https://cdn.huggingface.co/google/electra-large-generator/pytorch_model.bin",
"google/electra-small-discriminator": "https://cdn.huggingface.co/google/electra-small-discriminator/pytorch_model.bin",
"google/electra-base-discriminator": "https://cdn.huggingface.co/google/electra-base-discriminator/pytorch_model.bin",
"google/electra-large-discriminator": "https://cdn.huggingface.co/google/electra-large-discriminator/pytorch_model.bin",
}
def load_tf_weights_in_electra(model, config, tf_checkpoint_path, discriminator_or_generator="discriminator"):
    """ Load tf checkpoints in a pytorch model.

    TF variable names are remapped from the "electra"/"generator"
    scopes to the PyTorch module layout before each array is copied in.
    Optimizer slots (adam_v/adam_m) and bookkeeping variables are skipped.
    Uses the module ``logger`` (instead of bare print) consistently with
    the rest of this file.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        original_name: str = name

        try:
            if isinstance(model, ElectraForMaskedLM):
                name = name.replace("electra/embeddings/", "generator/embeddings/")

            if discriminator_or_generator == "generator":
                name = name.replace("electra/", "discriminator/")
                name = name.replace("generator/", "electra/")

            name = name.replace("dense_1", "dense_prediction")
            name = name.replace("generator_predictions/output_bias", "generator_lm_head/bias")

            name = name.split("/")
            # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
            # which are not required for using pretrained model
            if any(n in ["global_step", "temperature"] for n in name):
                logger.info("Skipping {}".format(original_name))
                continue
            pointer = model
            for m_name in name:
                if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                    scope_names = re.split(r"_(\d+)", m_name)
                else:
                    scope_names = [m_name]
                if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                    pointer = getattr(pointer, "weight")
                elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                    pointer = getattr(pointer, "bias")
                elif scope_names[0] == "output_weights":
                    pointer = getattr(pointer, "weight")
                elif scope_names[0] == "squad":
                    pointer = getattr(pointer, "classifier")
                else:
                    pointer = getattr(pointer, scope_names[0])
                if len(scope_names) >= 2:
                    num = int(scope_names[1])
                    pointer = pointer[num]
            if m_name.endswith("_embeddings"):
                pointer = getattr(pointer, "weight")
            elif m_name == "kernel":
                array = np.transpose(array)
            try:
                assert pointer.shape == array.shape, original_name
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise
            # Use the module logger (lazy %-formatting) instead of print.
            logger.info("Initialize PyTorch weight %s from %s", name, original_name)
            pointer.data = torch.from_numpy(array)
        except AttributeError as e:
            # Variables with no PyTorch counterpart are skipped, not fatal.
            logger.warning("Skipping %s (%s): %s", original_name, name, e)
            continue
    return model
class ElectraEmbeddings(BertEmbeddings):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__(config)
        # Unlike BERT, Electra embeds into `embedding_size`, which may differ
        # from `hidden_size` (ElectraModel projects up when they differ).
        self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.embedding_size, eps=config.layer_norm_eps)
class ElectraDiscriminatorPredictions(nn.Module):
    """Prediction module for the discriminator, made up of two dense layers."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # Single output unit per token: "was this token replaced?" logit.
        self.dense_prediction = nn.Linear(config.hidden_size, 1)
        self.config = config

    def forward(self, discriminator_hidden_states, attention_mask):
        """Return per-token replaced-token-detection logits.

        Args:
            discriminator_hidden_states: encoder output.
            attention_mask: currently unused; kept for interface stability.
        """
        hidden_states = self.dense(discriminator_hidden_states)
        hidden_states = get_activation(self.config.hidden_act)(hidden_states)
        # Squeeze only the trailing singleton produced by the 1-unit linear
        # layer. A bare .squeeze() also dropped a batch or sequence dimension
        # of size 1, corrupting the logits' shape for such inputs.
        logits = self.dense_prediction(hidden_states).squeeze(-1)

        return logits
class ElectraGeneratorPredictions(nn.Module):
    """Prediction module for the generator, made up of two dense layers."""

    def __init__(self, config):
        super().__init__()
        self.LayerNorm = BertLayerNorm(config.embedding_size)
        self.dense = nn.Linear(config.hidden_size, config.embedding_size)

    def forward(self, generator_hidden_states):
        # Project hidden states down to the embedding size, then apply
        # GELU followed by layer normalization.
        projected = self.dense(generator_hidden_states)
        activated = get_activation("gelu")(projected)
        return self.LayerNorm(activated)
class ElectraPreTrainedModel(BertPreTrainedModel):
    """ An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
    """

    # Electra-specific config class, pretrained-weight URL map, TF-weight
    # loader and parameter prefix used by the generic from_pretrained logic.
    config_class = ElectraConfig
    pretrained_model_archive_map = ELECTRA_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_electra
    base_model_prefix = "electra"
ELECTRA_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.ElectraConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
ELECTRA_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.ElectraTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask
is used in the cross-attention if the model is configured as a decoder.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
"""
@add_start_docstrings(
"The bare Electra Model transformer outputting raw hidden-states without any specific head on top. Identical to "
"the BERT model except that it uses an additional linear layer between the embedding layer and the encoder if the "
"hidden size and embedding size are different."
""
"Both the generator and discriminator checkpoints may be loaded into this model.",
ELECTRA_START_DOCSTRING,
)
class ElectraModel(ElectraPreTrainedModel):
config_class = ElectraConfig
def __init__(self, config):
super().__init__(config)
self.embeddings = ElectraEmbeddings(config)
if config.embedding_size != config.hidden_size:
self.embeddings_project = nn.Linear(config.embedding_size, config.hidden_size)
self.encoder = BertEncoder(config)
self.config = config
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_callable(ELECTRA_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.ElectraConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import ElectraModel, ElectraTokenizer
import torch
tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator')
model = ElectraModel.from_pretrained('google/electra-small-discriminator')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
hidden_states = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
if hasattr(self, "embeddings_project"):
hidden_states = self.embeddings_project(hidden_states)
hidden_states = self.encoder(hidden_states, attention_mask=extended_attention_mask, head_mask=head_mask)
return hidden_states
@add_start_docstrings(
"""
Electra model with a binary classification head on top as used during pre-training for identifying generated
tokens.
It is recommended to load the discriminator checkpoint into that model.""",
ELECTRA_START_DOCSTRING,
)
class ElectraForPreTraining(ElectraPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.electra = ElectraModel(config)
self.discriminator_predictions = ElectraDiscriminatorPredictions(config)
self.init_weights()
@add_start_docstrings_to_callable(ELECTRA_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):
Labels for computing the ELECTRA loss. Input should be a sequence of tokens (see :obj:`input_ids` docstring)
Indices should be in ``[0, 1]``.
``0`` indicates the token is an original token,
``1`` indicates the token was replaced.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.ElectraConfig`) and inputs:
loss (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total loss of the ELECTRA objective.
scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`)
Prediction scores of the head (scores for each token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when :obj:`config.output_hidden_states=True`):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import ElectraTokenizer, ElectraForPreTraining
import torch
tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator')
model = ElectraForPreTraining.from_pretrained('google/electra-small-discriminator')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
prediction_scores, seq_relationship_scores = outputs[:2]
"""
discriminator_hidden_states = self.electra(
input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds
)
discriminator_sequence_output = discriminator_hidden_states[0]
logits = self.discriminator_predictions(discriminator_sequence_output, attention_mask)
output = (logits,)
if labels is not None:
loss_fct = nn.BCEWithLogitsLoss()
if attention_mask is not None:
active_loss = attention_mask.view(-1, discriminator_sequence_output.shape[1]) == 1
active_logits = logits.view(-1, discriminator_sequence_output.shape[1])[active_loss]
active_labels = labels[active_loss]
loss = loss_fct(active_logits, active_labels.float())
else:
loss = loss_fct(logits.view(-1, discriminator_sequence_output.shape[1]), labels.float())
output = (loss,) + output
output += discriminator_hidden_states[1:]
return output # (loss), scores, (hidden_states), (attentions)
@add_start_docstrings(
"""
Electra model with a language modeling head on top.
Even though both the discriminator and generator may be loaded into this model, the generator is
the only model of the two to have been trained for the masked language modeling task.""",
ELECTRA_START_DOCSTRING,
)
class ElectraForMaskedLM(ElectraPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.electra = ElectraModel(config)
self.generator_predictions = ElectraGeneratorPredictions(config)
self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size)
self.init_weights()
def get_output_embeddings(self):
return self.generator_lm_head
@add_start_docstrings_to_callable(ELECTRA_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
masked_lm_labels=None,
):
r"""
masked_lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the masked language modeling loss.
Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.ElectraConfig`) and inputs:
masked_lm_loss (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Masked language modeling loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import ElectraTokenizer, ElectraForMaskedLM
import torch
tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-generator')
model = ElectraForMaskedLM.from_pretrained('google/electra-small-generator')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids, masked_lm_labels=input_ids)
loss, prediction_scores = outputs[:2]
"""
generator_hidden_states = self.electra(
input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds
)
generator_sequence_output = generator_hidden_states[0]
prediction_scores = self.generator_predictions(generator_sequence_output)
prediction_scores = self.generator_lm_head(prediction_scores)
output = (prediction_scores,)
# Masked language modeling softmax layer
if masked_lm_labels is not None:
loss_fct = nn.CrossEntropyLoss() # -100 index = padding token
loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
output = (loss,) + output
output += generator_hidden_states[1:]
return output # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
@add_start_docstrings(
"""
Electra model with a token classification head on top.
Both the discriminator and generator may be loaded into this model.""",
ELECTRA_START_DOCSTRING,
)
class ElectraForTokenClassification(ElectraPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.electra = ElectraModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(ELECTRA_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the token classification loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.ElectraConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) :
Classification loss.
scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`)
Classification scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import ElectraTokenizer, ElectraForTokenClassification
import torch
tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator')
model = ElectraForTokenClassification.from_pretrained('google/electra-small-discriminator')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, scores = outputs[:2]
"""
discriminator_hidden_states = self.electra(
input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds
)
discriminator_sequence_output = discriminator_hidden_states[0]
discriminator_sequence_output = self.dropout(discriminator_sequence_output)
logits = self.classifier(discriminator_sequence_output)
output = (logits,)
if labels is not None:
loss_fct = nn.CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.config.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
output = (loss,) + output
output += discriminator_hidden_states[1:]
return output # (loss), scores, (hidden_states), (attentions)
| 46.472222 | 154 | 0.674871 |
ff5ee5e0595ae7d922319aa97da9c9cf8bd3a331 | 1,519 | py | Python | line-notification-awscost/app.py | K2OSystem/line-notification-awscost | b139499848696e1a6b5a0a8a39319c72af9fc5d7 | [
"MIT"
] | null | null | null | line-notification-awscost/app.py | K2OSystem/line-notification-awscost | b139499848696e1a6b5a0a8a39319c72af9fc5d7 | [
"MIT"
] | null | null | null | line-notification-awscost/app.py | K2OSystem/line-notification-awscost | b139499848696e1a6b5a0a8a39319c72af9fc5d7 | [
"MIT"
] | null | null | null | import json
import logging
import os
from datetime import datetime, timedelta
import boto3
import requests
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Line の設定
LINE_POST_URL = os.environ['LINE_POST_URL']
LINE_TOKEN = os.environ['LINE_TOKEN']
client = boto3.client('cloudwatch', region_name='us-east-1')
get_metric_statistics = client.get_metric_statistics(
Namespace='AWS/Billing',
MetricName='EstimatedCharges',
Dimensions=[
{
'Name': 'Currency',
'Value': 'USD'
}
],
StartTime=datetime.today() - timedelta(days=1),
EndTime=datetime.today(),
Period=86400,
Statistics=['Maximum']
)
def build_message(cost, date):
text = "{}までのAWS料金は、${}です。".format(date, cost)
return text
def lambda_handler(event, context):
print('get_metric_statistics', get_metric_statistics)
cost = get_metric_statistics['Datapoints'][0]['Maximum']
date = get_metric_statistics['Datapoints'][0]['Timestamp'].strftime('%Y-%m-%d')
message = build_message(cost, date)
# LINEにPOST
try:
headers = {'Authorization': 'Bearer {}'.format(LINE_TOKEN)}
payload = {'message': message}
# LINE通知
requests.post(LINE_POST_URL, headers=headers, params=payload)
except requests.exceptional.RequestException as e:
logger.error("Request failed: {}".format(e))
return {
"statusCode": 200,
"body": json.dumps({
"message": "success",
}),
}
| 23.369231 | 83 | 0.651086 |
e8da817f75529d2252eba29f11b4e94581f82f74 | 48,676 | py | Python | src/sage/geometry/hyperbolic_space/hyperbolic_model.py | sheerluck/sage | b5e572b7d231f70c139d9978d68add80c4ef353d | [
"BSL-1.0"
] | 1,742 | 2015-01-04T07:06:13.000Z | 2022-03-30T11:32:52.000Z | src/sage/geometry/hyperbolic_space/hyperbolic_model.py | sheerluck/sage | b5e572b7d231f70c139d9978d68add80c4ef353d | [
"BSL-1.0"
] | 66 | 2015-03-19T19:17:24.000Z | 2022-03-16T11:59:30.000Z | src/sage/geometry/hyperbolic_space/hyperbolic_model.py | sheerluck/sage | b5e572b7d231f70c139d9978d68add80c4ef353d | [
"BSL-1.0"
] | 495 | 2015-01-10T10:23:18.000Z | 2022-03-24T22:06:11.000Z | # -*- coding: utf-8 -*-
r"""
Hyperbolic Models
In this module, a hyperbolic model is a collection of data that allow
the user to implement new models of hyperbolic space with minimal effort.
The data include facts about the underlying set (such as whether the
model is bounded), facts about the metric (such as whether the model is
conformal), facts about the isometry group (such as whether it is a
linear or projective group), and more. Generally speaking, any data
or method that pertains to the model itself -- rather than the points,
geodesics, or isometries of the model -- is implemented in this module.
Abstractly, a model of hyperbolic space is a connected, simply connected
manifold equipped with a complete Riemannian metric of constant curvature
`-1`. This module records information sufficient to enable computations
in hyperbolic space without explicitly specifying the underlying set or
its Riemannian metric. Although, see the
`SageManifolds <http://sagemanifolds.obspm.fr/>`_ project if
you would like to take this approach.
This module implements the abstract base class for a model of hyperbolic
space of arbitrary dimension. It also contains the implementations of
specific models of hyperbolic geometry.
AUTHORS:
- Greg Laun (2013): Initial version.
EXAMPLES:
We illustrate how the classes in this module encode data by comparing
the upper half plane (UHP), Poincaré disk (PD) and hyperboloid (HM)
models. First we create::
sage: U = HyperbolicPlane().UHP()
sage: P = HyperbolicPlane().PD()
sage: H = HyperbolicPlane().HM()
We note that the UHP and PD models are bounded while the HM model is
not::
sage: U.is_bounded() and P.is_bounded()
True
sage: H.is_bounded()
False
The isometry groups of UHP and PD are projective, while that of HM is
linear::
sage: U.is_isometry_group_projective()
True
sage: H.is_isometry_group_projective()
False
The models are responsible for determining if the coordinates of points
and the matrix of linear maps are appropriate for constructing points
and isometries in hyperbolic space::
sage: U.point_in_model(2 + I)
True
sage: U.point_in_model(2 - I)
False
sage: U.point_in_model(2)
False
sage: U.boundary_point_in_model(2)
True
"""
#***********************************************************************
#
# Copyright (C) 2013 Greg Laun <glaun@math.umd.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#***********************************************************************
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.parent import Parent
from sage.misc.bindable_class import BindableClass
from sage.misc.lazy_import import lazy_import
from sage.functions.other import imag, real
from sage.misc.functional import sqrt
from sage.functions.all import arccosh
from sage.rings.cc import CC
from sage.rings.real_double import RDF
from sage.rings.real_mpfr import RR
from sage.rings.infinity import infinity
from sage.symbolic.constants import I
from sage.matrix.constructor import matrix
from sage.categories.homset import Hom
from sage.geometry.hyperbolic_space.hyperbolic_constants import EPSILON, LORENTZ_GRAM
from sage.geometry.hyperbolic_space.hyperbolic_point import (
HyperbolicPoint, HyperbolicPointUHP)
from sage.geometry.hyperbolic_space.hyperbolic_isometry import (
HyperbolicIsometry, HyperbolicIsometryUHP,
HyperbolicIsometryPD, HyperbolicIsometryKM, moebius_transform)
from sage.geometry.hyperbolic_space.hyperbolic_geodesic import (
HyperbolicGeodesic, HyperbolicGeodesicUHP, HyperbolicGeodesicPD,
HyperbolicGeodesicKM, HyperbolicGeodesicHM)
from sage.geometry.hyperbolic_space.hyperbolic_coercion import (
CoercionUHPtoPD, CoercionUHPtoKM, CoercionUHPtoHM,
CoercionPDtoUHP, CoercionPDtoKM, CoercionPDtoHM,
CoercionKMtoUHP, CoercionKMtoPD, CoercionKMtoHM,
CoercionHMtoUHP, CoercionHMtoPD, CoercionHMtoKM)
lazy_import('sage.modules.free_module_element', 'vector')
#####################################################################
## Abstract model
class HyperbolicModel(Parent, UniqueRepresentation, BindableClass):
r"""
Abstract base class for hyperbolic models.
"""
Element = HyperbolicPoint
_Geodesic = HyperbolicGeodesic
_Isometry = HyperbolicIsometry
def __init__(self, space, name, short_name, bounded, conformal,
dimension, isometry_group, isometry_group_is_projective):
"""
Initialize ``self``.
EXAMPLES::
sage: UHP = HyperbolicPlane().UHP()
sage: TestSuite(UHP).run()
sage: PD = HyperbolicPlane().PD()
sage: TestSuite(PD).run()
sage: KM = HyperbolicPlane().KM()
sage: TestSuite(KM).run()
sage: HM = HyperbolicPlane().HM()
sage: TestSuite(HM).run()
"""
self._name = name
self._short_name = short_name
self._bounded = bounded
self._conformal = conformal
self._dimension = dimension
self._isometry_group = isometry_group
self._isometry_group_is_projective = isometry_group_is_projective
from sage.geometry.hyperbolic_space.hyperbolic_interface import HyperbolicModels
Parent.__init__(self, category=HyperbolicModels(space))
def _repr_(self): # Abstract
"""
Return a string representation of ``self``.
EXAMPLES::
sage: HyperbolicPlane().UHP()
Hyperbolic plane in the Upper Half Plane Model
"""
return u'Hyperbolic plane in the {}'.format(self._name)
def _element_constructor_(self, x, is_boundary=None, **graphics_options): #Abstract
"""
Construct an element of ``self``.
EXAMPLES::
sage: UHP = HyperbolicPlane().UHP()
sage: UHP(2 + I)
Point in UHP I + 2
"""
return self.get_point(x, is_boundary, **graphics_options)
def name(self): # Abstract
"""
Return the name of this model.
EXAMPLES::
sage: UHP = HyperbolicPlane().UHP()
sage: UHP.name()
'Upper Half Plane Model'
"""
return self._name
def short_name(self):
"""
Return the short name of this model.
EXAMPLES::
sage: UHP = HyperbolicPlane().UHP()
sage: UHP.short_name()
'UHP'
"""
return self._short_name
def is_bounded(self):
"""
Return ``True`` if ``self`` is a bounded model.
EXAMPLES::
sage: HyperbolicPlane().UHP().is_bounded()
True
sage: HyperbolicPlane().PD().is_bounded()
True
sage: HyperbolicPlane().KM().is_bounded()
True
sage: HyperbolicPlane().HM().is_bounded()
False
"""
return self._bounded
def is_conformal(self):
"""
Return ``True`` if ``self`` is a conformal model.
EXAMPLES::
sage: UHP = HyperbolicPlane().UHP()
sage: UHP.is_conformal()
True
"""
return self._conformal
def is_isometry_group_projective(self):
"""
Return ``True`` if the isometry group of ``self`` is projective.
EXAMPLES::
sage: UHP = HyperbolicPlane().UHP()
sage: UHP.is_isometry_group_projective()
True
"""
return self._isometry_group_is_projective
def point_in_model(self, p):
r"""
Return ``True`` if the point ``p`` is in the interior of the
given model and ``False`` otherwise.
INPUT:
- any object that can converted into a complex number
OUTPUT:
- boolean
EXAMPLES::
sage: HyperbolicPlane().UHP().point_in_model(I)
True
sage: HyperbolicPlane().UHP().point_in_model(-I)
False
"""
return True
def point_test(self, p): #Abstract
r"""
Test whether a point is in the model. If the point is in the
model, do nothing. Otherwise, raise a ``ValueError``.
EXAMPLES::
sage: from sage.geometry.hyperbolic_space.hyperbolic_model import HyperbolicModelUHP
sage: HyperbolicPlane().UHP().point_test(2 + I)
sage: HyperbolicPlane().UHP().point_test(2 - I)
Traceback (most recent call last):
...
ValueError: -I + 2 is not a valid point in the UHP model
"""
if not (self.point_in_model(p) or self.boundary_point_in_model(p)):
error_string = "{0} is not a valid point in the {1} model"
raise ValueError(error_string.format(p, self._short_name))
def boundary_point_in_model(self, p): #Abstract
r"""
Return ``True`` if the point is on the ideal boundary of hyperbolic
space and ``False`` otherwise.
INPUT:
- any object that can converted into a complex number
OUTPUT:
- boolean
EXAMPLES::
sage: HyperbolicPlane().UHP().boundary_point_in_model(I)
False
"""
return True
def bdry_point_test(self, p): #Abstract
r"""
Test whether a point is in the model. If the point is in the
model, do nothing; otherwise raise a ``ValueError``.
EXAMPLES::
sage: HyperbolicPlane().UHP().bdry_point_test(2)
sage: HyperbolicPlane().UHP().bdry_point_test(1 + I)
Traceback (most recent call last):
...
ValueError: I + 1 is not a valid boundary point in the UHP model
"""
if not self._bounded or not self.boundary_point_in_model(p):
error_string = "{0} is not a valid boundary point in the {1} model"
raise ValueError(error_string.format(p, self._short_name))
def isometry_in_model(self, A): #Abstract
r"""
Return ``True`` if the input matrix represents an isometry of the
given model and ``False`` otherwise.
INPUT:
- a matrix that represents an isometry in the appropriate model
OUTPUT:
- boolean
EXAMPLES::
sage: HyperbolicPlane().UHP().isometry_in_model(identity_matrix(2))
True
sage: HyperbolicPlane().UHP().isometry_in_model(identity_matrix(3))
False
"""
return True
def isometry_test(self, A): #Abstract
r"""
Test whether an isometry ``A`` is in the model.
If the isometry is in the model, do nothing. Otherwise, raise
a ``ValueError``.
EXAMPLES::
sage: HyperbolicPlane().UHP().isometry_test(identity_matrix(2))
sage: HyperbolicPlane().UHP().isometry_test(matrix(2, [I,1,2,1]))
Traceback (most recent call last):
...
ValueError:
[I 1]
[2 1] is not a valid isometry in the UHP model
"""
if not self.isometry_in_model(A):
error_string = "\n{0} is not a valid isometry in the {1} model"
raise ValueError(error_string.format(A, self._short_name))
def get_point(self, coordinates, is_boundary=None, **graphics_options):
r"""
Return a point in ``self``.
Automatically determine the type of point to return given either:
#. the coordinates of a point in the interior or ideal boundary
of hyperbolic space, or
#. a :class:`~sage.geometry.hyperbolic_space.hyperbolic_point.HyperbolicPoint` object.
INPUT:
- a point in hyperbolic space or on the ideal boundary
OUTPUT:
- a :class:`~sage.geometry.hyperbolic_space.hyperbolic_point.HyperbolicPoint`
EXAMPLES:
We can create an interior point via the coordinates::
sage: HyperbolicPlane().UHP().get_point(2*I)
Point in UHP 2*I
Or we can create a boundary point via the coordinates::
sage: HyperbolicPlane().UHP().get_point(23)
Boundary point in UHP 23
However we cannot create points outside of our model::
sage: HyperbolicPlane().UHP().get_point(12 - I)
Traceback (most recent call last):
...
ValueError: -I + 12 is not a valid point in the UHP model
::
sage: HyperbolicPlane().UHP().get_point(2 + 3*I)
Point in UHP 3*I + 2
sage: HyperbolicPlane().PD().get_point(0)
Point in PD 0
sage: HyperbolicPlane().KM().get_point((0,0))
Point in KM (0, 0)
sage: HyperbolicPlane().HM().get_point((0,0,1))
Point in HM (0, 0, 1)
sage: p = HyperbolicPlane().UHP().get_point(I, color="red")
sage: p.graphics_options()
{'color': 'red'}
::
sage: HyperbolicPlane().UHP().get_point(12)
Boundary point in UHP 12
sage: HyperbolicPlane().UHP().get_point(infinity)
Boundary point in UHP +Infinity
sage: HyperbolicPlane().PD().get_point(I)
Boundary point in PD I
sage: HyperbolicPlane().KM().get_point((0,-1))
Boundary point in KM (0, -1)
"""
if isinstance(coordinates, HyperbolicPoint):
if coordinates.parent() is not self:
coordinates = self(coordinates)
coordinates.update_graphics(True, **graphics_options)
return coordinates #both Point and BdryPoint
if is_boundary is None:
is_boundary = self.boundary_point_in_model(coordinates)
return self.element_class(self, coordinates, is_boundary, **graphics_options)
def get_geodesic(self, start, end=None, **graphics_options): #Abstract
r"""
Return a geodesic in the appropriate model.
EXAMPLES::
sage: HyperbolicPlane().UHP().get_geodesic(I, 2*I)
Geodesic in UHP from I to 2*I
sage: HyperbolicPlane().PD().get_geodesic(0, I/2)
Geodesic in PD from 0 to 1/2*I
sage: HyperbolicPlane().KM().get_geodesic((1/2, 1/2), (0,0))
Geodesic in KM from (1/2, 1/2) to (0, 0)
sage: HyperbolicPlane().HM().get_geodesic((0,0,1), (1,0, sqrt(2)))
Geodesic in HM from (0, 0, 1) to (1, 0, sqrt(2))
TESTS::
sage: UHP = HyperbolicPlane().UHP()
sage: g = UHP.get_geodesic(UHP.get_point(I), UHP.get_point(2 + I))
sage: h = UHP.get_geodesic(I, 2 + I)
sage: g == h
True
"""
if end is None:
if isinstance(start, HyperbolicGeodesic):
G = start
if G.model() is not self:
G = G.to_model(self)
G.update_graphics(True, **graphics_options)
return G
raise ValueError("the start and end points must be specified")
return self._Geodesic(self, self(start), self(end), **graphics_options)
def get_isometry(self, A):
r"""
Return an isometry in ``self`` from the matrix ``A`` in the
isometry group of ``self``.
EXAMPLES::
sage: HyperbolicPlane().UHP().get_isometry(identity_matrix(2))
Isometry in UHP
[1 0]
[0 1]
sage: HyperbolicPlane().PD().get_isometry(identity_matrix(2))
Isometry in PD
[1 0]
[0 1]
sage: HyperbolicPlane().KM().get_isometry(identity_matrix(3))
Isometry in KM
[1 0 0]
[0 1 0]
[0 0 1]
sage: HyperbolicPlane().HM().get_isometry(identity_matrix(3))
Isometry in HM
[1 0 0]
[0 1 0]
[0 0 1]
"""
if isinstance(A, HyperbolicIsometry):
if A.model() is not self:
return A.to_model(self)
return A
return self._Isometry(self, A)
def random_element(self, **kwargs):
r"""
Return a random point in ``self``.
The points are uniformly distributed over the rectangle
`[-10, 10] \times [0, 10 i]` in the upper half plane model.
EXAMPLES::
sage: p = HyperbolicPlane().UHP().random_element()
sage: bool((p.coordinates().imag()) > 0)
True
sage: p = HyperbolicPlane().PD().random_element()
sage: HyperbolicPlane().PD().point_in_model(p.coordinates())
True
sage: p = HyperbolicPlane().KM().random_element()
sage: HyperbolicPlane().KM().point_in_model(p.coordinates())
True
sage: p = HyperbolicPlane().HM().random_element().coordinates()
sage: bool((p[0]**2 + p[1]**2 - p[2]**2 - 1) < 10**-8)
True
"""
return self.random_point(**kwargs)
def random_point(self, **kwargs):
r"""
Return a random point of ``self``.
The points are uniformly distributed over the rectangle
`[-10, 10] \times [0, 10 i]` in the upper half plane model.
EXAMPLES::
sage: p = HyperbolicPlane().UHP().random_point()
sage: bool((p.coordinates().imag()) > 0)
True
sage: PD = HyperbolicPlane().PD()
sage: p = PD.random_point()
sage: PD.point_in_model(p.coordinates())
True
"""
R = self.realization_of().a_realization()
return self(R.random_point(**kwargs))
def random_geodesic(self, **kwargs):
r"""
Return a random hyperbolic geodesic.
Return the geodesic between two random points.
EXAMPLES::
sage: h = HyperbolicPlane().PD().random_geodesic()
sage: bool((h.endpoints()[0].coordinates()).imag() >= 0)
True
"""
R = self.realization_of().a_realization()
g_ends = [R.random_point(**kwargs) for k in range(2)]
return self.get_geodesic(self(g_ends[0]), self(g_ends[1]))
def random_isometry(self, preserve_orientation=True, **kwargs):
r"""
Return a random isometry in the model of ``self``.
INPUT:
- ``preserve_orientation`` -- if ``True`` return an
orientation-preserving isometry
OUTPUT:
- a hyperbolic isometry
EXAMPLES::
sage: A = HyperbolicPlane().PD().random_isometry()
sage: A.preserves_orientation()
True
sage: B = HyperbolicPlane().PD().random_isometry(preserve_orientation=False)
sage: B.preserves_orientation()
False
"""
R = self.realization_of().a_realization()
A = R.random_isometry(preserve_orientation, **kwargs)
return A.to_model(self)
################
# Dist methods #
################
    def dist(self, a, b):
        r"""
        Calculate the hyperbolic distance between ``a`` and ``b``.

        INPUT:

        - ``a``, ``b`` -- a point or geodesic

        OUTPUT:

        - the hyperbolic distance

        EXAMPLES::

            sage: UHP = HyperbolicPlane().UHP()
            sage: p1 = UHP.get_point(5 + 7*I)
            sage: p2 = UHP.get_point(1.0 + I)
            sage: UHP.dist(p1, p2)
            2.23230104635820

            sage: PD = HyperbolicPlane().PD()
            sage: p1 = PD.get_point(0)
            sage: p2 = PD.get_point(I/2)
            sage: PD.dist(p1, p2)
            arccosh(5/3)
            sage: UHP(p1).dist(UHP(p2))
            arccosh(5/3)

            sage: KM = HyperbolicPlane().KM()
            sage: p1 = KM.get_point((0, 0))
            sage: p2 = KM.get_point((1/2, 1/2))
            sage: numerical_approx(KM.dist(p1, p2))
            0.881373587019543

            sage: HM = HyperbolicPlane().HM()
            sage: p1 = HM.get_point((0,0,1))
            sage: p2 = HM.get_point((1,0,sqrt(2)))
            sage: numerical_approx(HM.dist(p1, p2))
            0.881373587019543

        Distance between a point and itself is 0::

            sage: p = UHP.get_point(47 + I)
            sage: UHP.dist(p, p)
            0

        Points on the boundary are infinitely far from interior points::

            sage: UHP.get_point(3).dist(UHP.get_point(I))
            +Infinity

        TESTS::

            sage: UHP.dist(UHP.get_point(I), UHP.get_point(2*I))
            arccosh(5/4)
            sage: UHP.dist(I, 2*I)
            arccosh(5/4)
        """
        def coords(x):
            # Coerce ``x`` into this model and extract its raw coordinates.
            return self(x).coordinates()

        if isinstance(a, HyperbolicGeodesic):
            if isinstance(b, HyperbolicGeodesic):
                if not a.is_parallel(b):
                    # Non-parallel geodesics intersect, so distance is 0.
                    return 0

                if a.is_ultra_parallel(b):
                    perp = a.common_perpendicular(b)
                    # Find where a and b intersect the common perp...
                    p = a.intersection(perp)[0]
                    q = b.intersection(perp)[0]
                    # ...and return their distance
                    return self._dist_points(coords(p), coords(q))

                # Remaining case: asymptotically parallel geodesics, whose
                # distance infimum is not attained -- not implemented.
                raise NotImplementedError("can only compute distance between"
                                          " ultra-parallel and intersecting geodesics")

            # If only one is a geodesic, make sure it's b to make things easier
            a,b = b,a

        if isinstance(b, HyperbolicGeodesic):
            # Point-to-geodesic distance, computed from the geodesic's
            # ideal endpoints.
            (p, q) = b.ideal_endpoints()
            return self._dist_geod_point(coords(p), coords(q), coords(a))

        # Plain point-to-point distance.
        return self._dist_points(coords(a), coords(b))
def _dist_points(self, p1, p2):
r"""
Compute the distance between two points.
INPUT:
- ``p1``, ``p2`` -- the coordinates of the points
EXAMPLES::
sage: HyperbolicPlane().PD()._dist_points(3/5*I, 0)
arccosh(17/8)
"""
R = self.realization_of().a_realization()
phi = R.coerce_map_from(self)
return R._dist_points(phi.image_coordinates(p1), phi.image_coordinates(p2))
def _dist_geod_point(self, start, end, p):
r"""
Return the hyperbolic distance from a given hyperbolic geodesic
and a hyperbolic point.
INPUT:
- ``start`` -- the start ideal point coordinates of the geodesic
- ``end`` -- the end ideal point coordinates of the geodesic
- ``p`` -- the coordinates of the point
OUTPUT:
- the hyperbolic distance
EXAMPLES::
sage: HyperbolicPlane().PD()._dist_geod_point(3/5*I + 4/5, I, 0)
arccosh(1/10*sqrt(5)*((sqrt(5) - 1)^2 + 4) + 1)
If `p` is a boundary point, the distance is infinity::
sage: HyperbolicPlane().PD()._dist_geod_point(3/5*I + 4/5, I, 12/13*I + 5/13)
+Infinity
"""
R = self.realization_of().a_realization()
assert R is not self
def phi(c):
return R.coerce_map_from(self).image_coordinates(c)
return R._dist_geod_point(phi(start), phi(end), phi(p))
####################
# Isometry methods #
####################
def isometry_from_fixed_points(self, repel, attract):
r"""
Given two fixed points ``repel`` and ``attract`` as hyperbolic
points return a hyperbolic isometry with ``repel`` as repelling
fixed point and ``attract`` as attracting fixed point.
EXAMPLES::
sage: UHP = HyperbolicPlane().UHP()
sage: PD = HyperbolicPlane().PD()
sage: PD.isometry_from_fixed_points(-i, i)
Isometry in PD
[ 3/4 1/4*I]
[-1/4*I 3/4]
::
sage: p, q = PD.get_point(1/2 + I/2), PD.get_point(6/13 + 9/13*I)
sage: PD.isometry_from_fixed_points(p, q)
Traceback (most recent call last):
...
ValueError: fixed points of hyperbolic elements must be ideal
sage: p, q = PD.get_point(4/5 + 3/5*I), PD.get_point(-I)
sage: PD.isometry_from_fixed_points(p, q)
Isometry in PD
[ 1/6*I - 2/3 -1/3*I - 1/6]
[ 1/3*I - 1/6 -1/6*I - 2/3]
"""
R = self.realization_of().a_realization()
return R.isometry_from_fixed_points(R(self(repel)), R(self(attract))).to_model(self)
#####################################################################
## Upper half plane model
class HyperbolicModelUHP(HyperbolicModel):
    r"""
    Upper Half Plane model.
    """
    Element = HyperbolicPointUHP
    _Geodesic = HyperbolicGeodesicUHP
    _Isometry = HyperbolicIsometryUHP

    def __init__(self, space):
        """
        Initialize ``self``.

        EXAMPLES::

            sage: UHP = HyperbolicPlane().UHP()
            sage: TestSuite(UHP).run()
        """
        HyperbolicModel.__init__(self, space,
            name="Upper Half Plane Model", short_name="UHP",
            bounded=True, conformal=True, dimension=2,
            isometry_group="PSL(2, \\RR)", isometry_group_is_projective=True)

    def _coerce_map_from_(self, X):
        """
        Return if there is a coercion map from ``X`` to ``self``.

        EXAMPLES::

            sage: UHP = HyperbolicPlane().UHP()
            sage: UHP.has_coerce_map_from(HyperbolicPlane().PD())
            True
            sage: UHP.has_coerce_map_from(HyperbolicPlane().KM())
            True
            sage: UHP.has_coerce_map_from(HyperbolicPlane().HM())
            True
            sage: UHP.has_coerce_map_from(QQ)
            False
        """
        # Dispatch on the source model; anything else falls back to the
        # generic coercion discovery of the parent class.
        if isinstance(X, HyperbolicModelPD):
            return CoercionPDtoUHP(Hom(X, self))
        if isinstance(X, HyperbolicModelKM):
            return CoercionKMtoUHP(Hom(X, self))
        if isinstance(X, HyperbolicModelHM):
            return CoercionHMtoUHP(Hom(X, self))
        return super(HyperbolicModelUHP, self)._coerce_map_from_(X)

    def point_in_model(self, p):
        r"""
        Check whether a complex number lies in the open upper half plane.

        EXAMPLES::

            sage: UHP = HyperbolicPlane().UHP()
            sage: UHP.point_in_model(1 + I)
            True
            sage: UHP.point_in_model(infinity)
            False
            sage: UHP.point_in_model(CC(infinity))
            False
            sage: UHP.point_in_model(RR(infinity))
            False
            sage: UHP.point_in_model(1)
            False
            sage: UHP.point_in_model(12)
            False
            sage: UHP.point_in_model(1 - I)
            False
            sage: UHP.point_in_model(-2*I)
            False
            sage: UHP.point_in_model(I)
            True
            sage: UHP.point_in_model(0)  # Not interior point
            False
        """
        if isinstance(p, HyperbolicPoint):
            # NOTE(review): returning ``is_boundary()`` from an *interior*
            # test looks inverted, but every model in this file uses the
            # same convention -- confirm intent before changing.
            return p.is_boundary()
        # Interior points are the complex numbers with positive imaginary
        # part; infinity coerced through CC has imag 0 and is excluded.
        return bool(imag(CC(p)) > 0)

    def boundary_point_in_model(self, p):
        r"""
        Check whether a complex number is a real number or ``\infty``.

        In the UHP model, this is the ideal boundary of hyperbolic space.

        EXAMPLES::

            sage: UHP = HyperbolicPlane().UHP()
            sage: UHP.boundary_point_in_model(1 + I)
            False
            sage: UHP.boundary_point_in_model(infinity)
            True
            sage: UHP.boundary_point_in_model(CC(infinity))
            True
            sage: UHP.boundary_point_in_model(RR(infinity))
            True
            sage: UHP.boundary_point_in_model(1)
            True
            sage: UHP.boundary_point_in_model(12)
            True
            sage: UHP.boundary_point_in_model(1 - I)
            False
            sage: UHP.boundary_point_in_model(-2*I)
            False
            sage: UHP.boundary_point_in_model(0)
            True
            sage: UHP.boundary_point_in_model(I)
            False
        """
        if isinstance(p, HyperbolicPoint):
            return p.is_boundary()
        # Boundary points have (numerically) zero imaginary part, or are
        # the point at infinity.
        im = abs(imag(CC(p)).n())
        return (im < EPSILON) or bool(p == infinity)

    def isometry_in_model(self, A):
        r"""
        Check that ``A`` acts as an isometry on the upper half plane.
        That is, ``A`` must be an invertible `2 \times 2` matrix with real
        entries.

        EXAMPLES::

            sage: UHP = HyperbolicPlane().UHP()
            sage: A = matrix(2,[1,2,3,4])
            sage: UHP.isometry_in_model(A)
            True
            sage: B = matrix(2,[I,2,4,1])
            sage: UHP.isometry_in_model(B)
            False

        An example of a matrix `A` such that `\det(A) \neq 1`, but the `A`
        acts isometrically::

            sage: C = matrix(2,[10,0,0,10])
            sage: UHP.isometry_in_model(C)
            True
        """
        if isinstance(A, HyperbolicIsometry):
            return True
        # A real 2x2 matrix acts isometrically iff it is invertible, so
        # |det(A)| must be bounded away from zero.  (The previous test used
        # ``abs(A.det()) > -EPSILON``, which is vacuously true for every
        # matrix and therefore let singular matrices through.)
        return bool(A.ncols() == 2 and A.nrows() == 2 and
                    sum([k in RR for k in A.list()]) == 4 and
                    abs(A.det()) > EPSILON)

    def get_background_graphic(self, **bdry_options):
        r"""
        Return a graphic object that makes the model easier to visualize.
        For the upper half space, the background object is the ideal boundary.

        EXAMPLES::

            sage: hp = HyperbolicPlane().UHP().get_background_graphic()
        """
        from sage.plot.line import line
        # The ideal boundary is the real axis; draw a finite segment of it.
        bd_min = bdry_options.get('bd_min', -5)
        bd_max = bdry_options.get('bd_max', 5)
        return line(((bd_min, 0), (bd_max, 0)), color='black')

    ################
    # Dist methods #
    ################

    def _dist_points(self, p1, p2):
        r"""
        Compute the distance between two points in the Upper Half Plane
        using the hyperbolic metric.

        INPUT:

        - ``p1``, ``p2`` -- the coordinates of the points

        EXAMPLES::

            sage: HyperbolicPlane().UHP()._dist_points(4.0*I, I)
            1.38629436111989
        """
        # Standard UHP distance formula:
        # d = arccosh(1 + |p1 - p2|^2 / (2 Im(p1) Im(p2))).
        num = (real(p2) - real(p1))**2 + (imag(p2) - imag(p1))**2
        denom = 2 * imag(p1) * imag(p2)
        if denom == 0:
            # At least one point is on the ideal boundary (Im = 0).
            return infinity
        return arccosh(1 + num/denom)

    def _dist_geod_point(self, start, end, p):
        r"""
        Return the hyperbolic distance from a given hyperbolic geodesic
        and a hyperbolic point.

        INPUT:

        - ``start`` -- the start ideal point coordinates of the geodesic
        - ``end`` -- the end ideal point coordinates of the geodesic
        - ``p`` -- the coordinates of the point

        OUTPUT:

        - the hyperbolic distance

        EXAMPLES::

            sage: UHP = HyperbolicPlane().UHP()
            sage: UHP._dist_geod_point(2, infinity, I)
            arccosh(1/10*sqrt(5)*((sqrt(5) - 1)^2 + 4) + 1)

        If `p` is a boundary point, the distance is infinity::

            sage: HyperbolicPlane().UHP()._dist_geod_point(2, infinity, 5)
            +Infinity
        """
        # Here is the trick for computing distance to a geodesic:
        # find an isometry mapping the geodesic to the geodesic between
        # 0 and infinity (so corresponding to the line imag(z) = 0.
        # then any complex number is r exp(i*theta) in polar coordinates.
        # the mutual perpendicular between this point and imag(z) = 0
        # intersects imag(z) = 0 at ri.  So we calculate the distance
        # between r exp(i*theta) and ri after we transform the original
        # point.
        if start + end != infinity:
            # Not a straight line:
            # Map the endpoints to 0 and infinity and the midpoint to 1.
            T = HyperbolicGeodesicUHP._crossratio_matrix(start, (start + end)/2, end)
        else:
            # Is a straight line:
            # Map the endpoints to 0 and infinity and another endpoint to 1.
            T = HyperbolicGeodesicUHP._crossratio_matrix(start, start + 1, end)
        x = moebius_transform(T, p)
        return self._dist_points(x, abs(x)*I)

    #################
    # Point Methods #
    #################

    def random_point(self, **kwargs):
        r"""
        Return a random point in the upper half plane.  The points are
        uniformly distributed over the rectangle `[-10, 10] \times [0, 10i]`.

        EXAMPLES::

            sage: p = HyperbolicPlane().UHP().random_point().coordinates()
            sage: bool((p.imag()) > 0)
            True
        """
        # TODO: use **kwargs to allow these to be set
        real_min = -10
        real_max = 10
        imag_min = 0
        imag_max = 10
        p = RR.random_element(min=real_min, max=real_max) \
            + I * RR.random_element(min=imag_min, max=imag_max)
        return self.get_point(p)

    ####################
    # Isometry Methods #
    ####################

    def isometry_from_fixed_points(self, repel, attract):
        r"""
        Given two fixed points ``repel`` and ``attract`` as complex
        numbers return a hyperbolic isometry with ``repel`` as repelling
        fixed point and ``attract`` as attracting fixed point.

        EXAMPLES::

            sage: UHP = HyperbolicPlane().UHP()
            sage: UHP.isometry_from_fixed_points(2 + I, 3 + I)
            Traceback (most recent call last):
            ...
            ValueError: fixed points of hyperbolic elements must be ideal

            sage: UHP.isometry_from_fixed_points(2, 0)
            Isometry in UHP
            [  -1    0]
            [-1/3 -1/3]

        TESTS::

            sage: UHP = HyperbolicPlane().UHP()
            sage: UHP.isometry_from_fixed_points(0, 4)
            Isometry in UHP
            [  -1    0]
            [-1/5 -1/5]
            sage: UHP.isometry_from_fixed_points(UHP.get_point(0), UHP.get_point(4))
            Isometry in UHP
            [  -1    0]
            [-1/5 -1/5]
        """
        if isinstance(repel, HyperbolicPoint):
            repel = repel._coordinates
        if isinstance(attract, HyperbolicPoint):
            attract = attract._coordinates

        # Hyperbolic isometries fix two *ideal* points; reject interior ones.
        if imag(repel) + imag(attract) > EPSILON:
            raise ValueError("fixed points of hyperbolic elements must be ideal")
        repel = real(repel)
        attract = real(attract)
        # Build a Moebius map fixing both points; the third point pair
        # forces the map to be non-trivial (translating along the axis).
        if repel == infinity:
            A = self._moebius_sending([infinity, attract, attract + 1],
                                      [infinity, attract, attract + 2])
        elif attract == infinity:
            A = self._moebius_sending([repel, infinity, repel + 1],
                                      [repel, infinity, repel + 2])
        else:
            A = self._moebius_sending([repel, attract, infinity],
                                      [repel, attract, max(repel, attract) + 1])
        return self.get_isometry(A)

    def random_isometry(self, preserve_orientation=True, **kwargs):
        r"""
        Return a random isometry in the Upper Half Plane model.

        INPUT:

        - ``preserve_orientation`` -- if ``True`` return an
          orientation-preserving isometry

        OUTPUT:

        - a hyperbolic isometry

        EXAMPLES::

            sage: A = HyperbolicPlane().UHP().random_isometry()
            sage: B = HyperbolicPlane().UHP().random_isometry(preserve_orientation=False)
            sage: B.preserves_orientation()
            False
        """
        # Rejection-sample a (numerically) invertible real 2x2 matrix.
        [a,b,c,d] = [RR.random_element() for k in range(4)]
        while abs(a*d - b*c) < EPSILON:
            [a,b,c,d] = [RR.random_element() for k in range(4)]
        M = matrix(RDF, 2,[a,b,c,d])
        # Normalize so |det(M)| == 1.
        M = M / (M.det()).abs().sqrt()
        # Multiplying by [[0,1],[1,0]] flips the sign of the determinant,
        # i.e. toggles whether the isometry preserves orientation.
        if M.det() > 0:
            if not preserve_orientation:
                M = M * matrix(2,[0,1,1,0])
        elif preserve_orientation:
            M = M * matrix(2,[0,1,1,0])
        return self._Isometry(self, M, check=False)

    ###################
    # Helping Methods #
    ###################

    @staticmethod
    def _moebius_sending(z, w):  # UHP
        r"""
        Given two lists ``z`` and ``w`` of three points each in
        `\mathbb{CP}^1`, return the linear fractional transformation
        taking the points in ``z`` to the points in ``w``.

        EXAMPLES::

            sage: from sage.geometry.hyperbolic_space.hyperbolic_model import HyperbolicModelUHP
            sage: from sage.geometry.hyperbolic_space.hyperbolic_isometry import moebius_transform
            sage: bool(abs(moebius_transform(HyperbolicModelUHP._moebius_sending([1,2,infinity],[3 - I, 5*I,-12]),1) - 3 + I) < 10^-4)
            True
            sage: bool(abs(moebius_transform(HyperbolicModelUHP._moebius_sending([1,2,infinity],[3 - I, 5*I,-12]),2) - 5*I) < 10^-4)
            True
            sage: bool(abs(moebius_transform(HyperbolicModelUHP._moebius_sending([1,2,infinity],[3 - I, 5*I,-12]),infinity) + 12) < 10^-4)
            True
        """
        if len(z) != 3 or len(w) != 3:
            raise TypeError("moebius_sending requires each list to be three points long")
        # Each cross-ratio matrix sends the respective triple to
        # (0, 1, infinity); compose one with the inverse of the other.
        A = HyperbolicGeodesicUHP._crossratio_matrix(z[0],z[1],z[2])
        B = HyperbolicGeodesicUHP._crossratio_matrix(w[0],w[1],w[2])
        return B.inverse() * A
#####################################################################
## Poincaré disk model
class HyperbolicModelPD(HyperbolicModel):
    r"""
    Poincaré Disk Model.
    """
    _Geodesic = HyperbolicGeodesicPD
    _Isometry = HyperbolicIsometryPD

    def __init__(self, space):
        """
        Initialize ``self``.

        EXAMPLES::

            sage: PD = HyperbolicPlane().PD()
            sage: TestSuite(PD).run()
        """
        # name should really be 'Poincaré Disk Model', but utf8 is not
        # accepted by repr
        HyperbolicModel.__init__(self, space,
            name=u'Poincare Disk Model', short_name="PD",
            bounded=True, conformal=True, dimension=2,
            isometry_group="PU(1, 1)",
            isometry_group_is_projective=True)

    def _coerce_map_from_(self, X):
        """
        Return if there is a coercion map from ``X`` to ``self``.

        EXAMPLES::

            sage: PD = HyperbolicPlane().PD()
            sage: PD.has_coerce_map_from(HyperbolicPlane().UHP())
            True
            sage: PD.has_coerce_map_from(HyperbolicPlane().KM())
            True
            sage: PD.has_coerce_map_from(HyperbolicPlane().HM())
            True
            sage: PD.has_coerce_map_from(QQ)
            False
        """
        # Dispatch on the source model; anything else falls back to the
        # generic coercion discovery of the parent class.
        if isinstance(X, HyperbolicModelUHP):
            return CoercionUHPtoPD(Hom(X, self))
        if isinstance(X, HyperbolicModelKM):
            return CoercionKMtoPD(Hom(X, self))
        if isinstance(X, HyperbolicModelHM):
            return CoercionHMtoPD(Hom(X, self))
        return super(HyperbolicModelPD, self)._coerce_map_from_(X)

    def point_in_model(self, p):
        r"""
        Check whether a complex number lies in the open unit disk.

        EXAMPLES::

            sage: PD = HyperbolicPlane().PD()
            sage: PD.point_in_model(1.00)
            False
            sage: PD.point_in_model(1/2 + I/2)
            True
            sage: PD.point_in_model(1 + .2*I)
            False
        """
        if isinstance(p, HyperbolicPoint):
            # NOTE(review): delegating an *interior* test to ``is_boundary()``
            # looks inverted, but every model in this file uses the same
            # convention -- confirm intent before changing.
            return p.is_boundary()
        # Interior points are exactly the complex numbers of modulus < 1.
        return bool(abs(CC(p)) < 1)

    def boundary_point_in_model(self, p):
        r"""
        Check whether a complex number lies in the open unit disk.

        EXAMPLES::

            sage: PD = HyperbolicPlane().PD()
            sage: PD.boundary_point_in_model(1.00)
            True
            sage: PD.boundary_point_in_model(1/2 + I/2)
            False
            sage: PD.boundary_point_in_model(1 + .2*I)
            False
        """
        if isinstance(p, HyperbolicPoint):
            return p.is_boundary()
        # Boundary points lie on the unit circle, up to tolerance EPSILON.
        return bool(abs(abs(CC(p)) - 1) < EPSILON)

    def isometry_in_model(self, A):
        r"""
        Check if the given matrix ``A`` is in the group `U(1,1)`.

        EXAMPLES::

            sage: z = [CC.random_element() for k in range(2)]; z.sort(key=abs)
            sage: A = matrix(2,[z[1], z[0],z[0].conjugate(),z[1].conjugate()])
            sage: HyperbolicPlane().PD().isometry_in_model(A)
            True
        """
        if isinstance(A, HyperbolicIsometry):
            return True
        # alpha = A[0][0]
        # beta = A[0][1]
        # Orientation preserving and reversing: multiplying by I turns an
        # orientation-reversing candidate into the orientation-preserving
        # normal form that ``_orientation_preserving`` recognizes.
        return (HyperbolicIsometryPD._orientation_preserving(A) or
                HyperbolicIsometryPD._orientation_preserving(I * A))

    def get_background_graphic(self, **bdry_options):
        r"""
        Return a graphic object that makes the model easier to visualize.

        For the Poincaré disk, the background object is the ideal boundary.

        EXAMPLES::

            sage: circ = HyperbolicPlane().PD().get_background_graphic()
        """
        from sage.plot.circle import circle
        # The ideal boundary of the PD model is the unit circle.
        return circle((0, 0), 1, axes=False, color='black')
#####################################################################
## Klein disk model
class HyperbolicModelKM(HyperbolicModel):
    r"""
    Klein Model.
    """
    _Geodesic = HyperbolicGeodesicKM
    _Isometry = HyperbolicIsometryKM

    def __init__(self, space):
        """
        Initialize ``self``.

        EXAMPLES::

            sage: KM = HyperbolicPlane().KM()
            sage: TestSuite(KM).run()
        """
        HyperbolicModel.__init__(self, space,
            name="Klein Disk Model", short_name="KM",
            bounded=True, conformal=False, dimension=2,
            isometry_group="PSO(2, 1)", isometry_group_is_projective=True)

    def _coerce_map_from_(self, X):
        """
        Return if there is a coercion map from ``X`` to ``self``.

        EXAMPLES::

            sage: KM = HyperbolicPlane().KM()
            sage: KM.has_coerce_map_from(HyperbolicPlane().UHP())
            True
            sage: KM.has_coerce_map_from(HyperbolicPlane().PD())
            True
            sage: KM.has_coerce_map_from(HyperbolicPlane().HM())
            True
            sage: KM.has_coerce_map_from(QQ)
            False
        """
        # Dispatch on the source model; anything else falls back to the
        # generic coercion discovery of the parent class.
        if isinstance(X, HyperbolicModelUHP):
            return CoercionUHPtoKM(Hom(X, self))
        if isinstance(X, HyperbolicModelPD):
            return CoercionPDtoKM(Hom(X, self))
        if isinstance(X, HyperbolicModelHM):
            return CoercionHMtoKM(Hom(X, self))
        return super(HyperbolicModelKM, self)._coerce_map_from_(X)

    def point_in_model(self, p):
        r"""
        Check whether a point lies in the open unit disk.

        EXAMPLES::

            sage: KM = HyperbolicPlane().KM()
            sage: KM.point_in_model((1, 0))
            False
            sage: KM.point_in_model((1/2, 1/2))
            True
            sage: KM.point_in_model((1, .2))
            False
        """
        if isinstance(p, HyperbolicPoint):
            # NOTE(review): delegating an *interior* test to ``is_boundary()``
            # looks inverted, but every model in this file uses the same
            # convention -- confirm intent before changing.
            return p.is_boundary()
        # Interior points are real pairs (x, y) with x^2 + y^2 < 1.
        return len(p) == 2 and bool(p[0]**2 + p[1]**2 < 1)

    def boundary_point_in_model(self, p):
        r"""
        Check whether a point lies in the unit circle, which corresponds
        to the ideal boundary of the hyperbolic plane in the Klein model.

        EXAMPLES::

            sage: KM = HyperbolicPlane().KM()
            sage: KM.boundary_point_in_model((1, 0))
            True
            sage: KM.boundary_point_in_model((1/2, 1/2))
            False
            sage: KM.boundary_point_in_model((1, .2))
            False
        """
        if isinstance(p, HyperbolicPoint):
            return p.is_boundary()
        # Boundary points satisfy x^2 + y^2 == 1 up to tolerance EPSILON.
        return len(p) == 2 and bool(abs(p[0]**2 + p[1]**2 - 1) < EPSILON)

    def isometry_in_model(self, A):
        r"""
        Check if the given matrix ``A`` is in the group `SO(2,1)`.

        EXAMPLES::

            sage: A = matrix(3, [[1, 0, 0], [0, 17/8, 15/8], [0, 15/8, 17/8]])
            sage: HyperbolicPlane().KM().isometry_in_model(A)
            True
        """
        if isinstance(A, HyperbolicIsometry):
            return True
        # A preserves the Lorentz form iff A * G * A^T == G (numerically).
        return bool((A*LORENTZ_GRAM*A.transpose() - LORENTZ_GRAM).norm()**2 <
                    EPSILON)

    def get_background_graphic(self, **bdry_options):
        r"""
        Return a graphic object that makes the model easier to visualize.

        For the Klein model, the background object is the ideal boundary.

        EXAMPLES::

            sage: circ = HyperbolicPlane().KM().get_background_graphic()
        """
        from sage.plot.circle import circle
        # The ideal boundary of the KM model is the unit circle.
        return circle((0, 0), 1, axes=False, color='black')
#####################################################################
## Hyperboloid model
class HyperbolicModelHM(HyperbolicModel):
    r"""
    Hyperboloid Model.
    """
    # NOTE(review): unlike the other models, no ``_Isometry`` is set here,
    # so the class-level default from the base model is used -- confirm.
    _Geodesic = HyperbolicGeodesicHM

    def __init__(self, space):
        """
        Initialize ``self``.

        EXAMPLES::

            sage: HM = HyperbolicPlane().HM()
            sage: TestSuite(HM).run()
        """
        HyperbolicModel.__init__(self, space,
            name="Hyperboloid Model", short_name="HM",
            bounded=False, conformal=True, dimension=2,
            isometry_group="SO(2, 1)", isometry_group_is_projective=False)

    def _coerce_map_from_(self, X):
        """
        Return if there is a coercion map from ``X`` to ``self``.

        EXAMPLES::

            sage: HM = HyperbolicPlane().HM()
            sage: HM.has_coerce_map_from(HyperbolicPlane().UHP())
            True
            sage: HM.has_coerce_map_from(HyperbolicPlane().PD())
            True
            sage: HM.has_coerce_map_from(HyperbolicPlane().KM())
            True
            sage: HM.has_coerce_map_from(QQ)
            False
        """
        # Dispatch on the source model; anything else falls back to the
        # generic coercion discovery of the parent class.
        if isinstance(X, HyperbolicModelUHP):
            return CoercionUHPtoHM(Hom(X, self))
        if isinstance(X, HyperbolicModelPD):
            return CoercionPDtoHM(Hom(X, self))
        if isinstance(X, HyperbolicModelKM):
            return CoercionKMtoHM(Hom(X, self))
        return super(HyperbolicModelHM, self)._coerce_map_from_(X)

    def point_in_model(self, p):
        r"""
        Check whether a complex number lies in the hyperboloid.

        EXAMPLES::

            sage: HM = HyperbolicPlane().HM()
            sage: HM.point_in_model((0,0,1))
            True
            sage: HM.point_in_model((1,0,sqrt(2)))
            True
            sage: HM.point_in_model((1,2,1))
            False
        """
        if isinstance(p, HyperbolicPoint):
            # NOTE(review): delegating an *interior* test to ``is_boundary()``
            # looks inverted, but every model in this file uses the same
            # convention -- confirm intent before changing.
            return p.is_boundary()
        # Points of the model satisfy x^2 + y^2 - z^2 == -1 (up to EPSILON).
        return len(p) == 3 and bool(abs(p[0]**2 + p[1]**2 - p[2]**2 + 1) < EPSILON)

    def boundary_point_in_model(self, p):
        r"""
        Return ``False`` since the Hyperboloid model has no boundary points.

        EXAMPLES::

            sage: HM = HyperbolicPlane().HM()
            sage: HM.boundary_point_in_model((0,0,1))
            False
            sage: HM.boundary_point_in_model((1,0,sqrt(2)))
            False
            sage: HM.boundary_point_in_model((1,2,1))
            False
        """
        return False

    def isometry_in_model(self, A):
        r"""
        Test that the matrix ``A`` is in the group `SO(2,1)^+`.

        EXAMPLES::

            sage: A = diagonal_matrix([1,1,-1])
            sage: HyperbolicPlane().HM().isometry_in_model(A)
            True
        """
        if isinstance(A, HyperbolicIsometry):
            return True
        # A preserves the Lorentz form iff A * G * A^T == G (numerically).
        return bool((A*LORENTZ_GRAM*A.transpose() - LORENTZ_GRAM).norm()**2 < EPSILON)

    def get_background_graphic(self, **bdry_options):
        r"""
        Return a graphic object that makes the model easier to visualize.

        For the hyperboloid model, the background object is the hyperboloid
        itself.

        EXAMPLES::

            sage: H = HyperbolicPlane().HM().get_background_graphic()
        """
        from sage.plot.plot3d.all import plot3d
        from sage.symbolic.ring import SR
        hyperboloid_opacity = bdry_options.get('hyperboloid_opacity', .1)
        z_height = bdry_options.get('z_height', 7.0)
        # Choose the x/y range so the plotted sheet tops out at z_height.
        x_max = sqrt((z_height ** 2 - 1) / 2.0)
        x = SR.var('x')
        y = SR.var('y')
        # Upper sheet of the hyperboloid: z = sqrt(1 + x^2 + y^2).
        return plot3d((1 + x ** 2 + y ** 2).sqrt(),
                      (x, -x_max, x_max), (y,-x_max, x_max),
                      opacity=hyperboloid_opacity, **bdry_options)
| 32.580991 | 138 | 0.563337 |
126ecf0ed83c795b9324e9a1861c563f3411695e | 18,446 | py | Python | is_number/_version.py | kenkehoe/is-number | e7129b73771d111bd7dcb595752027feae89e33f | [
"MIT"
] | null | null | null | is_number/_version.py | kenkehoe/is-number | e7129b73771d111bd7dcb595752027feae89e33f | [
"MIT"
] | null | null | null | is_number/_version.py | kenkehoe/is-number | e7129b73771d111bd7dcb595752027feae89e33f | [
"MIT"
] | null | null | null |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # These strings are substituted by ``git archive`` (export-subst).
    # setup.py/versioneer.py greps for the variable names, so each
    # assignment must stay on its own line.
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Attributes (``VCS``, ``style``, ``tag_prefix``, ``parentdir_prefix``,
    ``versionfile_source``, ``verbose``) are attached dynamically by
    ``get_config()``.
    """
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values are filled in when 'setup.py versioneer' creates
    # _version.py.
    settings = {
        "VCS": "git",
        "style": "pep440",
        "tag_prefix": "",
        "parentdir_prefix": "",
        "versionfile_source": "is_number/_version.py",
        "verbose": False,
    }
    cfg = VersioneerConfig()
    for attr, value in settings.items():
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Raised by the individual version-discovery strategies so the caller can
    fall through to the next strategy.
    """
# Mapping of VCS name -> long-form _version.py template text (kept for the
# versioneer machinery; empty here).
LONG_VERSION_PY = {}
# Registry of handler callables: HANDLERS[vcs][method] -> function, filled
# in by the ``register_vcs_handler`` decorator below.
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        # setdefault creates the per-VCS sub-dict on first registration.
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    ``commands`` is a list of candidate executables (e.g. ``["git.cmd",
    "git.exe"]`` on Windows); the first one that launches is used.
    Returns ``(stdout, returncode)`` on success, ``(None, returncode)`` if
    the process exited non-zero, and ``(None, None)`` if no candidate could
    be started.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # Executable not found: try the next candidate.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # for-loop fell through without break: no candidate launched.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        # communicate() returns bytes on Python 3; decode for the callers.
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string.  We also search up to two
    directory levels above ``root`` for an appropriately named directory.
    """
    tried = []
    for _ in range(3):
        basename = os.path.basename(root)
        if basename.startswith(parentdir_prefix):
            # Everything after the prefix is taken as the version string.
            return {"version": basename[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
    """Return a "+" if the closest tag has no local-version separator yet,
    else return a "." to continue the existing local segment.

    Fix: ``pieces.get("closest-tag", "")`` returned None (not "") when the
    key was present but set to None (the no-tag case), making ``"+" in None``
    raise TypeError. Coalesce None to "" so the helper is total.
    """
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if tag:
        parts = [tag]
        if pieces["distance"] or pieces["dirty"]:
            parts.append(plus_or_dot(pieces))
            parts.append("%d.g%s" % (pieces["distance"], pieces["short"]))
            if pieces["dirty"]:
                parts.append(".dirty")
        return "".join(parts)
    # exception #1: never tagged, synthesize a PEP 440 local version
    parts = ["0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])]
    if pieces["dirty"]:
        parts.append(".dirty")
    return "".join(parts)
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: nothing was ever tagged
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return tag + ".post.dev%d" % pieces["distance"]
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
            rendered += plus_or_dot(pieces) + "g%s" % pieces["short"]
        return rendered
    # exception #1: no tags anywhere in history
    rendered = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered + "+g%s" % pieces["short"]
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    parts = []
    tag = pieces["closest-tag"]
    if tag:
        parts.append(tag)
        if pieces["distance"] or pieces["dirty"]:
            parts.append(".post%d" % pieces["distance"])
            if pieces["dirty"]:
                parts.append(".dev0")
    else:
        # exception #1: no tags anywhere in history
        parts.append("0.post%d" % pieces["distance"])
        if pieces["dirty"]:
            parts.append(".dev0")
    return "".join(parts)
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    segments = []
    if pieces["closest-tag"]:
        segments.append(pieces["closest-tag"])
        if pieces["distance"]:
            segments.append("-%d-g%s" % (pieces["distance"], pieces["short"]))
    else:
        # exception #1: bare hex, no 'g' prefix
        segments.append(pieces["short"])
    if pieces["dirty"]:
        segments.append("-dirty")
    return "".join(segments)
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always --long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    segments = []
    if pieces["closest-tag"]:
        # --long style: always emit distance and hash, even at distance 0
        segments.append(pieces["closest-tag"])
        segments.append("-%d-g%s" % (pieces["distance"], pieces["short"]))
    else:
        # exception #1: bare hex, no 'g' prefix
        segments.append(pieces["short"])
    if pieces["dirty"]:
        segments.append("-dirty")
    return "".join(segments)
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # Something went wrong upstream; report an unknown version.
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    renderer = renderers.get(style)
    if renderer is None:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderer(pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so.

    Tries, in order: expanded VCS keywords, "git describe" on the source
    tree, and the parent-directory name; falls back to "0+unknown".
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    # Strategy 1: expanded VCS keywords (works from an sdist, no .git needed).
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # Frozen interpreters (py2exe etc.) may not define __file__ at all.
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    # Strategy 2: ask git directly via "git describe".
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    # Strategy 3: infer the version from the unpacked tarball directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
| 35.40499 | 79 | 0.584463 |
9f5f008b09c5332b28d9658801a68395ba5f0e16 | 26,969 | py | Python | fairseq/utils.py | cepin19/fairseq_lattice | d1a35d849b2aff15c1e7fad3ace748c906bb4521 | [
"MIT"
] | null | null | null | fairseq/utils.py | cepin19/fairseq_lattice | d1a35d849b2aff15c1e7fad3ace748c906bb4521 | [
"MIT"
] | null | null | null | fairseq/utils.py | cepin19/fairseq_lattice | d1a35d849b2aff15c1e7fad3ace748c906bb4521 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import contextlib
import copy
import importlib
import logging
import os
import sys
import tempfile
import warnings
from itertools import accumulate
from typing import Callable, Dict, List, Optional
import torch
import torch.nn.functional as F
from fairseq.modules.multihead_attention import MultiheadAttention
from torch import Tensor
try:
from amp_C import multi_tensor_l2norm
multi_tensor_l2norm_available = True
except ImportError:
multi_tensor_l2norm_available = False
try:
import torch_xla.core.xla_model as xm
except ImportError:
xm = None
logger = logging.getLogger(__name__)
MANIFOLD_PATH_SEP = "|"
class FileContentsAction(argparse.Action):
    """argparse action that replaces a filename argument with the file's contents.

    If the supplied value is a path to an existing file (per PathManager),
    the stored attribute becomes the stripped text of that file; otherwise
    the raw string value is stored unchanged.
    """
    def __init__(self, option_strings, dest, nargs=None, **kwargs):
        # This action consumes exactly one value; multi-value nargs is rejected.
        if nargs is not None:
            raise ValueError("nargs not allowed")
        super(FileContentsAction, self).__init__(option_strings, dest, **kwargs)
    def __call__(self, parser, namespace, values, option_string=None):
        # Imported lazily to avoid a hard dependency at module import time.
        from fairseq.file_io import PathManager
        if PathManager.isfile(values):
            with PathManager.open(values) as f:
                argument = f.read().strip()
        else:
            argument = values
        setattr(namespace, self.dest, argument)
def split_paths(paths: str) -> List[str]:
    """Split a concatenated path string into its components.

    Plain filesystem paths are separated by ``os.pathsep``; strings that
    contain a URI scheme (``://``) use ``MANIFOLD_PATH_SEP`` instead.
    """
    if "://" in paths:
        return paths.split(MANIFOLD_PATH_SEP)
    return paths.split(os.pathsep)
def load_ensemble_for_inference(filenames, task, model_arg_overrides=None):
    """Deprecated wrapper; delegates to checkpoint_utils.load_model_ensemble."""
    from fairseq import checkpoint_utils
    deprecation_warning(
        "utils.load_ensemble_for_inference is deprecated. "
        "Please use checkpoint_utils.load_model_ensemble instead."
    )
    return checkpoint_utils.load_model_ensemble(
        filenames, arg_overrides=model_arg_overrides, task=task
    )
def apply_to_sample(f, sample):
    """Recursively apply ``f`` to every tensor inside ``sample``.

    ``sample`` may be an arbitrary nesting of dicts, lists, tuples and sets;
    the container structure is rebuilt and only tensor leaves are transformed.
    An empty sized sample yields an empty dict.
    """
    if hasattr(sample, "__len__") and len(sample) == 0:
        return {}

    def _map(value):
        if torch.is_tensor(value):
            return f(value)
        if isinstance(value, dict):
            return {key: _map(item) for key, item in value.items()}
        if isinstance(value, list):
            return [_map(item) for item in value]
        if isinstance(value, tuple):
            return tuple(_map(item) for item in value)
        if isinstance(value, set):
            return {_map(item) for item in value}
        return value

    return _map(sample)
def move_to_cuda(sample, device=None):
    """Recursively move all tensors in ``sample`` onto a CUDA device.

    Args:
        sample: arbitrary nesting of containers holding tensors.
        device: target device; defaults to the current CUDA device.
    """
    device = device or torch.cuda.current_device()
    def _move_to_cuda(tensor):
        # non_blocking is ignored if tensor is not pinned, so we can always set
        # to True (see github.com/PyTorchLightning/pytorch-lightning/issues/620)
        return tensor.to(device=device, non_blocking=True)
    return apply_to_sample(_move_to_cuda, sample)
def move_to_cpu(sample):
    """Recursively move all tensors in ``sample`` to the CPU.

    Half-precision tensors are widened to float32, since CPU support for
    float16/bfloat16 is limited.
    """
    def _move_to_cpu(tensor):
        # PyTorch has poor support for half tensors (float16) on CPU.
        # Move any such tensors to float32.
        if tensor.dtype in {torch.bfloat16, torch.float16}:
            tensor = tensor.to(dtype=torch.float32)
        return tensor.cpu()
    return apply_to_sample(_move_to_cpu, sample)
def get_incremental_state(
    module: MultiheadAttention,
    incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
    key: str,
    ) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
    """Helper for getting incremental state for an nn.Module.

    Thin delegation to the module's own ``get_incremental_state``; kept for
    backward compatibility with callers of the old free-function API.
    """
    return module.get_incremental_state(incremental_state, key)
def set_incremental_state(
    module: MultiheadAttention,
    incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
    key: str,
    value: Dict[str, Optional[Tensor]],
) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
    """Helper for setting incremental state for an nn.Module.

    No-op when ``incremental_state`` is None. The module may return a new
    state object, in which case that replaces the one passed in.
    """
    if incremental_state is not None:
        result = module.set_incremental_state(incremental_state, key, value)
        if result is not None:
            incremental_state = result
    return incremental_state
def load_align_dict(replace_unk):
    """Prepare the dictionary used for unknown-word replacement.

    Returns None when replacement is disabled, a mapping read from the file
    at ``replace_unk`` when a non-empty path string is given, and an empty
    dict otherwise (unknowns are then replaced by copying the source word).
    """
    if replace_unk is None:
        return None
    if isinstance(replace_unk, str) and len(replace_unk) > 0:
        # Load alignment dictionary for unknown word replacement if it was
        # passed as an argument.
        align_dict = {}
        with open(replace_unk, "r") as handle:
            for line in handle:
                cols = line.split()
                align_dict[cols[0]] = cols[1]
        return align_dict
    # No alignment dictionary provided but we still want to perform unknown
    # word replacement by copying the original source word.
    return {}
def print_embed_overlap(embed_dict, vocab_dict):
    """Log how many vocabulary symbols have a pretrained embedding available."""
    embed_keys = set(embed_dict.keys())
    vocab_keys = set(vocab_dict.symbols)
    overlap = len(embed_keys & vocab_keys)
    logger.info("found {}/{} types in embedding file".format(overlap, len(vocab_dict)))
def parse_embedding(embed_path):
    """Parse embedding text file into a dictionary of word and embedding tensors.

    The first line can have vocabulary size and dimension. The following lines
    should contain word and embedding separated by spaces.

    Example:
        2 5
        the -0.0230 -0.0264 0.0287 0.0171 0.1403
        at -0.0395 -0.1286 0.0275 0.0254 -0.0932
    """
    embeddings = {}
    with open(embed_path) as handle:
        next(handle)  # skip the "<vocab_size> <dim>" header line
        for line in handle:
            fields = line.rstrip().split(" ")
            embeddings[fields[0]] = torch.Tensor(
                [float(weight) for weight in fields[1:]]
            )
    return embeddings
def load_embedding(embed_dict, vocab, embedding):
    """Copy pretrained vectors from ``embed_dict`` into ``embedding``.

    For every index in ``vocab`` whose symbol has an entry in ``embed_dict``,
    the corresponding embedding row is overwritten in place. Returns the
    (mutated) embedding module.
    """
    for idx in range(len(vocab)):
        vector = embed_dict.get(vocab[idx])
        if vector is not None:
            embedding.weight.data[idx] = vector
    return embedding
def replace_unk(hypo_str, src_str, alignment, align_dict, unk):
    """Replace ``unk`` tokens in a hypothesis string using an alignment.

    Each unknown target token is substituted by its aligned source token,
    optionally mapped through ``align_dict`` (falling back to a plain copy).
    """
    from fairseq import tokenizer
    # Tokens are strings here
    hypo_tokens = tokenizer.tokenize_line(hypo_str)
    # TODO: Very rare cases where the replacement is '<eos>' should be handled gracefully
    src_tokens = tokenizer.tokenize_line(src_str) + ["<eos>"]
    for i, ht in enumerate(hypo_tokens):
        if ht == unk:
            src_token = src_tokens[alignment[i]]
            # Either take the corresponding value in the aligned dictionary or just copy the original value.
            hypo_tokens[i] = align_dict.get(src_token, src_token)
    return " ".join(hypo_tokens)
def post_process_prediction(
    hypo_tokens,
    src_str,
    alignment,
    align_dict,
    tgt_dict,
    remove_bpe=None,
    extra_symbols_to_ignore=None,
):
    """Detokenize a hypothesis, optionally replacing unknowns and removing BPE.

    Returns a (tokens, string, alignment) triple; tokens are re-encoded when
    unk replacement or BPE removal changed the string.
    """
    hypo_str = tgt_dict.string(
        hypo_tokens, remove_bpe, extra_symbols_to_ignore=extra_symbols_to_ignore
    )
    if align_dict is not None:
        hypo_str = replace_unk(
            hypo_str, src_str, alignment, align_dict, tgt_dict.unk_string()
        )
    if align_dict is not None or remove_bpe is not None:
        # Convert back to tokens for evaluating with unk replacement or without BPE
        # Note that the dictionary can be modified inside the method.
        hypo_tokens = tgt_dict.encode_line(hypo_str, add_if_not_exist=True)
    return hypo_tokens, hypo_str, alignment
def make_positions(tensor, padding_idx: int, onnx_trace: bool = False):
    """Replace non-padding symbols with their position numbers.

    Position numbers begin at padding_idx+1. Padding symbols are ignored.
    """
    # The casts below are deliberate: XLA prefers ints, cumsum outputs longs,
    # and ONNX cannot handle a dtype kwarg in cumsum, so we cast afterwards.
    not_pad = tensor.ne(padding_idx).int()
    running = torch.cumsum(not_pad, dim=1).type_as(not_pad)
    return (running * not_pad).long() + padding_idx
def make_relative_positions(tensor, padding_idx: int, onnx_trace: bool = False, dictionary=None):
    """Compute (start, end) position ids that are aware of constraint markup.

    Tokens outside constraint blocks get ordinary monotonically increasing
    positions; inside a "▁<cstart>"..."▁<cend>" block, each "▁<vstart>"/"▁<vend>"
    variant restarts its offset so alternative choices share start positions,
    and all choice tokens get the end position the longest choice would have.
    NOTE(review): requires ``dictionary`` to expose ``symbols`` indexed by
    token id -- confirm callers always pass one, it is not optional in practice.
    Returns a (positions_start, positions_end) pair, both offset by padding_idx
    with padding positions masked to padding_idx.
    """
    # The series of casts and type-conversions here are carefully
    # balanced to both work with ONNX export and XLA. In particular XLA
    # prefers ints, cumsum defaults to output longs, and ONNX doesn't know
    # how to handle the dtype kwarg in cumsum.
    mask = tensor.ne(padding_idx).int()
    # if dictionary:
    positions_start = torch.zeros(tensor.shape)
    positions_end = torch.zeros(tensor.shape)
    for si, sentence in enumerate(tensor):
        # logging.info("make positions tensor sentence: ")
        # logging.info(dictionary.string(sentence))
        current_start_pos = 1
        in_constraint = False
        max_len_in_constraint = 0
        num_choice_tokens = 0
        choice_offset = 0
        for ti, tok in enumerate(sentence):
            # logging.info(tok)
            if tok.item() != padding_idx:
                # logging.info(dictionary.symbols[tok.item()])
                if in_constraint:
                    num_choice_tokens += 1
                    if dictionary.symbols[tok.item()] == "▁<vstart>": # start variant
                        choice_offset = 0
                    elif dictionary.symbols[tok.item()] == "▁<vend>": # end variant
                        choice_offset = 0
                    max_len_in_constraint = max(choice_offset, max_len_in_constraint)
                    positions_start[si][ti] = current_start_pos + choice_offset
                    choice_offset += 1
                else:
                    positions_start[si][ti] = current_start_pos
                    positions_end[si][ti] = current_start_pos
                    current_start_pos += 1
                if dictionary.symbols[tok.item()] == "▁<cstart>": # start constraint
                    in_constraint = True
                    max_len_in_constraint = 0
                elif dictionary.symbols[tok.item()] == "▁<cend>": #end constraint
                    in_constraint = False
                    for i in range(
                            num_choice_tokens + 1): # this sets the end position for all the choices to the value which would the longest choice have, +1 for }
                        positions_end[si][ti - i] = max_len_in_constraint + current_start_pos + 3
                    num_choice_tokens = 0
                    choice_offset = 0
                    current_start_pos = max_len_in_constraint + current_start_pos + 4 # ] + } + shift
                # positions_start[si][ti]=current_start_pos
        # if dictionary:
        # logging.info(list(zip(dictionary.string(sentence).split(), positions_start[si].tolist()) ))
        # logging.info(list(zip(dictionary.string(sentence).split(), positions_end[si].tolist()) ))
    # logging.info((positions_start.type_as(mask) * mask).long()+ padding_idx)
    # logging.info((positions_end.type_as(mask) * mask).long()+ padding_idx)
    # logging.info((torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx)
    return (positions_start.type_as(mask) * mask).long() + padding_idx, (
        positions_end.type_as(mask) * mask).long() + padding_idx
def strip_pad(tensor, pad):
    """Return a 1-D tensor with every ``pad`` entry removed."""
    keep_mask = tensor.ne(pad)
    return tensor[keep_mask]
def buffered_arange(max):
    """Return ``torch.arange(max)`` served from a cached, growing buffer.

    The buffer is stored as a function attribute and only reallocated when a
    larger ``max`` is requested; callers receive a slice of shared storage.
    """
    buf = getattr(buffered_arange, "buf", None)
    if buf is None:
        buf = torch.LongTensor()
        buffered_arange.buf = buf
    if max > buf.numel():
        buf.resize_(max)
        torch.arange(max, out=buf)
    return buf[:max]
def convert_padding_direction(
    src_tokens, padding_idx, right_to_left: bool = False, left_to_right: bool = False
):
    """Move padding from one side of each row to the other via a gather.

    Exactly one of ``right_to_left``/``left_to_right`` must be set. Rows are
    rotated by their per-row pad count so tokens end up flush against the
    requested side. NOTE(review): the local name ``range`` shadows the
    builtin within this function.
    """
    assert right_to_left ^ left_to_right
    pad_mask = src_tokens.eq(padding_idx)
    if not pad_mask.any():
        # no padding, return early
        return src_tokens
    if left_to_right and not pad_mask[:, 0].any():
        # already right padded
        return src_tokens
    if right_to_left and not pad_mask[:, -1].any():
        # already left padded
        return src_tokens
    max_len = src_tokens.size(1)
    buffered = torch.empty(0).long()
    if max_len > 0:
        torch.arange(max_len, out=buffered)
    range = buffered.type_as(src_tokens).expand_as(src_tokens)
    num_pads = pad_mask.long().sum(dim=1, keepdim=True)
    if right_to_left:
        index = torch.remainder(range - num_pads, max_len)
    else:
        index = torch.remainder(range + num_pads, max_len)
    return src_tokens.gather(1, index)
def item(tensor):
    """Extract a Python scalar from ``tensor``; pass plain scalars through."""
    extract = getattr(tensor, "item", None)
    if extract is not None:
        return extract()
    if hasattr(tensor, "__getitem__"):
        return tensor[0]
    return tensor
def multi_tensor_total_norm(grads, chunk_size=2048 * 32) -> torch.Tensor:
    """Compute the global L2 norm over ``grads`` using apex's fused kernel.

    Gradients are grouped per device; CUDA groups use ``multi_tensor_l2norm``
    and all partial norms are combined with a final torch.norm.
    """
    per_device_grads = {}
    norms = []
    # Group gradients by the device they live on.
    for grad in grads:
        device = grad.device
        cur_device_grads = per_device_grads.get(device)
        if cur_device_grads is None:
            cur_device_grads = []
            per_device_grads[device] = cur_device_grads
        cur_device_grads.append(grad)
    for device in per_device_grads.keys():
        cur_device_grads = per_device_grads[device]
        if device.type == "cuda":
            # TODO(msb) return has_inf
            has_inf = torch.zeros((1, 1), dtype=torch.int, device=device)
            with torch.cuda.device(device):
                norm = multi_tensor_l2norm(
                    chunk_size, has_inf, [cur_device_grads], False
                )
            norms.append(norm[0].to(torch.cuda.current_device()))
        else:
            # Non-CUDA tensors fall back to per-tensor torch.norm.
            norms += [torch.norm(g, p=2, dtype=torch.float32) for g in cur_device_grads]
    total_norm = torch.norm(torch.stack(norms))
    return total_norm
@torch.no_grad()
def clip_grad_norm_(params, max_norm, aggregate_norm_fn=None) -> torch.Tensor:
    """Clip gradients of ``params`` in place so their global L2 norm is
    at most ``max_norm`` (no clipping when ``max_norm`` <= 0).

    Returns the (pre-clip) total norm. ``aggregate_norm_fn`` can combine the
    norm across workers before the clip coefficient is computed.
    """
    if isinstance(params, torch.Tensor):
        params = [params]
    params = list(params)
    # Only parameters that actually received gradients participate.
    grads = [p.grad.detach() for p in filter(lambda p: p.grad is not None, params)]
    if len(grads) == 0:
        if len(params) > 0:
            return params[0].new_tensor(0.0)
        else:
            return torch.tensor(0.0)
    if len(grads) == 1:
        total_norm = torch.norm(grads[0], p=2, dtype=torch.float32)
    else:
        if multi_tensor_l2norm_available:
            # Fast path: apex fused multi-tensor norm.
            total_norm = multi_tensor_total_norm(grads)
        else:
            if torch.cuda.is_available():
                warnings.warn(
                    "amp_C fused kernels unavailable, disabling multi_tensor_l2norm; "
                    "you may get better performance by installing NVIDIA's apex library"
                )
                device = torch.cuda.current_device()
            elif grads[0].device.type == "xla":
                device = grads[0].device
            else:
                device = torch.device("cpu")
            total_norm = torch.norm(
                torch.stack(
                    [torch.norm(g, p=2, dtype=torch.float32).to(device) for g in grads]
                )
            )
    if aggregate_norm_fn is not None:
        total_norm = aggregate_norm_fn(total_norm)
    if max_norm > 0:
        max_norm = float(max_norm)
        # Scale factor is clamped to 1 so norms below max_norm are untouched.
        clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1)
        for g in grads:
            g.mul_(clip_coef)
    return total_norm
def fill_with_neg_inf(t):
    """FP16-compatible function that fills a tensor with -inf."""
    # fill_ in float32 first, then cast back, so -inf survives half precision.
    neg_inf = float("-inf")
    return t.float().fill_(neg_inf).type_as(t)
def _match_types(arg1, arg2):
"""Convert the numerical argument to the same type as the other argument"""
def upgrade(arg_number, arg_structure):
if isinstance(arg_structure, tuple):
return tuple([arg_number] * len(arg_structure))
elif isinstance(arg_structure, dict):
arg = copy.deepcopy(arg_structure)
for k in arg:
arg[k] = upgrade(arg_number, arg_structure[k])
return arg
else:
return arg_number
if isinstance(arg1, float) or isinstance(arg1, int):
return upgrade(arg1, arg2), arg2
elif isinstance(arg2, float) or isinstance(arg2, int):
return arg1, upgrade(arg2, arg1)
return arg1, arg2
def resolve_max_positions(*args):
    """Resolve max position constraints from multiple sources.

    Each arg may be a number, a tuple, or a dict; Nones are skipped and the
    element-wise minimum across all remaining constraints is returned.
    """
    def _merge_dicts(base, other):
        merged = copy.deepcopy(base)
        for key, value in other.items():
            if key in merged:
                merged[key] = min(merged[key], value)
            else:
                merged[key] = value
        return merged
    def _min_ignoring_none(values):
        smallest = None
        for value in values:
            if value is None:
                continue
            if smallest is None or value < smallest:
                smallest = value
        return smallest
    resolved = None
    for arg in args:
        if arg is None:
            continue
        if resolved is None:
            resolved = arg
            continue
        resolved, arg = _match_types(resolved, arg)
        if isinstance(arg, (float, int)):
            resolved = min(resolved, arg)
        elif isinstance(arg, dict):
            resolved = _merge_dicts(resolved, arg)
        else:
            resolved = tuple(map(_min_ignoring_none, zip(resolved, arg)))
    return resolved
def import_user_module(args):
    """Import the user-supplied ``--user-dir`` as a Python module, exactly once.

    The directory is resolved first as given, then relative to the fairseq
    package, then relative to its parent. Importing mutates ``sys.path`` and
    registers plugins as a side effect.
    """
    module_path = getattr(args, "user_dir", None)
    if module_path is not None:
        module_path = os.path.abspath(args.user_dir)
        if not os.path.exists(module_path) and not os.path.isfile(os.path.dirname(module_path)):
            # Fall back to paths relative to the fairseq package itself.
            fairseq_rel_path = os.path.join(os.path.dirname(__file__), args.user_dir)
            if os.path.exists(fairseq_rel_path):
                module_path = fairseq_rel_path
            else:
                fairseq_rel_path = os.path.join(
                    os.path.dirname(__file__), "..", args.user_dir
                )
                if os.path.exists(fairseq_rel_path):
                    module_path = fairseq_rel_path
                else:
                    raise FileNotFoundError(module_path)
        # ensure that user modules are only imported once
        import_user_module.memo = getattr(import_user_module, "memo", set())
        if module_path not in import_user_module.memo:
            import_user_module.memo.add(module_path)
            module_parent, module_name = os.path.split(module_path)
            if module_name not in sys.modules:
                sys.path.insert(0, module_parent)
                importlib.import_module(module_name)
            else:
                raise ImportError(
                    "Failed to import --user-dir={} because the corresponding module name "
                    "({}) is not globally unique. Please rename the directory to "
                    "something unique and try again.".format(module_path, module_name)
                )
def softmax(x, dim: int, onnx_trace: bool = False):
    """Softmax over ``dim`` computed in float32.

    The ONNX path casts the input up front because ONNX export cannot handle
    the ``dtype`` kwarg of ``F.softmax``.
    """
    if not onnx_trace:
        return F.softmax(x, dim=dim, dtype=torch.float32)
    return F.softmax(x.float(), dim=dim)
def log_softmax(x, dim: int, onnx_trace: bool = False):
    """Log-softmax over ``dim`` computed in float32.

    The ONNX path casts the input up front because ONNX export cannot handle
    the ``dtype`` kwarg of ``F.log_softmax``.
    """
    if not onnx_trace:
        return F.log_softmax(x, dim=dim, dtype=torch.float32)
    return F.log_softmax(x.float(), dim=dim)
def get_perplexity(loss, round=2, base=2):
    """Convert a log-base-``base`` loss into perplexity, rounded.

    Returns 0.0 for a missing loss and inf when ``base ** loss`` overflows.
    """
    from fairseq.logging.meters import safe_round
    if loss is None:
        return 0.0
    try:
        return safe_round(base ** loss, round)
    except OverflowError:
        return float("inf")
def deprecation_warning(message, stacklevel=3):
    """Warn about deprecated usage.

    Deliberately emits a plain UserWarning rather than DeprecationWarning,
    because the latter is hidden by default.
    """
    warnings.warn(message, stacklevel=stacklevel)
def get_activation_fn(activation: str) -> Callable:
    """ Returns the activation function corresponding to `activation`

    Known names: relu, gelu, gelu_fast (deprecated alias for gelu_accurate),
    gelu_accurate, tanh, linear. Raises RuntimeError for anything else.
    """
    from fairseq.modules import gelu, gelu_accurate
    if activation == "relu":
        return F.relu
    elif activation == "gelu":
        return gelu
    elif activation == "gelu_fast":
        deprecation_warning(
            "--activation-fn=gelu_fast has been renamed to gelu_accurate"
        )
        return gelu_accurate
    elif activation == "gelu_accurate":
        return gelu_accurate
    elif activation == "tanh":
        return torch.tanh
    elif activation == "linear":
        # identity function
        return lambda x: x
    else:
        raise RuntimeError("--activation-fn {} not supported".format(activation))
def get_available_activation_fns() -> List:
    """Names accepted by get_activation_fn, in their canonical order."""
    return [
        "relu",
        "gelu",
        "gelu_fast",  # deprecated alias for gelu_accurate
        "gelu_accurate",
        "tanh",
        "linear",
    ]
@contextlib.contextmanager
def model_eval(model):
    """Temporarily switch ``model`` into eval mode, restoring the previous
    training mode on exit.

    Fix: the original had no try/finally around the yield, so an exception
    raised inside the ``with`` block left the model stuck in eval mode.
    """
    is_training = model.training
    model.eval()
    try:
        yield
    finally:
        # Restore the original mode even if the body raised.
        model.train(is_training)
def has_parameters(module):
    """Return True if ``module.parameters()`` yields at least one parameter."""
    # any() consumes at most the first element, like the original next() probe.
    return any(True for _ in module.parameters())
def get_rng_state():
    """Snapshot the torch RNG state (plus XLA/CUDA states when available)."""
    state = {"torch_rng_state": torch.get_rng_state()}
    if xm is not None:
        state["xla_rng_state"] = xm.get_rng_state()
    if torch.cuda.is_available():
        state["cuda_rng_state"] = torch.cuda.get_rng_state()
    return state
def set_rng_state(state):
    """Restore RNG states previously captured by get_rng_state."""
    torch.set_rng_state(state["torch_rng_state"])
    if xm is not None:
        xm.set_rng_state(state["xla_rng_state"])
    if torch.cuda.is_available():
        torch.cuda.set_rng_state(state["cuda_rng_state"])
class set_torch_seed(object):
    """Context manager that seeds torch (and XLA/CUDA) RNGs, then restores
    the previous RNG state on exit."""
    def __init__(self, seed):
        assert isinstance(seed, int)
        # Capture current RNG state before seeding so __exit__ can restore it.
        self.rng_state = get_rng_state()
        torch.manual_seed(seed)
        if xm is not None:
            xm.set_rng_state(seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed(seed)
    def __enter__(self):
        return self
    def __exit__(self, *exc):
        set_rng_state(self.rng_state)
def parse_alignment(line):
    """
    Parses a single line from the alingment file.

    Args:
        line (str): String containing the alignment of the format:
            <src_idx_1>-<tgt_idx_1> <src_idx_2>-<tgt_idx_2> ..
            <src_idx_m>-<tgt_idx_m>. All indices are 0 indexed.

    Returns:
        torch.IntTensor: packed alignments of shape (2 * m).
    """
    pairs = line.strip().split()
    packed = torch.IntTensor(2 * len(pairs))
    for slot, pair in enumerate(pairs):
        src_idx, tgt_idx = pair.split("-")
        packed[2 * slot] = int(src_idx)
        packed[2 * slot + 1] = int(tgt_idx)
    return packed
def get_token_to_word_mapping(tokens, exclude_list):
    """Map each token position to a running (1-based) word index.

    Tokens in ``exclude_list`` (e.g. pad/eos) do not start a new word; they
    inherit the index of the preceding word.
    """
    starts = (0 if token in exclude_list else 1 for token in tokens)
    return dict(enumerate(accumulate(starts)))
def extract_hard_alignment(attn, src_sent, tgt_sent, pad, eos):
    """Extract a hard word alignment from an attention matrix.

    For every valid target token, picks the source position with maximal
    attention (pad/eos positions are masked to -inf) and maps token indices
    to 0-based word indices. Returns a list of (src_word, tgt_word) pairs.
    """
    tgt_valid = (
        ((tgt_sent != pad) & (tgt_sent != eos)).nonzero(as_tuple=False).squeeze(dim=-1)
    )
    src_invalid = (
        ((src_sent == pad) | (src_sent == eos)).nonzero(as_tuple=False).squeeze(dim=-1)
    )
    src_token_to_word = get_token_to_word_mapping(src_sent, [eos, pad])
    tgt_token_to_word = get_token_to_word_mapping(tgt_sent, [eos, pad])
    alignment = []
    if len(tgt_valid) != 0 and len(src_invalid) < len(src_sent):
        # Mask out invalid source positions so argmax never selects them.
        attn_valid = attn[tgt_valid]
        attn_valid[:, src_invalid] = float("-inf")
        _, src_indices = attn_valid.max(dim=1)
        for tgt_idx, src_idx in zip(tgt_valid, src_indices):
            alignment.append(
                (
                    src_token_to_word[src_idx.item()] - 1,
                    tgt_token_to_word[tgt_idx.item()] - 1,
                )
            )
    return alignment
def extract_soft_alignment(attn, src_sent, tgt_sent, pad, eos):
    """Extract soft alignment probabilities from an attention matrix.

    Returns, for each non-pad target token, the attention distribution over
    non-pad source tokens formatted as 6-decimal strings.
    """
    tgt_valid = (
        ((tgt_sent != pad)).nonzero(as_tuple=False)
    )
    src_valid = (
        ((src_sent != pad)).nonzero(as_tuple=False).squeeze(dim=-1)
    )
    alignment = []
    if len(tgt_valid) != 0 and len(src_valid) != 0:
        attn_valid = attn[tgt_valid, src_valid]
        alignment = [
            ["{:.6f}".format(p) for p in src_probs.tolist()]
            for src_probs in attn_valid
        ]
    return alignment
def new_arange(x, *size):
    """
    Return a Tensor of `size` filled with a range function on the device of x.
    If size is empty, using the size of the variable x.
    """
    shape = size if size else x.size()
    return torch.arange(shape[-1], device=x.device).expand(*shape).contiguous()
def get_tpu_device():
    """Return the current XLA (TPU) device; requires torch_xla to be installed."""
    return xm.xla_device()
def tpu_data_loader(itr):
    """Wrap an iterator for TPU training via torch_xla's ParallelLoader.

    Synchronizes all workers first, then returns a CountingIterator that
    preserves the wrapped iterator's progress counter and length.
    """
    import torch_xla.core.xla_model as xm
    import torch_xla.distributed.parallel_loader as pl
    from fairseq.data import iterators
    xm.rendezvous("tpu_data_loader")  # wait for all workers
    xm.mark_step()
    device = xm.xla_device()
    return iterators.CountingIterator(
        pl.ParallelLoader(itr, [device]).per_device_loader(device),
        start=getattr(itr, "n", 0),
        total=len(itr),
    )
class CudaEnvironment(object):
    """Snapshot of the current worker's CUDA device properties."""
    def __init__(self):
        cur_device = torch.cuda.current_device()
        prop = torch.cuda.get_device_properties("cuda:{}".format(cur_device))
        # name: device model string; major/minor: compute capability.
        self.name = prop.name
        self.major = prop.major
        self.minor = prop.minor
        # Convert bytes to GiB for readable logging.
        self.total_memory_in_GB = prop.total_memory / 1024 / 1024 / 1024
    @staticmethod
    def pretty_print_cuda_env_list(cuda_env_list):
        """
        Given a list of CudaEnviorments, pretty print them
        """
        num_workers = len(cuda_env_list)
        center = "CUDA enviroments for all {} workers".format(num_workers)
        banner_len = 40 - len(center) // 2
        first_line = "*" * banner_len + center + "*" * banner_len
        logger.info(first_line)
        for r, env in enumerate(cuda_env_list):
            logger.info(
                "rank {:3d}: ".format(r)
                + "capabilities = {:2d}.{:<2d} ; ".format(env.major, env.minor)
                + "total memory = {:.3f} GB ; ".format(env.total_memory_in_GB)
                + "name = {:40s}".format(env.name)
            )
        logger.info(first_line)
def csv_str_list(x):
    """argparse type helper: split a comma-separated string into a list."""
    delimiter = ","
    return x.split(delimiter)
def eval_str_list(x, type=float):
    """Parse ``x`` (e.g. the string "[1,2]") into a list of ``type``.

    NOTE(review): uses ``eval`` -- this must only ever see trusted
    config/CLI input, never data from external users.
    """
    if x is None:
        return None
    if isinstance(x, str):
        x = eval(x)
    try:
        return list(map(type, x))
    except TypeError:
        # x evaluated to a scalar, not an iterable: wrap it.
        return [type(x)]
def eval_str_dict(x, type=dict):
    """Parse ``x`` (a Python dict literal string) into a dict; None passes through.

    NOTE(review): uses ``eval`` -- trusted config/CLI input only. The ``type``
    parameter is currently unused.
    """
    if x is None:
        return None
    if isinstance(x, str):
        x = eval(x)
    return x
def eval_bool(x, default=False):
    """Evaluate the string ``x`` as a boolean; fall back to ``default``.

    NOTE(review): uses ``eval`` -- trusted input only. Only TypeError is
    caught, so strings like "yes" still raise (NameError).
    """
    if x is None:
        return default
    try:
        return bool(eval(x))
    except TypeError:
        return default
| 33.295062 | 160 | 0.630761 |
bc04b28a4bcc9f3738f4cc1edd4d777ec2688261 | 119,064 | py | Python | sympy/printing/pretty/tests/test_pretty.py | tachycline/sympy | abf6fec12012852c7e6fae38461da9723cadc8b9 | [
"BSD-3-Clause"
] | null | null | null | sympy/printing/pretty/tests/test_pretty.py | tachycline/sympy | abf6fec12012852c7e6fae38461da9723cadc8b9 | [
"BSD-3-Clause"
] | null | null | null | sympy/printing/pretty/tests/test_pretty.py | tachycline/sympy | abf6fec12012852c7e6fae38461da9723cadc8b9 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from sympy import (
Add, And, Basic, Derivative, Dict, Eq, Equivalent, FF,
FiniteSet, Function, Ge, Gt, I, Implies, Integral, SingularityFunction,
Lambda, Le, Limit, Lt, Matrix, Mul, Nand, Ne, Nor, Not, O, Or,
Pow, Product, QQ, RR, Rational, Ray, rootof, RootSum, S,
Segment, Subs, Sum, Symbol, Tuple, Trace, Xor, ZZ, conjugate,
groebner, oo, pi, symbols, ilex, grlex, Range, Contains,
SeqPer, SeqFormula, SeqAdd, SeqMul, fourier_series, fps,
Complement, Interval, Intersection, Union, EulerGamma, GoldenRatio)
from sympy.core.expr import UnevaluatedExpr
from sympy.functions import (Abs, Chi, Ci, Ei, KroneckerDelta,
Piecewise, Shi, Si, atan2, binomial, catalan, ceiling, cos,
euler, exp, expint, factorial, factorial2, floor, hyper, log,
lowergamma, meijerg, sin, sqrt, subfactorial, tan, uppergamma,
elliptic_k, elliptic_f, elliptic_e, elliptic_pi, DiracDelta)
from sympy.codegen.ast import (Assignment, AddAugmentedAssignment,
SubAugmentedAssignment, MulAugmentedAssignment, DivAugmentedAssignment, ModAugmentedAssignment)
from sympy.matrices import Adjoint, Inverse, MatrixSymbol, Transpose
from sympy.printing.pretty import pretty as xpretty
from sympy.printing.pretty import pprint
from sympy.physics.units import joule
from sympy.tensor.array import (ImmutableDenseNDimArray, ImmutableSparseNDimArray,
MutableDenseNDimArray, MutableSparseNDimArray, tensorproduct)
from sympy.utilities.pytest import raises, XFAIL
from sympy.core.trace import Tr
from sympy.core.compatibility import u_decode as u
from sympy.core.compatibility import range
from sympy.vector import CoordSys3D, Gradient, Curl, Divergence, Dot, Cross
a, b, x, y, z, k, n = symbols('a,b,x,y,z,k,n')
th = Symbol('theta')
ph = Symbol('phi')
"""
Expressions whose pretty-printing is tested here:
(A '#' to the right of an expression indicates that its various acceptable
orderings are accounted for by the tests.)
BASIC EXPRESSIONS:
oo
(x**2)
1/x
y*x**-2
x**Rational(-5,2)
(-2)**x
Pow(3, 1, evaluate=False)
(x**2 + x + 1) #
1-x #
1-2*x #
x/y
-x/y
(x+2)/y #
(1+x)*y #3
-5*x/(x+10) # correct placement of negative sign
1 - Rational(3,2)*(x+1)
-(-x + 5)*(-x - 2*sqrt(2) + 5) - (-y + 5)*(-y + 5) # issue 5524
ORDERING:
x**2 + x + 1
1 - x
1 - 2*x
2*x**4 + y**2 - x**2 + y**3
RELATIONAL:
Eq(x, y)
Lt(x, y)
Gt(x, y)
Le(x, y)
Ge(x, y)
Ne(x/(y+1), y**2) #
RATIONAL NUMBERS:
y*x**-2
y**Rational(3,2) * x**Rational(-5,2)
sin(x)**3/tan(x)**2
FUNCTIONS (ABS, CONJ, EXP, FUNCTION BRACES, FACTORIAL, FLOOR, CEILING):
(2*x + exp(x)) #
Abs(x)
Abs(x/(x**2+1)) #
Abs(1 / (y - Abs(x)))
factorial(n)
factorial(2*n)
subfactorial(n)
subfactorial(2*n)
factorial(factorial(factorial(n)))
factorial(n+1) #
conjugate(x)
conjugate(f(x+1)) #
f(x)
f(x, y)
f(x/(y+1), y) #
f(x**x**x**x**x**x)
sin(x)**2
conjugate(a+b*I)
conjugate(exp(a+b*I))
conjugate( f(1 + conjugate(f(x))) ) #
f(x/(y+1), y) # denom of first arg
floor(1 / (y - floor(x)))
ceiling(1 / (y - ceiling(x)))
SQRT:
sqrt(2)
2**Rational(1,3)
2**Rational(1,1000)
sqrt(x**2 + 1)
(1 + sqrt(5))**Rational(1,3)
2**(1/x)
sqrt(2+pi)
(2+(1+x**2)/(2+x))**Rational(1,4)+(1+x**Rational(1,1000))/sqrt(3+x**2)
DERIVATIVES:
Derivative(log(x), x, evaluate=False)
Derivative(log(x), x, evaluate=False) + x #
Derivative(log(x) + x**2, x, y, evaluate=False)
Derivative(2*x*y, y, x, evaluate=False) + x**2 #
beta(alpha).diff(alpha)
INTEGRALS:
Integral(log(x), x)
Integral(x**2, x)
Integral((sin(x))**2 / (tan(x))**2)
Integral(x**(2**x), x)
Integral(x**2, (x,1,2))
Integral(x**2, (x,Rational(1,2),10))
Integral(x**2*y**2, x,y)
Integral(x**2, (x, None, 1))
Integral(x**2, (x, 1, None))
Integral(sin(th)/cos(ph), (th,0,pi), (ph, 0, 2*pi))
MATRICES:
Matrix([[x**2+1, 1], [y, x+y]]) #
Matrix([[x/y, y, th], [0, exp(I*k*ph), 1]])
PIECEWISE:
Piecewise((x,x<1),(x**2,True))
SEQUENCES (TUPLES, LISTS, DICTIONARIES):
()
[]
{}
(1/x,)
[x**2, 1/x, x, y, sin(th)**2/cos(ph)**2]
(x**2, 1/x, x, y, sin(th)**2/cos(ph)**2)
{x: sin(x)}
{1/x: 1/y, x: sin(x)**2} #
[x**2]
(x**2,)
{x**2: 1}
LIMITS:
Limit(x, x, oo)
Limit(x**2, x, 0)
Limit(1/x, x, 0)
Limit(sin(x)/x, x, 0)
UNITS:
joule => kg*m**2/s
SUBS:
Subs(f(x), x, ph**2)
Subs(f(x).diff(x), x, 0)
Subs(f(x).diff(x)/y, (x, y), (0, Rational(1, 2)))
ORDER:
O(1)
O(1/x)
O(x**2 + y**2)
"""
def pretty(expr, order=None):
    """Return the ASCII pretty-printed form of ``expr``."""
    settings = dict(order=order, use_unicode=False, wrap_line=False)
    return xpretty(expr, **settings)
def upretty(expr, order=None):
    """Return the Unicode pretty-printed form of ``expr``."""
    settings = dict(order=order, use_unicode=True, wrap_line=False)
    return xpretty(expr, **settings)
def test_pretty_ascii_str():
    """Plain byte strings must be printed verbatim, whatever mix of quote
    styles, escaped quotes or embedded newlines the literal uses."""
    assert pretty( 'xxx' ) == 'xxx'
    assert pretty( "xxx" ) == 'xxx'
    assert pretty( 'xxx\'xxx' ) == 'xxx\'xxx'
    assert pretty( 'xxx"xxx' ) == 'xxx\"xxx'
    assert pretty( 'xxx\"xxx' ) == 'xxx\"xxx'
    assert pretty( "xxx'xxx" ) == 'xxx\'xxx'
    assert pretty( "xxx\'xxx" ) == 'xxx\'xxx'
    assert pretty( "xxx\"xxx" ) == 'xxx\"xxx'
    assert pretty( "xxx\"xxx\'xxx" ) == 'xxx"xxx\'xxx'
    assert pretty( "xxx\nxxx" ) == 'xxx\nxxx'
def test_pretty_unicode_str():
    """Unicode strings must be printed verbatim, whatever mix of quote
    styles, escaped quotes or embedded newlines the literal uses.

    Mirrors ``test_pretty_ascii_str`` case for case.
    """
    assert pretty( u'xxx' ) == u'xxx'
    # The second case exercises the double-quoted literal, mirroring the
    # ascii test above (it was previously a verbatim duplicate of the first).
    assert pretty( u"xxx" ) == u'xxx'
    assert pretty( u'xxx\'xxx' ) == u'xxx\'xxx'
    assert pretty( u'xxx"xxx' ) == u'xxx\"xxx'
    assert pretty( u'xxx\"xxx' ) == u'xxx\"xxx'
    assert pretty( u"xxx'xxx" ) == u'xxx\'xxx'
    assert pretty( u"xxx\'xxx" ) == u'xxx\'xxx'
    assert pretty( u"xxx\"xxx" ) == u'xxx\"xxx'
    assert pretty( u"xxx\"xxx\'xxx" ) == u'xxx"xxx\'xxx'
    assert pretty( u"xxx\nxxx" ) == u'xxx\nxxx'
def test_upretty_greek():
    """Greek symbol names render as Greek letters (with any sub/superscripts)
    in Unicode mode; ``oo`` renders as the infinity sign."""
    assert upretty( oo ) == u'∞'
    assert upretty( Symbol('alpha^+_1') ) == u'α⁺₁'
    assert upretty( Symbol('beta') ) == u'β'
    assert upretty(Symbol('lambda')) == u'λ'
def test_upretty_multiindex():
    """Multi-digit and multi-character indices become runs of Unicode
    subscript/superscript characters."""
    assert upretty( Symbol('beta12') ) == u'β₁₂'
    assert upretty( Symbol('Y00') ) == u'Y₀₀'
    assert upretty( Symbol('Y_00') ) == u'Y₀₀'
    assert upretty( Symbol('F^+-') ) == u'F⁺⁻'
def test_upretty_sub_super():
    """Repeated ``_``/``^`` groups are rendered as space-separated runs of
    Unicode sub/superscripts; superscripts are emitted before subscripts."""
    assert upretty( Symbol('beta_1_2') ) == u'β₁ ₂'
    assert upretty( Symbol('beta^1^2') ) == u'β¹ ²'
    assert upretty( Symbol('beta_1^2') ) == u'β²₁'
    assert upretty( Symbol('beta_10_20') ) == u'β₁₀ ₂₀'
    assert upretty( Symbol('beta_ax_gamma^i') ) == u'βⁱₐₓ ᵧ'
    assert upretty( Symbol("F^1^2_3_4") ) == u'F¹ ²₃ ₄'
    assert upretty( Symbol("F_1_2^3^4") ) == u'F³ ⁴₁ ₂'
    assert upretty( Symbol("F_1_2_3_4") ) == u'F₁ ₂ ₃ ₄'
    assert upretty( Symbol("F^1^2^3^4") ) == u'F¹ ² ³ ⁴'
def test_upretty_subs_missing_in_24():
    """Subscript letters that exist in Unicode but were missing from the
    Unicode 2.4 repertoire must still be produced."""
    assert upretty( Symbol('F_beta') ) == u'Fᵦ'
    assert upretty( Symbol('F_gamma') ) == u'Fᵧ'
    assert upretty( Symbol('F_rho') ) == u'Fᵨ'
    assert upretty( Symbol('F_phi') ) == u'Fᵩ'
    assert upretty( Symbol('F_chi') ) == u'Fᵪ'
    assert upretty( Symbol('F_a') ) == u'Fₐ'
    assert upretty( Symbol('F_e') ) == u'Fₑ'
    assert upretty( Symbol('F_i') ) == u'Fᵢ'
    assert upretty( Symbol('F_o') ) == u'Fₒ'
    assert upretty( Symbol('F_u') ) == u'Fᵤ'
    assert upretty( Symbol('F_r') ) == u'Fᵣ'
    assert upretty( Symbol('F_v') ) == u'Fᵥ'
    assert upretty( Symbol('F_x') ) == u'Fₓ'
@XFAIL
def test_missing_in_2X_issue_9047():
    """Subscript letters absent from Unicode 2.x (see sympy issue 9047);
    expected to fail until the printer gains fallbacks for them.
    Warnings are silenced so the xfail is not noisy."""
    import warnings
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        assert upretty( Symbol('F_h') ) == u'Fₕ'
        assert upretty( Symbol('F_k') ) == u'Fₖ'
        assert upretty( Symbol('F_l') ) == u'Fₗ'
        assert upretty( Symbol('F_m') ) == u'Fₘ'
        assert upretty( Symbol('F_n') ) == u'Fₙ'
        assert upretty( Symbol('F_p') ) == u'Fₚ'
        assert upretty( Symbol('F_s') ) == u'Fₛ'
        assert upretty( Symbol('F_t') ) == u'Fₜ'
def test_upretty_modifiers():
    """Symbol-name modifier suffixes (accents, faces, brackets) and their
    combinations are translated to combining characters / bracket pairs."""
    # Accents
    assert upretty( Symbol('Fmathring') ) == u'F̊'
    assert upretty( Symbol('Fddddot') ) == u'F̈̈'
    assert upretty( Symbol('Fdddot') ) == u'F̈̇'
    assert upretty( Symbol('Fddot') ) == u'F̈'
    assert upretty( Symbol('Fdot') ) == u'Ḟ'
    assert upretty( Symbol('Fcheck') ) == u'F̌'
    assert upretty( Symbol('Fbreve') ) == u'F̆'
    assert upretty( Symbol('Facute') ) == u'F́'
    assert upretty( Symbol('Fgrave') ) == u'F̀'
    assert upretty( Symbol('Ftilde') ) == u'F̃'
    assert upretty( Symbol('Fhat') ) == u'F̂'
    assert upretty( Symbol('Fbar') ) == u'F̅'
    assert upretty( Symbol('Fvec') ) == u'F⃗'
    assert upretty( Symbol('Fprime') ) == u'F′'
    assert upretty( Symbol('Fprm') ) == u'F′'
    # No faces are actually implemented, but test to make sure the modifiers are stripped
    assert upretty( Symbol('Fbold') ) == u'Fbold'
    assert upretty( Symbol('Fbm') ) == u'Fbm'
    assert upretty( Symbol('Fcal') ) == u'Fcal'
    assert upretty( Symbol('Fscr') ) == u'Fscr'
    assert upretty( Symbol('Ffrak') ) == u'Ffrak'
    # Brackets
    assert upretty( Symbol('Fnorm') ) == u'‖F‖'
    assert upretty( Symbol('Favg') ) == u'⟨F⟩'
    assert upretty( Symbol('Fabs') ) == u'|F|'
    assert upretty( Symbol('Fmag') ) == u'|F|'
    # Combinations
    assert upretty( Symbol('xvecdot') ) == u'x⃗̇'
    assert upretty( Symbol('xDotVec') ) == u'ẋ⃗'
    assert upretty( Symbol('xHATNorm') ) == u'‖x̂‖'
    assert upretty( Symbol('xMathring_yCheckPRM__zbreveAbs') ) == u'x̊_y̌′__|z̆|'
    assert upretty( Symbol('alphadothat_nVECDOT__tTildePrime') ) == u'α̇̂_n⃗̇__t̃′'
    assert upretty( Symbol('x_dot') ) == u'x_dot'
    assert upretty( Symbol('x__dot') ) == u'x__dot'
def test_pretty_Cycle():
    """Permutation cycles print in cycle notation, including the singleton
    and empty cycles."""
    from sympy.combinatorics.permutations import Cycle
    assert pretty(Cycle(1, 2)) == '(1 2)'
    assert pretty(Cycle(2)) == '(2)'
    assert pretty(Cycle(1, 3)(4, 5)) == '(1 3)(4 5)'
    assert pretty(Cycle()) == '()'
def test_pretty_basic():
    """Basic 2-D layout: rationals, powers (including float and negative
    exponents), sums, quotients, and minus-sign placement.

    Several sums/products accept multiple orderings, so those asserts
    check membership in a list of acceptable renderings.
    """
    assert pretty( -Rational(1)/2 ) == '-1/2'
    assert pretty( -Rational(13)/22 ) == \
"""\
-13 \n\
----\n\
 22 \
"""
    expr = oo
    ascii_str = \
"""\
oo\
"""
    ucode_str = \
u("""\
∞\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = (x**2)
    ascii_str = \
"""\
 2\n\
x \
"""
    ucode_str = \
u("""\
 2\n\
x \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = 1/x
    ascii_str = \
"""\
1\n\
-\n\
x\
"""
    ucode_str = \
u("""\
1\n\
─\n\
x\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # not the same as 1/x
    expr = x**-1.0
    ascii_str = \
"""\
 -1.0\n\
x    \
"""
    ucode_str = \
("""\
 -1.0\n\
x    \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # see issue #2860
    expr = Pow(S(2), -1.0, evaluate=False)
    ascii_str = \
"""\
 -1.0\n\
2    \
"""
    ucode_str = \
("""\
 -1.0\n\
2    \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = y*x**-2
    ascii_str = \
"""\
y \n\
--\n\
 2\n\
x \
"""
    ucode_str = \
u("""\
y \n\
──\n\
 2\n\
x \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = x**Rational(-5, 2)
    ascii_str = \
"""\
 1  \n\
----\n\
 5/2\n\
x   \
"""
    ucode_str = \
u("""\
 1  \n\
────\n\
 5/2\n\
x   \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = (-2)**x
    ascii_str = \
"""\
    x\n\
(-2) \
"""
    ucode_str = \
u("""\
    x\n\
(-2) \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # See issue 4923
    expr = Pow(3, 1, evaluate=False)
    ascii_str = \
"""\
 1\n\
3 \
"""
    ucode_str = \
u("""\
 1\n\
3 \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = (x**2 + x + 1)
    ascii_str_1 = \
"""\
         2\n\
1 + x + x \
"""
    ascii_str_2 = \
"""\
 2        \n\
x  + x + 1\
"""
    ascii_str_3 = \
"""\
 2        \n\
x  + 1 + x\
"""
    ucode_str_1 = \
u("""\
         2\n\
1 + x + x \
""")
    ucode_str_2 = \
u("""\
 2        \n\
x  + x + 1\
""")
    ucode_str_3 = \
u("""\
 2        \n\
x  + 1 + x\
""")
    assert pretty(expr) in [ascii_str_1, ascii_str_2, ascii_str_3]
    assert upretty(expr) in [ucode_str_1, ucode_str_2, ucode_str_3]
    expr = 1 - x
    ascii_str_1 = \
"""\
1 - x\
"""
    ascii_str_2 = \
"""\
-x + 1\
"""
    ucode_str_1 = \
u("""\
1 - x\
""")
    ucode_str_2 = \
u("""\
-x + 1\
""")
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    expr = 1 - 2*x
    ascii_str_1 = \
"""\
1 - 2*x\
"""
    ascii_str_2 = \
"""\
-2*x + 1\
"""
    ucode_str_1 = \
u("""\
1 - 2⋅x\
""")
    ucode_str_2 = \
u("""\
-2⋅x + 1\
""")
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    expr = x/y
    ascii_str = \
"""\
x\n\
-\n\
y\
"""
    ucode_str = \
u("""\
x\n\
─\n\
y\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = -x/y
    ascii_str = \
"""\
-x \n\
---\n\
 y \
"""
    ucode_str = \
u("""\
-x \n\
───\n\
 y \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = (x + 2)/y
    ascii_str_1 = \
"""\
2 + x\n\
-----\n\
  y  \
"""
    ascii_str_2 = \
"""\
x + 2\n\
-----\n\
  y  \
"""
    ucode_str_1 = \
u("""\
2 + x\n\
─────\n\
  y  \
""")
    ucode_str_2 = \
u("""\
x + 2\n\
─────\n\
  y  \
""")
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    expr = (1 + x)*y
    ascii_str_1 = \
"""\
y*(1 + x)\
"""
    ascii_str_2 = \
"""\
(1 + x)*y\
"""
    ascii_str_3 = \
"""\
y*(x + 1)\
"""
    ucode_str_1 = \
u("""\
y⋅(1 + x)\
""")
    ucode_str_2 = \
u("""\
(1 + x)⋅y\
""")
    ucode_str_3 = \
u("""\
y⋅(x + 1)\
""")
    assert pretty(expr) in [ascii_str_1, ascii_str_2, ascii_str_3]
    assert upretty(expr) in [ucode_str_1, ucode_str_2, ucode_str_3]
    # Test for correct placement of the negative sign
    expr = -5*x/(x + 10)
    ascii_str_1 = \
"""\
 -5*x \n\
------\n\
10 + x\
"""
    ascii_str_2 = \
"""\
 -5*x \n\
------\n\
x + 10\
"""
    ucode_str_1 = \
u("""\
 -5⋅x \n\
──────\n\
10 + x\
""")
    ucode_str_2 = \
u("""\
 -5⋅x \n\
──────\n\
x + 10\
""")
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    expr = -S(1)/2 - 3*x
    ascii_str = \
"""\
-3*x - 1/2\
"""
    ucode_str = \
u("""\
-3⋅x - 1/2\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = S(1)/2 - 3*x
    ascii_str = \
"""\
-3*x + 1/2\
"""
    ucode_str = \
u("""\
-3⋅x + 1/2\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = -S(1)/2 - 3*x/2
    ascii_str = \
"""\
  3*x   1\n\
- --- - -\n\
   2    2\
"""
    ucode_str = \
u("""\
  3⋅x   1\n\
- ─── - ─\n\
   2    2\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = S(1)/2 - 3*x/2
    ascii_str = \
"""\
  3*x   1\n\
- --- + -\n\
   2    2\
"""
    ucode_str = \
u("""\
  3⋅x   1\n\
- ─── + ─\n\
   2    2\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_negative_fractions():
    """The minus sign of a negative quotient stays attached to the
    numerator (or the whole fraction) rather than floating free."""
    expr = -x/y
    ascii_str =\
"""\
-x \n\
---\n\
 y \
"""
    ucode_str =\
u("""\
-x \n\
───\n\
 y \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = -x*z/y
    ascii_str =\
"""\
-x*z \n\
-----\n\
  y  \
"""
    ucode_str =\
u("""\
-x⋅z \n\
─────\n\
  y  \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = x**2/y
    ascii_str =\
"""\
 2\n\
x \n\
--\n\
y \
"""
    ucode_str =\
u("""\
 2\n\
x \n\
──\n\
y \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = -x**2/y
    ascii_str =\
"""\
  2 \n\
-x  \n\
----\n\
 y  \
"""
    ucode_str =\
u("""\
  2 \n\
-x  \n\
────\n\
 y  \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = -x/(y*z)
    ascii_str =\
"""\
-x \n\
---\n\
y*z\
"""
    ucode_str =\
u("""\
-x \n\
───\n\
y⋅z\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = -a/y**2
    ascii_str =\
"""\
-a \n\
---\n\
  2\n\
 y \
"""
    ucode_str =\
u("""\
-a \n\
───\n\
  2\n\
 y \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = y**(-a/b)
    ascii_str =\
"""\
 -a \n\
 ---\n\
  b \n\
y   \
"""
    ucode_str =\
u("""\
 -a \n\
 ───\n\
  b \n\
y   \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = -1/y**2
    ascii_str =\
"""\
-1 \n\
---\n\
  2\n\
 y \
"""
    ucode_str =\
u("""\
-1 \n\
───\n\
  2\n\
 y \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = -10/b**2
    ascii_str =\
"""\
-10 \n\
----\n\
  2 \n\
 b  \
"""
    ucode_str =\
u("""\
-10 \n\
────\n\
  2 \n\
 b  \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Rational(-200, 37)
    ascii_str =\
"""\
-200 \n\
-----\n\
 37  \
"""
    ucode_str =\
u("""\
-200 \n\
─────\n\
 37  \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_issue_5524():
    """Regression test for sympy issue 5524: sign handling when expanding a
    product of negated sums (the ``(x - 5)`` factor must not be ``-(-x + 5)``)."""
    assert pretty(-(-x + 5)*(-x - 2*sqrt(2) + 5) - (-y + 5)*(-y + 5)) == \
"""\
        /         ___    \\           2\n\
(x - 5)*\\-x - 2*\\/ 2  + 5/ - (-y + 5) \
"""
    assert upretty(-(-x + 5)*(-x - 2*sqrt(2) + 5) - (-y + 5)*(-y + 5)) == \
u("""\
                                  2\n\
(x - 5)⋅(-x - 2⋅√2 + 5) - (-y + 5) \
""")
def test_pretty_ordering():
    """The ``order`` keyword ('lex', 'rev-lex', None) controls monomial
    ordering; a series with an O-term keeps the O-term last in all orders."""
    assert pretty(x**2 + x + 1, order='lex') == \
"""\
 2        \n\
x  + x + 1\
"""
    assert pretty(x**2 + x + 1, order='rev-lex') == \
"""\
         2\n\
1 + x + x \
"""
    assert pretty(1 - x, order='lex') == '-x + 1'
    assert pretty(1 - x, order='rev-lex') == '1 - x'
    assert pretty(1 - 2*x, order='lex') == '-2*x + 1'
    assert pretty(1 - 2*x, order='rev-lex') == '1 - 2*x'
    f = 2*x**4 + y**2 - x**2 + y**3
    assert pretty(f, order=None) == \
"""\
   4    2    3    2\n\
2*x  - x  + y  + y \
"""
    assert pretty(f, order='lex') == \
"""\
   4    2    3    2\n\
2*x  - x  + y  + y \
"""
    assert pretty(f, order='rev-lex') == \
"""\
 2    3    2      4\n\
y  + y  - x  + 2*x \
"""
    expr = x - x**3/6 + x**5/120 + O(x**6)
    ascii_str = \
"""\
     3     5        \n\
    x     x     / 6\\\n\
x - -- + --- + O\\x /\n\
    6    120        \
"""
    ucode_str = \
u("""\
     3     5        \n\
    x     x     ⎛ 6⎞\n\
x - ── + ─── + O⎝x ⎠\n\
    6    120        \
""")
    assert pretty(expr, order=None) == ascii_str
    assert upretty(expr, order=None) == ucode_str
    assert pretty(expr, order='lex') == ascii_str
    assert upretty(expr, order='lex') == ucode_str
    assert pretty(expr, order='rev-lex') == ascii_str
    assert upretty(expr, order='rev-lex') == ucode_str
def test_EulerGamma():
    """Euler-Mascheroni constant: spelled out in ASCII, γ in Unicode."""
    assert pretty(EulerGamma) == str(EulerGamma) == "EulerGamma"
    assert upretty(EulerGamma) == u"γ"
def test_GoldenRatio():
    """Golden ratio: spelled out in ASCII, φ in Unicode."""
    assert pretty(GoldenRatio) == str(GoldenRatio) == "GoldenRatio"
    assert upretty(GoldenRatio) == u"φ"
def test_pretty_relational():
    """Relational operators: ASCII uses =, <, >, <=, >=, !=; Unicode uses
    the dedicated ≤, ≥, ≠ glyphs. The Ne case also checks alignment of a
    2-D fraction against a power on the other side."""
    expr = Eq(x, y)
    ascii_str = \
"""\
x = y\
"""
    ucode_str = \
u("""\
x = y\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Lt(x, y)
    ascii_str = \
"""\
x < y\
"""
    ucode_str = \
u("""\
x < y\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Gt(x, y)
    ascii_str = \
"""\
x > y\
"""
    ucode_str = \
u("""\
x > y\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Le(x, y)
    ascii_str = \
"""\
x <= y\
"""
    ucode_str = \
u("""\
x ≤ y\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Ge(x, y)
    ascii_str = \
"""\
x >= y\
"""
    ucode_str = \
u("""\
x ≥ y\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Ne(x/(y + 1), y**2)
    ascii_str_1 = \
"""\
  x       2\n\
----- != y \n\
1 + y      \
"""
    ascii_str_2 = \
"""\
  x       2\n\
----- != y \n\
y + 1      \
"""
    ucode_str_1 = \
u("""\
  x      2\n\
───── ≠ y \n\
1 + y     \
""")
    ucode_str_2 = \
u("""\
  x      2\n\
───── ≠ y \n\
y + 1     \
""")
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
def test_Assignment():
    """Code-generation Assignment prints as ``lhs := rhs``."""
    expr = Assignment(x, y)
    ascii_str = \
"""\
x := y\
"""
    ucode_str = \
u("""\
x := y\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_AugmentedAssignment():
    """Augmented code-generation assignments print with their respective
    in-place operators (+=, -=, *=, /=, %=)."""
    expr = AddAugmentedAssignment(x, y)
    ascii_str = \
"""\
x += y\
"""
    ucode_str = \
u("""\
x += y\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = SubAugmentedAssignment(x, y)
    ascii_str = \
"""\
x -= y\
"""
    ucode_str = \
u("""\
x -= y\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = MulAugmentedAssignment(x, y)
    ascii_str = \
"""\
x *= y\
"""
    ucode_str = \
u("""\
x *= y\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = DivAugmentedAssignment(x, y)
    ascii_str = \
"""\
x /= y\
"""
    ucode_str = \
u("""\
x /= y\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = ModAugmentedAssignment(x, y)
    ascii_str = \
"""\
x %= y\
"""
    ucode_str = \
u("""\
x %= y\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_issue_7117():
    """Unevaluated arithmetic on an Eq must parenthesize the equation
    (regression for sympy issue 7117)."""
    # See also issue #5031 (hence the evaluate=False in these).
    e = Eq(x + 1, x/2)
    q = Mul(2, e, evaluate=False)
    assert upretty(q) == u("""\
  ⎛        x⎞\n\
2⋅⎜x + 1 = ─⎟\n\
  ⎝        2⎠\
""")
    q = Add(e, 6, evaluate=False)
    assert upretty(q) == u("""\
    ⎛        x⎞\n\
6 + ⎜x + 1 = ─⎟\n\
    ⎝        2⎠\
""")
    q = Pow(e, 2, evaluate=False)
    assert upretty(q) == u("""\
           2\n\
⎛        x⎞ \n\
⎜x + 1 = ─⎟ \n\
⎝        2⎠ \
""")
    e2 = Eq(x, 2)
    q = Mul(e, e2, evaluate=False)
    assert upretty(q) == u("""\
⎛        x⎞        \n\
⎜x + 1 = ─⎟⋅(x = 2)\n\
⎝        2⎠        \
""")
def test_pretty_rational():
    """Rational powers: negative powers drop into the denominator and
    fractional exponents print inline as p/q."""
    expr = y*x**-2
    ascii_str = \
"""\
y \n\
--\n\
 2\n\
x \
"""
    ucode_str = \
u("""\
y \n\
──\n\
 2\n\
x \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = y**Rational(3, 2) * x**Rational(-5, 2)
    ascii_str = \
"""\
 3/2\n\
y   \n\
----\n\
 5/2\n\
x   \
"""
    ucode_str = \
u("""\
 3/2\n\
y   \n\
────\n\
 5/2\n\
x   \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = sin(x)**3/tan(x)**2
    ascii_str = \
"""\
   3   \n\
sin (x)\n\
-------\n\
   2   \n\
tan (x)\
"""
    ucode_str = \
u("""\
   3   \n\
sin (x)\n\
───────\n\
   2   \n\
tan (x)\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_functions():
    """Tests for Abs, conjugate, exp, function braces, factorial,
    subfactorial, double factorial, binomial, catalan, floor, ceiling
    and euler numbers."""
    expr = (2*x + exp(x))
    ascii_str_1 = \
"""\
       x\n\
2*x + e \
"""
    ascii_str_2 = \
"""\
 x      \n\
e  + 2*x\
"""
    ucode_str_1 = \
u("""\
       x\n\
2⋅x + ℯ \
""")
    ucode_str_2 = \
u("""\
 x      \n\
ℯ  + 2⋅x\
""")
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    expr = Abs(x)
    ascii_str = \
"""\
|x|\
"""
    ucode_str = \
u("""\
│x│\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Abs(x/(x**2 + 1))
    ascii_str_1 = \
"""\
|  x   |\n\
|------|\n\
|     2|\n\
|1 + x |\
"""
    ascii_str_2 = \
"""\
|  x   |\n\
|------|\n\
| 2    |\n\
|x  + 1|\
"""
    ucode_str_1 = \
u("""\
│  x   │\n\
│──────│\n\
│     2│\n\
│1 + x │\
""")
    ucode_str_2 = \
u("""\
│  x   │\n\
│──────│\n\
│ 2    │\n\
│x  + 1│\
""")
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    expr = Abs(1 / (y - Abs(x)))
    ascii_str = \
"""\
|   1   |\n\
|-------|\n\
|y - |x||\
"""
    ucode_str = \
u("""\
│   1   │\n\
│───────│\n\
│y - │x││\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    n = Symbol('n', integer=True)
    expr = factorial(n)
    ascii_str = \
"""\
n!\
"""
    ucode_str = \
u("""\
n!\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = factorial(2*n)
    ascii_str = \
"""\
(2*n)!\
"""
    ucode_str = \
u("""\
(2⋅n)!\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = factorial(factorial(factorial(n)))
    ascii_str = \
"""\
((n!)!)!\
"""
    ucode_str = \
u("""\
((n!)!)!\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = factorial(n + 1)
    ascii_str_1 = \
"""\
(1 + n)!\
"""
    ascii_str_2 = \
"""\
(n + 1)!\
"""
    ucode_str_1 = \
u("""\
(1 + n)!\
""")
    ucode_str_2 = \
u("""\
(n + 1)!\
""")
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    expr = subfactorial(n)
    ascii_str = \
"""\
!n\
"""
    ucode_str = \
u("""\
!n\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = subfactorial(2*n)
    ascii_str = \
"""\
!(2*n)\
"""
    ucode_str = \
u("""\
!(2⋅n)\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    n = Symbol('n', integer=True)
    expr = factorial2(n)
    ascii_str = \
"""\
n!!\
"""
    ucode_str = \
u("""\
n!!\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = factorial2(2*n)
    ascii_str = \
"""\
(2*n)!!\
"""
    ucode_str = \
u("""\
(2⋅n)!!\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = factorial2(factorial2(factorial2(n)))
    ascii_str = \
"""\
((n!!)!!)!!\
"""
    ucode_str = \
u("""\
((n!!)!!)!!\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = factorial2(n + 1)
    ascii_str_1 = \
"""\
(1 + n)!!\
"""
    ascii_str_2 = \
"""\
(n + 1)!!\
"""
    ucode_str_1 = \
u("""\
(1 + n)!!\
""")
    ucode_str_2 = \
u("""\
(n + 1)!!\
""")
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    expr = 2*binomial(n, k)
    ascii_str = \
"""\
  /n\\\n\
2*| |\n\
  \\k/\
"""
    ucode_str = \
u("""\
  ⎛n⎞\n\
2⋅⎜ ⎟\n\
  ⎝k⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = 2*binomial(2*n, k)
    ascii_str = \
"""\
  /2*n\\\n\
2*|   |\n\
  \\ k /\
"""
    ucode_str = \
u("""\
  ⎛2⋅n⎞\n\
2⋅⎜   ⎟\n\
  ⎝ k ⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = 2*binomial(n**2, k)
    ascii_str = \
"""\
  / 2\\\n\
  |n |\n\
2*|  |\n\
  \\k /\
"""
    ucode_str = \
u("""\
  ⎛ 2⎞\n\
  ⎜n ⎟\n\
2⋅⎜  ⎟\n\
  ⎝k ⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = catalan(n)
    ascii_str = \
"""\
C \n\
 n\
"""
    ucode_str = \
u("""\
C \n\
 n\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = conjugate(x)
    ascii_str = \
"""\
_\n\
x\
"""
    ucode_str = \
u("""\
_\n\
x\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    f = Function('f')
    expr = conjugate(f(x + 1))
    ascii_str_1 = \
"""\
________\n\
f(1 + x)\
"""
    ascii_str_2 = \
"""\
________\n\
f(x + 1)\
"""
    ucode_str_1 = \
u("""\
________\n\
f(1 + x)\
""")
    ucode_str_2 = \
u("""\
________\n\
f(x + 1)\
""")
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    expr = f(x)
    ascii_str = \
"""\
f(x)\
"""
    ucode_str = \
u("""\
f(x)\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = f(x, y)
    ascii_str = \
"""\
f(x, y)\
"""
    ucode_str = \
u("""\
f(x, y)\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = f(x/(y + 1), y)
    ascii_str_1 = \
"""\
 /  x     \\\n\
f|-----, y|\n\
 \\1 + y   /\
"""
    ascii_str_2 = \
"""\
 /  x     \\\n\
f|-----, y|\n\
 \\y + 1   /\
"""
    ucode_str_1 = \
u("""\
 ⎛  x     ⎞\n\
f⎜─────, y⎟\n\
 ⎝1 + y   ⎠\
""")
    ucode_str_2 = \
u("""\
 ⎛  x     ⎞\n\
f⎜─────, y⎟\n\
 ⎝y + 1   ⎠\
""")
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    expr = f(x**x**x**x**x**x)
    ascii_str = \
"""\
 / / / / / x\\\\\\\\\\\n\
 | | | | \\x /||||\n\
 | | \\x    /|||\n\
 | \\x       /||\n\
 \\x          /|\n\
f\\x             /\
"""
    ucode_str = \
u("""\
 ⎛ ⎛ ⎛ ⎛ ⎛ x⎞⎞⎞⎞⎞
 ⎜ ⎜ ⎜ ⎜ ⎝x ⎠⎟⎟⎟⎟
 ⎜ ⎜ ⎜ ⎝x    ⎠⎟⎟⎟
 ⎜ ⎜ ⎝x       ⎠⎟⎟
 ⎜ ⎝x          ⎠⎟
f⎝x             ⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = sin(x)**2
    ascii_str = \
"""\
   2   \n\
sin (x)\
"""
    ucode_str = \
u("""\
   2   \n\
sin (x)\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = conjugate(a + b*I)
    ascii_str = \
"""\
_     _\n\
a - I*b\
"""
    ucode_str = \
u("""\
_     _\n\
a - ⅈ⋅b\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = conjugate(exp(a + b*I))
    ascii_str = \
"""\
 _     _\n\
 a - I*b\n\
e       \
"""
    ucode_str = \
u("""\
 _     _\n\
 a - ⅈ⋅b\n\
ℯ       \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = conjugate( f(1 + conjugate(f(x))) )
    ascii_str_1 = \
"""\
___________\n\
 /    ____\\\n\
f\\1 + f(x)/\
"""
    ascii_str_2 = \
"""\
___________\n\
 /____    \\\n\
f\\f(x) + 1/\
"""
    ucode_str_1 = \
u("""\
___________\n\
 ⎛    ____⎞\n\
f⎝1 + f(x)⎠\
""")
    ucode_str_2 = \
u("""\
___________\n\
 ⎛____    ⎞\n\
f⎝f(x) + 1⎠\
""")
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    expr = f(x/(y + 1), y)  # denom of first arg
    ascii_str_1 = \
"""\
 /  x     \\\n\
f|-----, y|\n\
 \\1 + y   /\
"""
    ascii_str_2 = \
"""\
 /  x     \\\n\
f|-----, y|\n\
 \\y + 1   /\
"""
    ucode_str_1 = \
u("""\
 ⎛  x     ⎞\n\
f⎜─────, y⎟\n\
 ⎝1 + y   ⎠\
""")
    ucode_str_2 = \
u("""\
 ⎛  x     ⎞\n\
f⎜─────, y⎟\n\
 ⎝y + 1   ⎠\
""")
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    expr = floor(1 / (y - floor(x)))
    ascii_str = \
"""\
     /     1      \\\n\
floor|------------|\n\
     \\y - floor(x)/\
"""
    ucode_str = \
u("""\
⎢   1   ⎥\n\
⎢───────⎥\n\
⎣y - ⌊x⌋⎦\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = ceiling(1 / (y - ceiling(x)))
    ascii_str = \
"""\
       /      1       \\\n\
ceiling|--------------|\n\
       \\y - ceiling(x)/\
"""
    ucode_str = \
u("""\
⎡   1   ⎤\n\
⎢───────⎥\n\
⎢y - ⌈x⌉⎥\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = euler(n)
    ascii_str = \
"""\
E \n\
 n\
"""
    ucode_str = \
u("""\
E \n\
 n\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = euler(1/(1 + 1/(1 + 1/n)))
    ascii_str = \
"""\
E         \n\
     1    \n\
 ---------\n\
       1  \n\
 1 + -----\n\
         1\n\
     1 + -\n\
         n\
"""
    ucode_str = \
u("""\
E         \n\
     1    \n\
 ─────────\n\
       1  \n\
 1 + ─────\n\
         1\n\
     1 + ─\n\
         n\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_sqrt():
    """Radicals: square roots, nth roots (integer and symbolic index) and
    nested/combined radical expressions; Unicode square roots collapse to
    the √ character when the radicand is short."""
    expr = sqrt(2)
    ascii_str = \
"""\
  ___\n\
\\/ 2 \
"""
    ucode_str = \
u"√2"
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = 2**Rational(1, 3)
    ascii_str = \
"""\
3 ___\n\
\\/ 2 \
"""
    ucode_str = \
u("""\
3 ___\n\
╲╱ 2 \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = 2**Rational(1, 1000)
    ascii_str = \
"""\
1000___\n\
  \\/ 2 \
"""
    ucode_str = \
u("""\
1000___\n\
  ╲╱ 2 \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = sqrt(x**2 + 1)
    ascii_str = \
"""\
   ________\n\
  /  2     \n\
\\/  x  + 1 \
"""
    ucode_str = \
u("""\
   ________\n\
  ╱  2     \n\
╲╱  x  + 1 \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = (1 + sqrt(5))**Rational(1, 3)
    ascii_str = \
"""\
   ___________\n\
3 /       ___ \n\
\\/  1 + \\/ 5  \
"""
    ucode_str = \
u("""\
3 ________\n\
╲╱ 1 + √5 \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = 2**(1/x)
    ascii_str = \
"""\
x ___\n\
\\/ 2 \
"""
    ucode_str = \
u("""\
x ___\n\
╲╱ 2 \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = sqrt(2 + pi)
    ascii_str = \
"""\
  ________\n\
\\/ 2 + pi \
"""
    ucode_str = \
u("""\
  _______\n\
╲╱ 2 + π \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = (2 + (
        1 + x**2)/(2 + x))**Rational(1, 4) + (1 + x**Rational(1, 1000))/sqrt(3 + x**2)
    ascii_str = \
"""\
     ____________              \n\
    /      2        1000___    \n\
   /      x  + 1      \\/ x  + 1\n\
4 /   2 + ------  + -----------\n\
\\/        x + 2        ________\n\
                      /  2     \n\
                    \\/  x  + 3 \
"""
    ucode_str = \
u("""\
     ____________              \n\
    ╱      2        1000___    \n\
   ╱      x  + 1      ╲╱ x  + 1\n\
4 ╱   2 + ──────  + ───────────\n\
╲╱        x + 2        ________\n\
                      ╱  2     \n\
                    ╲╱  x  + 3 \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_sqrt_char_knob():
    """The ``use_unicode_sqrt_char`` knob toggles between the drawn
    two-line radical and the single √ character."""
    # See PR #9234.
    expr = sqrt(2)
    ucode_str1 = \
u("""\
  ___\n\
╲╱ 2 \
""")
    ucode_str2 = \
u"√2"
    assert xpretty(expr, use_unicode=True,
                   use_unicode_sqrt_char=False) == ucode_str1
    assert xpretty(expr, use_unicode=True,
                   use_unicode_sqrt_char=True) == ucode_str2
def test_pretty_sqrt_longsymbol_no_sqrt_char():
    """A multi-character radicand falls back to the drawn radical even in
    Unicode mode."""
    # Do not use unicode sqrt char for long symbols (see PR #9234).
    expr = sqrt(Symbol('C1'))
    ucode_str = \
u("""\
  ____\n\
╲╱ C₁ \
""")
    assert upretty(expr) == ucode_str
def test_pretty_KroneckerDelta():
    """KroneckerDelta prints as a subscripted d (ASCII) or δ (Unicode)."""
    x, y = symbols("x, y")
    expr = KroneckerDelta(x, y)
    ascii_str = \
"""\
d   \n\
 x,y\
"""
    ucode_str = \
u("""\
δ   \n\
 x,y\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_product():
    """Product signs: a single product with 2-D bounds, and two nested
    products sharing one body."""
    n, m, k, l = symbols('n m k l')
    f = symbols('f', cls=Function)
    expr = Product(f((n/3)**2), (n, k**2, l))
    unicode_str = \
u("""\
    l           \n\
┬────────┬      \n\
│        │  ⎛ 2⎞\n\
│        │  ⎜n ⎟\n\
│        │ f⎜──⎟\n\
│        │  ⎝9 ⎠\n\
│        │      \n\
       2        \n\
  n = k         """)
    ascii_str = \
"""\
    l           \n\
__________      \n\
|        |  / 2\\\n\
|        |  |n |\n\
|        | f|--|\n\
|        |  \\9 /\n\
|        |      \n\
       2        \n\
  n = k         """
    assert pretty(expr) == ascii_str
    assert upretty(expr) == unicode_str
    expr = Product(f((n/3)**2), (n, k**2, l), (l, 1, m))
    unicode_str = \
u("""\
    m          l           \n\
┬────────┬ ┬────────┬      \n\
│        │ │        │  ⎛ 2⎞\n\
│        │ │        │  ⎜n ⎟\n\
│        │ │        │ f⎜──⎟\n\
│        │ │        │  ⎝9 ⎠\n\
│        │ │        │      \n\
  l = 1           2        \n\
             n = k         """)
    ascii_str = \
"""\
    m          l           \n\
__________ __________      \n\
|        | |        |  / 2\\\n\
|        | |        |  |n |\n\
|        | |        | f|--|\n\
|        | |        |  \\9 /\n\
|        | |        |      \n\
  l = 1           2        \n\
             n = k         """
    assert pretty(expr) == ascii_str
    assert upretty(expr) == unicode_str
def test_pretty_lambda():
    """Lambda printing: the identity, simple bodies, a squared Lambda
    (parenthesized with an exponent) and multi-argument Lambdas."""
    # S.IdentityFunction is a special case
    expr = Lambda(y, y)
    assert pretty(expr) == "x -> x"
    assert upretty(expr) == u"x ↦ x"
    expr = Lambda(x, x+1)
    assert pretty(expr) == "x -> x + 1"
    assert upretty(expr) == u"x ↦ x + 1"
    expr = Lambda(x, x**2)
    ascii_str = \
"""\
      2\n\
x -> x \
"""
    ucode_str = \
u("""\
     2\n\
x ↦ x \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Lambda(x, x**2)**2
    ascii_str = \
"""\
         2
/      2\\ \n\
\\x -> x / \
"""
    ucode_str = \
u("""\
        2
⎛     2⎞ \n\
⎝x ↦ x ⎠ \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Lambda((x, y), x)
    ascii_str = "(x, y) -> x"
    ucode_str = u"(x, y) ↦ x"
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Lambda((x, y), x**2)
    ascii_str = \
"""\
           2\n\
(x, y) -> x \
"""
    ucode_str = \
u("""\
          2\n\
(x, y) ↦ x \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_order():
    """Landau O-terms: the limit point is shown after a semicolon whenever
    it is not the default expansion about 0."""
    expr = O(1)
    ascii_str = \
"""\
O(1)\
"""
    ucode_str = \
u("""\
O(1)\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = O(1/x)
    ascii_str = \
"""\
 /1\\\n\
O|-|\n\
 \\x/\
"""
    ucode_str = \
u("""\
 ⎛1⎞\n\
O⎜─⎟\n\
 ⎝x⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = O(x**2 + y**2)
    ascii_str = \
"""\
 / 2    2                  \\\n\
O\\x  + y ; (x, y) -> (0, 0)/\
"""
    ucode_str = \
u("""\
 ⎛ 2    2                 ⎞\n\
O⎝x  + y ; (x, y) → (0, 0)⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = O(1, (x, oo))
    ascii_str = \
"""\
O(1; x -> oo)\
"""
    ucode_str = \
u("""\
O(1; x → ∞)\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = O(1/x, (x, oo))
    ascii_str = \
"""\
 /1         \\\n\
O|-; x -> oo|\n\
 \\x         /\
"""
    ucode_str = \
u("""\
 ⎛1       ⎞\n\
O⎜─; x → ∞⎟\n\
 ⎝x       ⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = O(x**2 + y**2, (x, oo), (y, oo))
    ascii_str = \
"""\
 / 2    2                    \\\n\
O\\x  + y ; (x, y) -> (oo, oo)/\
"""
    ucode_str = \
u("""\
 ⎛ 2    2                 ⎞\n\
O⎝x  + y ; (x, y) → (∞, ∞)⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_derivatives():
    """Derivative printing: straight d for single-variable derivatives,
    ∂ in Unicode for partials, exponents on repeated variables, and Greek
    function/variable names."""
    # Simple
    expr = Derivative(log(x), x, evaluate=False)
    ascii_str = \
"""\
d         \n\
--(log(x))\n\
dx        \
"""
    ucode_str = \
u("""\
d         \n\
──(log(x))\n\
dx        \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Derivative(log(x), x, evaluate=False) + x
    ascii_str_1 = \
"""\
    d         \n\
x + --(log(x))\n\
    dx        \
"""
    ascii_str_2 = \
"""\
d             \n\
--(log(x)) + x\n\
dx            \
"""
    ucode_str_1 = \
u("""\
    d         \n\
x + ──(log(x))\n\
    dx        \
""")
    ucode_str_2 = \
u("""\
d             \n\
──(log(x)) + x\n\
dx            \
""")
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    # basic partial derivatives
    expr = Derivative(log(x + y) + x, x)
    ascii_str_1 = \
"""\
d                 \n\
--(log(x + y) + x)\n\
dx                \
"""
    ascii_str_2 = \
"""\
d                 \n\
--(x + log(x + y))\n\
dx                \
"""
    ucode_str_1 = \
u("""\
∂                 \n\
──(log(x + y) + x)\n\
∂x                \
""")
    ucode_str_2 = \
u("""\
∂                 \n\
──(x + log(x + y))\n\
∂x                \
""")
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2], upretty(expr)
    # Multiple symbols
    expr = Derivative(log(x) + x**2, x, y)
    ascii_str_1 = \
"""\
   2              \n\
  d  /          2\\\n\
-----\\log(x) + x /\n\
dy dx             \
"""
    ascii_str_2 = \
"""\
   2              \n\
  d  / 2         \\\n\
-----\\x  + log(x)/\n\
dy dx             \
"""
    ucode_str_1 = \
u("""\
   2              \n\
  d  ⎛          2⎞\n\
─────⎝log(x) + x ⎠\n\
dy dx             \
""")
    ucode_str_2 = \
u("""\
   2              \n\
  d  ⎛ 2         ⎞\n\
─────⎝x  + log(x)⎠\n\
dy dx             \
""")
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    expr = Derivative(2*x*y, y, x) + x**2
    ascii_str_1 = \
"""\
   2             \n\
  d             2\n\
-----(2*x*y) + x \n\
dx dy            \
"""
    ascii_str_2 = \
"""\
        2        \n\
 2     d         \n\
x  + -----(2*x*y)\n\
     dx dy       \
"""
    ucode_str_1 = \
u("""\
   2             \n\
  ∂             2\n\
─────(2⋅x⋅y) + x \n\
∂x ∂y            \
""")
    ucode_str_2 = \
u("""\
        2        \n\
 2     ∂         \n\
x  + ─────(2⋅x⋅y)\n\
     ∂x ∂y       \
""")
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    expr = Derivative(2*x*y, x, x)
    ascii_str = \
"""\
  2       \n\
 d        \n\
---(2*x*y)\n\
  2       \n\
dx        \
"""
    ucode_str = \
u("""\
  2       \n\
 ∂        \n\
───(2⋅x⋅y)\n\
  2       \n\
∂x        \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Derivative(2*x*y, x, 17)
    ascii_str = \
"""\
  17       \n\
 d         \n\
----(2*x*y)\n\
  17       \n\
dx         \
"""
    ucode_str = \
u("""\
  17       \n\
 ∂         \n\
────(2⋅x⋅y)\n\
  17       \n\
∂x         \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Derivative(2*x*y, x, x, y)
    ascii_str = \
"""\
   3         \n\
  d          \n\
------(2*x*y)\n\
     2       \n\
dy dx        \
"""
    ucode_str = \
u("""\
   3         \n\
  ∂          \n\
──────(2⋅x⋅y)\n\
     2       \n\
∂y ∂x        \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Greek letters
    alpha = Symbol('alpha')
    beta = Function('beta')
    expr = beta(alpha).diff(alpha)
    ascii_str = \
"""\
  d                \n\
------(beta(alpha))\n\
dalpha             \
"""
    ucode_str = \
u("""\
d       \n\
──(β(α))\n\
dα      \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_integrals():
    """Check 2D pretty printing of Integral in ASCII and Unicode modes.

    NOTE(review): every expected-output literal below is whitespace
    sensitive (the 2D layout is encoded in spaces and explicit ``\\n``
    continuations) — do not re-indent, reflow, or strip these strings.
    """
    # Indefinite integral of an elementary function.
    expr = Integral(log(x), x)
    ascii_str = \
"""\
/ \n\
| \n\
| log(x) dx\n\
| \n\
/ \
"""
    ucode_str = \
u("""\
⌠ \n\
⎮ log(x) dx\n\
⌡ \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Integrand with a superscript (power) above the baseline.
    expr = Integral(x**2, x)
    ascii_str = \
"""\
/ \n\
| \n\
| 2 \n\
| x dx\n\
| \n\
/ \
"""
    ucode_str = \
u("""\
⌠ \n\
⎮ 2 \n\
⎮ x dx\n\
⌡ \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Fractional integrand: integral sign must stretch over the fraction.
    expr = Integral((sin(x))**2 / (tan(x))**2)
    ascii_str = \
"""\
/ \n\
| \n\
| 2 \n\
| sin (x) \n\
| ------- dx\n\
| 2 \n\
| tan (x) \n\
| \n\
/ \
"""
    ucode_str = \
u("""\
⌠ \n\
⎮ 2 \n\
⎮ sin (x) \n\
⎮ ─────── dx\n\
⎮ 2 \n\
⎮ tan (x) \n\
⌡ \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Nested exponent in the integrand.
    expr = Integral(x**(2**x), x)
    ascii_str = \
"""\
/ \n\
| \n\
| / x\\ \n\
| \\2 / \n\
| x dx\n\
| \n\
/ \
"""
    ucode_str = \
u("""\
⌠ \n\
⎮ ⎛ x⎞ \n\
⎮ ⎝2 ⎠ \n\
⎮ x dx\n\
⌡ \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Definite integral: limits are printed above and below the sign.
    expr = Integral(x**2, (x, 1, 2))
    ascii_str = \
"""\
2 \n\
/ \n\
| \n\
| 2 \n\
| x dx\n\
| \n\
/ \n\
1 \
"""
    ucode_str = \
u("""\
2 \n\
⌠ \n\
⎮ 2 \n\
⎮ x dx\n\
⌡ \n\
1 \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Rational lower limit prints as "1/2".
    expr = Integral(x**2, (x, Rational(1, 2), 10))
    ascii_str = \
"""\
10 \n\
/ \n\
| \n\
| 2 \n\
| x dx\n\
| \n\
/ \n\
1/2 \
"""
    ucode_str = \
u("""\
10 \n\
⌠ \n\
⎮ 2 \n\
⎮ x dx\n\
⌡ \n\
1/2 \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Double integral: one sign per integration variable.
    expr = Integral(x**2*y**2, x, y)
    ascii_str = \
"""\
/ / \n\
| | \n\
| | 2 2 \n\
| | x *y dx dy\n\
| | \n\
/ / \
"""
    ucode_str = \
u("""\
⌠ ⌠ \n\
⎮ ⎮ 2 2 \n\
⎮ ⎮ x ⋅y dx dy\n\
⌡ ⌡ \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Double definite integral with Greek-letter variables; ASCII mode
    # spells the letters out, Unicode mode uses the glyphs.
    expr = Integral(sin(th)/cos(ph), (th, 0, pi), (ph, 0, 2*pi))
    ascii_str = \
"""\
2*pi pi \n\
/ / \n\
| | \n\
| | sin(theta) \n\
| | ---------- d(theta) d(phi)\n\
| | cos(phi) \n\
| | \n\
/ / \n\
0 0 \
"""
    ucode_str = \
u("""\
2⋅π π \n\
⌠ ⌠ \n\
⎮ ⎮ sin(θ) \n\
⎮ ⎮ ────── dθ dφ\n\
⎮ ⎮ cos(φ) \n\
⌡ ⌡ \n\
0 0 \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_matrix():
    """Check 2D pretty printing of explicit Matrix objects.

    NOTE(review): the bracketed expected-output literals are whitespace
    sensitive; do not re-indent or reflow them.
    """
    # Empty Matrix
    expr = Matrix()
    ascii_str = "[]"
    unicode_str = "[]"
    assert pretty(expr) == ascii_str
    assert upretty(expr) == unicode_str
    # Zero-column matrix also prints as empty brackets.
    expr = Matrix(2, 0, lambda i, j: 0)
    ascii_str = "[]"
    unicode_str = "[]"
    assert pretty(expr) == ascii_str
    assert upretty(expr) == unicode_str
    # Zero-row matrix also prints as empty brackets.
    expr = Matrix(0, 2, lambda i, j: 0)
    ascii_str = "[]"
    unicode_str = "[]"
    assert pretty(expr) == ascii_str
    assert upretty(expr) == unicode_str
    # 2x2 matrix with a power entry; both argument orderings of the sum
    # "x**2 + 1" are accepted, hence the two expected variants.
    expr = Matrix([[x**2 + 1, 1], [y, x + y]])
    ascii_str_1 = \
"""\
[ 2 ]
[1 + x 1 ]
[ ]
[ y x + y]\
"""
    ascii_str_2 = \
"""\
[ 2 ]
[x + 1 1 ]
[ ]
[ y x + y]\
"""
    ucode_str_1 = \
u("""\
⎡ 2 ⎤
⎢1 + x 1 ⎥
⎢ ⎥
⎣ y x + y⎦\
""")
    ucode_str_2 = \
u("""\
⎡ 2 ⎤
⎢x + 1 1 ⎥
⎢ ⎥
⎣ y x + y⎦\
""")
    assert pretty(expr) in [ascii_str_1, ascii_str_2]
    assert upretty(expr) in [ucode_str_1, ucode_str_2]
    # Matrix with fraction, Greek-letter and exponential entries.
    expr = Matrix([[x/y, y, th], [0, exp(I*k*ph), 1]])
    ascii_str = \
"""\
[x ]
[- y theta]
[y ]
[ ]
[ I*k*phi ]
[0 e 1 ]\
"""
    ucode_str = \
u("""\
⎡x ⎤
⎢─ y θ⎥
⎢y ⎥
⎢ ⎥
⎢ ⅈ⋅k⋅φ ⎥
⎣0 ℯ 1⎦\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_ndim_arrays():
    """Check pretty printing of all four N-dim array types.

    The same expectations are run against the dense/sparse and
    mutable/immutable array classes, including tensor products that
    produce nested bracket layouts.

    NOTE(review): the expected-output literals are whitespace sensitive;
    do not re-indent or reflow them.
    """
    x, y, z, w = symbols("x y z w")
    for ArrayType in (ImmutableDenseNDimArray, ImmutableSparseNDimArray, MutableDenseNDimArray, MutableSparseNDimArray):
        # 2x2 array, rank-1 array, and their tensor products.
        M = ArrayType([[1/x, y], [z, w]])
        M1 = ArrayType([1/x, y, z])
        M2 = tensorproduct(M1, M)
        M3 = tensorproduct(M, M)
        ascii_str = \
"""\
[1 ]\n\
[- y]\n\
[x ]\n\
[ ]\n\
[z w]\
"""
        ucode_str = \
u("""\
⎡1 ⎤\n\
⎢─ y⎥\n\
⎢x ⎥\n\
⎢ ⎥\n\
⎣z w⎦\
""")
        assert pretty(M) == ascii_str
        assert upretty(M) == ucode_str
        ascii_str = \
"""\
[1 ]\n\
[- y z]\n\
[x ]\
"""
        ucode_str = \
u("""\
⎡1 ⎤\n\
⎢─ y z⎥\n\
⎣x ⎦\
""")
        assert pretty(M1) == ascii_str
        assert upretty(M1) == ucode_str
        # Rank-3 tensor product: nested brackets, one inner block per
        # entry of M1.
        ascii_str = \
"""\
[[1 y] ]\n\
[[-- -] [z ]]\n\
[[ 2 x] [ y 2 ] [- y*z]]\n\
[[x ] [ - y ] [x ]]\n\
[[ ] [ x ] [ ]]\n\
[[z w] [ ] [ 2 ]]\n\
[[- -] [y*z w*y] [z w*z]]\n\
[[x x] ]\
"""
        ucode_str = \
u("""\
⎡⎡1 y⎤ ⎤\n\
⎢⎢── ─⎥ ⎡z ⎤⎥\n\
⎢⎢ 2 x⎥ ⎡ y 2 ⎤ ⎢─ y⋅z⎥⎥\n\
⎢⎢x ⎥ ⎢ ─ y ⎥ ⎢x ⎥⎥\n\
⎢⎢ ⎥ ⎢ x ⎥ ⎢ ⎥⎥\n\
⎢⎢z w⎥ ⎢ ⎥ ⎢ 2 ⎥⎥\n\
⎢⎢─ ─⎥ ⎣y⋅z w⋅y⎦ ⎣z w⋅z⎦⎥\n\
⎣⎣x x⎦ ⎦\
""")
        assert pretty(M2) == ascii_str
        assert upretty(M2) == ucode_str
        # Rank-4 tensor product: 2x2 grid of 2x2 blocks.
        ascii_str = \
"""\
[ [1 y] ]\n\
[ [-- -] ]\n\
[ [ 2 x] [ y 2 ]]\n\
[ [x ] [ - y ]]\n\
[ [ ] [ x ]]\n\
[ [z w] [ ]]\n\
[ [- -] [y*z w*y]]\n\
[ [x x] ]\n\
[ ]\n\
[[z ] [ w ]]\n\
[[- y*z] [ - w*y]]\n\
[[x ] [ x ]]\n\
[[ ] [ ]]\n\
[[ 2 ] [ 2 ]]\n\
[[z w*z] [w*z w ]]\
"""
        ucode_str = \
u("""\
⎡ ⎡1 y⎤ ⎤\n\
⎢ ⎢── ─⎥ ⎥\n\
⎢ ⎢ 2 x⎥ ⎡ y 2 ⎤⎥\n\
⎢ ⎢x ⎥ ⎢ ─ y ⎥⎥\n\
⎢ ⎢ ⎥ ⎢ x ⎥⎥\n\
⎢ ⎢z w⎥ ⎢ ⎥⎥\n\
⎢ ⎢─ ─⎥ ⎣y⋅z w⋅y⎦⎥\n\
⎢ ⎣x x⎦ ⎥\n\
⎢ ⎥\n\
⎢⎡z ⎤ ⎡ w ⎤⎥\n\
⎢⎢─ y⋅z⎥ ⎢ ─ w⋅y⎥⎥\n\
⎢⎢x ⎥ ⎢ x ⎥⎥\n\
⎢⎢ ⎥ ⎢ ⎥⎥\n\
⎢⎢ 2 ⎥ ⎢ 2 ⎥⎥\n\
⎣⎣z w⋅z⎦ ⎣w⋅z w ⎦⎦\
""")
        assert pretty(M3) == ascii_str
        assert upretty(M3) == ucode_str
        # Row/column shaped arrays keep their extra bracket nesting.
        Mrow = ArrayType([[x, y, 1 / z]])
        Mcolumn = ArrayType([[x], [y], [1 / z]])
        Mcol2 = ArrayType([Mcolumn.tolist()])
        ascii_str = \
"""\
[[ 1]]\n\
[[x y -]]\n\
[[ z]]\
"""
        ucode_str = \
u("""\
⎡⎡ 1⎤⎤\n\
⎢⎢x y ─⎥⎥\n\
⎣⎣ z⎦⎦\
""")
        assert pretty(Mrow) == ascii_str
        assert upretty(Mrow) == ucode_str
        ascii_str = \
"""\
[x]\n\
[ ]\n\
[y]\n\
[ ]\n\
[1]\n\
[-]\n\
[z]\
"""
        ucode_str = \
u("""\
⎡x⎤\n\
⎢ ⎥\n\
⎢y⎥\n\
⎢ ⎥\n\
⎢1⎥\n\
⎢─⎥\n\
⎣z⎦\
""")
        assert pretty(Mcolumn) == ascii_str
        assert upretty(Mcolumn) == ucode_str
        ascii_str = \
"""\
[[x]]\n\
[[ ]]\n\
[[y]]\n\
[[ ]]\n\
[[1]]\n\
[[-]]\n\
[[z]]\
"""
        ucode_str = \
u("""\
⎡⎡x⎤⎤\n\
⎢⎢ ⎥⎥\n\
⎢⎢y⎥⎥\n\
⎢⎢ ⎥⎥\n\
⎢⎢1⎥⎥\n\
⎢⎢─⎥⎥\n\
⎣⎣z⎦⎦\
""")
        assert pretty(Mcol2) == ascii_str
        assert upretty(Mcol2) == ucode_str
def test_Adjoint():
    """Check printing of Adjoint: '+' superscript in ASCII, dagger in Unicode.

    NOTE(review): the expected strings encode a two-line layout
    (superscript line + base line) and are whitespace sensitive.
    """
    X = MatrixSymbol('X', 2, 2)
    Y = MatrixSymbol('Y', 2, 2)
    # Adjoint of atoms, sums and products; sums/products get parenthesized.
    assert pretty(Adjoint(X)) == " +\nX "
    assert pretty(Adjoint(X + Y)) == " +\n(X + Y) "
    assert pretty(Adjoint(X) + Adjoint(Y)) == " + +\nX + Y "
    assert pretty(Adjoint(X*Y)) == " +\n(X*Y) "
    assert pretty(Adjoint(Y)*Adjoint(X)) == " + +\nY *X "
    # Combinations with powers, inverses and transposes.
    assert pretty(Adjoint(X**2)) == " +\n/ 2\\ \n\\X / "
    assert pretty(Adjoint(X)**2) == " 2\n/ +\\ \n\\X / "
    assert pretty(Adjoint(Inverse(X))) == " +\n/ -1\\ \n\\X / "
    assert pretty(Inverse(Adjoint(X))) == " -1\n/ +\\ \n\\X / "
    assert pretty(Adjoint(Transpose(X))) == " +\n/ T\\ \n\\X / "
    assert pretty(Transpose(Adjoint(X))) == " T\n/ +\\ \n\\X / "
    # Unicode mode: dagger superscript instead of '+'.
    assert upretty(Adjoint(X)) == u" †\nX "
    assert upretty(Adjoint(X + Y)) == u" †\n(X + Y) "
    assert upretty(Adjoint(X) + Adjoint(Y)) == u" † †\nX + Y "
    assert upretty(Adjoint(X*Y)) == u" †\n(X⋅Y) "
    assert upretty(Adjoint(Y)*Adjoint(X)) == u" † †\nY ⋅X "
    assert upretty(Adjoint(X**2)) == \
        u" †\n⎛ 2⎞ \n⎝X ⎠ "
    assert upretty(Adjoint(X)**2) == \
        u" 2\n⎛ †⎞ \n⎝X ⎠ "
    assert upretty(Adjoint(Inverse(X))) == \
        u" †\n⎛ -1⎞ \n⎝X ⎠ "
    assert upretty(Inverse(Adjoint(X))) == \
        u" -1\n⎛ †⎞ \n⎝X ⎠ "
    assert upretty(Adjoint(Transpose(X))) == \
        u" †\n⎛ T⎞ \n⎝X ⎠ "
    assert upretty(Transpose(Adjoint(X))) == \
        u" T\n⎛ †⎞ \n⎝X ⎠ "
def test_pretty_Trace_issue_9044():
    """Regression test for issue 9044: Trace of explicit matrices.

    Verifies that ``tr(...)`` wraps the 2D matrix form, and that a sum
    of traces prints both operands. Expected literals are whitespace
    sensitive.
    """
    X = Matrix([[1, 2], [3, 4]])
    Y = Matrix([[2, 4], [6, 8]])
    ascii_str_1 = \
"""\
/[1 2]\\
tr|[ ]|
\\[3 4]/\
"""
    ucode_str_1 = \
u("""\
⎛⎡1 2⎤⎞
tr⎜⎢ ⎥⎟
⎝⎣3 4⎦⎠\
""")
    ascii_str_2 = \
"""\
/[1 2]\\ /[2 4]\\
tr|[ ]| + tr|[ ]|
\\[3 4]/ \\[6 8]/\
"""
    ucode_str_2 = \
u("""\
⎛⎡1 2⎤⎞ ⎛⎡2 4⎤⎞
tr⎜⎢ ⎥⎟ + tr⎜⎢ ⎥⎟
⎝⎣3 4⎦⎠ ⎝⎣6 8⎦⎠\
""")
    assert pretty(Trace(X)) == ascii_str_1
    assert upretty(Trace(X)) == ucode_str_1
    assert pretty(Trace(X) + Trace(Y)) == ascii_str_2
    assert upretty(Trace(X) + Trace(Y)) == ucode_str_2
def test_MatrixExpressions():
    """Pretty printing of matrix symbols and matrix slices."""
    dim = Symbol('n', integer=True)
    X = MatrixSymbol('X', dim, dim)
    # A bare matrix symbol prints as its name in both modes.
    assert pretty(X) == upretty(X) == "X"
    # Slices render the same in ASCII and Unicode mode; the step of a
    # length-2 slice is dropped from the output.
    sliced = X[1:2:3, 4:5:6]
    expected = "X[1:3, 4:6]"
    assert pretty(sliced) == expected
    assert upretty(sliced) == expected
    # A single-axis slice fills in the full range for the other axis.
    sliced = X[1:10:2]
    expected = "X[1:10:2, :n]"
    assert pretty(sliced) == expected
    assert upretty(sliced) == expected
def test_pretty_dotproduct():
    """Pretty printing of DotProduct for symbolic and explicit matrices."""
    from sympy.matrices import Matrix, MatrixSymbol
    from sympy.matrices.expressions.dotproduct import DotProduct
    n = symbols("n", integer=True)
    A = MatrixSymbol('A', n, 1)
    B = MatrixSymbol('B', n, 1)
    C = Matrix(1, 3, [1, 2, 3])
    D = Matrix(1, 3, [1, 3, 4])
    # ASCII mode uses '*' as the product symbol, Unicode mode uses '⋅'.
    cases = [
        (DotProduct(A, B), u"A*B", u"A⋅B"),
        (DotProduct(C, D), u"[1 2 3]*[1 3 4]", u"[1 2 3]⋅[1 3 4]"),
    ]
    for prod, expected_ascii, expected_unicode in cases:
        assert pretty(prod) == expected_ascii
        assert upretty(prod) == expected_unicode
def test_pretty_piecewise():
    """Check 2D pretty printing of Piecewise, alone and inside arithmetic.

    Covers negation, addition/subtraction, multiplication, powers, and a
    meijerg condition branch. When a Piecewise appears inside a larger
    expression it must be parenthesized.

    NOTE(review): expected-output literals are whitespace sensitive;
    do not re-indent or reflow them.
    """
    expr = Piecewise((x, x < 1), (x**2, True))
    ascii_str = \
"""\
/x for x < 1\n\
| \n\
< 2 \n\
|x otherwise\n\
\\ \
"""
    ucode_str = \
u("""\
⎧x for x < 1\n\
⎪ \n\
⎨ 2 \n\
⎪x otherwise\n\
⎩ \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Negated Piecewise is wrapped in parentheses.
    expr = -Piecewise((x, x < 1), (x**2, True))
    ascii_str = \
"""\
//x for x < 1\\\n\
|| |\n\
-|< 2 |\n\
||x otherwise|\n\
\\\\ /\
"""
    ucode_str = \
u("""\
⎛⎧x for x < 1⎞\n\
⎜⎪ ⎟\n\
-⎜⎨ 2 ⎟\n\
⎜⎪x otherwise⎟\n\
⎝⎩ ⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Two Piecewise terms inside a sum.
    expr = x + Piecewise((x, x > 0), (y, True)) + Piecewise((x/y, x < 2),
    (y**2, x > 2), (1, True)) + 1
    ascii_str = \
"""\
//x \\ \n\
||- for x < 2| \n\
||y | \n\
//x for x > 0\\ || | \n\
x + |< | + |< 2 | + 1\n\
\\\\y otherwise/ ||y for x > 2| \n\
|| | \n\
||1 otherwise| \n\
\\\\ / \
"""
    ucode_str = \
u("""\
⎛⎧x ⎞ \n\
⎜⎪─ for x < 2⎟ \n\
⎜⎪y ⎟ \n\
⎛⎧x for x > 0⎞ ⎜⎪ ⎟ \n\
x + ⎜⎨ ⎟ + ⎜⎨ 2 ⎟ + 1\n\
⎝⎩y otherwise⎠ ⎜⎪y for x > 2⎟ \n\
⎜⎪ ⎟ \n\
⎜⎪1 otherwise⎟ \n\
⎝⎩ ⎠ \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Same shape with a subtracted Piecewise.
    expr = x - Piecewise((x, x > 0), (y, True)) + Piecewise((x/y, x < 2),
    (y**2, x > 2), (1, True)) + 1
    ascii_str = \
"""\
//x \\ \n\
||- for x < 2| \n\
||y | \n\
//x for x > 0\\ || | \n\
x - |< | + |< 2 | + 1\n\
\\\\y otherwise/ ||y for x > 2| \n\
|| | \n\
||1 otherwise| \n\
\\\\ / \
"""
    ucode_str = \
u("""\
⎛⎧x ⎞ \n\
⎜⎪─ for x < 2⎟ \n\
⎜⎪y ⎟ \n\
⎛⎧x for x > 0⎞ ⎜⎪ ⎟ \n\
x - ⎜⎨ ⎟ + ⎜⎨ 2 ⎟ + 1\n\
⎝⎩y otherwise⎠ ⎜⎪y for x > 2⎟ \n\
⎜⎪ ⎟ \n\
⎜⎪1 otherwise⎟ \n\
⎝⎩ ⎠ \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Piecewise as a factor.
    expr = x*Piecewise((x, x > 0), (y, True))
    ascii_str = \
"""\
//x for x > 0\\\n\
x*|< |\n\
\\\\y otherwise/\
"""
    ucode_str = \
u("""\
⎛⎧x for x > 0⎞\n\
x⋅⎜⎨ ⎟\n\
⎝⎩y otherwise⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Product of two Piecewise expressions.
    expr = Piecewise((x, x > 0), (y, True))*Piecewise((x/y, x < 2), (y**2, x >
    2), (1, True))
    ascii_str = \
"""\
//x \\\n\
||- for x < 2|\n\
||y |\n\
//x for x > 0\\ || |\n\
|< |*|< 2 |\n\
\\\\y otherwise/ ||y for x > 2|\n\
|| |\n\
||1 otherwise|\n\
\\\\ /\
"""
    ucode_str = \
u("""\
⎛⎧x ⎞\n\
⎜⎪─ for x < 2⎟\n\
⎜⎪y ⎟\n\
⎛⎧x for x > 0⎞ ⎜⎪ ⎟\n\
⎜⎨ ⎟⋅⎜⎨ 2 ⎟\n\
⎝⎩y otherwise⎠ ⎜⎪y for x > 2⎟\n\
⎜⎪ ⎟\n\
⎜⎪1 otherwise⎟\n\
⎝⎩ ⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Negated product of two Piecewise expressions.
    expr = -Piecewise((x, x > 0), (y, True))*Piecewise((x/y, x < 2), (y**2, x
    > 2), (1, True))
    ascii_str = \
"""\
//x \\\n\
||- for x < 2|\n\
||y |\n\
//x for x > 0\\ || |\n\
-|< |*|< 2 |\n\
\\\\y otherwise/ ||y for x > 2|\n\
|| |\n\
||1 otherwise|\n\
\\\\ /\
"""
    ucode_str = \
u("""\
⎛⎧x ⎞\n\
⎜⎪─ for x < 2⎟\n\
⎜⎪y ⎟\n\
⎛⎧x for x > 0⎞ ⎜⎪ ⎟\n\
-⎜⎨ ⎟⋅⎜⎨ 2 ⎟\n\
⎝⎩y otherwise⎠ ⎜⎪y for x > 2⎟\n\
⎜⎪ ⎟\n\
⎜⎪1 otherwise⎟\n\
⎝⎩ ⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Branch containing a Meijer G-function.
    expr = Piecewise((0, Abs(1/y) < 1), (1, Abs(y) < 1), (y*meijerg(((2, 1),
    ()), ((), (1, 0)), 1/y), True))
    ascii_str = \
"""\
/ |1| \n\
| 0 for |-| < 1\n\
| |y| \n\
| \n\
< 1 for |y| < 1\n\
| \n\
| __0, 2 /2, 1 | 1\\ \n\
|y*/__ | | -| otherwise \n\
\\ \\_|2, 2 \\ 1, 0 | y/ \
"""
    ucode_str = \
u("""\
⎧ │1│ \n\
⎪ 0 for │─│ < 1\n\
⎪ │y│ \n\
⎪ \n\
⎨ 1 for │y│ < 1\n\
⎪ \n\
⎪ ╭─╮0, 2 ⎛2, 1 │ 1⎞ \n\
⎪y⋅│╶┐ ⎜ │ ─⎟ otherwise \n\
⎩ ╰─╯2, 2 ⎝ 1, 0 │ y⎠ \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # XXX: We have to use evaluate=False here because Piecewise._eval_power
    # denests the power.
    expr = Pow(Piecewise((x, x > 0), (y, True)), 2, evaluate=False)
    ascii_str = \
"""\
2\n\
//x for x > 0\\ \n\
|< | \n\
\\\\y otherwise/ \
"""
    ucode_str = \
u("""\
2\n\
⎛⎧x for x > 0⎞ \n\
⎜⎨ ⎟ \n\
⎝⎩y otherwise⎠ \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_seq():
    """Check pretty printing of Python sequences (tuple/list/dict) of
    sympy expressions, plus sympy's Tuple and Dict wrappers.

    Includes a regression case for sequences of even height.

    NOTE(review): expected-output literals are whitespace sensitive;
    do not re-indent or reflow them.
    """
    # Empty containers.
    expr = ()
    ascii_str = \
"""\
()\
"""
    ucode_str = \
u("""\
()\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = []
    ascii_str = \
"""\
[]\
"""
    ucode_str = \
u("""\
[]\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = {}
    expr_2 = {}
    ascii_str = \
"""\
{}\
"""
    ucode_str = \
u("""\
{}\
""")
    assert pretty(expr) == ascii_str
    assert pretty(expr_2) == ascii_str
    assert upretty(expr) == ucode_str
    assert upretty(expr_2) == ucode_str
    # Single-element tuple keeps its trailing comma.
    expr = (1/x,)
    ascii_str = \
"""\
1 \n\
(-,)\n\
x \
"""
    ucode_str = \
u("""\
⎛1 ⎞\n\
⎜─,⎟\n\
⎝x ⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Mixed-height entries in a list.
    expr = [x**2, 1/x, x, y, sin(th)**2/cos(ph)**2]
    ascii_str = \
"""\
2 \n\
2 1 sin (theta) \n\
[x , -, x, y, -----------]\n\
x 2 \n\
cos (phi) \
"""
    ucode_str = \
u("""\
⎡ 2 ⎤\n\
⎢ 2 1 sin (θ)⎥\n\
⎢x , ─, x, y, ───────⎥\n\
⎢ x 2 ⎥\n\
⎣ cos (φ)⎦\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Same entries as a tuple.
    expr = (x**2, 1/x, x, y, sin(th)**2/cos(ph)**2)
    ascii_str = \
"""\
2 \n\
2 1 sin (theta) \n\
(x , -, x, y, -----------)\n\
x 2 \n\
cos (phi) \
"""
    ucode_str = \
u("""\
⎛ 2 ⎞\n\
⎜ 2 1 sin (θ)⎟\n\
⎜x , ─, x, y, ───────⎟\n\
⎜ x 2 ⎟\n\
⎝ cos (φ)⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # sympy Tuple prints the same as a Python tuple.
    expr = Tuple(x**2, 1/x, x, y, sin(th)**2/cos(ph)**2)
    ascii_str = \
"""\
2 \n\
2 1 sin (theta) \n\
(x , -, x, y, -----------)\n\
x 2 \n\
cos (phi) \
"""
    ucode_str = \
u("""\
⎛ 2 ⎞\n\
⎜ 2 1 sin (θ)⎟\n\
⎜x , ─, x, y, ───────⎟\n\
⎜ x 2 ⎟\n\
⎝ cos (φ)⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Python dict and sympy Dict print identically.
    expr = {x: sin(x)}
    expr_2 = Dict({x: sin(x)})
    ascii_str = \
"""\
{x: sin(x)}\
"""
    ucode_str = \
u("""\
{x: sin(x)}\
""")
    assert pretty(expr) == ascii_str
    assert pretty(expr_2) == ascii_str
    assert upretty(expr) == ucode_str
    assert upretty(expr_2) == ucode_str
    expr = {1/x: 1/y, x: sin(x)**2}
    expr_2 = Dict({1/x: 1/y, x: sin(x)**2})
    ascii_str = \
"""\
1 1 2 \n\
{-: -, x: sin (x)}\n\
x y \
"""
    ucode_str = \
u("""\
⎧1 1 2 ⎫\n\
⎨─: ─, x: sin (x)⎬\n\
⎩x y ⎭\
""")
    assert pretty(expr) == ascii_str
    assert pretty(expr_2) == ascii_str
    assert upretty(expr) == ucode_str
    assert upretty(expr_2) == ucode_str
    # There used to be a bug with pretty-printing sequences of even height.
    expr = [x**2]
    ascii_str = \
"""\
2 \n\
[x ]\
"""
    ucode_str = \
u("""\
⎡ 2⎤\n\
⎣x ⎦\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = (x**2,)
    ascii_str = \
"""\
2 \n\
(x ,)\
"""
    ucode_str = \
u("""\
⎛ 2 ⎞\n\
⎝x ,⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Tuple(x**2)
    ascii_str = \
"""\
2 \n\
(x ,)\
"""
    ucode_str = \
u("""\
⎛ 2 ⎞\n\
⎝x ,⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = {x**2: 1}
    expr_2 = Dict({x**2: 1})
    ascii_str = \
"""\
2 \n\
{x : 1}\
"""
    ucode_str = \
u("""\
⎧ 2 ⎫\n\
⎨x : 1⎬\n\
⎩ ⎭\
""")
    assert pretty(expr) == ascii_str
    assert pretty(expr_2) == ascii_str
    assert upretty(expr) == ucode_str
    assert upretty(expr_2) == ucode_str
def test_any_object_in_sequence():
    """Arbitrary Basic objects inside Python containers must print via
    their repr-style form (regression for issue 5306)."""
    plain = Basic()
    nested = Basic(Basic())
    # List preserves insertion order.
    in_list = [nested, plain]
    assert pretty(in_list) == "[Basic(Basic()), Basic()]"
    assert upretty(in_list) == u"[Basic(Basic()), Basic()]"
    # Set output is sorted and wrapped in the legacy set([...]) form.
    in_set = {nested, plain}
    assert pretty(in_set) == "set([Basic(), Basic(Basic())])"
    assert upretty(in_set) == u"set([Basic(), Basic(Basic())])"
    # Python dict and sympy Dict print identically, keys sorted.
    in_dict = {nested: plain, plain: nested}
    in_Dict = Dict({nested: plain, plain: nested})
    assert pretty(in_dict) == "{Basic(): Basic(Basic()), Basic(Basic()): Basic()}"
    assert pretty(
        in_Dict) == "{Basic(): Basic(Basic()), Basic(Basic()): Basic()}"
    assert upretty(
        in_dict) == u"{Basic(): Basic(Basic()), Basic(Basic()): Basic()}"
    assert upretty(
        in_Dict) == u"{Basic(): Basic(Basic()), Basic(Basic()): Basic()}"
def test_pretty_sets():
    """Check printing of FiniteSet, builtin set/frozenset, and Range.

    Long Ranges are elided with '...' (ASCII) or '…' (Unicode).

    NOTE(review): the multi-line expected literal for set/frozenset is
    whitespace sensitive; do not re-indent or reflow it.
    """
    s = FiniteSet
    assert pretty(s(*[x*y, x**2])) == \
"""\
2 \n\
{x , x*y}\
"""
    assert pretty(s(*range(1, 6))) == "{1, 2, 3, 4, 5}"
    assert pretty(s(*range(1, 13))) == "{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}"
    # Builtin containers print with their class name as prefix; the
    # padding before the superscript row depends on the name length.
    for s in (frozenset, set):
        assert pretty(s([x*y, x**2])) == \
"""\
%s 2 \n\
%s([x , x*y])\
""" % (" " * len(s.__name__), s.__name__)
        assert pretty(s(range(1, 6))) == "%s([1, 2, 3, 4, 5])" % s.__name__
        assert pretty(s(range(1, 13))) == \
            "%s([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])" % s.__name__
    # Short Range prints all elements; long/infinite Ranges are elided.
    assert pretty(Range(0, 3, 1)) == '{0, 1, 2}'
    ascii_str = '{0, 1, ..., 29}'
    ucode_str = u'{0, 1, …, 29}'
    assert pretty(Range(0, 30, 1)) == ascii_str
    assert upretty(Range(0, 30, 1)) == ucode_str
    ascii_str = '{30, 29, ..., 2}'
    ucode_str = u('{30, 29, …, 2}')
    assert pretty(Range(30, 1, -1)) == ascii_str
    assert upretty(Range(30, 1, -1)) == ucode_str
    ascii_str = '{0, 2, ..., oo}'
    ucode_str = u'{0, 2, …, ∞}'
    assert pretty(Range(0, oo, 2)) == ascii_str
    assert upretty(Range(0, oo, 2)) == ucode_str
    ascii_str = '{oo, ..., 2, 0}'
    ucode_str = u('{∞, …, 2, 0}')
    assert pretty(Range(oo, -2, -2)) == ascii_str
    assert upretty(Range(oo, -2, -2)) == ucode_str
    ascii_str = '{-2, -3, ..., -oo}'
    ucode_str = u('{-2, -3, …, -∞}')
    assert pretty(Range(-2, -oo, -1)) == ascii_str
    assert upretty(Range(-2, -oo, -1)) == ucode_str
def test_pretty_ConditionSet():
    """Printing of ConditionSet, including constructions that simplify
    to a plain set on instantiation."""
    from sympy import ConditionSet
    sin_zeros = ConditionSet(x, Eq(sin(x), 0), S.Reals)
    assert pretty(sin_zeros) == '{x | x in (-oo, oo) and sin(x) = 0}'
    assert upretty(sin_zeros) == u'{x | x ∊ ℝ ∧ sin(x) = 0}'
    # A condition that always holds collapses to the base set.
    trivial = ConditionSet(x, Contains(x, S.Reals, evaluate=False), FiniteSet(1))
    assert pretty(trivial) == '{1}'
    assert upretty(trivial) == u'{1}'
    # An unsatisfiable condition collapses to the empty set.
    impossible = ConditionSet(x, And(x > 1, x < -1), FiniteSet(1, 2, 3))
    assert pretty(impossible) == "EmptySet()"
    assert upretty(impossible) == u"∅"
    # The condition filters the finite base set down to one element.
    filtered = ConditionSet(x, Or(x > 1, x < -1), FiniteSet(1, 2))
    assert pretty(filtered) == '{2}'
    assert upretty(filtered) == u'{2}'
def test_pretty_ComplexRegion():
    """Unicode printing of rectangular and polar ComplexRegions."""
    from sympy import ComplexRegion
    # Rectangular form: x + y*i over a product of intervals.
    rectangular = ComplexRegion(Interval(3, 5)*Interval(4, 6))
    assert upretty(rectangular) == u'{x + y⋅ⅈ | x, y ∊ [3, 5] × [4, 6]}'
    # Polar form: r*(cos + i*sin) with theta in [0, 2*pi).
    polar = ComplexRegion(Interval(0, 1)*Interval(0, 2*pi), polar=True)
    assert upretty(polar) == u'{r⋅(ⅈ⋅sin(θ) + cos(θ)) | r, θ ∊ [0, 1] × [0, 2⋅π)}'
def test_pretty_Union_issue_10414():
    """Union of intervals uses 'U' in ASCII and '∪' in Unicode (issue 10414)."""
    union = Union(Interval(2, 3), Interval(4, 7))
    assert pretty(union) == '[2, 3] U [4, 7]'
    assert upretty(union) == u'[2, 3] ∪ [4, 7]'
def test_pretty_Intersection_issue_10414():
    """Intersection of intervals uses 'n' in ASCII and '∩' in Unicode
    (issue 10414)."""
    x, y, z, w = symbols('x, y, z, w')
    intersection = Intersection(Interval(x, y), Interval(z, w))
    assert pretty(intersection) == '[x, y] n [z, w]'
    assert upretty(intersection) == u'[x, y] ∩ [z, w]'
def test_ProductSet_paranthesis():
    """Product sets inside a Union must be parenthesized.

    NOTE(review): the function name's misspelling ("paranthesis") is kept
    because it is the test's public identifier.
    """
    low, high, unused = Interval(2, 3), Interval(4, 7), Interval(1, 9)
    combined = Union(low*high, high*FiniteSet(1, 2))
    assert upretty(combined) == u'([4, 7] × {1, 2}) ∪ ([2, 3] × [4, 7])'
def test_ProductSet_prod_char_issue_10413():
    """The Cartesian-product separator is 'x' in ASCII and '×' in Unicode
    (issue 10413)."""
    product = Interval(2, 3)*Interval(4, 7)
    assert pretty(product) == '[2, 3] x [4, 7]'
    assert upretty(product) == u'[2, 3] × [4, 7]'
def test_pretty_sequences():
    """Printing of SeqFormula/SeqPer and their element-wise sums and
    products; infinite ends are elided with '...' / '…'."""
    s1 = SeqFormula(a**2, (0, oo))
    s2 = SeqPer((1, 2))
    s3 = SeqFormula(a**2, (0, 2))
    s4 = SeqPer((1, 2), (0, 2))
    s5 = SeqFormula(a**2, (-oo, 0))
    s6 = SeqPer((1, 2), (-oo, 0))
    # (sequence, ASCII expectation, Unicode expectation)
    cases = [
        (s1, '[0, 1, 4, 9, ...]', u'[0, 1, 4, 9, …]'),
        (s2, '[1, 2, 1, 2, ...]', u'[1, 2, 1, 2, …]'),
        (s3, '[0, 1, 4]', u'[0, 1, 4]'),
        (s4, '[1, 2, 1]', u'[1, 2, 1]'),
        (s5, '[..., 9, 4, 1, 0]', u'[…, 9, 4, 1, 0]'),
        (s6, '[..., 2, 1, 2, 1]', u'[…, 2, 1, 2, 1]'),
        # Element-wise sums.
        (SeqAdd(s1, s2), '[1, 3, 5, 11, ...]', u'[1, 3, 5, 11, …]'),
        (SeqAdd(s3, s4), '[1, 3, 5]', u'[1, 3, 5]'),
        (SeqAdd(s5, s6), '[..., 11, 5, 3, 1]', u'[…, 11, 5, 3, 1]'),
        # Element-wise products.
        (SeqMul(s1, s2), '[0, 2, 4, 18, ...]', u'[0, 2, 4, 18, …]'),
        (SeqMul(s3, s4), '[0, 2, 4]', u'[0, 2, 4]'),
        (SeqMul(s5, s6), '[..., 18, 4, 2, 0]', u'[…, 18, 4, 2, 0]'),
    ]
    for seq, expected_ascii, expected_unicode in cases:
        assert pretty(seq) == expected_ascii
        assert upretty(seq) == expected_unicode
def test_pretty_FourierSeries():
    """Check printing of a truncated FourierSeries: the leading terms
    followed by an ellipsis. Expected literals are whitespace sensitive."""
    f = fourier_series(x, (x, -pi, pi))
    ascii_str = \
"""\
2*sin(3*x) \n\
2*sin(x) - sin(2*x) + ---------- + ...\n\
3 \
"""
    ucode_str = \
u("""\
2⋅sin(3⋅x) \n\
2⋅sin(x) - sin(2⋅x) + ────────── + …\n\
3 \
""")
    assert pretty(f) == ascii_str
    assert upretty(f) == ucode_str
def test_pretty_FormalPowerSeries():
    """Check printing of a FormalPowerSeries as a Sum with a big sigma.

    Expected literals are whitespace sensitive; do not re-indent them.
    """
    f = fps(log(1 + x))
    ascii_str = \
"""\
oo \n\
____ \n\
\\ ` \n\
\\ -k k \n\
\\ -(-1) *x \n\
/ -----------\n\
/ k \n\
/___, \n\
k = 1 \
"""
    ucode_str = \
u("""\
∞ \n\
____ \n\
╲ \n\
╲ -k k \n\
╲ -(-1) ⋅x \n\
╱ ───────────\n\
╱ k \n\
╱ \n\
‾‾‾‾ \n\
k = 1 \
""")
    assert pretty(f) == ascii_str
    assert upretty(f) == ucode_str
def test_pretty_limits():
    """Check printing of Limit: direction superscripts (+/-), nesting,
    powers and coefficients.

    NOTE(review): expected literals are whitespace sensitive; do not
    re-indent or reflow them.
    """
    # Limit at infinity prints no direction marker.
    expr = Limit(x, x, oo)
    ascii_str = \
"""\
lim x\n\
x->oo \
"""
    ucode_str = \
u("""\
lim x\n\
x─→∞ \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Default direction is from the right ('+').
    expr = Limit(x**2, x, 0)
    ascii_str = \
"""\
2\n\
lim x \n\
x->0+ \
"""
    ucode_str = \
u("""\
2\n\
lim x \n\
x─→0⁺ \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Limit(1/x, x, 0)
    ascii_str = \
"""\
1\n\
lim -\n\
x->0+x\
"""
    ucode_str = \
u("""\
1\n\
lim ─\n\
x─→0⁺x\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Fractional argument gets parenthesized.
    expr = Limit(sin(x)/x, x, 0)
    ascii_str = \
"""\
/sin(x)\\\n\
lim |------|\n\
x->0+\\ x /\
"""
    ucode_str = \
u("""\
⎛sin(x)⎞\n\
lim ⎜──────⎟\n\
x─→0⁺⎝ x ⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Explicit left-hand limit.
    expr = Limit(sin(x)/x, x, 0, "-")
    ascii_str = \
"""\
/sin(x)\\\n\
lim |------|\n\
x->0-\\ x /\
"""
    ucode_str = \
u("""\
⎛sin(x)⎞\n\
lim ⎜──────⎟\n\
x─→0⁻⎝ x ⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = Limit(x + sin(x), x, 0)
    ascii_str = \
"""\
lim (x + sin(x))\n\
x->0+ \
"""
    ucode_str = \
u("""\
lim (x + sin(x))\n\
x─→0⁺ \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # A power of a Limit gets parenthesized.
    expr = Limit(x, x, 0)**2
    ascii_str = \
"""\
2\n\
/ lim x\\ \n\
\\x->0+ / \
"""
    ucode_str = \
u("""\
2\n\
⎛ lim x⎞ \n\
⎝x─→0⁺ ⎠ \
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Nested limits.
    expr = Limit(x*Limit(y/2,y,0), x, 0)
    ascii_str = \
"""\
/ /y\\\\\n\
lim |x* lim |-||\n\
x->0+\\ y->0+\\2//\
"""
    ucode_str = \
u("""\
⎛ ⎛y⎞⎞\n\
lim ⎜x⋅ lim ⎜─⎟⎟\n\
x─→0⁺⎝ y─→0⁺⎝2⎠⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Scalar coefficient in front of a nested limit.
    expr = 2*Limit(x*Limit(y/2,y,0), x, 0)
    ascii_str = \
"""\
/ /y\\\\\n\
2* lim |x* lim |-||\n\
x->0+\\ y->0+\\2//\
"""
    ucode_str = \
u("""\
⎛ ⎛y⎞⎞\n\
2⋅ lim ⎜x⋅ lim ⎜─⎟⎟\n\
x─→0⁺⎝ y─→0⁺⎝2⎠⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_ComplexRootOf():
    """Check printing of CRootOf: function-style with the polynomial and
    root index as arguments. Expected literals are whitespace sensitive."""
    expr = rootof(x**5 + 11*x - 2, 0)
    ascii_str = \
"""\
/ 5 \\\n\
CRootOf\\x + 11*x - 2, 0/\
"""
    ucode_str = \
u("""\
⎛ 5 ⎞\n\
CRootOf⎝x + 11⋅x - 2, 0⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_RootSum():
    """Check printing of RootSum with and without an explicit Lambda.

    The Lambda prints as 'z -> e**z' (ASCII) / 'z ↦ ℯ**z' (Unicode).
    Expected literals are whitespace sensitive.
    """
    expr = RootSum(x**5 + 11*x - 2, auto=False)
    ascii_str = \
"""\
/ 5 \\\n\
RootSum\\x + 11*x - 2/\
"""
    ucode_str = \
u("""\
⎛ 5 ⎞\n\
RootSum⎝x + 11⋅x - 2⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    expr = RootSum(x**5 + 11*x - 2, Lambda(z, exp(z)))
    ascii_str = \
"""\
/ 5 z\\\n\
RootSum\\x + 11*x - 2, z -> e /\
"""
    ucode_str = \
u("""\
⎛ 5 z⎞\n\
RootSum⎝x + 11⋅x - 2, z ↦ ℯ ⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_GroebnerBasis():
    """Check printing of GroebnerBasis objects, including the domain and
    monomial-order annotations and the fglm order conversion.

    Expected literals are whitespace sensitive.
    """
    # Empty basis still shows generators, domain and order.
    expr = groebner([], x, y)
    ascii_str = \
"""\
GroebnerBasis([], x, y, domain=ZZ, order=lex)\
"""
    ucode_str = \
u("""\
GroebnerBasis([], x, y, domain=ℤ, order=lex)\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    F = [x**2 - 3*y - x + 1, y**2 - 2*x + y - 1]
    expr = groebner(F, x, y, order='grlex')
    ascii_str = \
"""\
/[ 2 2 ] \\\n\
GroebnerBasis\\[x - x - 3*y + 1, y - 2*x + y - 1], x, y, domain=ZZ, order=grlex/\
"""
    ucode_str = \
u("""\
⎛⎡ 2 2 ⎤ ⎞\n\
GroebnerBasis⎝⎣x - x - 3⋅y + 1, y - 2⋅x + y - 1⎦, x, y, domain=ℤ, order=grlex⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    # Convert the basis from grlex to lex via FGLM.
    expr = expr.fglm('lex')
    ascii_str = \
"""\
/[ 2 4 3 2 ] \\\n\
GroebnerBasis\\[2*x - y - y + 1, y + 2*y - 3*y - 16*y + 7], x, y, domain=ZZ, order=lex/\
"""
    ucode_str = \
u("""\
⎛⎡ 2 4 3 2 ⎤ ⎞\n\
GroebnerBasis⎝⎣2⋅x - y - y + 1, y + 2⋅y - 3⋅y - 16⋅y + 7⎦, x, y, domain=ℤ, order=lex⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_Boolean():
    """Boolean operators print as function calls in ASCII mode and with
    logic glyphs in Unicode mode."""
    many = symbols('a:f')
    # (expression, ASCII expectation, Unicode expectation)
    cases = [
        (Not(x, evaluate=False), "Not(x)", u"¬x"),
        (And(x, y), "And(x, y)", u"x ∧ y"),
        (Or(x, y), "Or(x, y)", u"x ∨ y"),
        (And(*many), "And(a, b, c, d, e, f)", u"a ∧ b ∧ c ∧ d ∧ e ∧ f"),
        (Or(*many), "Or(a, b, c, d, e, f)", u"a ∨ b ∨ c ∨ d ∨ e ∨ f"),
        (Xor(x, y, evaluate=False), "Xor(x, y)", u"x ⊻ y"),
        (Nand(x, y, evaluate=False), "Nand(x, y)", u"x ⊼ y"),
        (Nor(x, y, evaluate=False), "Nor(x, y)", u"x ⊽ y"),
        (Implies(x, y, evaluate=False), "Implies(x, y)", u"x → y"),
        # Implies must not sort its arguments.
        (Implies(y, x, evaluate=False), "Implies(y, x)", u"y → x"),
        (Equivalent(x, y, evaluate=False), "Equivalent(x, y)", u"x ≡ y"),
        # Equivalent canonicalizes argument order.
        (Equivalent(y, x, evaluate=False), "Equivalent(x, y)", u"x ≡ y"),
    ]
    for expr, expected_ascii, expected_unicode in cases:
        assert pretty(expr) == expected_ascii
        assert upretty(expr) == expected_unicode
def test_pretty_Domain():
    """Polynomial domains print with letter codes in ASCII mode and
    blackboard-bold glyphs in Unicode mode."""
    # (domain, ASCII expectation, Unicode expectation)
    cases = [
        (FF(23), "GF(23)", u"ℤ₂₃"),
        (ZZ, "ZZ", u"ℤ"),
        (QQ, "QQ", u"ℚ"),
        (RR, "RR", u"ℝ"),
        (QQ[x], "QQ[x]", u"ℚ[x]"),
        (QQ[x, y], "QQ[x, y]", u"ℚ[x, y]"),
        (ZZ.frac_field(x), "ZZ(x)", u"ℤ(x)"),
        (ZZ.frac_field(x, y), "ZZ(x, y)", u"ℤ(x, y)"),
        # Non-default monomial orders are shown explicitly.
        (QQ.poly_ring(x, y, order=grlex), "QQ[x, y, order=grlex]",
            u"ℚ[x, y, order=grlex]"),
        (QQ.poly_ring(x, y, order=ilex), "QQ[x, y, order=ilex]",
            u"ℚ[x, y, order=ilex]"),
    ]
    for domain, expected_ascii, expected_unicode in cases:
        assert pretty(domain) == expected_ascii
        assert upretty(domain) == expected_unicode
def test_pretty_prec():
    """The full_prec setting controls how many digits a Float prints;
    'auto' keeps full precision only for a bare Float."""
    third = S("0.3")
    assert xpretty(third, full_prec=True, wrap_line=False) == "0.300000000000000"
    assert xpretty(third, full_prec="auto", wrap_line=False) == "0.300000000000000"
    assert xpretty(third, full_prec=False, wrap_line=False) == "0.3"
    # Inside a product the Float may print on either side of 'x', and
    # 'auto' drops down to short precision.
    for setting, digits in [(True, "0.300000000000000"),
                            ("auto", "0.3"),
                            (False, "0.3")]:
        rendered = xpretty(third*x, full_prec=setting, use_unicode=False,
                           wrap_line=False)
        assert rendered in [digits + "*x", "x*" + digits]
def test_pprint():
    """pprint writes to stdout; capture it and check the ASCII output."""
    import sys
    from sympy.core.compatibility import StringIO
    capture = StringIO()
    saved_stdout = sys.stdout
    sys.stdout = capture
    try:
        pprint(pi, use_unicode=False, wrap_line=False)
    finally:
        # Restore stdout even if pprint raises.
        sys.stdout = saved_stdout
    assert capture.getvalue() == 'pi\n'
def test_pretty_class():
    """Test that the printer dispatcher correctly handles classes."""
    class OldStyle:
        pass  # old-style class has no .__class__ and this was causing problems
    class NewStyle(object):
        pass
    # Classes fall through to their plain str() representation.
    for cls in (OldStyle, NewStyle):
        assert pretty(cls) == str(cls)
def test_pretty_no_wrap_line():
    """Line wrapping is on by default and disabled by wrap_line=False."""
    wide_expr = 0
    for i in range(20):
        wide_expr += i*sin(i + x)
    # A wide expression wraps by default ...
    assert '\n' in xpretty(wide_expr)
    # ... but stays on one line when wrapping is turned off.
    assert '\n' not in xpretty(wide_expr, wrap_line=False)
def test_settings():
    """An unrecognized printing method must raise TypeError."""
    four = S(4)
    raises(TypeError, lambda: pretty(four, method="garbage"))
def test_pretty_sum():
from sympy.abc import x, a, b, k, m, n
expr = Sum(k**k, (k, 0, n))
ascii_str = \
"""\
n \n\
___ \n\
\\ ` \n\
\\ k\n\
/ k \n\
/__, \n\
k = 0 \
"""
ucode_str = \
u("""\
n \n\
___ \n\
╲ \n\
╲ k\n\
╱ k \n\
╱ \n\
‾‾‾ \n\
k = 0 \
""")
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = Sum(k**k, (k, oo, n))
ascii_str = \
"""\
n \n\
___ \n\
\\ ` \n\
\\ k\n\
/ k \n\
/__, \n\
k = oo \
"""
ucode_str = \
u("""\
n \n\
___ \n\
╲ \n\
╲ k\n\
╱ k \n\
╱ \n\
‾‾‾ \n\
k = ∞ \
""")
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = Sum(k**(Integral(x**n, (x, -oo, oo))), (k, 0, n**n))
ascii_str = \
"""\
n \n\
n \n\
______ \n\
\\ ` \n\
\\ oo \n\
\\ / \n\
\\ | \n\
\\ | n \n\
) | x dx\n\
/ | \n\
/ / \n\
/ -oo \n\
/ k \n\
/_____, \n\
k = 0 \
"""
ucode_str = \
u("""\
n \n\
n \n\
______ \n\
╲ \n\
╲ ∞ \n\
╲ ⌠ \n\
╲ ⎮ n \n\
╲ ⎮ x dx\n\
╱ ⌡ \n\
╱ -∞ \n\
╱ k \n\
╱ \n\
╱ \n\
‾‾‾‾‾‾ \n\
k = 0 \
""")
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = Sum(k**(
Integral(x**n, (x, -oo, oo))), (k, 0, Integral(x**x, (x, -oo, oo))))
ascii_str = \
"""\
oo \n\
/ \n\
| \n\
| x \n\
| x dx \n\
| \n\
/ \n\
-oo \n\
______ \n\
\\ ` \n\
\\ oo \n\
\\ / \n\
\\ | \n\
\\ | n \n\
) | x dx\n\
/ | \n\
/ / \n\
/ -oo \n\
/ k \n\
/_____, \n\
k = 0 \
"""
ucode_str = \
u("""\
∞ \n\
⌠ \n\
⎮ x \n\
⎮ x dx \n\
⌡ \n\
-∞ \n\
______ \n\
╲ \n\
╲ ∞ \n\
╲ ⌠ \n\
╲ ⎮ n \n\
╲ ⎮ x dx\n\
╱ ⌡ \n\
╱ -∞ \n\
╱ k \n\
╱ \n\
╱ \n\
‾‾‾‾‾‾ \n\
k = 0 \
""")
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = Sum(k**(Integral(x**n, (x, -oo, oo))), (
k, x + n + x**2 + n**2 + (x/n) + (1/x), Integral(x**x, (x, -oo, oo))))
ascii_str = \
"""\
oo \n\
/ \n\
| \n\
| x \n\
| x dx \n\
| \n\
/ \n\
-oo \n\
______ \n\
\\ ` \n\
\\ oo \n\
\\ / \n\
\\ | \n\
\\ | n \n\
) | x dx\n\
/ | \n\
/ / \n\
/ -oo \n\
/ k \n\
/_____, \n\
2 2 1 x \n\
k = n + n + x + x + - + - \n\
x n \
"""
ucode_str = \
u("""\
∞ \n\
⌠ \n\
⎮ x \n\
⎮ x dx \n\
⌡ \n\
-∞ \n\
______ \n\
╲ \n\
╲ ∞ \n\
╲ ⌠ \n\
╲ ⎮ n \n\
╲ ⎮ x dx\n\
╱ ⌡ \n\
╱ -∞ \n\
╱ k \n\
╱ \n\
╱ \n\
‾‾‾‾‾‾ \n\
2 2 1 x \n\
k = n + n + x + x + ─ + ─ \n\
x n \
""")
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = Sum(k**(
Integral(x**n, (x, -oo, oo))), (k, 0, x + n + x**2 + n**2 + (x/n) + (1/x)))
ascii_str = \
"""\
2 2 1 x \n\
n + n + x + x + - + - \n\
x n \n\
______ \n\
\\ ` \n\
\\ oo \n\
\\ / \n\
\\ | \n\
\\ | n \n\
) | x dx\n\
/ | \n\
/ / \n\
/ -oo \n\
/ k \n\
/_____, \n\
k = 0 \
"""
ucode_str = \
u("""\
2 2 1 x \n\
n + n + x + x + ─ + ─ \n\
x n \n\
______ \n\
╲ \n\
╲ ∞ \n\
╲ ⌠ \n\
╲ ⎮ n \n\
╲ ⎮ x dx\n\
╱ ⌡ \n\
╱ -∞ \n\
╱ k \n\
╱ \n\
╱ \n\
‾‾‾‾‾‾ \n\
k = 0 \
""")
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = Sum(x, (x, 0, oo))
ascii_str = \
"""\
oo \n\
__ \n\
\\ ` \n\
) x\n\
/_, \n\
x = 0 \
"""
ucode_str = \
u("""\
∞ \n\
___ \n\
╲ \n\
╲ x\n\
╱ \n\
╱ \n\
‾‾‾ \n\
x = 0 \
""")
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = Sum(x**2, (x, 0, oo))
ascii_str = \
u("""\
oo \n\
___ \n\
\\ ` \n\
\\ 2\n\
/ x \n\
/__, \n\
x = 0 \
""")
ucode_str = \
u("""\
∞ \n\
___ \n\
╲ \n\
╲ 2\n\
╱ x \n\
╱ \n\
‾‾‾ \n\
x = 0 \
""")
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = Sum(x/2, (x, 0, oo))
ascii_str = \
"""\
oo \n\
___ \n\
\\ ` \n\
\\ x\n\
) -\n\
/ 2\n\
/__, \n\
x = 0 \
"""
ucode_str = \
u("""\
∞ \n\
____ \n\
╲ \n\
╲ x\n\
╲ ─\n\
╱ 2\n\
╱ \n\
╱ \n\
‾‾‾‾ \n\
x = 0 \
""")
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = Sum(x**3/2, (x, 0, oo))
ascii_str = \
"""\
oo \n\
____ \n\
\\ ` \n\
\\ 3\n\
\\ x \n\
/ --\n\
/ 2 \n\
/___, \n\
x = 0 \
"""
ucode_str = \
u("""\
∞ \n\
____ \n\
╲ \n\
╲ 3\n\
╲ x \n\
╱ ──\n\
╱ 2 \n\
╱ \n\
‾‾‾‾ \n\
x = 0 \
""")
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = Sum((x**3*y**(x/2))**n, (x, 0, oo))
ascii_str = \
"""\
oo \n\
____ \n\
\\ ` \n\
\\ n\n\
\\ / x\\ \n\
) | -| \n\
/ | 3 2| \n\
/ \\x *y / \n\
/___, \n\
x = 0 \
"""
ucode_str = \
u("""\
∞ \n\
_____ \n\
╲ \n\
╲ n\n\
╲ ⎛ x⎞ \n\
╲ ⎜ ─⎟ \n\
╱ ⎜ 3 2⎟ \n\
╱ ⎝x ⋅y ⎠ \n\
╱ \n\
╱ \n\
‾‾‾‾‾ \n\
x = 0 \
""")
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = Sum(1/x**2, (x, 0, oo))
ascii_str = \
"""\
oo \n\
____ \n\
\\ ` \n\
\\ 1 \n\
\\ --\n\
/ 2\n\
/ x \n\
/___, \n\
x = 0 \
"""
ucode_str = \
u("""\
∞ \n\
____ \n\
╲ \n\
╲ 1 \n\
╲ ──\n\
╱ 2\n\
╱ x \n\
╱ \n\
‾‾‾‾ \n\
x = 0 \
""")
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = Sum(1/y**(a/b), (x, 0, oo))
ascii_str = \
"""\
oo \n\
____ \n\
\\ ` \n\
\\ -a \n\
\\ ---\n\
/ b \n\
/ y \n\
/___, \n\
x = 0 \
"""
ucode_str = \
u("""\
∞ \n\
____ \n\
╲ \n\
╲ -a \n\
╲ ───\n\
╱ b \n\
╱ y \n\
╱ \n\
‾‾‾‾ \n\
x = 0 \
""")
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = Sum(1/y**(a/b), (x, 0, oo), (y, 1, 2))
ascii_str = \
"""\
2 oo \n\
____ ____ \n\
\\ ` \\ ` \n\
\\ \\ -a\n\
\\ \\ --\n\
/ / b \n\
/ / y \n\
/___, /___, \n\
y = 1 x = 0 \
"""
ucode_str = \
u("""\
2 ∞ \n\
____ ____ \n\
╲ ╲ \n\
╲ ╲ -a\n\
╲ ╲ ──\n\
╱ ╱ b \n\
╱ ╱ y \n\
╱ ╱ \n\
‾‾‾‾ ‾‾‾‾ \n\
y = 1 x = 0 \
""")
expr = Sum(1/(1 + 1/(
1 + 1/k)) + 1, (k, 111, 1 + 1/n), (k, 1/(1 + m), oo)) + 1/(1 + 1/k)
ascii_str = \
"""\
1 \n\
1 + - \n\
oo n \n\
_____ _____ \n\
\\ ` \\ ` \n\
\\ \\ / 1 \\ \n\
\\ \\ |1 + ---------| \n\
\\ \\ | 1 | 1 \n\
) ) | 1 + -----| + -----\n\
/ / | 1| 1\n\
/ / | 1 + -| 1 + -\n\
/ / \\ k/ k\n\
/____, /____, \n\
1 k = 111 \n\
k = ----- \n\
m + 1 \
"""
ucode_str = \
u("""\
1 \n\
1 + ─ \n\
∞ n \n\
______ ______ \n\
╲ ╲ \n\
╲ ╲ ⎛ 1 ⎞ \n\
╲ ╲ ⎜1 + ─────────⎟ \n\
╲ ╲ ⎜ 1 ⎟ \n\
╲ ╲ ⎜ 1 + ─────⎟ 1 \n\
╱ ╱ ⎜ 1⎟ + ─────\n\
╱ ╱ ⎜ 1 + ─⎟ 1\n\
╱ ╱ ⎝ k⎠ 1 + ─\n\
╱ ╱ k\n\
╱ ╱ \n\
‾‾‾‾‾‾ ‾‾‾‾‾‾ \n\
1 k = 111 \n\
k = ───── \n\
m + 1 \
""")
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
def test_units():
    """Pretty printing of physical units (joule) and unit conversion to SI base units."""
    expr = joule
    ascii_str = \
"""\
2\n\
kilogram*meter \n\
---------------\n\
2 \n\
second \
"""
    unicode_str = \
u("""\
2\n\
kilogram⋅meter \n\
───────────────\n\
2 \n\
second \
""")
    from sympy.physics.units import kg, m, s
    # The unit itself prints by name; only the converted form is 2-D art.
    assert upretty(expr) == u("joule")
    assert pretty(expr) == "joule"
    assert upretty(expr.convert_to(kg*m**2/s**2)) == unicode_str
    assert pretty(expr.convert_to(kg*m**2/s**2)) == ascii_str
def test_pretty_Subs():
f = Function('f')
expr = Subs(f(x), x, ph**2)
ascii_str = \
"""\
(f(x))| 2\n\
|x=phi \
"""
unicode_str = \
u("""\
(f(x))│ 2\n\
│x=φ \
""")
assert pretty(expr) == ascii_str
assert upretty(expr) == unicode_str
expr = Subs(f(x).diff(x), x, 0)
ascii_str = \
"""\
/d \\| \n\
|--(f(x))|| \n\
\\dx /|x=0\
"""
unicode_str = \
u("""\
⎛d ⎞│ \n\
⎜──(f(x))⎟│ \n\
⎝dx ⎠│x=0\
""")
assert pretty(expr) == ascii_str
assert upretty(expr) == unicode_str
expr = Subs(f(x).diff(x)/y, (x, y), (0, Rational(1, 2)))
ascii_str = \
"""\
/d \\| \n\
|--(f(x))|| \n\
|dx || \n\
|--------|| \n\
\\ y /|x=0, y=1/2\
"""
unicode_str = \
u("""\
⎛d ⎞│ \n\
⎜──(f(x))⎟│ \n\
⎜dx ⎟│ \n\
⎜────────⎟│ \n\
⎝ y ⎠│x=0, y=1/2\
""")
assert pretty(expr) == ascii_str
assert upretty(expr) == unicode_str
def test_gammas():
    """Pretty printing of gamma functions: lower/upper incomplete and plain gamma."""
    from sympy import gamma
    assert upretty(lowergamma(x, y)) == u"γ(x, y)"
    assert upretty(uppergamma(x, y)) == u"Γ(x, y)"
    assert xpretty(gamma(x), use_unicode=True) == u'Γ(x)'
    # A user-defined Function named 'gamma' prints as lowercase γ.
    assert xpretty(symbols('gamma', cls=Function)(x), use_unicode=True) == u'γ(x)'
def test_SingularityFunction():
assert xpretty(SingularityFunction(x, 0, n), use_unicode=True) == (
"""\
n\n\
<x> \
""")
assert xpretty(SingularityFunction(x, 1, n), use_unicode=True) == (
"""\
n\n\
<x - 1> \
""")
assert xpretty(SingularityFunction(x, -1, n), use_unicode=True) == (
"""\
n\n\
<x + 1> \
""")
assert xpretty(SingularityFunction(x, a, n), use_unicode=True) == (
"""\
n\n\
<-a + x> \
""")
assert xpretty(SingularityFunction(x, y, n), use_unicode=True) == (
"""\
n\n\
<x - y> \
""")
assert xpretty(SingularityFunction(x, 0, n), use_unicode=False) == (
"""\
n\n\
<x> \
""")
assert xpretty(SingularityFunction(x, 1, n), use_unicode=False) == (
"""\
n\n\
<x - 1> \
""")
assert xpretty(SingularityFunction(x, -1, n), use_unicode=False) == (
"""\
n\n\
<x + 1> \
""")
assert xpretty(SingularityFunction(x, a, n), use_unicode=False) == (
"""\
n\n\
<-a + x> \
""")
assert xpretty(SingularityFunction(x, y, n), use_unicode=False) == (
"""\
n\n\
<x - y> \
""")
def test_deltas():
    """Pretty printing of the Dirac delta and its derivatives."""
    assert xpretty(DiracDelta(x), use_unicode=True) == u'δ(x)'
    # The derivative order is printed as a superscript above δ.
    assert xpretty(DiracDelta(x, 1), use_unicode=True) == \
u("""\
(1) \n\
δ (x)\
""")
def test_hyper():
expr = hyper((), (), z)
ucode_str = \
u("""\
┌─ ⎛ │ ⎞\n\
├─ ⎜ │ z⎟\n\
0╵ 0 ⎝ │ ⎠\
""")
ascii_str = \
"""\
_ \n\
|_ / | \\\n\
| | | z|\n\
0 0 \\ | /\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = hyper((), (1,), x)
ucode_str = \
u("""\
┌─ ⎛ │ ⎞\n\
├─ ⎜ │ x⎟\n\
0╵ 1 ⎝1 │ ⎠\
""")
ascii_str = \
"""\
_ \n\
|_ / | \\\n\
| | | x|\n\
0 1 \\1 | /\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = hyper([2], [1], x)
ucode_str = \
u("""\
┌─ ⎛2 │ ⎞\n\
├─ ⎜ │ x⎟\n\
1╵ 1 ⎝1 │ ⎠\
""")
ascii_str = \
"""\
_ \n\
|_ /2 | \\\n\
| | | x|\n\
1 1 \\1 | /\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = hyper((pi/3, -2*k), (3, 4, 5, -3), x)
ucode_str = \
u("""\
⎛ π │ ⎞\n\
┌─ ⎜ ─, -2⋅k │ ⎟\n\
├─ ⎜ 3 │ x⎟\n\
2╵ 4 ⎜ │ ⎟\n\
⎝3, 4, 5, -3 │ ⎠\
""")
ascii_str = \
"""\
\n\
_ / pi | \\\n\
|_ | --, -2*k | |\n\
| | 3 | x|\n\
2 4 | | |\n\
\\3, 4, 5, -3 | /\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = hyper((pi, S('2/3'), -2*k), (3, 4, 5, -3), x**2)
ucode_str = \
u("""\
┌─ ⎛π, 2/3, -2⋅k │ 2⎞\n\
├─ ⎜ │ x ⎟\n\
3╵ 4 ⎝3, 4, 5, -3 │ ⎠\
""")
ascii_str = \
"""\
_ \n\
|_ /pi, 2/3, -2*k | 2\\\n\
| | | x |\n\
3 4 \\ 3, 4, 5, -3 | /\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = hyper([1, 2], [3, 4], 1/(1/(1/(1/x + 1) + 1) + 1))
ucode_str = \
u("""\
⎛ │ 1 ⎞\n\
⎜ │ ─────────────⎟\n\
⎜ │ 1 ⎟\n\
┌─ ⎜1, 2 │ 1 + ─────────⎟\n\
├─ ⎜ │ 1 ⎟\n\
2╵ 2 ⎜3, 4 │ 1 + ─────⎟\n\
⎜ │ 1⎟\n\
⎜ │ 1 + ─⎟\n\
⎝ │ x⎠\
""")
ascii_str = \
"""\
\n\
/ | 1 \\\n\
| | -------------|\n\
_ | | 1 |\n\
|_ |1, 2 | 1 + ---------|\n\
| | | 1 |\n\
2 2 |3, 4 | 1 + -----|\n\
| | 1|\n\
| | 1 + -|\n\
\\ | x/\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
def test_meijerg():
expr = meijerg([pi, pi, x], [1], [0, 1], [1, 2, 3], z)
ucode_str = \
u("""\
╭─╮2, 3 ⎛π, π, x 1 │ ⎞\n\
│╶┐ ⎜ │ z⎟\n\
╰─╯4, 5 ⎝ 0, 1 1, 2, 3 │ ⎠\
""")
ascii_str = \
"""\
__2, 3 /pi, pi, x 1 | \\\n\
/__ | | z|\n\
\\_|4, 5 \\ 0, 1 1, 2, 3 | /\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = meijerg([1, pi/7], [2, pi, 5], [], [], z**2)
ucode_str = \
u("""\
⎛ π │ ⎞\n\
╭─╮0, 2 ⎜1, ─ 2, π, 5 │ 2⎟\n\
│╶┐ ⎜ 7 │ z ⎟\n\
╰─╯5, 0 ⎜ │ ⎟\n\
⎝ │ ⎠\
""")
ascii_str = \
"""\
/ pi | \\\n\
__0, 2 |1, -- 2, pi, 5 | 2|\n\
/__ | 7 | z |\n\
\\_|5, 0 | | |\n\
\\ | /\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
ucode_str = \
u("""\
╭─╮ 1, 10 ⎛1, 1, 1, 1, 1, 1, 1, 1, 1, 1 1 │ ⎞\n\
│╶┐ ⎜ │ z⎟\n\
╰─╯11, 2 ⎝ 1 1 │ ⎠\
""")
ascii_str = \
"""\
__ 1, 10 /1, 1, 1, 1, 1, 1, 1, 1, 1, 1 1 | \\\n\
/__ | | z|\n\
\\_|11, 2 \\ 1 1 | /\
"""
expr = meijerg([1]*10, [1], [1], [1], z)
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = meijerg([1, 2, ], [4, 3], [3], [4, 5], 1/(1/(1/(1/x + 1) + 1) + 1))
ucode_str = \
u("""\
⎛ │ 1 ⎞\n\
⎜ │ ─────────────⎟\n\
⎜ │ 1 ⎟\n\
╭─╮1, 2 ⎜1, 2 4, 3 │ 1 + ─────────⎟\n\
│╶┐ ⎜ │ 1 ⎟\n\
╰─╯4, 3 ⎜ 3 4, 5 │ 1 + ─────⎟\n\
⎜ │ 1⎟\n\
⎜ │ 1 + ─⎟\n\
⎝ │ x⎠\
""")
ascii_str = \
"""\
/ | 1 \\\n\
| | -------------|\n\
| | 1 |\n\
__1, 2 |1, 2 4, 3 | 1 + ---------|\n\
/__ | | 1 |\n\
\\_|4, 3 | 3 4, 5 | 1 + -----|\n\
| | 1|\n\
| | 1 + -|\n\
\\ | x/\
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = Integral(expr, x)
ucode_str = \
u("""\
⌠ \n\
⎮ ⎛ │ 1 ⎞ \n\
⎮ ⎜ │ ─────────────⎟ \n\
⎮ ⎜ │ 1 ⎟ \n\
⎮ ╭─╮1, 2 ⎜1, 2 4, 3 │ 1 + ─────────⎟ \n\
⎮ │╶┐ ⎜ │ 1 ⎟ dx\n\
⎮ ╰─╯4, 3 ⎜ 3 4, 5 │ 1 + ─────⎟ \n\
⎮ ⎜ │ 1⎟ \n\
⎮ ⎜ │ 1 + ─⎟ \n\
⎮ ⎝ │ x⎠ \n\
⌡ \
""")
ascii_str = \
"""\
/ \n\
| \n\
| / | 1 \\ \n\
| | | -------------| \n\
| | | 1 | \n\
| __1, 2 |1, 2 4, 3 | 1 + ---------| \n\
| /__ | | 1 | dx\n\
| \\_|4, 3 | 3 4, 5 | 1 + -----| \n\
| | | 1| \n\
| | | 1 + -| \n\
| \\ | x/ \n\
| \n\
/ \
"""
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
def test_noncommutative():
A, B, C = symbols('A,B,C', commutative=False)
expr = A*B*C**-1
ascii_str = \
"""\
-1\n\
A*B*C \
"""
ucode_str = \
u("""\
-1\n\
A⋅B⋅C \
""")
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = C**-1*A*B
ascii_str = \
"""\
-1 \n\
C *A*B\
"""
ucode_str = \
u("""\
-1 \n\
C ⋅A⋅B\
""")
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = A*C**-1*B
ascii_str = \
"""\
-1 \n\
A*C *B\
"""
ucode_str = \
u("""\
-1 \n\
A⋅C ⋅B\
""")
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
expr = A*C**-1*B/x
ascii_str = \
"""\
-1 \n\
A*C *B\n\
-------\n\
x \
"""
ucode_str = \
u("""\
-1 \n\
A⋅C ⋅B\n\
───────\n\
x \
""")
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
def test_pretty_special_functions():
    """Pretty printing of special functions (atan2 with radical arguments)."""
    x, y = symbols("x y")
    # atan2
    expr = atan2(y/sqrt(200), sqrt(x))
    ascii_str = \
"""\
/ ___ \\\n\
|\\/ 2 *y ___|\n\
atan2|-------, \\/ x |\n\
\\ 20 /\
"""
    ucode_str = \
u("""\
⎛√2⋅y ⎞\n\
atan2⎜────, √x⎟\n\
⎝ 20 ⎠\
""")
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_pretty_geometry():
    """Pretty printing of geometry entities (Segment, Ray)."""
    e = Segment((0, 1), (0, 2))
    assert pretty(e) == 'Segment2D(Point2D(0, 1), Point2D(0, 2))'
    # A Ray given by an angle is converted to a second point on the ray.
    e = Ray((1, 1), angle=4.02*pi)
    assert pretty(e) == 'Ray2D(Point2D(1, 1), Point2D(2, tan(pi/50) + 1))'
def test_expint():
    """Pretty printing of exponential-integral family: Ei, expint, Shi/Si/Ci/Chi."""
    expr = Ei(x)
    string = 'Ei(x)'
    assert pretty(expr) == string
    assert upretty(expr) == string
    expr = expint(1, z)
    # Unicode gets the E with a subscript; ASCII falls back to the call form.
    ucode_str = u"E₁(z)"
    ascii_str = "expint(1, z)"
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
    assert pretty(Shi(x)) == 'Shi(x)'
    assert pretty(Si(x)) == 'Si(x)'
    assert pretty(Ci(x)) == 'Ci(x)'
    assert pretty(Chi(x)) == 'Chi(x)'
    assert upretty(Shi(x)) == 'Shi(x)'
    assert upretty(Si(x)) == 'Si(x)'
    assert upretty(Ci(x)) == 'Ci(x)'
    assert upretty(Chi(x)) == 'Chi(x)'
def test_elliptic_functions():
ascii_str = \
"""\
/ 1 \\\n\
K|-----|\n\
\\z + 1/\
"""
ucode_str = \
u("""\
⎛ 1 ⎞\n\
K⎜─────⎟\n\
⎝z + 1⎠\
""")
expr = elliptic_k(1/(z + 1))
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
ascii_str = \
"""\
/ | 1 \\\n\
F|1|-----|\n\
\\ |z + 1/\
"""
ucode_str = \
u("""\
⎛ │ 1 ⎞\n\
F⎜1│─────⎟\n\
⎝ │z + 1⎠\
""")
expr = elliptic_f(1, 1/(1 + z))
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
ascii_str = \
"""\
/ 1 \\\n\
E|-----|\n\
\\z + 1/\
"""
ucode_str = \
u("""\
⎛ 1 ⎞\n\
E⎜─────⎟\n\
⎝z + 1⎠\
""")
expr = elliptic_e(1/(z + 1))
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
ascii_str = \
"""\
/ | 1 \\\n\
E|1|-----|\n\
\\ |z + 1/\
"""
ucode_str = \
u("""\
⎛ │ 1 ⎞\n\
E⎜1│─────⎟\n\
⎝ │z + 1⎠\
""")
expr = elliptic_e(1, 1/(1 + z))
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
ascii_str = \
"""\
/ |4\\\n\
Pi|3|-|\n\
\\ |x/\
"""
ucode_str = \
u("""\
⎛ │4⎞\n\
Π⎜3│─⎟\n\
⎝ │x⎠\
""")
expr = elliptic_pi(3, 4/x)
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
ascii_str = \
"""\
/ 4| \\\n\
Pi|3; -|6|\n\
\\ x| /\
"""
ucode_str = \
u("""\
⎛ 4│ ⎞\n\
Π⎜3; ─│6⎟\n\
⎝ x│ ⎠\
""")
expr = elliptic_pi(3, 4/x, 6)
assert pretty(expr) == ascii_str
assert upretty(expr) == ucode_str
def test_RandomDomain():
    """Pretty printing of stats random-variable domains (conditions as ∧/∨ chains)."""
    from sympy.stats import Normal, Die, Exponential, pspace, where
    X = Normal('x1', 0, 1)
    assert upretty(where(X > 0)) == u"Domain: 0 < x₁ ∧ x₁ < ∞"
    D = Die('d1', 6)
    assert upretty(where(D > 4)) == u'Domain: d₁ = 5 ∨ d₁ = 6'
    A = Exponential('a', 1)
    B = Exponential('b', 1)
    # Joint pspace domain combines both variables' constraints.
    assert upretty(pspace(Tuple(A, B)).domain) == \
        u'Domain: 0 ≤ a ∧ 0 ≤ b ∧ a < ∞ ∧ b < ∞'
def test_PrettyPoly():
    """Pretty printing of fraction-field and polynomial-ring elements."""
    F = QQ.frac_field(x, y)
    R = QQ.poly_ring(x, y)
    expr = F.convert(x/(x + y))
    assert pretty(expr) == "x/(x + y)"
    assert upretty(expr) == u"x/(x + y)"
    expr = R.convert(x + y)
    assert pretty(expr) == "x + y"
    assert upretty(expr) == u"x + y"
def test_issue_6285():
    """Regression: unevaluated Pow with negative/irrational exponents."""
    assert pretty(Pow(2, -5, evaluate=False)) == '1 \n--\n 5\n2 '
    # 1/pi exponent renders as a pi-th root radical.
    assert pretty(Pow(x, (1/pi))) == 'pi___\n\\/ x '
def test_issue_6359():
assert pretty(Integral(x**2, x)**2) == \
"""\
2
/ / \\ \n\
| | | \n\
| | 2 | \n\
| | x dx| \n\
| | | \n\
\\/ / \
"""
assert upretty(Integral(x**2, x)**2) == \
u("""\
2
⎛⌠ ⎞ \n\
⎜⎮ 2 ⎟ \n\
⎜⎮ x dx⎟ \n\
⎝⌡ ⎠ \
""")
assert pretty(Sum(x**2, (x, 0, 1))**2) == \
"""\
2
/ 1 \\ \n\
| ___ | \n\
| \\ ` | \n\
| \\ 2| \n\
| / x | \n\
| /__, | \n\
\\x = 0 / \
"""
assert upretty(Sum(x**2, (x, 0, 1))**2) == \
u("""\
2
⎛ 1 ⎞ \n\
⎜ ___ ⎟ \n\
⎜ ╲ ⎟ \n\
⎜ ╲ 2⎟ \n\
⎜ ╱ x ⎟ \n\
⎜ ╱ ⎟ \n\
⎜ ‾‾‾ ⎟ \n\
⎝x = 0 ⎠ \
""")
assert pretty(Product(x**2, (x, 1, 2))**2) == \
"""\
2
/ 2 \\ \n\
|______ | \n\
|| | 2| \n\
|| | x | \n\
|| | | \n\
\\x = 1 / \
"""
assert upretty(Product(x**2, (x, 1, 2))**2) == \
u("""\
2
⎛ 2 ⎞ \n\
⎜┬────┬ ⎟ \n\
⎜│ │ 2⎟ \n\
⎜│ │ x ⎟ \n\
⎜│ │ ⎟ \n\
⎝x = 1 ⎠ \
""")
f = Function('f')
assert pretty(Derivative(f(x), x)**2) == \
"""\
2
/d \\ \n\
|--(f(x))| \n\
\\dx / \
"""
assert upretty(Derivative(f(x), x)**2) == \
u("""\
2
⎛d ⎞ \n\
⎜──(f(x))⎟ \n\
⎝dx ⎠ \
""")
def test_issue_6739():
    """Regression: pretty printing of 1/sqrt(x) (radical in a denominator)."""
    ascii_str = \
"""\
1 \n\
-----\n\
___\n\
\\/ x \
"""
    # Unicode uses the compact √x form instead of the drawn radical.
    ucode_str = \
u("""\
1 \n\
──\n\
√x\
""")
    assert pretty(1/sqrt(x)) == ascii_str
    assert upretty(1/sqrt(x)) == ucode_str
def test_complicated_symbol_unchanged():
    """Symbols with embedded underscores/carets must print verbatim, not as sub/superscripts."""
    for symb_name in ["dexpr2_d1tau", "dexpr2^d1tau"]:
        assert pretty(Symbol(symb_name)) == symb_name
def test_categories():
from sympy.categories import (Object, IdentityMorphism,
NamedMorphism, Category, Diagram, DiagramGrid)
A1 = Object("A1")
A2 = Object("A2")
A3 = Object("A3")
f1 = NamedMorphism(A1, A2, "f1")
f2 = NamedMorphism(A2, A3, "f2")
id_A1 = IdentityMorphism(A1)
K1 = Category("K1")
assert pretty(A1) == "A1"
assert upretty(A1) == u"A₁"
assert pretty(f1) == "f1:A1-->A2"
assert upretty(f1) == u"f₁:A₁——▶A₂"
assert pretty(id_A1) == "id:A1-->A1"
assert upretty(id_A1) == u"id:A₁——▶A₁"
assert pretty(f2*f1) == "f2*f1:A1-->A3"
assert upretty(f2*f1) == u"f₂∘f₁:A₁——▶A₃"
assert pretty(K1) == "K1"
assert upretty(K1) == u"K₁"
# Test how diagrams are printed.
d = Diagram()
assert pretty(d) == "EmptySet()"
assert upretty(d) == u"∅"
d = Diagram({f1: "unique", f2: S.EmptySet})
assert pretty(d) == "{f2*f1:A1-->A3: EmptySet(), id:A1-->A1: " \
"EmptySet(), id:A2-->A2: EmptySet(), id:A3-->A3: " \
"EmptySet(), f1:A1-->A2: {unique}, f2:A2-->A3: EmptySet()}"
assert upretty(d) == u("{f₂∘f₁:A₁——▶A₃: ∅, id:A₁——▶A₁: ∅, " \
"id:A₂——▶A₂: ∅, id:A₃——▶A₃: ∅, f₁:A₁——▶A₂: {unique}, f₂:A₂——▶A₃: ∅}")
d = Diagram({f1: "unique", f2: S.EmptySet}, {f2 * f1: "unique"})
assert pretty(d) == "{f2*f1:A1-->A3: EmptySet(), id:A1-->A1: " \
"EmptySet(), id:A2-->A2: EmptySet(), id:A3-->A3: " \
"EmptySet(), f1:A1-->A2: {unique}, f2:A2-->A3: EmptySet()}" \
" ==> {f2*f1:A1-->A3: {unique}}"
assert upretty(d) == u("{f₂∘f₁:A₁——▶A₃: ∅, id:A₁——▶A₁: ∅, id:A₂——▶A₂: " \
"∅, id:A₃——▶A₃: ∅, f₁:A₁——▶A₂: {unique}, f₂:A₂——▶A₃: ∅}" \
" ══▶ {f₂∘f₁:A₁——▶A₃: {unique}}")
grid = DiagramGrid(d)
assert pretty(grid) == "A1 A2\n \nA3 "
assert upretty(grid) == u"A₁ A₂\n \nA₃ "
def test_PrettyModules():
    """Pretty printing of free modules, submodules, ideals and quotient modules."""
    R = QQ.old_poly_ring(x, y)
    F = R.free_module(2)
    M = F.submodule([x, y], [1, x**2])
    ucode_str = \
u("""\
2\n\
ℚ[x, y] \
""")
    ascii_str = \
"""\
2\n\
QQ[x, y] \
"""
    assert upretty(F) == ucode_str
    assert pretty(F) == ascii_str
    ucode_str = \
u("""\
╱ ⎡ 2⎤╲\n\
╲[x, y], ⎣1, x ⎦╱\
""")
    ascii_str = \
"""\
2 \n\
<[x, y], [1, x ]>\
"""
    assert upretty(M) == ucode_str
    assert pretty(M) == ascii_str
    I = R.ideal(x**2, y)
    ucode_str = \
u("""\
╱ 2 ╲\n\
╲x , y╱\
""")
    ascii_str = \
"""\
2 \n\
<x , y>\
"""
    assert upretty(I) == ucode_str
    assert pretty(I) == ascii_str
    Q = F / M
    ucode_str = \
u("""\
2 \n\
ℚ[x, y] \n\
─────────────────\n\
╱ ⎡ 2⎤╲\n\
╲[x, y], ⎣1, x ⎦╱\
""")
    ascii_str = \
"""\
2 \n\
QQ[x, y] \n\
-----------------\n\
2 \n\
<[x, y], [1, x ]>\
"""
    assert upretty(Q) == ucode_str
    assert pretty(Q) == ascii_str
    ucode_str = \
u("""\
╱⎡ 3⎤ ╲\n\
│⎢ x ⎥ ╱ ⎡ 2⎤╲ ╱ ⎡ 2⎤╲│\n\
│⎢1, ──⎥ + ╲[x, y], ⎣1, x ⎦╱, [2, y] + ╲[x, y], ⎣1, x ⎦╱│\n\
╲⎣ 2 ⎦ ╱\
""")
    ascii_str = \
"""\
3 \n\
x 2 2 \n\
<[1, --] + <[x, y], [1, x ]>, [2, y] + <[x, y], [1, x ]>>\n\
2 \
"""
    # BUG FIX: these last two expected strings were built but never checked,
    # so the quotient-submodule case was dead code. Assert them (the printed
    # object is the submodule of the quotient module Q, as in upstream SymPy).
    assert upretty(Q.submodule([1, x**3/2], [2, y])) == ucode_str
    assert pretty(Q.submodule([1, x**3/2], [2, y])) == ascii_str
def test_QuotientRing():
R = QQ.old_poly_ring(x)/[x**2 + 1]
ucode_str = \
u("""\
ℚ[x] \n\
────────\n\
╱ 2 ╲\n\
╲x + 1╱\
""")
ascii_str = \
"""\
QQ[x] \n\
--------\n\
2 \n\
<x + 1>\
"""
assert upretty(R) == ucode_str
assert pretty(R) == ascii_str
ucode_str = \
u("""\
╱ 2 ╲\n\
1 + ╲x + 1╱\
""")
ascii_str = \
"""\
2 \n\
1 + <x + 1>\
"""
assert upretty(R.one) == ucode_str
assert pretty(R.one) == ascii_str
def test_Homomorphism():
from sympy.polys.agca import homomorphism
R = QQ.old_poly_ring(x)
expr = homomorphism(R.free_module(1), R.free_module(1), [0])
ucode_str = \
u("""\
1 1\n\
[0] : ℚ[x] ──> ℚ[x] \
""")
ascii_str = \
"""\
1 1\n\
[0] : QQ[x] --> QQ[x] \
"""
assert upretty(expr) == ucode_str
assert pretty(expr) == ascii_str
expr = homomorphism(R.free_module(2), R.free_module(2), [0, 0])
ucode_str = \
u("""\
⎡0 0⎤ 2 2\n\
⎢ ⎥ : ℚ[x] ──> ℚ[x] \n\
⎣0 0⎦ \
""")
ascii_str = \
"""\
[0 0] 2 2\n\
[ ] : QQ[x] --> QQ[x] \n\
[0 0] \
"""
assert upretty(expr) == ucode_str
assert pretty(expr) == ascii_str
expr = homomorphism(R.free_module(1), R.free_module(1) / [[x]], [0])
ucode_str = \
u("""\
1\n\
1 ℚ[x] \n\
[0] : ℚ[x] ──> ─────\n\
<[x]>\
""")
ascii_str = \
"""\
1\n\
1 QQ[x] \n\
[0] : QQ[x] --> ------\n\
<[x]> \
"""
assert upretty(expr) == ucode_str
assert pretty(expr) == ascii_str
def test_Tr():
    """Pretty printing of the quantum trace operator over noncommutative symbols."""
    A, B = symbols('A B', commutative=False)
    t = Tr(A*B)
    assert pretty(t) == r'Tr(A*B)'
    assert upretty(t) == u'Tr(A⋅B)'
def test_pretty_Add():
    """An unevaluated Mul inside an Add must keep its parentheses."""
    eq = Mul(-2, x - 2, evaluate=False) + 5
    assert pretty(eq) == '-2*(x - 2) + 5'
def test_issue_7179():
    """Negated Equivalent/Implies print with the slashed Unicode operators."""
    assert upretty(Not(Equivalent(x, y))) == u'x ≢ y'
    assert upretty(Not(Implies(x, y))) == u'x ↛ y'
def test_issue_7180():
    """Equivalent prints with the ≡ operator in Unicode mode."""
    assert upretty(Equivalent(x, y)) == u'x ≡ y'
def test_pretty_Complement():
    """Set complements print with '\\\\' in ASCII and the blackboard letters in Unicode."""
    assert pretty(S.Reals - S.Naturals) == '(-oo, oo) \\ S.Naturals'
    assert upretty(S.Reals - S.Naturals) == u'ℝ \\ ℕ'
    assert pretty(S.Reals - S.Naturals0) == '(-oo, oo) \\ S.Naturals0'
    assert upretty(S.Reals - S.Naturals0) == u'ℝ \\ ℕ₀'
def test_pretty_SymmetricDifference():
    """SymmetricDifference has a Unicode (∆) form only; ASCII raises NotImplementedError."""
    from sympy import SymmetricDifference, Interval
    from sympy.utilities.pytest import raises
    assert upretty(SymmetricDifference(Interval(2,3), Interval(3,5), \
        evaluate = False)) == u'[2, 3] ∆ [3, 5]'
    with raises(NotImplementedError):
        pretty(SymmetricDifference(Interval(2,3), Interval(3,5), evaluate = False))
def test_pretty_Contains():
    """Contains prints as a call in ASCII and as x ∈ ℤ in Unicode."""
    assert pretty(Contains(x, S.Integers)) == 'Contains(x, S.Integers)'
    assert upretty(Contains(x, S.Integers)) == u'x ∈ ℤ'
def test_issue_8292():
from sympy.core import sympify
e = sympify('((x+x**4)/(x-1))-(2*(x-1)**4/(x-1)**4)', evaluate=False)
ucode_str = \
u("""\
4 4 \n\
2⋅(x - 1) x + x\n\
- ────────── + ──────\n\
4 x - 1 \n\
(x - 1) \
""")
ascii_str = \
"""\
4 4 \n\
2*(x - 1) x + x\n\
- ---------- + ------\n\
4 x - 1 \n\
(x - 1) \
"""
assert pretty(e) == ascii_str
assert upretty(e) == ucode_str
def test_issue_4335():
    """Regression: minus sign placement in front of a Derivative."""
    expr = -y(x).diff(x)
    ucode_str = \
u("""\
d \n\
-──(y(x))\n\
dx \
""")
    ascii_str = \
"""\
d \n\
- --(y(x))\n\
dx \
"""
    assert pretty(expr) == ascii_str
    assert upretty(expr) == ucode_str
def test_issue_8344():
    """Regression: unevaluated expression keeps the literal 1**2 denominator."""
    from sympy.core import sympify
    e = sympify('2*x*y**2/1**2 + 1', evaluate=False)
    ucode_str = \
u("""\
2 \n\
2⋅x⋅y \n\
────── + 1\n\
2 \n\
1 \
""")
    assert upretty(e) == ucode_str
def test_issue_6324():
    """Regression: unevaluated Pow*Pow products (2**3 / 10**2) stay unevaluated."""
    x = Pow(2, 3, evaluate=False)
    y = Pow(10, -2, evaluate=False)
    e = Mul(x, y, evaluate=False)
    ucode_str = \
u("""\
3\n\
2 \n\
───\n\
2\n\
10 \
""")
    assert upretty(e) == ucode_str
def test_issue_7927():
e = sin(x/2)**cos(x/2)
ucode_str = \
u("""\
⎛x⎞\n\
cos⎜─⎟\n\
⎝2⎠\n\
⎛ ⎛x⎞⎞ \n\
⎜sin⎜─⎟⎟ \n\
⎝ ⎝2⎠⎠ \
""")
assert upretty(e) == ucode_str
e = sin(x)**(S(11)/13)
ucode_str = \
u("""\
11\n\
──\n\
13\n\
(sin(x)) \
""")
assert upretty(e) == ucode_str
def test_issue_6134():
    """Regression: ordering/layout of Integral terms multiplied by symbols."""
    from sympy.abc import lamda, phi, t
    e = lamda*x*Integral(phi(t)*pi*sin(pi*t), (t, 0, 1)) + lamda*x**2*Integral(phi(t)*2*pi*sin(2*pi*t), (t, 0, 1))
    ucode_str = \
u("""\
1 1 \n\
2 ⌠ ⌠ \n\
λ⋅x ⋅⎮ 2⋅π⋅φ(t)⋅sin(2⋅π⋅t) dt + λ⋅x⋅⎮ π⋅φ(t)⋅sin(π⋅t) dt\n\
⌡ ⌡ \n\
0 0 \
""")
    assert upretty(e) == ucode_str
def test_issue_9877():
    """Regression: parenthesization inside Union/Intersection with Complement."""
    ucode_str1 = u'(2, 3) ∪ ([1, 2] \\ {x})'
    a, b, c = Interval(2, 3, True, True), Interval(1, 2), FiniteSet(x)
    assert upretty(Union(a, Complement(b, c))) == ucode_str1
    ucode_str2 = u'{x} ∩ {y} ∩ ({z} \\ [1, 2])'
    d, e, f, g = FiniteSet(x), FiniteSet(y), FiniteSet(z), Interval(1, 2)
    assert upretty(Intersection(d, e, Complement(f, g))) == ucode_str2
def test_pretty_primenu():
    """primenu (number of distinct prime factors) prints as nu(n) / ν(n)."""
    from sympy.ntheory.factor_ import primenu
    ascii_str1 = "nu(n)"
    ucode_str1 = u("ν(n)")
    n = symbols('n', integer=True)
    assert pretty(primenu(n)) == ascii_str1
    assert upretty(primenu(n)) == ucode_str1
def test_pretty_primeomega():
    """primeomega (number of prime factors with multiplicity) prints as Omega(n) / Ω(n)."""
    from sympy.ntheory.factor_ import primeomega
    ascii_str1 = "Omega(n)"
    ucode_str1 = u("Ω(n)")
    n = symbols('n', integer=True)
    assert pretty(primeomega(n)) == ascii_str1
    assert upretty(primeomega(n)) == ucode_str1
def test_pretty_Mod():
    """Pretty printing of Mod, including parenthesization inside sums and products."""
    from sympy.core import Mod
    ascii_str1 = "x mod 7"
    ucode_str1 = u("x mod 7")
    ascii_str2 = "(x + 1) mod 7"
    ucode_str2 = u("(x + 1) mod 7")
    ascii_str3 = "2*x mod 7"
    ucode_str3 = u("2⋅x mod 7")
    ascii_str4 = "(x mod 7) + 1"
    ucode_str4 = u("(x mod 7) + 1")
    ascii_str5 = "2*(x mod 7)"
    ucode_str5 = u("2⋅(x mod 7)")
    x = symbols('x', integer=True)
    assert pretty(Mod(x, 7)) == ascii_str1
    assert upretty(Mod(x, 7)) == ucode_str1
    assert pretty(Mod(x + 1, 7)) == ascii_str2
    assert upretty(Mod(x + 1, 7)) == ucode_str2
    assert pretty(Mod(2 * x, 7)) == ascii_str3
    assert upretty(Mod(2 * x, 7)) == ucode_str3
    assert pretty(Mod(x, 7) + 1) == ascii_str4
    assert upretty(Mod(x, 7) + 1) == ucode_str4
    assert pretty(2 * Mod(x, 7)) == ascii_str5
    assert upretty(2 * Mod(x, 7)) == ucode_str5
def test_issue_11801():
    """Regression: the empty-name Symbol prints as the empty string without error."""
    assert pretty(Symbol("")) == ""
    assert upretty(Symbol("")) == ""
def test_pretty_UnevaluatedExpr():
x = symbols('x')
he = UnevaluatedExpr(1/x)
ucode_str = \
u("""\
1\n\
─\n\
x\
""")
assert upretty(he) == ucode_str
ucode_str = \
u("""\
2\n\
⎛1⎞ \n\
⎜─⎟ \n\
⎝x⎠ \
""")
assert upretty(he**2) == ucode_str
ucode_str = \
u("""\
1\n\
1 + ─\n\
x\
""")
assert upretty(he + 1) == ucode_str
ucode_str = \
u('''\
1\n\
x⋅─\n\
x\
''')
assert upretty(x*he) == ucode_str
def test_issue_10472():
    """Regression: a tuple of matrices pretty-prints both side by side."""
    M = (Matrix([[0, 0], [0, 0]]), Matrix([0, 0]))
    ucode_str = \
u("""\
⎛⎡0 0⎤ ⎡0⎤⎞
⎜⎢ ⎥, ⎢ ⎥⎟
⎝⎣0 0⎦ ⎣0⎦⎠\
""")
    assert upretty(M) == ucode_str
def test_MatrixElement_printing():
    """Pretty printing of MatrixSymbol elements (issue #11821)."""
    # test cases for issue #11821
    A = MatrixSymbol("A", 1, 3)
    B = MatrixSymbol("B", 1, 3)
    C = MatrixSymbol("C", 1, 3)
    ascii_str1 = "A_00"
    ucode_str1 = u("A₀₀")
    assert pretty(A[0, 0]) == ascii_str1
    assert upretty(A[0, 0]) == ucode_str1
    ascii_str1 = "3*A_00"
    ucode_str1 = u("3⋅A₀₀")
    assert pretty(3*A[0, 0]) == ascii_str1
    assert upretty(3*A[0, 0]) == ucode_str1
    # Element of a compound matrix expression keeps the bracketed index form.
    ascii_str1 = "(-B + A)[0, 0]"
    ucode_str1 = u("(-B + A)[0, 0]")
    F = C[0, 0].subs(C, A - B)
    assert pretty(F) == ascii_str1
    assert upretty(F) == ucode_str1
def test_vector_expr_pretty_printing():
    """Pretty printing of vector calculus operators (Cross, Curl, Divergence, Dot, Gradient)."""
    A = CoordSys3D('A')
    assert upretty(Cross(A.i, A.x*A.i+3*A.y*A.j)) == u("(A_i)×((A_x) A_i + (3⋅A_y) A_j)")
    assert upretty(x*Cross(A.i, A.j)) == u('x⋅(A_i)×(A_j)')
    assert upretty(Curl(A.x*A.i + 3*A.y*A.j)) == u("∇×((A_x) A_i + (3⋅A_y) A_j)")
    assert upretty(Divergence(A.x*A.i + 3*A.y*A.j)) == u("∇⋅((A_x) A_i + (3⋅A_y) A_j)")
    assert upretty(Dot(A.i, A.x*A.i+3*A.y*A.j)) == u("(A_i)⋅((A_x) A_i + (3⋅A_y) A_j)")
    assert upretty(Gradient(A.x+3*A.y)) == u("∇⋅(A_x + 3⋅A_y)")
    # TODO: add support for ASCII pretty.
| 20.28002 | 120 | 0.414374 |
1d3bd5f1087f429858b843befbe890c4e51bc805 | 139,305 | py | Python | mrcnn/modeldepthv2.py | akeaveny/Mask_RCNN | b898286e3bebb4af16cc2dfc6f1167eeadd4292f | [
"MIT"
] | null | null | null | mrcnn/modeldepthv2.py | akeaveny/Mask_RCNN | b898286e3bebb4af16cc2dfc6f1167eeadd4292f | [
"MIT"
] | null | null | null | mrcnn/modeldepthv2.py | akeaveny/Mask_RCNN | b898286e3bebb4af16cc2dfc6f1167eeadd4292f | [
"MIT"
] | null | null | null |
import os
import random
import datetime
import re
import math
import logging
from collections import OrderedDict
import multiprocessing
import numpy as np
import skimage.transform
import tensorflow as tf
import keras
import keras.backend as K
import keras.layers as KL
import keras.engine as KE
import keras.models as KM
from mrcnn import utils
# Requires TensorFlow 1.3+ and Keras 2.0.8+.
from distutils.version import LooseVersion
assert LooseVersion(tf.__version__) >= LooseVersion("1.3")
assert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')
############################################################
# Utility Functions
############################################################
def log(text, array=None):
    """Print a text message, optionally followed by Numpy array statistics.

    Args:
        text: Message to print.
        array: Optional Numpy array; if given, its shape, min, max, and
            dtype are appended to the message.

    BUG FIX: the original built ``array.min() if array.size else ""`` and
    passed the result to a ``{:10.5f}`` float format spec, which raises
    ValueError for empty arrays. Min/max are now formatted only when the
    array is non-empty.
    """
    if array is not None:
        text = text.ljust(25)
        if array.size:
            min_str = "{:10.5f}".format(array.min())
            max_str = "{:10.5f}".format(array.max())
        else:
            # Empty array: nothing to summarize; keep column alignment.
            min_str = max_str = " " * 10
        text += "shape: {:20} min: {} max: {} {}".format(
            str(array.shape), min_str, max_str, array.dtype)
    print(text)
class BatchNorm(KL.BatchNormalization):
    """Extends the Keras BatchNormalization class to allow a central place
    to make changes if needed.

    Batch normalization has a negative effect on training if batches are small
    so this layer is often frozen (via setting in Config class) and functions
    as a linear layer.
    """
    def call(self, inputs, training=None):
        """
        Note about training values:
            None: Train BN layers. This is the normal mode
            False: Freeze BN layers. Good when batch size is small
            True: (don't use). Set layer in training mode even when inferencing
        """
        # NOTE(review): super(self.__class__, self) recurses infinitely if
        # this class is ever subclassed further; super(BatchNorm, self)
        # would be safer — confirm before changing.
        return super(self.__class__, self).call(inputs, training=training)
def compute_backbone_shapes(config, image_shape):
    """Compute the (height, width) of each backbone stage's feature map.

    config: object exposing BACKBONE and BACKBONE_STRIDES.
    image_shape: input image shape; only the first two entries are used.

    Returns:
        [N, (height, width)] array, one row per stage.
    """
    # Currently supports ResNet only
    assert config.BACKBONE in ["resnet50", "resnet101"]
    img_h, img_w = image_shape[0], image_shape[1]
    stage_shapes = []
    for stride in config.BACKBONE_STRIDES:
        stage_shapes.append(
            [int(math.ceil(img_h / stride)), int(math.ceil(img_w / stride))])
    return np.array(stage_shapes)
############################################################
# Resnet Graph
############################################################
# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
def identity_block(input_tensor, kernel_size, filters, stage, block,
                   use_bias=True, train_bn=True, lType=''):
    """The identity_block is the block that has no conv layer at shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layers at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
        lType: suffix appended to layer names (used to keep the RGB and depth
            backbone branches' layer names distinct — names must stay unique
            for weight loading)
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'+lType
    bn_name_base = 'bn' + str(stage) + block + '_branch'+lType
    # 1x1 bottleneck reduce.
    x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
                  use_bias=use_bias)(input_tensor)
    x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    # kxk main conv.
    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    # 1x1 bottleneck expand.
    x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
                  use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)
    # Residual connection: input added unchanged (hence "identity" block).
    x = KL.Add()([x, input_tensor])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out'+lType)(x)
    return x
def conv_block(input_tensor, kernel_size, filters, stage, block,
               strides=(2, 2), use_bias=True, train_bn=True, lType=''):
    """conv_block is the block that has a conv layer at shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layers at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
        lType: suffix appended to layer names (keeps the RGB/depth branches'
            layer names distinct for weight loading)

    Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
    And the shortcut should have subsample=(2,2) as well
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'+lType
    bn_name_base = 'bn' + str(stage) + block + '_branch'+lType
    # 1x1 reduce; carries the stride so spatial downsampling happens here.
    x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
                  name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
    x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +
                  '2c', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)
    # Projection shortcut: 1x1 conv matches channels/stride of the main path.
    shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
                         name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
    shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)
    x = KL.Add()([x, shortcut])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out'+lType)(x)
    return x
def resnet_graph(input_image, architecture, stage5=False, train_bn=True, lType=''):
    """Build a ResNet graph.

    architecture: Can be resnet50 or resnet101
    stage5: Boolean. If False, stage5 of the network is not created
    train_bn: Boolean. Train or freeze Batch Norm layers
    lType: suffix appended to every layer name (distinguishes parallel
        backbone branches, e.g. RGB vs depth)

    Returns [C1, C2, C3, C4, C5]: the output tensor of each stage
    (C5 is None when stage5 is False).
    """
    assert architecture in ["resnet50", "resnet101"]
    # Stage 1
    x = KL.ZeroPadding2D((3, 3))(input_image)
    x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1'+lType, use_bias=True)(x)
    x = BatchNorm(name='bn_conv1'+lType)(x, training=train_bn)
    x = KL.Activation('relu')(x)
    C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
    # Stage 2
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn, lType=lType)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn, lType=lType)
    C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn, lType=lType)
    # Stage 3
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn, lType=lType)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn, lType=lType)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn, lType=lType)
    C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn, lType=lType)
    # Stage 4
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn, lType=lType)
    # ResNet-50 and ResNet-101 differ only in the number of stage-4 blocks.
    block_count = {"resnet50": 5, "resnet101": 22}[architecture]
    for i in range(block_count):
        # chr(98 + i) yields block labels 'b', 'c', 'd', ...
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn, lType=lType)
    C4 = x
    # Stage 5
    if stage5:
        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn, lType=lType)
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn, lType=lType)
        C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn, lType=lType)
    else:
        C5 = None
    return [C1, C2, C3, C4, C5]
############################################################
# Proposal Layer
############################################################
def apply_box_deltas_graph(boxes, deltas):
    """Shift and scale boxes by the given refinement deltas.
    boxes: [N, (y1, x1, y2, x2)] boxes to update
    deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply
    """
    # Switch to center/size representation.
    h = boxes[:, 2] - boxes[:, 0]
    w = boxes[:, 3] - boxes[:, 1]
    cy = boxes[:, 0] + 0.5 * h
    cx = boxes[:, 1] + 0.5 * w
    # Shift the center and rescale the sides.
    cy += deltas[:, 0] * h
    cx += deltas[:, 1] * w
    h *= tf.exp(deltas[:, 2])
    w *= tf.exp(deltas[:, 3])
    # Back to corner representation.
    y1 = cy - 0.5 * h
    x1 = cx - 0.5 * w
    return tf.stack([y1, x1, y1 + h, x1 + w],
                    axis=1, name="apply_box_deltas_out")
def clip_boxes_graph(boxes, window):
    """Clamp box coordinates so every box lies inside the window.
    boxes: [N, (y1, x1, y2, x2)]
    window: [4] in the form y1, x1, y2, x2
    """
    wy1, wx1, wy2, wx2 = tf.split(window, 4)
    y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)
    # Clamp each coordinate into the window's range on its axis.
    clipped = tf.concat(
        [tf.maximum(tf.minimum(y1, wy2), wy1),
         tf.maximum(tf.minimum(x1, wx2), wx1),
         tf.maximum(tf.minimum(y2, wy2), wy1),
         tf.maximum(tf.minimum(x2, wx2), wx1)],
        axis=1, name="clipped_boxes")
    clipped.set_shape((clipped.shape[0], 4))
    return clipped
class ProposalLayer(KE.Layer):
    """Picks the top region proposals to pass to the second stage.
    Anchors are ranked by foreground score, refined by the predicted
    box deltas, clipped to the unit window, and pruned with
    non-max suppression.
    Inputs:
        rpn_probs: [batch, anchors, (bg prob, fg prob)]
        rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
        anchors: [batch, (y1, x1, y2, x2)] anchors in normalized coordinates
    Returns:
        Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
    """

    def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):
        super(ProposalLayer, self).__init__(**kwargs)
        self.config = config
        self.proposal_count = proposal_count
        self.nms_threshold = nms_threshold

    def call(self, inputs):
        # Foreground confidence of every anchor. [batch, num_rois]
        scores = inputs[0][:, :, 1]
        # Denormalize the predicted deltas. [batch, num_rois, 4]
        deltas = inputs[1] * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
        anchors = inputs[2]

        # Trim to the best-scoring anchors so the remaining steps run
        # on a much smaller subset.
        pre_nms_limit = tf.minimum(6000, tf.shape(anchors)[1])
        ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,
                         name="top_anchors").indices
        scores = utils.batch_slice([scores, ix],
                                   lambda s, i: tf.gather(s, i),
                                   self.config.IMAGES_PER_GPU)
        deltas = utils.batch_slice([deltas, ix],
                                   lambda d, i: tf.gather(d, i),
                                   self.config.IMAGES_PER_GPU)
        pre_nms_anchors = utils.batch_slice(
            [anchors, ix], lambda a, i: tf.gather(a, i),
            self.config.IMAGES_PER_GPU, names=["pre_nms_anchors"])

        # Refine the surviving anchors. [batch, N, (y1, x1, y2, x2)]
        boxes = utils.batch_slice(
            [pre_nms_anchors, deltas],
            lambda b, d: apply_box_deltas_graph(b, d),
            self.config.IMAGES_PER_GPU, names=["refined_anchors"])

        # Coordinates are normalized, so clip to the 0..1 window.
        window = np.array([0, 0, 1, 1], dtype=np.float32)
        boxes = utils.batch_slice(boxes,
                                  lambda b: clip_boxes_graph(b, window),
                                  self.config.IMAGES_PER_GPU,
                                  names=["refined_anchors_clipped"])

        # Small boxes are deliberately NOT filtered out here: per
        # Xinlei Chen's paper that hurts small-object detection.

        def nms(nms_boxes, nms_scores):
            """Run NMS on one image and pad the result to proposal_count."""
            keep = tf.image.non_max_suppression(
                nms_boxes, nms_scores, self.proposal_count,
                self.nms_threshold, name="rpn_non_max_suppression")
            picked = tf.gather(nms_boxes, keep)
            pad_rows = tf.maximum(self.proposal_count - tf.shape(picked)[0], 0)
            return tf.pad(picked, [(0, pad_rows), (0, 0)])

        proposals = utils.batch_slice([boxes, scores], nms,
                                      self.config.IMAGES_PER_GPU)
        return proposals

    def compute_output_shape(self, input_shape):
        return (None, self.proposal_count, 4)
############################################################
# ROIAlign Layer
############################################################
def log2_graph(x):
    """Log base 2. TF has no native log2 op, so divide natural logs."""
    ln2 = tf.log(2.0)
    return tf.log(x) / ln2
class PyramidROIAlign(KE.Layer):
    """Implements ROI Pooling on multiple levels of the feature pyramid.

    Each ROI is assigned to one pyramid level based on its area (FPN
    paper, Equation 1) and pooled from that level's feature map with
    tf.image.crop_and_resize.

    Params:
    - pool_shape: [height, width] of the output pooled regions. Usually [7, 7]

    Inputs:
    - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
             coordinates. Possibly padded with zeros if not enough
             boxes to fill the array.
    - image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    - Feature maps: List of feature maps from different levels of the pyramid.
                    Each is [batch, height, width, channels]

    Output:
    Pooled regions in the shape: [batch, num_boxes, height, width, channels].
    The width and height are those specified in the pool_shape in the layer
    constructor.
    """
    def __init__(self, pool_shape, **kwargs):
        super(PyramidROIAlign, self).__init__(**kwargs)
        # Stored as a tuple so compute_output_shape can concatenate it.
        self.pool_shape = tuple(pool_shape)
    def call(self, inputs):
        # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
        boxes = inputs[0]
        # Image meta
        # Holds details about the image. See compose_image_meta()
        image_meta = inputs[1]
        # Feature Maps. List of feature maps from different level of the
        # feature pyramid. Each is [batch, height, width, channels]
        feature_maps = inputs[2:]
        # Assign each ROI to a level in the pyramid based on the ROI area.
        y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)
        h = y2 - y1
        w = x2 - x1
        # Use shape of first image. Images in a batch must have the same size.
        image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]
        # Equation 1 in the Feature Pyramid Networks paper. Account for
        # the fact that our coordinates are normalized here.
        # e.g. a 224x224 ROI (in pixels) maps to P4
        image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)
        roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))
        # Clamp the level index into [2, 5] (P2..P5). Zero-padding boxes
        # have zero area, so they land on level 2.
        roi_level = tf.minimum(5, tf.maximum(
            2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
        roi_level = tf.squeeze(roi_level, 2)
        # Loop through levels and apply ROI pooling to each. P2 to P5.
        pooled = []
        box_to_level = []
        for i, level in enumerate(range(2, 6)):
            # ix rows are (batch_index, box_index) pairs for this level.
            ix = tf.where(tf.equal(roi_level, level))
            level_boxes = tf.gather_nd(boxes, ix)
            # Box indicies for crop_and_resize.
            box_indices = tf.cast(ix[:, 0], tf.int32)
            # Keep track of which box is mapped to which level
            box_to_level.append(ix)
            # Stop gradient propogation to ROI proposals
            level_boxes = tf.stop_gradient(level_boxes)
            box_indices = tf.stop_gradient(box_indices)
            # Crop and Resize
            # From Mask R-CNN paper: "We sample four regular locations, so
            # that we can evaluate either max or average pooling. In fact,
            # interpolating only a single value at each bin center (without
            # pooling) is nearly as effective."
            #
            # Here we use the simplified approach of a single value per bin,
            # which is how it's done in tf.crop_and_resize()
            # Result: [batch * num_boxes, pool_height, pool_width, channels]
            pooled.append(tf.image.crop_and_resize(
                feature_maps[i], level_boxes, box_indices, self.pool_shape,
                method="bilinear"))
        # Pack pooled features into one tensor
        pooled = tf.concat(pooled, axis=0)
        # Pack box_to_level mapping into one array and add another
        # column representing the order of pooled boxes
        box_to_level = tf.concat(box_to_level, axis=0)
        box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)
        box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],
                                 axis=1)
        # Rearrange pooled features to match the order of the original boxes
        # Sort box_to_level by batch then box index
        # TF doesn't have a way to sort by two columns, so merge them and sort.
        # The 100000 multiplier assumes fewer than 100000 boxes per image so
        # the merged key sorts by batch first, then box index.
        sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]
        # top_k sorts descending; reversing the indices gives ascending order.
        ix = tf.nn.top_k(sorting_tensor, k=tf.shape(
            box_to_level)[0]).indices[::-1]
        ix = tf.gather(box_to_level[:, 2], ix)
        pooled = tf.gather(pooled, ix)
        # Re-add the batch dimension
        # NOTE(review): this prepends a single batch axis over ALL pooled
        # boxes — it appears to assume batch size 1 or that downstream
        # reshapes recover [batch, num_boxes, ...]; confirm with callers.
        pooled = tf.expand_dims(pooled, 0)
        return pooled
    def compute_output_shape(self, input_shape):
        # [batch, num_boxes] + pool_shape + [channels of the feature maps]
        return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )
############################################################
# Detection Target Layer
############################################################
def overlaps_graph(boxes1, boxes2):
    """Computes IoU overlaps between two sets of boxes.
    boxes1, boxes2: [N, (y1, x1, y2, x2)].
    Returns an overlap matrix of shape [len(boxes1), len(boxes2)].
    """
    # Pair every box in boxes1 with every box in boxes2 without loops.
    # TF has no np.repeat equivalent, so emulate it with tile + reshape
    # for boxes1, and plain tile for boxes2.
    rep1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),
                              [1, 1, tf.shape(boxes2)[0]]), [-1, 4])
    rep2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])
    # Intersection rectangle of each pair.
    a_y1, a_x1, a_y2, a_x2 = tf.split(rep1, 4, axis=1)
    b_y1, b_x1, b_y2, b_x2 = tf.split(rep2, 4, axis=1)
    inter_y1 = tf.maximum(a_y1, b_y1)
    inter_x1 = tf.maximum(a_x1, b_x1)
    inter_y2 = tf.minimum(a_y2, b_y2)
    inter_x2 = tf.minimum(a_x2, b_x2)
    intersection = tf.maximum(inter_x2 - inter_x1, 0) * \
        tf.maximum(inter_y2 - inter_y1, 0)
    # Union area of each pair.
    area1 = (a_y2 - a_y1) * (a_x2 - a_x1)
    area2 = (b_y2 - b_y1) * (b_x2 - b_x1)
    union = area1 + area2 - intersection
    # IoU, reshaped into the [boxes1, boxes2] overlap matrix.
    iou = intersection / union
    return tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])
def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):
    """Generates detection targets for one image. Subsamples proposals and
    generates target class IDs, bounding box deltas, and masks for each.

    Inputs:
    proposals: [N, (y1, x1, y2, x2)] in normalized coordinates. Might
               be zero padded if there are not enough proposals.
    gt_class_ids: [MAX_GT_INSTANCES] int class IDs
    gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.
    gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.
    config: model config; reads TRAIN_ROIS_PER_IMAGE, ROI_POSITIVE_RATIO,
            BBOX_STD_DEV, USE_MINI_MASK, MASK_SHAPE.

    Returns: Target ROIs and corresponding class IDs, bounding box shifts,
    and masks.
    rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates
    class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.
    deltas: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
            Class-specific bbox refinements.
    masks: [TRAIN_ROIS_PER_IMAGE, height, width). Masks cropped to bbox
           boundaries and resized to neural network output size.

    Note: Returned arrays might be zero padded if not enough target ROIs.
    """
    # Assert that there is at least one proposal before proceeding.
    asserts = [
        tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],
                  name="roi_assertion"),
    ]
    with tf.control_dependencies(asserts):
        proposals = tf.identity(proposals)
    # Remove zero padding from proposals and GT, keeping class IDs and
    # masks aligned with the surviving GT boxes.
    proposals, _ = trim_zeros_graph(proposals, name="trim_proposals")
    gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes")
    gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,
                                   name="trim_gt_class_ids")
    gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,
                         name="trim_gt_masks")
    # Handle COCO crowds
    # A crowd box in COCO is a bounding box around several instances. Exclude
    # them from training. A crowd box is given a negative class ID.
    crowd_ix = tf.where(gt_class_ids < 0)[:, 0]
    non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]
    crowd_boxes = tf.gather(gt_boxes, crowd_ix)
    crowd_masks = tf.gather(gt_masks, crowd_ix, axis=2)
    gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)
    gt_boxes = tf.gather(gt_boxes, non_crowd_ix)
    gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)
    # Compute overlaps matrix [proposals, gt_boxes]
    overlaps = overlaps_graph(proposals, gt_boxes)
    # Compute overlaps with crowd boxes [anchors, crowds]; a proposal is
    # "crowd-free" if it barely touches any crowd box.
    crowd_overlaps = overlaps_graph(proposals, crowd_boxes)
    crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)
    no_crowd_bool = (crowd_iou_max < 0.001)
    # Determine positive and negative ROIs
    roi_iou_max = tf.reduce_max(overlaps, axis=1)
    # 1. Positive ROIs are those with >= 0.5 IoU with a GT box
    positive_roi_bool = (roi_iou_max >= 0.5)
    positive_indices = tf.where(positive_roi_bool)[:, 0]
    # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.
    negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]
    # Subsample ROIs. Aim for ROI_POSITIVE_RATIO positive (typically 33%).
    # Positive ROIs
    positive_count = int(config.TRAIN_ROIS_PER_IMAGE *
                         config.ROI_POSITIVE_RATIO)
    positive_indices = tf.random_shuffle(positive_indices)[:positive_count]
    positive_count = tf.shape(positive_indices)[0]
    # Negative ROIs. Add enough to maintain positive:negative ratio.
    r = 1.0 / config.ROI_POSITIVE_RATIO
    negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count
    negative_indices = tf.random_shuffle(negative_indices)[:negative_count]
    # Gather selected ROIs
    positive_rois = tf.gather(proposals, positive_indices)
    negative_rois = tf.gather(proposals, negative_indices)
    # Assign positive ROIs to GT boxes (each ROI gets the GT it overlaps most).
    positive_overlaps = tf.gather(overlaps, positive_indices)
    roi_gt_box_assignment = tf.argmax(positive_overlaps, axis=1)
    roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)
    roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)
    # Compute bbox refinement targets for positive ROIs, normalized by
    # BBOX_STD_DEV to match the head's output scale.
    deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)
    deltas /= config.BBOX_STD_DEV
    # Assign positive ROIs to GT masks
    # Permute masks to [N, height, width, 1]
    transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)
    # Pick the right mask for each ROI
    roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)
    # Compute mask targets
    boxes = positive_rois
    if config.USE_MINI_MASK:
        # Transform ROI coordinates from normalized image space
        # to normalized mini-mask space (relative to the GT box).
        y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
        gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)
        gt_h = gt_y2 - gt_y1
        gt_w = gt_x2 - gt_x1
        y1 = (y1 - gt_y1) / gt_h
        x1 = (x1 - gt_x1) / gt_w
        y2 = (y2 - gt_y1) / gt_h
        x2 = (x2 - gt_x1) / gt_w
        boxes = tf.concat([y1, x1, y2, x2], 1)
    box_ids = tf.range(0, tf.shape(roi_masks)[0])
    masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,
                                     box_ids,
                                     config.MASK_SHAPE)
    # Remove the extra dimension from masks.
    masks = tf.squeeze(masks, axis=3)
    # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
    # binary cross entropy loss.
    masks = tf.round(masks)
    # Append negative ROIs and pad bbox deltas and masks that
    # are not used for negative ROIs with zeros.
    rois = tf.concat([positive_rois, negative_rois], axis=0)
    N = tf.shape(negative_rois)[0]
    P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)
    rois = tf.pad(rois, [(0, P), (0, 0)])
    roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])
    roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])
    deltas = tf.pad(deltas, [(0, N + P), (0, 0)])
    masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])
    return rois, roi_gt_class_ids, deltas, masks
class DetectionTargetLayer(KE.Layer):
    """Subsamples proposals and builds the training targets (class IDs,
    box refinements, and masks) for each selected ROI.
    Inputs:
    proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might
               be zero padded if there are not enough proposals.
    gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.
    gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized
              coordinates.
    gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type
    Returns: Target ROIs and corresponding class IDs, bounding box shifts,
    and masks.
    rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized
          coordinates
    target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, NUM_CLASSES,
                    (dy, dx, log(dh), log(dw), class_id)]
                   Class-specific bbox refinements.
    target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width)
                 Masks cropped to bbox boundaries and resized to neural
                 network output size.
    Note: Returned arrays might be zero padded if not enough target ROIs.
    """

    def __init__(self, config, **kwargs):
        super(DetectionTargetLayer, self).__init__(**kwargs)
        self.config = config

    def call(self, inputs):
        proposals, gt_class_ids, gt_boxes, gt_masks = inputs[:4]
        # Run the single-image target graph on each batch item.
        # TODO: Rename target_bbox to target_deltas for clarity
        names = ["rois", "target_class_ids", "target_bbox", "target_mask"]
        return utils.batch_slice(
            [proposals, gt_class_ids, gt_boxes, gt_masks],
            lambda p, c, b, m: detection_targets_graph(p, c, b, m, self.config),
            self.config.IMAGES_PER_GPU, names=names)

    def compute_output_shape(self, input_shape):
        mask_h = self.config.MASK_SHAPE[0]
        mask_w = self.config.MASK_SHAPE[1]
        return [
            (None, self.config.TRAIN_ROIS_PER_IMAGE, 4),  # rois
            (None, 1),  # class_ids
            (None, self.config.TRAIN_ROIS_PER_IMAGE, 4),  # deltas
            (None, self.config.TRAIN_ROIS_PER_IMAGE, mask_h, mask_w)  # masks
        ]

    def compute_mask(self, inputs, mask=None):
        return [None, None, None, None]
############################################################
# Detection Layer
############################################################
def refine_detections_graph(rois, probs, deltas, window, config):
    """Refine classified proposals and filter overlaps and return final
    detections.

    Inputs:
        rois: [N, (y1, x1, y2, x2)] in normalized coordinates
        probs: [N, num_classes]. Class probabilities.
        deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
                bounding box deltas.
        window: (y1, x1, y2, x2) in image coordinates. The part of the image
                that contains the image excluding the padding.
        config: reads BBOX_STD_DEV, DETECTION_MIN_CONFIDENCE,
                DETECTION_MAX_INSTANCES, DETECTION_NMS_THRESHOLD.

    Returns detections shaped: [N, (y1, x1, y2, x2, class_id, score)] where
        coordinates are normalized.
    """
    # Class IDs per ROI (argmax over class probabilities).
    class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)
    # Class probability of the top class of each ROI
    indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)
    class_scores = tf.gather_nd(probs, indices)
    # Class-specific bounding box deltas
    deltas_specific = tf.gather_nd(deltas, indices)
    # Apply bounding box deltas (denormalized by BBOX_STD_DEV)
    # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
    refined_rois = apply_box_deltas_graph(
        rois, deltas_specific * config.BBOX_STD_DEV)
    # Clip boxes to image window
    refined_rois = clip_boxes_graph(refined_rois, window)
    # TODO: Filter out boxes with zero area
    # Filter out background boxes (class ID 0 is background)
    keep = tf.where(class_ids > 0)[:, 0]
    # Filter out low confidence boxes. `keep` holds ROI indices throughout;
    # set_intersection over an expanded dim intersects index sets.
    if config.DETECTION_MIN_CONFIDENCE:
        conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]
        keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
                                        tf.expand_dims(conf_keep, 0))
        keep = tf.sparse_tensor_to_dense(keep)[0]
    # Apply per-class NMS
    # 1. Prepare variables
    pre_nms_class_ids = tf.gather(class_ids, keep)
    pre_nms_scores = tf.gather(class_scores, keep)
    pre_nms_rois = tf.gather(refined_rois, keep)
    unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]
    def nms_keep_map(class_id):
        """Apply Non-Maximum Suppression on ROIs of the given class.
        Returns indices into the original ROI set, padded with -1 to a
        fixed length so map_fn can stack the results."""
        # Indices of ROIs of the given class
        ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]
        # Apply NMS
        class_keep = tf.image.non_max_suppression(
                tf.gather(pre_nms_rois, ixs),
                tf.gather(pre_nms_scores, ixs),
                max_output_size=config.DETECTION_MAX_INSTANCES,
                iou_threshold=config.DETECTION_NMS_THRESHOLD)
        # Map indices back through ixs and keep to original ROI indices
        class_keep = tf.gather(keep, tf.gather(ixs, class_keep))
        # Pad with -1 so returned tensors have the same shape
        gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]
        class_keep = tf.pad(class_keep, [(0, gap)],
                            mode='CONSTANT', constant_values=-1)
        # Set shape so map_fn() can infer result shape
        class_keep.set_shape([config.DETECTION_MAX_INSTANCES])
        return class_keep
    # 2. Map over class IDs
    nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,
                         dtype=tf.int64)
    # 3. Merge results into one list, and remove -1 padding
    nms_keep = tf.reshape(nms_keep, [-1])
    nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])
    # 4. Compute intersection between keep and nms_keep
    keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
                                    tf.expand_dims(nms_keep, 0))
    keep = tf.sparse_tensor_to_dense(keep)[0]
    # Keep top detections by score, capped at DETECTION_MAX_INSTANCES
    roi_count = config.DETECTION_MAX_INSTANCES
    class_scores_keep = tf.gather(class_scores, keep)
    num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)
    top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]
    keep = tf.gather(keep, top_ids)
    # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]
    # Coordinates are normalized.
    detections = tf.concat([
        tf.gather(refined_rois, keep),
        tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],
        tf.gather(class_scores, keep)[..., tf.newaxis]
        ], axis=1)
    # Pad with zeros if detections < DETECTION_MAX_INSTANCES
    gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]
    detections = tf.pad(detections, [(0, gap), (0, 0)], "CONSTANT")
    return detections
class DetectionLayer(KE.Layer):
    """Converts classified proposal boxes plus their box deltas into the
    final detection boxes.
    Returns:
    [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where
    coordinates are normalized.
    """

    def __init__(self, config=None, **kwargs):
        super(DetectionLayer, self).__init__(**kwargs)
        self.config = config

    def call(self, inputs):
        rois, mrcnn_class, mrcnn_bbox, image_meta = inputs[:4]

        # The window is the padding-free region of the image in
        # normalized coordinates. Every image in the batch is resized to
        # the same shape, so the first image's shape is used to normalize.
        meta = parse_image_meta_graph(image_meta)
        image_shape = meta['image_shape'][0]
        window = norm_boxes_graph(meta['window'], image_shape[:2])

        # Refine detections independently for each item in the batch.
        detections_batch = utils.batch_slice(
            [rois, mrcnn_class, mrcnn_bbox, window],
            lambda r, p, d, w: refine_detections_graph(r, p, d, w, self.config),
            self.config.IMAGES_PER_GPU)

        # Reshape to [batch, num_detections,
        # (y1, x1, y2, x2, class_id, class_score)] in normalized coordinates.
        return tf.reshape(
            detections_batch,
            [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])

    def compute_output_shape(self, input_shape):
        return (None, self.config.DETECTION_MAX_INSTANCES, 6)
############################################################
# Region Proposal Network (RPN)
############################################################
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
    """Builds the computation graph of the Region Proposal Network.
    feature_map: backbone features [batch, height, width, depth]
    anchors_per_location: number of anchors per pixel in the feature map
    anchor_stride: Controls the density of anchors. Typically 1 (anchors for
                   every pixel in the feature map), or 2 (every other pixel).
    Returns:
        rpn_class_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)
        rpn_probs: [batch, H, W, 2] Anchor classifier probabilities.
        rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be
                  applied to anchors.
    """
    # TODO: check if stride of 2 causes alignment issues if the featuremap
    # is not even.
    # Shared convolutional base of the RPN
    shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
                       strides=anchor_stride,
                       name='rpn_conv_shared')(feature_map)

    # Anchor scores: [batch, height, width, anchors per location * 2],
    # reshaped to [batch, anchors, 2] and softmaxed over BG/FG.
    raw_scores = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
                           activation='linear', name='rpn_class_raw')(shared)
    rpn_class_logits = KL.Lambda(
        lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(raw_scores)
    rpn_probs = KL.Activation(
        "softmax", name="rpn_class_xxx")(rpn_class_logits)

    # Box refinements: [batch, H, W, anchors per location * 4] where the
    # last axis holds [x, y, log(w), log(h)], reshaped to [batch, anchors, 4].
    raw_bbox = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
                         activation='linear', name='rpn_bbox_pred')(shared)
    rpn_bbox = KL.Lambda(
        lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(raw_bbox)

    return [rpn_class_logits, rpn_probs, rpn_bbox]
def build_rpn_model(anchor_stride, anchors_per_location, depth):
    """Wraps the RPN graph in a Keras model so it can be applied to
    multiple feature maps with shared weights.
    anchors_per_location: number of anchors per pixel in the feature map
    anchor_stride: Controls the density of anchors. Typically 1 (anchors for
                   every pixel in the feature map), or 2 (every other pixel).
    depth: Depth of the backbone feature map.
    Returns a Keras Model object. The model outputs, when called, are:
    rpn_class_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)
    rpn_probs: [batch, H, W, 2] Anchor classifier probabilities.
    rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be
              applied to anchors.
    """
    feature_map_input = KL.Input(shape=[None, None, depth],
                                 name="input_rpn_feature_map")
    rpn_outputs = rpn_graph(feature_map_input, anchors_per_location,
                            anchor_stride)
    return KM.Model([feature_map_input], rpn_outputs, name="rpn_model")
############################################################
# Feature Pyramid Network Heads
############################################################
def fpn_classifier_graph(rois, feature_maps, depth_feature_maps, image_meta,
                         pool_size, num_classes, train_bn=True):
    """Builds the computation graph of the feature pyramid network classifier
    and regressor heads, fusing an RGB and a depth feature pyramid.

    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of RGB feature maps from different layers of the
                  pyramid, [P2, P3, P4, P5]. Each has a different resolution.
    depth_feature_maps: List of depth feature maps with the same layout as
                        feature_maps.
    image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes, which determines the depth of the results
    train_bn: Boolean. Train or freeze Batch Norm layers

    Returns:
        mrcnn_class_logits: [batch, num_rois, NUM_CLASSES] classifier logits
                            (before softmax)
        mrcnn_probs: [batch, num_rois, NUM_CLASSES] classifier probabilities
        mrcnn_bbox: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
                    Deltas to apply to proposal boxes
    """
    # ROI Pooling on the RGB pyramid.
    # Shape: [batch, num_rois, pool_height, pool_width, channels]
    x = PyramidROIAlign([pool_size, pool_size],
                        name="roi_align_classifier")([rois, image_meta] + feature_maps)
    # Two 1024 FC layers (implemented with Conv2D for consistency)
    x = KL.TimeDistributed(KL.Conv2D(1024, (pool_size, pool_size), padding="valid"),
                           name="mrcnn_class_conv1")(x)
    x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    x = KL.TimeDistributed(KL.Conv2D(1024, (1, 1)),
                           name="mrcnn_class_conv2")(x)
    x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    # Drop the two 1x1 spatial dims: [batch, num_rois, 1024]
    shared = KL.Lambda(lambda t: K.squeeze(K.squeeze(t, 3), 2),
                       name="pool_squeeze")(x)

    # Same head on the depth pyramid.
    x_depth = PyramidROIAlign([pool_size, pool_size],
                              name="roi_align_classifier_depth")([rois, image_meta] + depth_feature_maps)
    x_depth = KL.TimeDistributed(KL.Conv2D(1024, (pool_size, pool_size), padding="valid"),
                                 name="mrcnn_class_conv1_depth")(x_depth)
    x_depth = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1_depth')(x_depth, training=train_bn)
    x_depth = KL.Activation('relu')(x_depth)
    x_depth = KL.TimeDistributed(KL.Conv2D(1024, (1, 1)),
                                 name="mrcnn_class_conv2_depth")(x_depth)
    x_depth = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2_depth')(x_depth, training=train_bn)
    x_depth = KL.Activation('relu')(x_depth)
    # BUGFIX: this Lambda previously ignored its own argument and closed
    # over the outer `x_depth` tensor (lambda x: ... x_depth ...). That
    # computed the same value here, but it breaks the Lambda layer's
    # input->output tracing and serialization. Use the argument instead.
    shared_depth = KL.Lambda(lambda t: K.squeeze(K.squeeze(t, 3), 2),
                             name="pool_squeeze_depth")(x_depth)

    # Fuse the two modalities along the feature axis.
    shared_concatenated = keras.layers.concatenate([shared, shared_depth], axis=-1)

    # Classifier head
    mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
                                            name='mrcnn_class_logits')(shared_concatenated)
    mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
                                     name="mrcnn_class")(mrcnn_class_logits)

    # BBox head
    # [batch, num_rois, num_classes * (dy, dx, log(dh), log(dw))]
    x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
                           name='mrcnn_bbox_fc')(shared_concatenated)
    # Reshape to [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
    s = K.int_shape(x)
    mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(x)

    return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox
def build_fpn_mask_graph(rois, feature_maps, mrcnn_depth_feature_maps, image_meta,
                         pool_size, num_classes, train_bn=True):
    """Builds the computation graph of the mask head of Feature Pyramid Network,
    fusing an RGB branch and a depth branch before the final mask prediction.

    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of RGB feature maps from different layers of the
                  pyramid, [P2, P3, P4, P5]. Each has a different resolution.
    mrcnn_depth_feature_maps: List of depth feature maps, same layout.
    image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes, which determines the depth of the results
    train_bn: Boolean. Train or freeze Batch Norm layers

    Returns: Masks [batch, roi_count, height, width, num_classes]

    NOTE(review): the layer names appear swapped between the two branches —
    the RGB branch (fed `feature_maps`) uses "*_depth" suffixed names while
    the depth branch (fed `mrcnn_depth_feature_maps`) uses the plain names.
    Renaming would change the weight-name mapping, so confirm against any
    pretrained checkpoints before fixing.
    """
    ######################
    # RGB
    ######################
    # ROI Pooling
    # Shape: [batch, boxes, pool_height, pool_width, channels]
    x = PyramidROIAlign([pool_size, pool_size],
                        name="roi_align_mask_depth")([rois, image_meta] + feature_maps)
    # Four 3x3 conv + BN + ReLU stages on the RGB ROI features
    x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                           name="mrcnn_mask_conv1_depth")(x)
    x = KL.TimeDistributed(BatchNorm(),
                           name='mrcnn_mask_bn1_depth')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                           name="mrcnn_mask_conv2_depth")(x)
    x = KL.TimeDistributed(BatchNorm(),
                           name='mrcnn_mask_bn2_depth')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                           name="mrcnn_mask_conv3_depth")(x)
    x = KL.TimeDistributed(BatchNorm(),
                           name='mrcnn_mask_bn3_depth')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                           name="mrcnn_mask_conv4_depth")(x)
    x = KL.TimeDistributed(BatchNorm(),
                           name='mrcnn_mask_bn4_depth')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    # 2x upsampling of the RGB mask features
    x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
                           name="mrcnn_mask_deconv1_depth")(x)
    ### TODO: 56 x 56
    # x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
    #                        name="mrcnn_mask_deconv2_depth")(x)
    ######################
    # DEPTH
    ######################
    # ROI Pooling
    # Shape: [batch, boxes, pool_height, pool_width, channels]
    x_depth = PyramidROIAlign([pool_size, pool_size],
                              name="roi_align_mask")([rois, image_meta] + mrcnn_depth_feature_maps)
    # Same four-stage conv stack on the depth ROI features
    x_depth = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                                 name="mrcnn_mask_conv1")(x_depth)
    x_depth = KL.TimeDistributed(BatchNorm(),
                                 name='mrcnn_mask_bn1')(x_depth, training=train_bn)
    x_depth = KL.Activation('relu')(x_depth)
    x_depth = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                                 name="mrcnn_mask_conv2")(x_depth)
    x_depth = KL.TimeDistributed(BatchNorm(),
                                 name='mrcnn_mask_bn2')(x_depth, training=train_bn)
    x_depth = KL.Activation('relu')(x_depth)
    x_depth = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                                 name="mrcnn_mask_conv3")(x_depth)
    x_depth = KL.TimeDistributed(BatchNorm(),
                                 name='mrcnn_mask_bn3')(x_depth, training=train_bn)
    x_depth = KL.Activation('relu')(x_depth)
    x_depth = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                                 name="mrcnn_mask_conv4")(x_depth)
    x_depth = KL.TimeDistributed(BatchNorm(),
                                 name='mrcnn_mask_bn4')(x_depth, training=train_bn)
    x_depth = KL.Activation('relu')(x_depth)
    # 2x upsampling of the depth mask features
    x_depth = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
                                 name="mrcnn_mask_deconv1")(x_depth)
    ### TODO: 56 x 56
    # x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
    #                        name="mrcnn_mask_deconv2")(x)
    # Fuse the two branches along channels, then predict per-class
    # sigmoid masks with a 1x1 conv.
    x_shared = keras.layers.concatenate([x, x_depth], axis=-1)
    x_shared = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
                                  name="mrcnn_mask")(x_shared)
    return x_shared
############################################################
# Loss Functions
############################################################
def smooth_l1_loss(y_true, y_pred):
    """Smooth-L1 (Huber) loss.

    Quadratic for absolute errors below 1, linear above, which makes it
    less sensitive to outliers than plain L2. y_true and y_pred are
    typically [N, 4] box deltas, but any shape works.
    """
    abs_err = K.abs(y_true - y_pred)
    # 1.0 where the error is in the quadratic regime, 0.0 otherwise.
    is_small = K.cast(K.less(abs_err, 1.0), "float32")
    quadratic = 0.5 * abs_err ** 2
    linear = abs_err - 0.5
    return is_small * quadratic + (1 - is_small) * linear
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.

    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
        -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
    """
    # Drop the trailing singleton dimension: [batch, anchors].
    match = tf.squeeze(rpn_match, -1)
    # Convert the -1/+1 match values to 0/1 class labels.
    labels = K.cast(K.equal(match, 1), tf.int32)
    # Positive and negative anchors contribute to the loss; neutral
    # anchors (match value 0) are filtered out here.
    contributing = tf.where(K.not_equal(match, 0))
    logits = tf.gather_nd(rpn_class_logits, contributing)
    labels = tf.gather_nd(labels, contributing)
    # Cross-entropy over the selected anchors only.
    ce = K.sparse_categorical_crossentropy(target=labels,
                                           output=logits,
                                           from_logits=True)
    # Guard against an empty selection: mean of zero elements is NaN.
    return K.switch(tf.size(ce) > 0, K.mean(ce), tf.constant(0.0))
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
    """Return the RPN bounding box loss graph.

    config: the model config object.
    target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
        Uses 0 padding to fill in unsed bbox deltas.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
        -1=negative, 0=neutral anchor.
    rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
    """
    # Positive anchors contribute to the loss, but negative and
    # neutral anchors (match value of 0 or -1) don't.
    rpn_match = K.squeeze(rpn_match, -1)
    indices = tf.where(K.equal(rpn_match, 1))

    # Pick bbox deltas that contribute to the loss
    rpn_bbox = tf.gather_nd(rpn_bbox, indices)

    # Trim target bounding box deltas to the same length as rpn_bbox.
    batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
    target_bbox = batch_pack_graph(target_bbox, batch_counts,
                                   config.IMAGES_PER_GPU)

    # Delegate to the shared Smooth-L1 implementation instead of
    # duplicating the formula here (resolves the old TODO).
    loss = smooth_l1_loss(y_true=target_bbox, y_pred=rpn_bbox)

    # Guard against an empty selection: mean of zero elements is NaN.
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
                           active_class_ids):
    """Loss for the classifier head of Mask RCNN.

    target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
        padding to fill in the array.
    pred_class_logits: [batch, num_rois, num_classes]
    active_class_ids: [batch, num_classes]. Has a value of 1 for
        classes that are in the dataset of the image, and 0
        for classes that are not in the dataset.
    """
    # During model building, Keras calls this function with
    # target_class_ids of type float32. Unclear why. Cast it
    # to int to get around it.
    target_class_ids = tf.cast(target_class_ids, 'int64')

    # Find predictions of classes that are not in the dataset.
    pred_class_ids = tf.argmax(pred_class_logits, axis=2)
    # TODO: Update this line to work with batch > 1. Right now it assumes all
    # images in a batch have the same active_class_ids
    pred_active = tf.gather(active_class_ids[0], pred_class_ids)

    # Per-ROI cross-entropy loss.
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=target_class_ids, logits=pred_class_logits)

    # Erase losses of predictions of classes that are not in the active
    # classes of the image.
    loss = loss * pred_active

    # Computer loss mean. Use only predictions that contribute
    # to the loss to get a correct mean.
    # NOTE(review): if no prediction is active, reduce_sum(pred_active)
    # is 0 and this divides by zero — presumably rare; confirm upstream.
    loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
    return loss
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
    """Loss for Mask R-CNN bounding box refinement.

    target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
    target_class_ids: [batch, num_rois]. Integer class IDs.
    pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
    """
    # Reshape to merge batch and roi dimensions for simplicity.
    target_class_ids = K.reshape(target_class_ids, (-1,))
    target_bbox = K.reshape(target_bbox, (-1, 4))
    pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))

    # Only positive ROIs contribute to the loss. And only
    # the right class_id of each ROI. Get their indicies.
    positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
    positive_roi_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_roi_ix), tf.int64)
    # (roi, class) index pairs used to pick each ROI's own-class deltas.
    indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)

    # Gather the deltas (predicted and true) that contribute to loss
    target_bbox = tf.gather(target_bbox, positive_roi_ix)
    pred_bbox = tf.gather_nd(pred_bbox, indices)

    # Smooth-L1 Loss; zero when there are no positive ROIs.
    loss = K.switch(tf.size(target_bbox) > 0,
                    smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
                    tf.constant(0.0))
    loss = K.mean(loss)
    return loss
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
    """Mask binary cross-entropy loss for the masks head.

    target_masks: [batch, num_rois, height, width].
        A float32 tensor of values 0 or 1. Uses zero padding to fill array.
    target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
    pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
        with values from 0 to 1.
    """
    # Reshape for simplicity. Merge first two dimensions into one.
    target_class_ids = K.reshape(target_class_ids, (-1,))
    mask_shape = tf.shape(target_masks)
    target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))
    pred_shape = tf.shape(pred_masks)
    pred_masks = K.reshape(pred_masks,
                           (-1, pred_shape[2], pred_shape[3], pred_shape[4]))
    # Permute predicted masks to [N, num_classes, height, width]
    # so gather_nd below can index by (roi, class).
    pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])

    # Only positive ROIs contribute to the loss. And only
    # the class specific mask of each ROI.
    positive_ix = tf.where(target_class_ids > 0)[:, 0]
    positive_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_ix), tf.int64)
    indices = tf.stack([positive_ix, positive_class_ids], axis=1)

    # Gather the masks (predicted and true) that contribute to loss
    y_true = tf.gather(target_masks, positive_ix)
    y_pred = tf.gather_nd(pred_masks, indices)

    # Compute binary cross entropy. If no positive ROIs, then return 0.
    # shape: [batch, roi, num_classes]
    loss = K.switch(tf.size(y_true) > 0,
                    K.binary_crossentropy(target=y_true, output=y_pred),
                    tf.constant(0.0))
    loss = K.mean(loss)
    return loss
############################################################
# Data Generator
############################################################
def load_image_gt(dataset, config, image_id, augment=False, augmentation=None,
                  use_mini_mask=False):
    """Load and return ground truth data for an image (image, mask, bounding boxes).

    augment: (Deprecated. Use augmentation instead). If true, apply random
        image augmentation. Currently, only horizontal flipping is offered.
    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
        For example, passing imgaug.augmenters.Fliplr(0.5) flips images
        right/left 50% of the time.
    use_mini_mask: If False, returns full-size masks that are the same height
        and width as the original image. These can be big, for example
        1024x1024x100 (for 100 instances). Mini masks are smaller, typically,
        224x224 and are generated by extracting the bounding box of the
        object and resizing it to MINI_MASK_SHAPE.

    Returns:
    image: [height, width, 3]
    image_meta: packed image attributes (see compose_image_meta()). None
        when the image has no annotated instances.
    class_ids: [instance_count] Integer class IDs
    bbox: [instance_count, (y1, x1, y2, x2)]
    mask: [height, width, instance_count]. The height and width are those
        of the image unless use_mini_mask is True, in which case they are
        defined in MINI_MASK_SHAPE.
    """
    # Load image and mask
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    # No annotated instances: bail out early with placeholders so the
    # caller can skip this image.
    if class_ids.size == 0:
        return image, None, class_ids, None, None
    original_shape = image.shape
    image, window, scale, padding, crop = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        min_scale=config.IMAGE_MIN_SCALE,
        max_dim=config.IMAGE_MAX_DIM,
        mode=config.IMAGE_RESIZE_MODE)
    mask = utils.resize_mask(mask, scale, padding, crop)

    # Random horizontal flips.
    # TODO: will be removed in a future update in favor of augmentation
    if augment:
        logging.warning("'augment' is depricated. Use 'augmentation' instead.")
        if random.randint(0, 1):
            image = np.fliplr(image)
            mask = np.fliplr(mask)

    # Augmentation
    # This requires the imgaug lib (https://github.com/aleju/imgaug)
    if augmentation:
        import imgaug

        # Augmentors that are safe to apply to masks
        # Some, such as Affine, have settings that make them unsafe, so always
        # test your augmentation on masks
        MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes",
                           "Fliplr", "Flipud", "CropAndPad",
                           "Affine", "PiecewiseAffine"]

        def hook(images, augmenter, parents, default):
            """Determines which augmenters to apply to masks."""
            return (augmenter.__class__.__name__ in MASK_AUGMENTERS)

        # Store shapes before augmentation to compare
        image_shape = image.shape
        mask_shape = mask.shape
        # Make augmenters deterministic to apply similarly to images and masks
        det = augmentation.to_deterministic()
        image = det.augment_image(image)
        # Change mask to np.uint8 because imgaug doesn't support bool masks
        mask = det.augment_image(mask.astype(np.uint8),
                                 hooks=imgaug.HooksImages(activator=hook))
        # Verify that shapes didn't change
        assert image.shape == image_shape, "Augmentation shouldn't change image size"
        assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"
        # Change mask back to bool. Use the builtin bool dtype: the np.bool
        # alias was deprecated in NumPy 1.20 and removed in NumPy 1.24.
        mask = mask.astype(bool)

    # Note that some boxes might be all zeros if the corresponding mask got
    # cropped out, and here is to filter them out.
    _idx = np.sum(mask, axis=(0, 1)) > 0
    mask = mask[:, :, _idx]
    class_ids = class_ids[_idx]
    # Bounding boxes. Note that some boxes might be all zeros
    # if the corresponding mask got cropped out.
    # bbox: [num_instances, (y1, x1, y2, x2)]
    bbox = utils.extract_bboxes(mask)

    # Active classes
    # Different datasets have different classes, so track the
    # classes supported in the dataset of this image.
    active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
    source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
    active_class_ids[source_class_ids] = 1

    # Resize masks to smaller size to reduce memory usage
    if use_mini_mask:
        mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)

    # Image meta data
    image_meta = compose_image_meta(image_id, original_shape, image.shape,
                                    window, scale, active_class_ids)

    return image, image_meta, class_ids, bbox, mask
def load_images_gt(dataset, config, image_id, augment=False, augmentation=None,
                   use_mini_mask=False):
    """Load and return ground truth data for an RGB-D image pair
    (RGB image, depth image, mask, bounding boxes).

    augment: (Deprecated. Use augmentation instead). If true, apply random
        image augmentation. Currently, only horizontal flipping is offered.
    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
        For example, passing imgaug.augmenters.Fliplr(0.5) flips images
        right/left 50% of the time.
    use_mini_mask: If False, returns full-size masks that are the same height
        and width as the original image. These can be big, for example
        1024x1024x100 (for 100 instances). Mini masks are smaller, typically,
        224x224 and are generated by extracting the bounding box of the
        object and resizing it to MINI_MASK_SHAPE.

    Returns:
    image: [height, width, 3]
    depth_image: depth map resized with the same settings as the RGB image.
    image_meta: packed image attributes (see compose_image_meta()). None
        when the image has no annotated instances.
    class_ids: [instance_count] Integer class IDs
    bbox: [instance_count, (y1, x1, y2, x2)]
    mask: [height, width, instance_count]. The height and width are those
        of the image unless use_mini_mask is True, in which case they are
        defined in MINI_MASK_SHAPE.
    """
    # Load image pair and mask
    image, depth_image = dataset.load_image_rgb_depth(dataset.image_reference(image_id))
    mask, class_ids = dataset.load_mask(image_id)
    # No annotated instances: bail out early with placeholders.
    if class_ids.size == 0:
        return image, depth_image, None, class_ids, None, None
    original_shape = image.shape
    image, window, scale, padding, crop = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        min_scale=config.IMAGE_MIN_SCALE,
        max_dim=config.IMAGE_MAX_DIM,
        mode=config.IMAGE_RESIZE_MODE)
    # Resize the depth image with identical settings so it stays
    # pixel-aligned with the RGB image.
    depth_image, _, _, _, _ = utils.resize_image(
        depth_image,
        min_dim=config.IMAGE_MIN_DIM,
        min_scale=config.IMAGE_MIN_SCALE,
        max_dim=config.IMAGE_MAX_DIM,
        mode=config.IMAGE_RESIZE_MODE)
    mask = utils.resize_mask(mask, scale, padding, crop)

    # Random horizontal flips.
    # TODO: will be removed in a future update in favor of augmentation
    if augment:
        logging.warning("'augment' is depricated. Use 'augmentation' instead.")
        if random.randint(0, 1):
            image = np.fliplr(image)
            # Fix: flip the depth image too, so RGB and depth stay
            # pixel-aligned (previously only the RGB image was flipped).
            depth_image = np.fliplr(depth_image)
            mask = np.fliplr(mask)

    # Augmentation
    # This requires the imgaug lib (https://github.com/aleju/imgaug)
    if augmentation:
        import imgaug

        # Augmentors that are safe to apply to masks
        # Some, such as Affine, have settings that make them unsafe, so always
        # test your augmentation on masks
        MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes",
                           "Fliplr", "Flipud", "CropAndPad",
                           "Affine", "PiecewiseAffine"]

        def hook(images, augmenter, parents, default):
            """Determines which augmenters to apply to masks."""
            return (augmenter.__class__.__name__ in MASK_AUGMENTERS)

        # Store shapes before augmentation to compare
        image_shape = image.shape
        mask_shape = mask.shape
        # Make augmenters deterministic to apply similarly to images and masks
        det = augmentation.to_deterministic()
        image = det.augment_image(image)
        # Change mask to np.uint8 because imgaug doesn't support bool masks
        mask = det.augment_image(mask.astype(np.uint8),
                                 hooks=imgaug.HooksImages(activator=hook))
        # NOTE(review): depth_image is NOT passed through the augmenters;
        # any geometric augmentation will de-align RGB and depth — confirm
        # this is intended before enabling geometric augmenters.
        # Verify that shapes didn't change
        assert image.shape == image_shape, "Augmentation shouldn't change image size"
        assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"
        # Change mask back to bool. Use the builtin bool dtype: the np.bool
        # alias was deprecated in NumPy 1.20 and removed in NumPy 1.24.
        mask = mask.astype(bool)

    # Note that some boxes might be all zeros if the corresponding mask got
    # cropped out, and here is to filter them out.
    _idx = np.sum(mask, axis=(0, 1)) > 0
    mask = mask[:, :, _idx]
    class_ids = class_ids[_idx]
    # Bounding boxes. Note that some boxes might be all zeros
    # if the corresponding mask got cropped out.
    # bbox: [num_instances, (y1, x1, y2, x2)]
    bbox = utils.extract_bboxes(mask)

    # Active classes
    # Different datasets have different classes, so track the
    # classes supported in the dataset of this image.
    active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
    source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
    active_class_ids[source_class_ids] = 1

    # Resize masks to smaller size to reduce memory usage
    if use_mini_mask:
        mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)

    # Image meta data
    image_meta = compose_image_meta(image_id, original_shape, image.shape,
                                    window, scale, active_class_ids)

    return image, depth_image, image_meta, class_ids, bbox, mask
def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):
    """Generate targets for training Stage 2 classifier and mask heads.
    This is not used in normal training. It's useful for debugging or to train
    the Mask RCNN heads without using the RPN head.

    Inputs:
    rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
    gt_class_ids: [instance count] Integer class IDs
    gt_boxes: [instance count, (y1, x1, y2, x2)]
    gt_masks: [height, width, instance count] Ground truth masks. Can be full
              size or mini-masks.

    Returns:
    rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
    class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific
            bbox refinements.
    masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES). Class specific masks cropped
           to bbox boundaries and resized to neural network output size.
    """
    assert rpn_rois.shape[0] > 0
    assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
        gt_class_ids.dtype)
    assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
        gt_boxes.dtype)
    assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
        gt_masks.dtype)

    # It's common to add GT Boxes to ROIs but we don't do that here because
    # according to XinLei Chen's paper, it doesn't help.

    # Trim empty padding in gt_boxes and gt_masks parts
    instance_ids = np.where(gt_class_ids > 0)[0]
    assert instance_ids.shape[0] > 0, "Image must contain instances."
    gt_class_ids = gt_class_ids[instance_ids]
    gt_boxes = gt_boxes[instance_ids]
    gt_masks = gt_masks[:, :, instance_ids]

    # Compute areas of ROIs and ground truth boxes.
    rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
        (rpn_rois[:, 3] - rpn_rois[:, 1])
    gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
        (gt_boxes[:, 3] - gt_boxes[:, 1])

    # Compute overlaps [rpn_rois, gt_boxes]
    overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
    for i in range(overlaps.shape[1]):
        gt = gt_boxes[i]
        overlaps[:, i] = utils.compute_iou(
            gt, rpn_rois, gt_box_area[i], rpn_roi_area)

    # Assign ROIs to GT boxes: each ROI gets its best-overlapping GT box.
    rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
    rpn_roi_iou_max = overlaps[np.arange(
        overlaps.shape[0]), rpn_roi_iou_argmax]
    # GT box assigned to each ROI
    rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
    rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]

    # Positive ROIs are those with >= 0.5 IoU with a GT box.
    fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]

    # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)
    # TODO: To hard example mine or not to hard example mine, that's the question
    # bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]
    bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]

    # Subsample ROIs. Aim for 33% foreground.
    # FG
    fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)
    if fg_ids.shape[0] > fg_roi_count:
        keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)
    else:
        keep_fg_ids = fg_ids
    # BG
    remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]
    if bg_ids.shape[0] > remaining:
        keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
    else:
        keep_bg_ids = bg_ids
    # Combine indicies of ROIs to keep
    keep = np.concatenate([keep_fg_ids, keep_bg_ids])
    # Need more?
    remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]
    if remaining > 0:
        # Looks like we don't have enough samples to maintain the desired
        # balance. Reduce requirements and fill in the rest. This is
        # likely different from the Mask RCNN paper.

        # There is a small chance we have neither fg nor bg samples.
        if keep.shape[0] == 0:
            # Pick bg regions with easier IoU threshold
            bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
            assert bg_ids.shape[0] >= remaining
            keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
            assert keep_bg_ids.shape[0] == remaining
            keep = np.concatenate([keep, keep_bg_ids])
        else:
            # Fill the rest with repeated bg rois.
            keep_extra_ids = np.random.choice(
                keep_bg_ids, remaining, replace=True)
            keep = np.concatenate([keep, keep_extra_ids])
    assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \
        "keep doesn't match ROI batch size {}, {}".format(
            keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)

    # Reset the gt boxes assigned to BG ROIs.
    rpn_roi_gt_boxes[keep_bg_ids, :] = 0
    rpn_roi_gt_class_ids[keep_bg_ids] = 0

    # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.
    rois = rpn_rois[keep]
    roi_gt_boxes = rpn_roi_gt_boxes[keep]
    roi_gt_class_ids = rpn_roi_gt_class_ids[keep]
    roi_gt_assignment = rpn_roi_iou_argmax[keep]

    # Class-aware bbox deltas. [y, x, log(h), log(w)]
    bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,
                       config.NUM_CLASSES, 4), dtype=np.float32)
    pos_ids = np.where(roi_gt_class_ids > 0)[0]
    bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
        rois[pos_ids], roi_gt_boxes[pos_ids, :4])
    # Normalize bbox refinements
    bboxes /= config.BBOX_STD_DEV

    # Generate class-specific target masks
    masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),
                     dtype=np.float32)
    for i in pos_ids:
        class_id = roi_gt_class_ids[i]
        assert class_id > 0, "class id must be greater than 0"
        gt_id = roi_gt_assignment[i]
        class_mask = gt_masks[:, :, gt_id]

        if config.USE_MINI_MASK:
            # Create a mask placeholder, the size of the image
            placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
            # GT box
            gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
            gt_w = gt_x2 - gt_x1
            gt_h = gt_y2 - gt_y1
            # Resize mini mask to size of GT box
            placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
                np.round(skimage.transform.resize(
                    class_mask, (gt_h, gt_w), order=1, mode="constant")).astype(bool)
            # Place the mini batch in the placeholder
            class_mask = placeholder

        # Pick the part of the mask inside the ROI and resize it to the
        # network's mask output size.
        y1, x1, y2, x2 = rois[i].astype(np.int32)
        m = class_mask[y1:y2, x1:x2]
        mask = skimage.transform.resize(m, config.MASK_SHAPE, order=1, mode="constant")
        masks[i, :, :, class_id] = mask

    return rois, roi_gt_class_ids, bboxes, masks
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
    """Given the anchors and GT boxes, compute overlaps and identify positive
    anchors and deltas to refine them to match their corresponding GT boxes.

    anchors: [num_anchors, (y1, x1, y2, x2)]
    gt_class_ids: [num_gt_boxes] Integer class IDs.
    gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]

    Returns:
    rpn_match: [N] (int32) matches between anchors and GT boxes.
               1 = positive anchor, -1 = negative anchor, 0 = neutral
    rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
    """
    # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
    rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
    # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
    rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))

    # Handle COCO crowds
    # A crowd box in COCO is a bounding box around several instances. Exclude
    # them from training. A crowd box is given a negative class ID.
    crowd_ix = np.where(gt_class_ids < 0)[0]
    if crowd_ix.shape[0] > 0:
        # Filter out crowds from ground truth class IDs and boxes
        non_crowd_ix = np.where(gt_class_ids > 0)[0]
        crowd_boxes = gt_boxes[crowd_ix]
        gt_class_ids = gt_class_ids[non_crowd_ix]
        gt_boxes = gt_boxes[non_crowd_ix]
        # Compute overlaps with crowd boxes [anchors, crowds]
        crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)
        crowd_iou_max = np.amax(crowd_overlaps, axis=1)
        # Anchors that barely touch a crowd box are still eligible to be
        # negatives; the rest are excluded from the negative set below.
        no_crowd_bool = (crowd_iou_max < 0.001)
    else:
        # All anchors don't intersect a crowd
        no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)

    # Compute overlaps [num_anchors, num_gt_boxes]
    overlaps = utils.compute_overlaps(anchors, gt_boxes)

    # Match anchors to GT Boxes
    # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
    # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
    # Neutral anchors are those that don't match the conditions above,
    # and they don't influence the loss function.
    # However, don't keep any GT box unmatched (rare, but happens). Instead,
    # match it to the closest anchor (even if its max IoU is < 0.3).
    #
    # 1. Set negative anchors first. They get overwritten below if a GT box is
    # matched to them. Skip boxes in crowd areas.
    anchor_iou_argmax = np.argmax(overlaps, axis=1)
    anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
    rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
    # 2. Set an anchor for each GT box (regardless of IoU value).
    # TODO: If multiple anchors have the same IoU match all of them
    gt_iou_argmax = np.argmax(overlaps, axis=0)
    rpn_match[gt_iou_argmax] = 1
    # 3. Set anchors with high overlap as positive.
    rpn_match[anchor_iou_max >= 0.7] = 1

    # Subsample to balance positive and negative anchors
    # Don't let positives be more than half the anchors
    ids = np.where(rpn_match == 1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
    if extra > 0:
        # Reset the extra ones to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0
    # Same for negative proposals
    ids = np.where(rpn_match == -1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
                        np.sum(rpn_match == 1))
    if extra > 0:
        # Rest the extra ones to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0

    # For positive anchors, compute shift and scale needed to transform them
    # to match the corresponding GT boxes.
    ids = np.where(rpn_match == 1)[0]
    ix = 0  # index into rpn_bbox
    # TODO: use box_refinement() rather than duplicating the code here
    for i, a in zip(ids, anchors[ids]):
        # Closest gt box (it might have IoU < 0.7)
        gt = gt_boxes[anchor_iou_argmax[i]]

        # Convert coordinates to center plus width/height.
        # GT Box
        gt_h = gt[2] - gt[0]
        gt_w = gt[3] - gt[1]
        gt_center_y = gt[0] + 0.5 * gt_h
        gt_center_x = gt[1] + 0.5 * gt_w
        # Anchor
        a_h = a[2] - a[0]
        a_w = a[3] - a[1]
        a_center_y = a[0] + 0.5 * a_h
        a_center_x = a[1] + 0.5 * a_w

        # Compute the bbox refinement that the RPN should predict.
        rpn_bbox[ix] = [
            (gt_center_y - a_center_y) / a_h,
            (gt_center_x - a_center_x) / a_w,
            np.log(gt_h / a_h),
            np.log(gt_w / a_w),
        ]
        # Normalize
        rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
        ix += 1

    return rpn_match, rpn_bbox
def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
    """Generates ROI proposals similar to what a region proposal network
    would generate.

    image_shape: [Height, Width, Depth]
    count: Number of ROIs to generate
    gt_class_ids: [N] Integer ground truth class IDs
    gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.

    Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.
    """
    def _sample_boxes(lo_y, hi_y, lo_x, hi_x, num):
        """Draw `num` random boxes with both sides >= 1 px inside the
        given bounds. Over-samples by 2x and retries until enough valid
        boxes survive the zero-area filter."""
        min_side = 1
        while True:
            ys = np.random.randint(lo_y, hi_y, (num * 2, 2))
            xs = np.random.randint(lo_x, hi_x, (num * 2, 2))
            ys = ys[np.abs(ys[:, 0] - ys[:, 1]) >= min_side][:num]
            xs = xs[np.abs(xs[:, 0] - xs[:, 1]) >= min_side][:num]
            if ys.shape[0] == num and xs.shape[0] == num:
                # Sort each pair so x1 <= x2 and y1 <= y2, then assemble
                # boxes in (y1, x1, y2, x2) order.
                x1, x2 = np.split(np.sort(xs, axis=1), 2, axis=1)
                y1, y2 = np.split(np.sort(ys, axis=1), 2, axis=1)
                return np.hstack([y1, x1, y2, x2])

    rois = np.zeros((count, 4), dtype=np.int32)

    # 90% of the ROIs are sampled in a window around each GT box.
    num_boxes = gt_boxes.shape[0]
    rois_per_box = int(0.9 * count / num_boxes)
    for idx in range(num_boxes):
        gy1, gx1, gy2, gx2 = gt_boxes[idx]
        height = gy2 - gy1
        width = gx2 - gx1
        # Sampling window: the GT box expanded by its own size on every
        # side, clipped to the image.
        lo_y = max(gy1 - height, 0)
        hi_y = min(gy2 + height, image_shape[0])
        lo_x = max(gx1 - width, 0)
        hi_x = min(gx2 + width, image_shape[1])
        rois[rois_per_box * idx:rois_per_box * (idx + 1)] = \
            _sample_boxes(lo_y, hi_y, lo_x, hi_x, rois_per_box)

    # The remaining ~10% are sampled anywhere in the image.
    leftover = count - (rois_per_box * num_boxes)
    rois[-leftover:] = _sample_boxes(0, image_shape[0],
                                     0, image_shape[1], leftover)
    return rois
def data_generator(dataset, config, shuffle=True, augment=False, augmentation=None,
                   random_rois=0, batch_size=1, detection_targets=False):
    """A generator that returns RGB images, depth images, and corresponding
    target class ids, bounding box deltas, and masks.

    dataset: The Dataset object to pick data from
    config: The model config object
    shuffle: If True, shuffles the samples before every epoch
    augment: (Deprecated. Use augmentation instead). If true, apply random
        image augmentation. Currently, only horizontal flipping is offered.
    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
        For example, passing imgaug.augmenters.Fliplr(0.5) flips images
        right/left 50% of the time.
    random_rois: If > 0 then generate proposals to be used to train the
        network classifier and mask heads. Useful if training
        the Mask RCNN part without the RPN.
    batch_size: How many images to return in each call
    detection_targets: If True, generate detection targets (class IDs, bbox
        deltas, and masks). Typically for debugging or visualizations because
        in training detection targets are generated by DetectionTargetLayer.

    Returns a Python generator. Upon calling next() on it, the
    generator returns two lists, inputs and outputs. The contents
    of the lists differs depending on the received arguments:
    inputs list:
    - images: [batch, H, W, C]
    - depth_images: [batch, H, W, C] depth maps matching the images
    - image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)
    - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
    - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs
    - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]
    - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width
                are those of the image unless use_mini_mask is True, in which
                case they are defined in MINI_MASK_SHAPE.

    outputs list: Usually empty in regular training. But if detection_targets
        is True then the outputs list contains target class_ids, bbox deltas,
        and masks.
    """
    b = 0  # batch item index
    image_index = -1
    image_ids = np.copy(dataset.image_ids)
    error_count = 0

    # Anchors
    # [anchor_count, (y1, x1, y2, x2)]
    backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)
    anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
                                             config.RPN_ANCHOR_RATIOS,
                                             backbone_shapes,
                                             config.BACKBONE_STRIDES,
                                             config.RPN_ANCHOR_STRIDE)

    # Keras requires a generator to run indefinately.
    while True:
        try:
            # Increment index to pick next image. Shuffle if at the start of an epoch.
            image_index = (image_index + 1) % len(image_ids)
            if shuffle and image_index == 0:
                np.random.shuffle(image_ids)

            # Get GT bounding boxes and masks for image.
            image_id = image_ids[image_index]
            image, depth_image, image_meta, gt_class_ids, gt_boxes, gt_masks = \
                load_images_gt(dataset, config, image_id, augment=augment,
                               augmentation=augmentation,
                               use_mini_mask=config.USE_MINI_MASK)

            # Skip images that have no instances. This can happen in cases
            # where we train on a subset of classes and the image doesn't
            # have any of the classes we care about.
            if not np.any(gt_class_ids > 0):
                continue

            # RPN Targets
            rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,
                                                    gt_class_ids, gt_boxes, config)

            # Mask R-CNN Targets
            if random_rois:
                rpn_rois = generate_random_rois(
                    image.shape, random_rois, gt_class_ids, gt_boxes)
                if detection_targets:
                    rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\
                        build_detection_targets(
                            rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)

            # Init batch arrays on the first item of each batch.
            if b == 0:
                batch_image_meta = np.zeros(
                    (batch_size,) + image_meta.shape, dtype=image_meta.dtype)
                batch_rpn_match = np.zeros(
                    [batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)
                batch_rpn_bbox = np.zeros(
                    [batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)
                batch_images = np.zeros(
                    (batch_size,) + image.shape, dtype=np.float32)
                batch_depth_images = np.zeros(
                    (batch_size,) + depth_image.shape, dtype=np.float32)
                batch_gt_class_ids = np.zeros(
                    (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)
                batch_gt_boxes = np.zeros(
                    (batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)
                batch_gt_masks = np.zeros(
                    (batch_size, gt_masks.shape[0], gt_masks.shape[1],
                     config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)
                if random_rois:
                    batch_rpn_rois = np.zeros(
                        (batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)
                    if detection_targets:
                        batch_rois = np.zeros(
                            (batch_size,) + rois.shape, dtype=rois.dtype)
                        batch_mrcnn_class_ids = np.zeros(
                            (batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)
                        batch_mrcnn_bbox = np.zeros(
                            (batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)
                        batch_mrcnn_mask = np.zeros(
                            (batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)

            # If more instances than fits in the array, sub-sample from them.
            if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:
                ids = np.random.choice(
                    np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)
                gt_class_ids = gt_class_ids[ids]
                gt_boxes = gt_boxes[ids]
                gt_masks = gt_masks[:, :, ids]

            # Add to batch
            batch_image_meta[b] = image_meta
            batch_rpn_match[b] = rpn_match[:, np.newaxis]
            batch_rpn_bbox[b] = rpn_bbox
            batch_images[b] = mold_image(image.astype(np.float32), config)
            batch_depth_images[b] = mold_image(depth_image.astype(np.float32), config)
            batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids
            batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes
            batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks
            if random_rois:
                batch_rpn_rois[b] = rpn_rois
                if detection_targets:
                    batch_rois[b] = rois
                    batch_mrcnn_class_ids[b] = mrcnn_class_ids
                    batch_mrcnn_bbox[b] = mrcnn_bbox
                    batch_mrcnn_mask[b] = mrcnn_mask
            b += 1

            # Batch full?
            if b >= batch_size:
                inputs = [batch_images, batch_depth_images, batch_image_meta,
                          batch_rpn_match, batch_rpn_bbox,
                          batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]
                outputs = []

                if random_rois:
                    inputs.extend([batch_rpn_rois])
                    if detection_targets:
                        inputs.extend([batch_rois])
                        # Keras requires that output and targets have the same number of dimensions
                        batch_mrcnn_class_ids = np.expand_dims(
                            batch_mrcnn_class_ids, -1)
                        outputs.extend(
                            [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])

                yield inputs, outputs

                # start a new batch
                b = 0
        except (GeneratorExit, KeyboardInterrupt):
            raise
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # other non-Exception signals. Log it and skip the image; give
            # up after repeated failures.
            logging.exception("Error processing image {}".format(
                dataset.image_info[image_id]))
            error_count += 1
            if error_count > 5:
                raise
############################################################
# MaskRCNN Class
############################################################
class MaskRCNN():
"""Encapsulates the Mask RCNN model functionality.
The actual Keras model is in the keras_model property.
"""
def __init__(self, mode, config, model_dir):
"""
mode: Either "training" or "inference"
config: A Sub-class of the Config class
model_dir: Directory to save training logs and trained weights
"""
assert mode in ['training', 'inference']
self.mode = mode
self.config = config
self.model_dir = model_dir
self.set_log_dir()
self.keras_model = self.build(mode=mode, config=config)
    def build(self, mode, config):
        """Build Mask R-CNN architecture.

        mode: Either "training" or "inference". The inputs and
            outputs of the model differ accordingly.
        config: A Sub-class of the Config class.

        Returns the assembled keras.Model (wrapped in ParallelModel when
        config.GPU_COUNT > 1).
        """
        assert mode in ['training', 'inference']
        # Image size must be divisible by 2 multiple times (6 levels of
        # down/upsampling in the FPN).
        h, w = config.IMAGE_SHAPE[:2]
        if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
            raise Exception("Image size must be dividable by 2 at least 6 times "
                            "to avoid fractions when downscaling and upscaling."
                            "For example, use 256, 320, 384, 448, 512, ... etc. ")
        # Inputs. The depth map enters as a second 3-channel image-like input.
        input_image = KL.Input(
            shape=[None, None, 3], name="input_image")
        input_depth_image = KL.Input(
            shape=[None, None, 3], name="input_depth_image")
        input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],
                                    name="input_image_meta")
        if mode == "training":
            # RPN GT
            input_rpn_match = KL.Input(
                shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
            input_rpn_bbox = KL.Input(
                shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)
            # Detection GT (class IDs, bounding boxes, and masks)
            # 1. GT Class IDs (zero padded)
            input_gt_class_ids = KL.Input(
                shape=[None], name="input_gt_class_ids", dtype=tf.int32)
            # 2. GT Boxes in pixels (zero padded)
            # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
            input_gt_boxes = KL.Input(
                shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
            # Normalize coordinates
            gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(
                x, K.shape(input_image)[1:3]))(input_gt_boxes)
            # 3. GT Masks (zero padded)
            # [batch, height, width, MAX_GT_INSTANCES]
            if config.USE_MINI_MASK:
                input_gt_masks = KL.Input(
                    shape=[config.MINI_MASK_SHAPE[0],
                           config.MINI_MASK_SHAPE[1], None],
                    name="input_gt_masks", dtype=bool)
            else:
                input_gt_masks = KL.Input(
                    shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
                    name="input_gt_masks", dtype=bool)
        elif mode == "inference":
            # Anchors in normalized coordinates
            input_anchors = KL.Input(shape=[None, 4], name="input_anchors")
        # Build the shared convolutional layers.
        # Bottom-up Layers
        # Returns a list of the last layers of each stage, 5 in total.
        # Two separate ResNet+FPN backbones are built: one for RGB, one for depth.
        rpn_feature_maps,mrcnn_feature_maps, mrcnn_depth_feature_maps = self.buildResnetGraphTopDownLayers(input_image, input_depth_image, config)
        # Anchors
        if mode == "training":
            anchors = self.get_anchors(config.IMAGE_SHAPE)
            # Duplicate across the batch dimension because Keras requires it
            # TODO: can this be optimized to avoid duplicating the anchors?
            anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)
            # A hack to get around Keras's bad support for constants
            # NOTE(review): the lambda ignores its input and closes over the
            # numpy `anchors` array, turning it into a graph variable.
            anchors = KL.Lambda(lambda x: tf.Variable(anchors), name="anchors")(input_image)
        else:
            anchors = input_anchors
        # RPN Model (shared across pyramid levels)
        rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
                              len(config.RPN_ANCHOR_RATIOS), 256)
        # Loop through pyramid layers
        layer_outputs = [] # list of lists
        for p in rpn_feature_maps:
            layer_outputs.append(rpn([p]))
        # Concatenate layer outputs
        # Convert from list of lists of level outputs to list of lists
        # of outputs across levels.
        # e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
        output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"]
        outputs = list(zip(*layer_outputs))
        outputs = [KL.Concatenate(axis=1, name=n)(list(o))
                   for o, n in zip(outputs, output_names)]
        rpn_class_logits, rpn_class, rpn_bbox = outputs
        # Generate proposals
        # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates
        # and zero padded.
        proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training"\
            else config.POST_NMS_ROIS_INFERENCE
        rpn_rois = ProposalLayer(
            proposal_count=proposal_count,
            nms_threshold=config.RPN_NMS_THRESHOLD,
            name="ROI",
            config=config)([rpn_class, rpn_bbox, anchors])
        if mode == "training":
            # Class ID mask to mark class IDs supported by the dataset the image
            # came from.
            active_class_ids = KL.Lambda(
                lambda x: parse_image_meta_graph(x)["active_class_ids"]
                )(input_image_meta)
            if not config.USE_RPN_ROIS:
                # Ignore predicted ROIs and use ROIs provided as an input.
                input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],
                                      name="input_roi", dtype=np.int32)
                # Normalize coordinates
                target_rois = KL.Lambda(lambda x: norm_boxes_graph(
                    x, K.shape(input_image)[1:3]))(input_rois)
            else:
                target_rois = rpn_rois
            # Generate detection targets
            # Subsamples proposals and generates target outputs for training
            # Note that proposal class IDs, gt_boxes, and gt_masks are zero
            # padded. Equally, returned rois and targets are zero padded.
            rois, target_class_ids, target_bbox, target_mask =\
                DetectionTargetLayer(config, name="proposal_targets")([
                    target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])
            # Network Heads
            # TODO: verify that this handles zero padded ROIs
            # Both RGB and depth feature maps are fed to the heads.
            mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
                fpn_classifier_graph(rois, mrcnn_feature_maps, mrcnn_depth_feature_maps, input_image_meta,
                                     config.POOL_SIZE, config.NUM_CLASSES,
                                     train_bn=config.TRAIN_BN)
            mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps, mrcnn_depth_feature_maps,
                                              input_image_meta,
                                              config.MASK_POOL_SIZE,
                                              config.NUM_CLASSES,
                                              train_bn=config.TRAIN_BN)
            # TODO: clean up (use tf.identify if necessary)
            output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois)
            # Losses (computed inside the graph as Lambda layers)
            rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")(
                [input_rpn_match, rpn_class_logits])
            rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")(
                [input_rpn_bbox, input_rpn_match, rpn_bbox])
            class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")(
                [target_class_ids, mrcnn_class_logits, active_class_ids])
            bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")(
                [target_bbox, target_class_ids, mrcnn_bbox])
            mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")(
                [target_mask, target_class_ids, mrcnn_mask])
            # Model
            inputs = [input_image, input_depth_image, input_image_meta,
                      input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]
            if not config.USE_RPN_ROIS:
                inputs.append(input_rois)
            outputs = [rpn_class_logits, rpn_class, rpn_bbox,
                       mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,
                       rpn_rois, output_rois,
                       rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]
            model = KM.Model(inputs, outputs, name='mask_rcnn')
        else:
            # Network Heads
            # Proposal classifier and BBox regressor heads
            mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
                fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, mrcnn_depth_feature_maps, input_image_meta,
                                     config.POOL_SIZE, config.NUM_CLASSES,
                                     train_bn=config.TRAIN_BN)
            # Detections
            # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in
            # normalized coordinates
            detections = DetectionLayer(config, name="mrcnn_detection")(
                [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])
            # Create masks for detections
            detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)
            mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps, mrcnn_depth_feature_maps,
                                              input_image_meta,
                                              config.MASK_POOL_SIZE,
                                              config.NUM_CLASSES,
                                              train_bn=config.TRAIN_BN)
            model = KM.Model([input_image,input_depth_image, input_image_meta, input_anchors],
                             [detections, mrcnn_class, mrcnn_bbox,
                              mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],
                             name='mask_rcnn')
        # Add multi-GPU support.
        if config.GPU_COUNT > 1:
            from mrcnn.parallel_model import ParallelModel
            model = ParallelModel(model, config.GPU_COUNT)
        return model
    def buildResnetGraphTopDownLayers(self, input_image,input_depth_image, config):
        """Build two ResNet+FPN backbones: one for RGB, one for depth.

        input_image: RGB image tensor.
        input_depth_image: depth map tensor (3-channel, image-like).
        config: model configuration (reads BACKBONE and TRAIN_BN).

        Returns:
            rpn_feature_maps: [P2, P3, P4, P5, P6] from the RGB branch
                (consumed by the RPN).
            mrcnn_feature_maps: [P2, P3, P4, P5] RGB maps for the heads.
            mrcnn_depth_feature_maps: [P2, P3, P4, P5] depth maps for the heads.
        """
        ################
        # RGB
        ################
        _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,
                                         stage5=True, train_bn=config.TRAIN_BN)
        # Top-down Layers
        # TODO: add assert to verify feature map sizes match what's in config
        P5 = KL.Conv2D(256, (1, 1), name='fpn_c5p5')(C5)
        P4 = KL.Add(name="fpn_p4add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
            KL.Conv2D(256, (1, 1), name='fpn_c4p4')(C4)])
        P3 = KL.Add(name="fpn_p3add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
            KL.Conv2D(256, (1, 1), name='fpn_c3p3')(C3)])
        P2 = KL.Add(name="fpn_p2add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
            KL.Conv2D(256, (1, 1), name='fpn_c2p2')(C2)])
        # Attach 3x3 conv to all P layers to get the final feature maps.
        P2 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p2")(P2)
        P3 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p3")(P3)
        P4 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p4")(P4)
        P5 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p5")(P5)
        # P6 is used for the 5th anchor scale in RPN. Generated by
        # subsampling from P5 with stride of 2.
        P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)
        rpn_feature_maps = [P2, P3, P4, P5, P6]
        mrcnn_feature_maps = [P2, P3, P4, P5]
        ################
        # DEPTH
        ################
        # Separate backbone weights for depth (lType suffixes the layer names
        # with '_depth' so the two branches do not share parameters).
        _, C2, C3, C4, C5 = resnet_graph(input_depth_image, config.BACKBONE,
                                         stage5=True, train_bn=config.TRAIN_BN, lType='_depth')
        # Top-down Layers
        # TODO: add assert to verify feature map sizes match what's in config
        P5 = KL.Conv2D(256, (1, 1), name='fpn_c5p5_depth')(C5)
        P4 = KL.Add(name="fpn_p4add_depth")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled_depth")(P5),
            KL.Conv2D(256, (1, 1), name='fpn_c4p4_depth')(C4)])
        P3 = KL.Add(name="fpn_p3add_depth")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled_depth")(P4),
            KL.Conv2D(256, (1, 1), name='fpn_c3p3_depth')(C3)])
        P2 = KL.Add(name="fpn_p2add_depth")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled_depth")(P3),
            KL.Conv2D(256, (1, 1), name='fpn_c2p2_depth')(C2)])
        # Attach 3x3 conv to all P layers to get the final feature maps.
        P2 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p2_depth")(P2)
        P3 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p3_depth")(P3)
        P4 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p4_depth")(P4)
        P5 = KL.Conv2D(256, (3, 3), padding="SAME", name="fpn_p5_depth")(P5)
        # No P6 for depth: the depth branch feeds only the heads, not the RPN.
        mrcnn_depth_feature_maps = [P2, P3, P4, P5]
        return rpn_feature_maps, mrcnn_feature_maps, mrcnn_depth_feature_maps
def find_last(self):
"""Finds the last checkpoint file of the last trained model in the
model directory.
Returns:
The path of the last checkpoint file
"""
# Get directory names. Each directory corresponds to a model
dir_names = next(os.walk(self.model_dir))[1]
key = self.config.NAME.lower()
dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names)
# print(dir_names)
if not dir_names:
import errno
raise FileNotFoundError(
errno.ENOENT,
"Could not find model directory under {}".format(self.model_dir))
# Pick last directory
dir_name = os.path.join(self.model_dir, dir_names[-1])
# Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
import errno
raise FileNotFoundError(
errno.ENOENT, "Could not find weight files in {}".format(dir_name))
checkpoint = os.path.join(dir_name, checkpoints[-1])
return checkpoint
def save_weights(self, filepath, name):
self.keras_model.save_weights(filepath=filepath, overwrite=True)
def load_weights_keras(self, filepath):
self.keras_model.load_weights(filepath)
    def load_weights(self, filepath, by_name=False, exclude=None):
        """Modified version of the corresponding Keras function with
        the addition of multi-GPU support and the ability to exclude
        some layers from loading.

        filepath: path to an HDF5 weights file.
        by_name: load weights by layer name rather than by topology order.
        exclude: list of layer names to exclude
        """
        import h5py
        # Conditional import to support versions of Keras before 2.2
        # TODO: remove in about 6 months (end of 2018)
        try:
            from keras.engine import saving
        except ImportError:
            # Keras before 2.2 used the 'topology' namespace.
            from keras.engine import topology as saving
        # Excluding layers only works when matching layers by name.
        if exclude:
            by_name = True
        if h5py is None:
            raise ImportError('`load_weights` requires h5py.')
        f = h5py.File(filepath, mode='r')
        # Full-model HDF5 files nest the weights under a 'model_weights' group.
        if 'layer_names' not in f.attrs and 'model_weights' in f:
            f = f['model_weights']
        # In multi-GPU training, we wrap the model. Get layers
        # of the inner model because they have the weights.
        keras_model = self.keras_model
        layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model") \
            else keras_model.layers
        # Exclude some layers
        if exclude:
            layers = filter(lambda l: l.name not in exclude, layers)
        if by_name:
            saving.load_weights_from_hdf5_group_by_name(f, layers)
        else:
            saving.load_weights_from_hdf5_group(f, layers)
        # NOTE(review): if f was rebound to f['model_weights'] above, the group
        # may not expose close() and the underlying file handle could stay
        # open — confirm against the h5py version in use.
        if hasattr(f, 'close'):
            f.close()
        # Update the log directory
        self.set_log_dir(filepath)
def get_imagenet_weights(self):
"""Downloads ImageNet trained weights from Keras.
Returns path to weights file.
"""
from keras.utils.data_utils import get_file
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\
'releases/download/v0.2/'\
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
return weights_path
    def compile(self, learning_rate, momentum):
        """Gets the model ready for training. Adds losses, regularization, and
        metrics. Then calls the Keras compile() function.

        learning_rate: learning rate for the SGD optimizer.
        momentum: momentum for the SGD optimizer.
        """
        # Optimizer object
        optimizer = keras.optimizers.SGD(
            lr=learning_rate, momentum=momentum,
            clipnorm=self.config.GRADIENT_CLIP_NORM)
        # Add Losses
        # First, clear previously set losses to avoid duplication
        self.keras_model._losses = []
        self.keras_model._per_input_losses = {}
        loss_names = [
            "rpn_class_loss", "rpn_bbox_loss",
            "mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
        for name in loss_names:
            layer = self.keras_model.get_layer(name)
            # Skip losses that were already registered on the model.
            if layer.output in self.keras_model.losses:
                continue
            # Reduce the loss layer's output to a scalar and apply its
            # configured weight (defaults to 1).
            loss = (
                tf.reduce_mean(layer.output, keepdims=True)
                * self.config.LOSS_WEIGHTS.get(name, 1.))
            self.keras_model.add_loss(loss)
        # Add L2 Regularization
        # Skip gamma and beta weights of batch normalization layers.
        reg_losses = [
            keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
            for w in self.keras_model.trainable_weights
            if 'gamma' not in w.name and 'beta' not in w.name]
        self.keras_model.add_loss(tf.add_n(reg_losses))
        # Compile
        # Loss tensors were attached via add_loss above, so every model
        # output gets a None placeholder loss here.
        self.keras_model.compile(
            optimizer=optimizer,
            loss=[None] * len(self.keras_model.outputs), metrics=['acc'])
        # Add metrics for losses so they appear individually in training logs.
        for name in loss_names:
            if name in self.keras_model.metrics_names:
                continue
            layer = self.keras_model.get_layer(name)
            self.keras_model.metrics_names.append(name)
            loss = (
                tf.reduce_mean(layer.output, keepdims=True)
                * self.config.LOSS_WEIGHTS.get(name, 1.))
            self.keras_model.metrics_tensors.append(loss)
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
"""Sets model layers as trainable if their names match
the given regular expression.
"""
# Print message on the first call (but not on recursive calls)
if verbose > 0 and keras_model is None:
log("Selecting layers to train")
keras_model = keras_model or self.keras_model
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
for layer in layers:
# Is the layer a model?
if layer.__class__.__name__ == 'Model':
print("In model: ", layer.name)
self.set_trainable(
layer_regex, keras_model=layer, indent=indent + 4)
continue
if not layer.weights:
continue
# Is it trainable?
trainable = bool(re.fullmatch(layer_regex, layer.name))
# Update layer. If layer is a container, update inner layer.
if layer.__class__.__name__ == 'TimeDistributed':
layer.layer.trainable = trainable
else:
layer.trainable = trainable
# Print trainble layer names
if trainable and verbose > 0:
log("{}{:20} ({})".format(" " * indent, layer.name,
layer.__class__.__name__))
def set_log_dir(self, model_path=None):
"""Sets the model log directory and epoch counter.
model_path: If None, or a format different from what this code uses
then set a new log directory and start epochs from 0. Otherwise,
extract the log directory and the epoch counter from the file
name.
"""
# Set date and epoch counter as if starting a new model
self.epoch = 0
now = datetime.datetime.now()
# If we have a model path with date and epochs use them
if model_path:
# Continue from we left of. Get epoch and date from the file name
# A sample model path might look like:
# /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5
regex = r".*/\w+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})/mask\_rcnn\_\w+(\d{4})\.h5"
m = re.match(regex, model_path)
if m:
now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),
int(m.group(4)), int(m.group(5)))
# Epoch number in file is 1-based, and in Keras code it's 0-based.
# So, adjust for that then increment by one to start from the next epoch
self.epoch = int(m.group(6)) - 1 + 1
# Directory for training logs
self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(
self.config.NAME.lower(), now))
# Path to save after each epoch. Include placeholders that get filled by Keras.
self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format(
self.config.NAME.lower()))
self.checkpoint_path = self.checkpoint_path.replace(
"*epoch*", "{epoch:04d}")
def train(self, train_dataset, val_dataset, learning_rate, epochs, layers, augmentation=None):
"""Train the model.
train_dataset, val_dataset: Training and validation Dataset objects.
learning_rate: The learning rate to train with
epochs: Number of training epochs. Note that previous training epochs
are considered to be done alreay, so this actually determines
the epochs to train in total rather than in this particaular
call.
layers: Allows selecting wich layers to train. It can be:
- A regular expression to match layer names to train
- One of these predefined values:
heaads: The RPN, classifier and mask heads of the network
all: All the layers
3+: Train Resnet stage 3 and up
4+: Train Resnet stage 4 and up
5+: Train Resnet stage 5 and up
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)
augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)
flips images right/left 50% of the time. You can pass complex
augmentations as well. This augmentation applies 50% of the
time, and when it does it flips images right/left half the time
and adds a Gausssian blur with a random sigma in range 0 to 5.
augmentation = imgaug.augmenters.Sometimes(0.5, [
imgaug.augmenters.Fliplr(0.5),
imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))
])
"""
assert self.mode == "training", "Create model in training mode."
# Pre-defined layer regular expressions
layer_regex = {
# all layers but the backbone
"heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# From a specific Resnet stage and up
"3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# All layers
"all": ".*",
}
if layers in layer_regex.keys():
layers = layer_regex[layers]
# Data generators
train_generator = data_generator(train_dataset, self.config, shuffle=True,
augmentation=augmentation,
batch_size=self.config.BATCH_SIZE)
val_generator = data_generator(val_dataset, self.config, shuffle=True,
batch_size=self.config.BATCH_SIZE)
# Callbacks
callbacks = [
keras.callbacks.TensorBoard(log_dir=self.log_dir,
histogram_freq=0, write_graph=True, write_images=False),
keras.callbacks.ModelCheckpoint(self.checkpoint_path,
verbose=0, save_weights_only=True),
]
# Train
log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
log("Checkpoint Path: {}".format(self.checkpoint_path))
self.set_trainable(layers)
self.compile(learning_rate, self.config.LEARNING_MOMENTUM)
# Work-around for Windows: Keras fails on Windows when using
# multiprocessing workers. See discussion here:
# https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009
if os.name is 'nt':
workers = 0
else:
workers = multiprocessing.cpu_count()
self.keras_model.fit_generator(
train_generator,
initial_epoch=self.epoch,
epochs=epochs,
steps_per_epoch=self.config.STEPS_PER_EPOCH,
callbacks=callbacks,
validation_data=val_generator,
validation_steps=self.config.VALIDATION_STEPS,
max_queue_size=100,
workers=workers,
use_multiprocessing=True,
)
self.epoch = max(self.epoch, epochs)
    def mold_inputs(self, images):
        """Takes a list of images and modifies them to the format expected
        as an input to the neural network.
        images: List of image matricies [height,width,depth]. Images can have
            different sizes.

        Returns 3 Numpy matricies:
        molded_images: [N, h, w, 3]. Images resized and normalized.
        image_metas: [N, length of meta data]. Details about each image.
        windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
            original image (padding excluded).
        """
        molded_images = []
        image_metas = []
        windows = []
        for image in images:
            # Resize image
            # TODO: move resizing to mold_image()
            molded_image, window, scale, padding, crop = utils.resize_image(
                image,
                min_dim=self.config.IMAGE_MIN_DIM,
                min_scale=self.config.IMAGE_MIN_SCALE,
                max_dim=self.config.IMAGE_MAX_DIM,
                mode=self.config.IMAGE_RESIZE_MODE)
            molded_image = mold_image(molded_image, self.config)
            # Build image_meta
            # Image ID 0 and an all-zeros active-class array: both fields are
            # placeholders here (not used at inference time).
            image_meta = compose_image_meta(
                0, image.shape, molded_image.shape, window, scale,
                np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
            # Append
            molded_images.append(molded_image)
            windows.append(window)
            image_metas.append(image_meta)
        # Pack into arrays
        molded_images = np.stack(molded_images)
        image_metas = np.stack(image_metas)
        windows = np.stack(windows)
        return molded_images, image_metas, windows
    def unmold_detections(self, detections, mrcnn_mask, original_image_shape,
                          image_shape, window):
        """Reformats the detections of one image from the format of the neural
        network output to a format suitable for use in the rest of the
        application.

        detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates
        mrcnn_mask: [N, height, width, num_classes]
        original_image_shape: [H, W, C] Original image shape before resizing
        image_shape: [H, W, C] Shape of the image after resizing and padding
        window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real
                image is excluding the padding.

        Returns:
        boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels
        class_ids: [N] Integer class IDs for each bounding box
        scores: [N] Float probability scores of the class_id
        masks: [height, width, num_instances] Instance masks
        """
        # How many detections do we have?
        # Detections array is padded with zeros. Find the first class_id == 0.
        zero_ix = np.where(detections[:, 4] == 0)[0]
        N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]
        # Extract boxes, class_ids, scores, and class-specific masks
        boxes = detections[:N, :4]
        class_ids = detections[:N, 4].astype(np.int32)
        scores = detections[:N, 5]
        # Select each detection's mask channel for its predicted class.
        masks = mrcnn_mask[np.arange(N), :, :, class_ids]
        # Translate normalized coordinates in the resized image to pixel
        # coordinates in the original image before resizing
        window = utils.norm_boxes(window, image_shape[:2])
        wy1, wx1, wy2, wx2 = window
        shift = np.array([wy1, wx1, wy1, wx1])
        wh = wy2 - wy1  # window height
        ww = wx2 - wx1  # window width
        scale = np.array([wh, ww, wh, ww])
        # Convert boxes to normalized coordinates on the window
        boxes = np.divide(boxes - shift, scale)
        # Convert boxes to pixel coordinates on the original image
        boxes = utils.denorm_boxes(boxes, original_image_shape[:2])
        # Filter out detections with zero area. Happens in early training when
        # network weights are still random
        exclude_ix = np.where(
            (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
        if exclude_ix.shape[0] > 0:
            boxes = np.delete(boxes, exclude_ix, axis=0)
            class_ids = np.delete(class_ids, exclude_ix, axis=0)
            scores = np.delete(scores, exclude_ix, axis=0)
            masks = np.delete(masks, exclude_ix, axis=0)
            N = class_ids.shape[0]
        # Resize masks to original image size and set boundary threshold.
        full_masks = []
        for i in range(N):
            # Convert neural network mask to full size mask
            full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)
            full_masks.append(full_mask)
        # With no surviving detections, return an empty [h, w, 0] mask stack.
        full_masks = np.stack(full_masks, axis=-1)\
            if full_masks else np.empty(masks.shape[1:3] + (0,))
        return boxes, class_ids, scores, full_masks
    def detect(self, images, verbose=0):
        """Runs the detection pipeline.

        images: List of images, potentially of different sizes.

        Returns a list of dicts, one dict per image. The dict contains:
        rois: [N, (y1, x1, y2, x2)] detection bounding boxes
        class_ids: [N] int class IDs
        scores: [N] float probability scores for the class IDs
        masks: [H, W, N] instance binary masks
        """
        assert self.mode == "inference", "Create model in inference mode."
        assert len(
            images) == self.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE"
        if verbose:
            log("Processing {} images".format(len(images)))
            for image in images:
                log("image", image)
        # Mold inputs to format expected by the neural network
        molded_images, image_metas, windows = self.mold_inputs(images)
        # Validate image sizes
        # All images in a batch MUST be of the same size
        image_shape = molded_images[0].shape
        for g in molded_images[1:]:
            assert g.shape == image_shape,\
                "After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes."
        # Anchors
        anchors = self.get_anchors(image_shape)
        # Duplicate across the batch dimension because Keras requires it
        # TODO: can this be optimized to avoid duplicating the anchors?
        anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
        if verbose:
            log("molded_images", molded_images)
            log("image_metas", image_metas)
            log("anchors", anchors)
        # Run object detection
        # NOTE(review): build() wires the inference model with FOUR inputs
        # (including input_depth_image), but only three are passed here —
        # this path looks broken for the depth variant; detectWdepth passes
        # all four. TODO confirm.
        detections, _, _, mrcnn_mask, _, _, _ =\
            self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
        # Process detections
        results = []
        for i, image in enumerate(images):
            final_rois, final_class_ids, final_scores, final_masks =\
                self.unmold_detections(detections[i], mrcnn_mask[i],
                                       image.shape, molded_images[i].shape,
                                       windows[i])
            results.append({
                "rois": final_rois,
                "class_ids": final_class_ids,
                "scores": final_scores,
                "masks": final_masks,
            })
        return results
    def detectWdepth(self, images, depthimages, verbose=0):
        """Runs the detection pipeline with RGB and depth inputs.

        images: List of images, potentially of different sizes.
        depthimages: List of 3-channel depth maps, one per image.

        Returns a list of dicts, one dict per image. The dict contains:
        rois: [N, (y1, x1, y2, x2)] detection bounding boxes
        class_ids: [N] int class IDs
        scores: [N] float probability scores for the class IDs
        masks: [H, W, N] instance binary masks
        """
        assert self.mode == "inference", "Create model in inference mode."
        assert len(
            images) == self.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE"
        if verbose:
            log("Processing {} images".format(len(images)))
            for image in images:
                log("image", image)
        # Mold inputs to format expected by the neural network.
        # Depth maps are resized/normalized the same way as RGB images.
        molded_images, image_metas, windows = self.mold_inputs(images)
        molded_depth_images, _, _ = self.mold_inputs(depthimages)
        # Validate image sizes
        # All images in a batch MUST be of the same size
        image_shape = molded_images[0].shape
        for g in molded_images[1:]:
            assert g.shape == image_shape,\
                "After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes."
        # Anchors
        anchors = self.get_anchors(image_shape)
        # Duplicate across the batch dimension because Keras requires it
        # TODO: can this be optimized to avoid duplicating the anchors?
        anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
        if verbose:
            log("molded_images", molded_images)
            log("molded_depth_images", molded_depth_images)
            log("image_metas", image_metas)
            log("anchors", anchors)
        # Run object detection (all four model inputs supplied).
        detections, _, _, mrcnn_mask, _, _, _ =\
            self.keras_model.predict([molded_images,molded_depth_images,image_metas, anchors], verbose=0)
        # Process detections
        results = []
        for i, image in enumerate(images):
            final_rois, final_class_ids, final_scores, final_masks =\
                self.unmold_detections(detections[i], mrcnn_mask[i],
                                       image.shape, molded_images[i].shape,
                                       windows[i])
            results.append({
                "rois": final_rois,
                "class_ids": final_class_ids,
                "scores": final_scores,
                "masks": final_masks,
            })
        return results
    def detect_molded(self, molded_images, depthimages, image_metas, verbose=0):
        """Runs the detection pipeline, but expect inputs that are
        molded already. Used mostly for debugging and inspecting
        the model.

        molded_images: List of images loaded using load_image_gt()
        depthimages: raw depth maps; molded in this method before prediction.
        image_metas: image meta data, also returned by load_image_gt()

        Returns a list of dicts, one dict per image. The dict contains:
        rois: [N, (y1, x1, y2, x2)] detection bounding boxes
        class_ids: [N] int class IDs
        scores: [N] float probability scores for the class IDs
        masks: [H, W, N] instance binary masks
        """
        assert self.mode == "inference", "Create model in inference mode."
        assert len(molded_images) == self.config.BATCH_SIZE,\
            "Number of images must be equal to BATCH_SIZE"
        if verbose:
            log("Processing {} images".format(len(molded_images)))
            for image in molded_images:
                log("image", image)
        # Validate image sizes
        # All images in a batch MUST be of the same size
        image_shape = molded_images[0].shape
        for g in molded_images[1:]:
            assert g.shape == image_shape, "Images must have the same size"
        # Anchors
        anchors = self.get_anchors(image_shape)
        # Duplicate across the batch dimension because Keras requires it
        # TODO: can this be optimized to avoid duplicating the anchors?
        anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
        if verbose:
            log("molded_images", molded_images)
            log("image_metas", image_metas)
            log("anchors", anchors)
        # Run object detection
        # NOTE(review): depth maps are molded here even though the RGB images
        # are assumed pre-molded — confirm callers pass RAW depth maps.
        molded_depth_images, _, _ = self.mold_inputs(depthimages)
        detections, _, _, mrcnn_mask, _, _, _ =\
            self.keras_model.predict([molded_images, molded_depth_images, image_metas, anchors], verbose=0)
        # Process detections
        results = []
        for i, image in enumerate(molded_images):
            # No padding window is available for pre-molded images, so the
            # whole image is treated as the window.
            window = [0, 0, image.shape[0], image.shape[1]]
            final_rois, final_class_ids, final_scores, final_masks =\
                self.unmold_detections(detections[i], mrcnn_mask[i],
                                       image.shape, molded_images[i].shape,
                                       window)
            results.append({
                "rois": final_rois,
                "class_ids": final_class_ids,
                "scores": final_scores,
                "masks": final_masks,
            })
        return results
def get_anchors(self, image_shape):
"""Returns anchor pyramid for the given image size."""
backbone_shapes = compute_backbone_shapes(self.config, image_shape)
# Cache anchors and reuse if image shape is the same
if not hasattr(self, "_anchor_cache"):
self._anchor_cache = {}
if not tuple(image_shape) in self._anchor_cache:
# Generate Anchors
a = utils.generate_pyramid_anchors(
self.config.RPN_ANCHOR_SCALES,
self.config.RPN_ANCHOR_RATIOS,
backbone_shapes,
self.config.BACKBONE_STRIDES,
self.config.RPN_ANCHOR_STRIDE)
# Keep a copy of the latest anchors in pixel coordinates because
# it's used in inspect_model notebooks.
# TODO: Remove this after the notebook are refactored to not use it
self.anchors = a
# Normalize coordinates
self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])
return self._anchor_cache[tuple(image_shape)]
def ancestor(self, tensor, name, checked=None):
"""Finds the ancestor of a TF tensor in the computation graph.
tensor: TensorFlow symbolic tensor.
name: Name of ancestor tensor to find
checked: For internal use. A list of tensors that were already
searched to avoid loops in traversing the graph.
"""
checked = checked if checked is not None else []
# Put a limit on how deep we go to avoid very long loops
if len(checked) > 500:
return None
# Convert name to a regex and allow matching a number prefix
# because Keras adds them automatically
if isinstance(name, str):
name = re.compile(name.replace("/", r"(\_\d+)*/"))
parents = tensor.op.inputs
for p in parents:
if p in checked:
continue
if bool(re.fullmatch(name, p.name)):
return p
checked.append(p)
a = self.ancestor(p, name, checked)
if a is not None:
return a
return None
def find_trainable_layer(self, layer):
"""If a layer is encapsulated by another layer, this function
digs through the encapsulation and returns the layer that holds
the weights.
"""
if layer.__class__.__name__ == 'TimeDistributed':
return self.find_trainable_layer(layer.layer)
return layer
def get_trainable_layers(self):
"""Returns a list of layers that have weights."""
layers = []
# Loop through all layers
for l in self.keras_model.layers:
# If layer is a wrapper, find inner trainable layer
l = self.find_trainable_layer(l)
# Include layer if it has weights
if l.get_weights():
layers.append(l)
return layers
    def run_graph(self, images, depthimages, outputs, image_metas=None):
        """Runs a sub-set of the computation graph that computes the given
        outputs.
        images: RGB input images (unmolded).
        depthimages: depth input images (unmolded).
        image_metas: If provided, the images are assumed to be already
        molded (i.e. resized, padded, and normalized)
        outputs: List of tuples (name, tensor) to compute. The tensors are
        symbolic TensorFlow tensors and the names are for easy tracking.
        Returns an ordered dict of results. Keys are the names received in the
        input and values are Numpy arrays.
        """
        model = self.keras_model
        # Organize desired outputs into an ordered dict
        outputs = OrderedDict(outputs)
        for o in outputs.values():
            assert o is not None
        # Build a Keras function to run parts of the computation graph
        inputs = model.inputs
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            inputs += [K.learning_phase()]
        # NOTE(review): `inputs` (with the learning phase appended) is built
        # above but `K.function` is given `model.inputs` — confirm whether
        # `inputs` was intended here.
        kf = K.function(model.inputs, list(outputs.values()))
        ##########################
        #
        ##########################
        # Mold inputs to format expected by the neural network
        molded_images, image_metas, _ = self.mold_inputs(images)
        molded_depth_images, _, _ = self.mold_inputs(depthimages)
        # Validate image sizes
        # All images in a batch MUST be of the same size
        image_shape = molded_images[0].shape
        for g in molded_images[1:]:
            assert g.shape == image_shape,\
                "After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes."
        # Anchors
        anchors = self.get_anchors(image_shape)
        # Duplicate across the batch dimension because Keras requires it
        # TODO: can this be optimized to avoid duplicating the anchors?
        anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
        ##########################
        # Prepare inputs
        ##########################
        #
        # if image_metas is None:
        #     molded_images, image_metas, _ = self.mold_inputs(images)
        # else:
        #     molded_images = images
        # print("Test:", molded_images.shape)
        # image_shape = molded_images[0].shape
        # # Anchors
        # anchors = self.get_anchors(image_shape)
        # # Duplicate across the batch dimension because Keras requires it
        # # TODO: can this be optimized to avoid duplicating the anchors?
        # anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
        ##########################
        model_in = [molded_images, molded_depth_images, image_metas, anchors]
        # Run inference
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            # 0. selects the inference (non-training) learning phase.
            model_in.append(0.)
        outputs_np = kf(model_in)
        # Pack the generated Numpy arrays into a dict and log the results.
        outputs_np = OrderedDict([(k, v) for k, v in zip(outputs.keys(), outputs_np)])
        for k, v in outputs_np.items():
            log(k, v)
        return outputs_np
############################################################
# Data Formatting
############################################################
def compose_image_meta(image_id, original_image_shape, image_shape,
                       window, scale, active_class_ids):
    """Pack per-image attributes into a single 1D numpy array.
    image_id: An int ID of the image. Useful for debugging.
    original_image_shape: [H, W, C] before resizing or padding.
    image_shape: [H, W, C] after resizing and padding
    window: (y1, x1, y2, x2) in pixels. The area of the image where the real
        image is (excluding the padding)
    scale: The scaling factor applied to the original image (float32)
    active_class_ids: List of class_ids available in the dataset from which
        the image came. Useful if training on images from multiple datasets
        where not all classes are present in all datasets.
    """
    parts = [image_id]                     # size=1
    parts.extend(original_image_shape)     # size=3
    parts.extend(image_shape)              # size=3
    parts.extend(window)                   # size=4 (y1, x1, y2, x2) in image coordinates
    parts.append(scale)                    # size=1
    parts.extend(active_class_ids)         # size=num_classes
    return np.array(parts)
def parse_image_meta(meta):
    """Split a batch of packed image-meta arrays into named components.
    See compose_image_meta() for more details.
    meta: [batch, meta length] where meta length depends on NUM_CLASSES
    Returns a dict of the parsed numpy arrays.
    """
    raw = {
        "image_id": meta[:, 0],
        "original_image_shape": meta[:, 1:4],
        "image_shape": meta[:, 4:7],
        # (y1, x1, y2, x2) window of the image in pixels
        "window": meta[:, 7:11],
        "scale": meta[:, 11],
        "active_class_ids": meta[:, 12:],
    }
    # Everything is an integer quantity except the scale factor.
    parsed = {}
    for key, value in raw.items():
        dtype = np.float32 if key == "scale" else np.int32
        parsed[key] = value.astype(dtype)
    return parsed
def parse_image_meta_graph(meta):
    """Tensor counterpart of parse_image_meta(): slices a packed image-meta
    tensor into its named components (no dtype casting).
    See compose_image_meta() for more details.
    meta: [batch, meta length] where meta length depends on NUM_CLASSES
    Returns a dict of the parsed tensors.
    """
    return {
        "image_id": meta[:, 0],
        "original_image_shape": meta[:, 1:4],
        "image_shape": meta[:, 4:7],
        "window": meta[:, 7:11],  # (y1, x1, y2, x2) window of image in pixels
        "scale": meta[:, 11],
        "active_class_ids": meta[:, 12:],
    }
def mold_image(images, config):
"""Expects an RGB image (or array of images) and subtraces
the mean pixel and converts it to float. Expects image
colors in RGB order.
"""
return images.astype(np.float32) - config.MEAN_PIXEL
def unmold_image(normalized_images, config):
    """Takes a image normalized with mold() and returns the original."""
    # Add back the mean pixel, then restore the original uint8 dtype.
    restored = normalized_images + config.MEAN_PIXEL
    return restored.astype(np.uint8)
############################################################
#  Miscellaneous Graph Functions
############################################################
def trim_zeros_graph(boxes, name=None):
    """Often boxes are represented with matrices of shape [N, 4] and
    are padded with zeros. This removes zero boxes.
    boxes: [N, 4] matrix of boxes.
    Returns:
        boxes: the input with all-zero rows removed.
        non_zeros: [N] a 1D boolean mask identifying the rows to keep
    """
    # A row is kept when the sum of its absolute coordinates is non-zero.
    non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)
    boxes = tf.boolean_mask(boxes, non_zeros, name=name)
    return boxes, non_zeros
def batch_pack_graph(x, counts, num_rows):
    """Picks different number of values from each row
    in x depending on the values in counts.
    Returns a single 1D concatenation of the selected values.
    """
    outputs = []
    for i in range(num_rows):
        # Take the first counts[i] values of row i.
        outputs.append(x[i, :counts[i]])
    return tf.concat(outputs, axis=0)
def norm_boxes_graph(boxes, shape):
    """Converts boxes from pixel coordinates to normalized coordinates.
    boxes: [..., (y1, x1, y2, x2)] in pixel coordinates
    shape: [..., (height, width)] in pixels
    Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
    coordinates it's inside the box.
    Returns:
        [..., (y1, x1, y2, x2)] in normalized coordinates
    """
    h, w = tf.split(tf.cast(shape, tf.float32), 2)
    # Divide by (dim - 1) and shift (y2, x2) inward by one pixel so the
    # bottom-right corner lands inside the box in normalized coordinates.
    scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
    shift = tf.constant([0., 0., 1., 1.])
    return tf.divide(boxes - shift, scale)
def denorm_boxes_graph(boxes, shape):
    """Converts boxes from normalized coordinates to pixel coordinates.
    boxes: [..., (y1, x1, y2, x2)] in normalized coordinates
    shape: [..., (height, width)] in pixels
    Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
    coordinates it's inside the box.
    Returns:
        [..., (y1, x1, y2, x2)] in pixel coordinates
    """
    h, w = tf.split(tf.cast(shape, tf.float32), 2)
    # Inverse of norm_boxes_graph: rescale by (dim - 1), shift (y2, x2)
    # back outward by one pixel, then round to integer pixel coordinates.
    scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
    shift = tf.constant([0., 0., 1., 1.])
    return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)
| 44.181732 | 146 | 0.612002 |
4bcb57736c6b36750d284ac676787b92aaf29ec2 | 3,868 | py | Python | yearn/v2/strategies.py | pmdaly/yearn-exporter | d1e7697f8bf12cdb1126ea86fa350a26aea23cf8 | [
"MIT"
] | 1 | 2022-03-29T01:04:27.000Z | 2022-03-29T01:04:27.000Z | yearn/v2/strategies.py | pmdaly/yearn-exporter | d1e7697f8bf12cdb1126ea86fa350a26aea23cf8 | [
"MIT"
] | null | null | null | yearn/v2/strategies.py | pmdaly/yearn-exporter | d1e7697f8bf12cdb1126ea86fa350a26aea23cf8 | [
"MIT"
] | 1 | 2022-03-24T20:54:50.000Z | 2022-03-24T20:54:50.000Z | import logging
import threading
import time
from typing import List
from eth_utils import encode_hex, event_abi_to_log_topic
from yearn.decorators import sentry_catch_all, wait_or_exit_after
from yearn.events import create_filter, decode_logs
from yearn.multicall2 import fetch_multicall
from yearn.utils import contract, safe_views
# Strategy view methods whose raw integer results are divided by the
# vault's token scale in Strategy.describe().
STRATEGY_VIEWS_SCALED = [
    "maxDebtPerHarvest",
    "minDebtPerHarvest",
    "totalDebt",
    "totalGain",
    "totalLoss",
    "estimatedTotalAssets",
    "lentTotalAssets",
    "balanceOfPool",
    "balanceOfWant",
]
# Contract event names watched and decoded by Strategy.watch_events().
STRATEGY_EVENTS = ["Harvested"]
logger = logging.getLogger(__name__)
class Strategy:
    """Wraps a Yearn v2 strategy contract and lazily tracks its Harvested
    events on a daemon background thread.
    """
    def __init__(self, strategy, vault, watch_events_forever):
        self.strategy = contract(strategy)
        self.vault = vault
        try:
            self.name = self.strategy.name()
        except ValueError:
            # Some strategies don't expose name(); fall back to a short
            # prefix of the address.
            self.name = strategy[:10]
        self._views = safe_views(self.strategy.abi)
        self._harvests = []
        # Log topics for the events listed in STRATEGY_EVENTS, used to
        # filter this strategy's logs.
        self._topics = [
            [
                encode_hex(event_abi_to_log_topic(event))
                for event in self.strategy.abi
                if event["type"] == "event" and event["name"] in STRATEGY_EVENTS
            ]
        ]
        self._watch_events_forever = watch_events_forever
        # Set once the initial batch of logs has been processed.
        self._done = threading.Event()
        self._has_exception = False
        # Background watcher thread; started lazily by load_harvests().
        self._thread = threading.Thread(target=self.watch_events, daemon=True)
    @property
    def unique_name(self):
        """Strategy name, disambiguated with an address prefix when the
        vault has several strategies sharing the same name.
        """
        if [strategy.name for strategy in self.vault.strategies].count(self.name) > 1:
            return f'{self.name} {str(self.strategy)[:8]}'
        else:
            return self.name
    def __repr__(self) -> str:
        return f"<Strategy {self.strategy} name={self.name}>"
    def __eq__(self, other):
        # Comparable against another Strategy or a raw address string.
        if isinstance(other, Strategy):
            return self.strategy == other.strategy
        if isinstance(other, str):
            return self.strategy == other
        raise ValueError("Strategy is only comparable with [Strategy, str]")
    @sentry_catch_all
    def watch_events(self):
        """Poll the chain for this strategy's events and record harvests.

        Runs on the background thread. Loops every 300s when
        _watch_events_forever is set, otherwise returns after the first pass.
        """
        start = time.time()
        self.log_filter = create_filter(str(self.strategy), topics=self._topics)
        logs = self.log_filter.get_all_entries()
        while True:
            events = decode_logs(logs)
            self.process_events(events)
            if not self._done.is_set():
                # First pass complete: unblock load_harvests() waiters.
                self._done.set()
                logger.info("loaded %d harvests %s in %.3fs", len(self._harvests), self.name, time.time() - start)
            if not self._watch_events_forever:
                return
            time.sleep(300)
            # read new logs at end of loop
            logs = self.log_filter.get_new_entries()
    def process_events(self, events):
        """Record the block number of every Harvested event."""
        for event in events:
            if event.name == "Harvested":
                block = event.block_number
                logger.debug("%s harvested on %d", self.name, block)
                self._harvests.append(block)
    @wait_or_exit_after
    def load_harvests(self):
        """Start the watcher thread (at most once); the decorator waits for
        the initial load or exits on watcher failure.
        """
        if not self._thread._started.is_set():
            self._thread.start()
    @property
    def harvests(self) -> List[int]:
        # Block numbers at which this strategy harvested (loaded lazily).
        self.load_harvests()
        return self._harvests
    def describe(self, block=None):
        """Fetch the strategy's view methods plus the vault's strategy
        params in one multicall; return them as a dict, scaling the values
        named in STRATEGY_VIEWS_SCALED by the vault's token scale.
        """
        results = fetch_multicall(
            *[[self.strategy, view] for view in self._views],
            [self.vault.vault, "strategies", self.strategy],
            block=block,
        )
        info = dict(zip(self._views, results))
        info.update(results[-1].dict())
        for view in STRATEGY_VIEWS_SCALED:
            if view in info:
                info[view] = (info[view] or 0) / self.vault.scale
        # unwrap structs
        for view in info:
            if hasattr(info[view], '_dict'):
                info[view] = info[view].dict()
        return info
| 31.447154 | 114 | 0.608325 |
d5d17e7cc717db37ce886a73a35284c7bafc8640 | 608 | py | Python | sample_data/__init__.py | Mrhsk/cpsc2021 | dcb7fb23edf7df79549279d053e4a8cadab3b268 | [
"MIT"
] | 1 | 2021-12-21T11:59:57.000Z | 2021-12-21T11:59:57.000Z | sample_data/__init__.py | Mrhsk/cpsc2021 | dcb7fb23edf7df79549279d053e4a8cadab3b268 | [
"MIT"
] | null | null | null | sample_data/__init__.py | Mrhsk/cpsc2021 | dcb7fb23edf7df79549279d053e4a8cadab3b268 | [
"MIT"
] | null | null | null | """
"""
import os, zipfile, glob
# Repository root: two directory levels above this package.
_BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Scratch directory into which the sample archive is extracted.
_WORK_DIR = os.path.join(_BASE_DIR, "working_dir")
_SAMPLE_DATA_DIR = os.path.join(_WORK_DIR, "sample_data")
__all__ = ["extract_sample_data_if_needed",]
def extract_sample_data_if_needed():
    """Extract the bundled sample_data.zip into the working directory,
    unless extracted .dat files are already present.
    """
    # Already extracted: at least one .dat file exists in the target dir.
    if os.path.exists(_SAMPLE_DATA_DIR) and len(glob.glob(os.path.join(_SAMPLE_DATA_DIR, "*.dat"))) > 0:
        return
    os.makedirs(_SAMPLE_DATA_DIR, exist_ok=True)
    # Context manager ensures the archive is closed even if extraction fails.
    with zipfile.ZipFile(os.path.join(_SAMPLE_DATA_DIR, "sample_data.zip")) as zf:
        zf.extractall(_WORK_DIR)
ed9e83131c1f6e49401f4e791faa1bf1631e4ba4 | 5,849 | py | Python | ibis/backends/pandas/udf.py | rtpsw/ibis | d7318fdf87121cd8fadbcf0369a2b217aab3053a | [
"Apache-2.0"
] | 1 | 2022-03-22T10:39:37.000Z | 2022-03-22T10:39:37.000Z | ibis/backends/pandas/udf.py | marlenezw/ibis | 14b9baf3e1021e8698e7f0ae3c0ae5747543431c | [
"Apache-2.0"
] | null | null | null | ibis/backends/pandas/udf.py | marlenezw/ibis | 14b9baf3e1021e8698e7f0ae3c0ae5747543431c | [
"Apache-2.0"
] | null | null | null | """APIs for creating user-defined element-wise, reduction and analytic
functions.
"""
import itertools
from typing import Tuple
import pandas as pd
from pandas.core.groupby import SeriesGroupBy
import ibis.expr.operations as ops
import ibis.udf.vectorized
from ibis.backends.base import BaseBackend
from ibis.backends.pandas.aggcontext import Transform
from ibis.backends.pandas.dispatch import execute_node, pre_execute
def create_gens_from_args_groupby(args: Tuple[SeriesGroupBy]):
    """Create generators for each args for groupby udaf.

    Each returned generator yields successive groups of data; the group
    keys are discarded.

    Parameters
    ----------
    args : Tuple[SeriesGroupBy...]

    Returns
    -------
    Tuple[Generator]
    """
    def _groups_of(grouped):
        # A SeriesGroupBy iterates as (key, data) pairs; keep data only.
        for _, data in grouped:
            yield data

    return (_groups_of(arg) for arg in args)
class udf:
    """Namespace of convenience aliases for the vectorized UDF decorators."""
    @staticmethod
    def elementwise(input_type, output_type):
        """Alias for ibis.udf.vectorized.elementwise."""
        return ibis.udf.vectorized.elementwise(input_type, output_type)
    @staticmethod
    def reduction(input_type, output_type):
        """Alias for ibis.udf.vectorized.reduction."""
        return ibis.udf.vectorized.reduction(input_type, output_type)
    @staticmethod
    def analytic(input_type, output_type):
        """Alias for ibis.udf.vectorized.analytic."""
        return ibis.udf.vectorized.analytic(input_type, output_type)
@pre_execute.register(ops.ElementWiseVectorizedUDF)
@pre_execute.register(ops.ElementWiseVectorizedUDF, BaseBackend)
def pre_execute_elementwise_udf(op, *clients, scope=None, **kwargs):
    """Register execution rules for elementwise UDFs.

    Registration happens as a side effect on the execute_node dispatcher;
    the rules are specialized to this op's arity (number of inputs).
    """
    input_type = op.input_type
    # definitions
    # Define an execution rule for elementwise operations on a
    # grouped Series
    nargs = len(input_type)
    @execute_node.register(
        ops.ElementWiseVectorizedUDF, *(itertools.repeat(SeriesGroupBy, nargs))
    )
    def execute_udf_node_groupby(op, *args, **kwargs):
        # Apply the UDF on grouped columns; the result is regrouped below.
        func = op.func
        groupers = [
            grouper
            for grouper in (getattr(arg, 'grouper', None) for arg in args)
            if grouper is not None
        ]
        # all grouping keys must be identical
        assert all(groupers[0] == grouper for grouper in groupers[1:])
        # we're performing a scalar operation on grouped column, so
        # perform the operation directly on the underlying Series
        # and regroup after it's finished
        args = [getattr(arg, 'obj', arg) for arg in args]
        groupings = groupers[0].groupings
        return func(*args).groupby(groupings)
    # Define an execution rule for a simple elementwise Series
    # function
    @execute_node.register(
        ops.ElementWiseVectorizedUDF, *(itertools.repeat(pd.Series, nargs))
    )
    @execute_node.register(
        ops.ElementWiseVectorizedUDF, *(itertools.repeat(object, nargs))
    )
    def execute_udf_node(op, *args, **kwargs):
        # We have rewritten op.func to be a closure enclosing
        # the kwargs, and therefore, we do not need to pass
        # kwargs here. This is true for all udf execution in this
        # file.
        # See ibis.udf.vectorized.UserDefinedFunction
        return op.func(*args)
    return scope
@pre_execute.register(ops.AnalyticVectorizedUDF)
@pre_execute.register(ops.AnalyticVectorizedUDF, BaseBackend)
@pre_execute.register(ops.ReductionVectorizedUDF)
@pre_execute.register(ops.ReductionVectorizedUDF, BaseBackend)
def pre_execute_analytic_and_reduction_udf(op, *clients, scope=None, **kwargs):
    """Register execution rules for analytic and reduction (UDAF) UDFs.

    Registration happens as a side effect on the execute_node dispatcher;
    the rules are specialized to this op's arity (number of inputs).
    """
    input_type = op.input_type
    nargs = len(input_type)
    # An execution rule to handle analytic and reduction UDFs over
    # 1) an ungrouped window,
    # 2) an ungrouped Aggregate node, or
    # 3) an ungrouped custom aggregation context
    @execute_node.register(type(op), *(itertools.repeat(pd.Series, nargs)))
    def execute_udaf_node_no_groupby(op, *args, aggcontext, **kwargs):
        func = op.func
        return aggcontext.agg(args[0], func, *args[1:])
    # An execution rule to handle analytic and reduction UDFs over
    # 1) a grouped window,
    # 2) a grouped Aggregate node, or
    # 3) a grouped custom aggregation context
    @execute_node.register(type(op), *(itertools.repeat(SeriesGroupBy, nargs)))
    def execute_udaf_node_groupby(op, *args, aggcontext, **kwargs):
        func = op.func
        if isinstance(aggcontext, Transform):
            # We are aggregating over an unbounded (and GROUPED) window,
            # which uses a Transform aggregation context.
            # We need to do some pre-processing to func and args so that
            # Transform can pull data out of the SeriesGroupBys in args.
            # Construct a generator that yields the next group of data
            # for every argument excluding the first (pandas performs
            # the iteration for the first argument) for each argument
            # that is a SeriesGroupBy.
            iters = create_gens_from_args_groupby(args[1:])
            # TODO: Unify calling convension here to be more like
            # window
            def aggregator(first, *rest):
                # map(next, *rest) gets the inputs for the next group
                # TODO: might be inefficient to do this on every call
                return func(first, *map(next, rest))
            return aggcontext.agg(args[0], aggregator, *iters)
        else:
            # We are either:
            # 1) Aggregating over a bounded window, which uses a Window
            #     aggregation context
            # 2) Aggregating over a custom aggregation context
            # 3) Aggregating using an Aggregate node (with GROUPING), which
            #     uses a Summarize aggregation context
            # No pre-processing to be done for any case.
            return aggcontext.agg(args[0], func, *args[1:])
    return scope
| 36.329193 | 79 | 0.674816 |
a0c592924d0bd4a7f34f9bb378e442524c16cf2a | 85,919 | py | Python | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/account/models/account_move.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | 1 | 2019-12-19T01:53:13.000Z | 2019-12-19T01:53:13.000Z | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/account/models/account_move.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | null | null | null | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/account/models/account_move.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import time
from collections import OrderedDict
from odoo import api, fields, models, _
from odoo.osv import expression
from odoo.exceptions import RedirectWarning, UserError, ValidationError
from odoo.tools.misc import formatLang
from odoo.tools import float_is_zero, float_compare
from odoo.tools.safe_eval import safe_eval
import odoo.addons.decimal_precision as dp
from lxml import etree
#----------------------------------------------------------
# Entries
#----------------------------------------------------------
class AccountMove(models.Model):
    """Journal entry: a dated, balanced collection of journal items."""
    _name = "account.move"
    _description = "Account Entry"
    _order = 'date desc, id desc'
    @api.multi
    @api.depends('name', 'state')
    def name_get(self):
        """Show '* <id>' for draft entries, the assigned number otherwise."""
        result = []
        for move in self:
            if move.state == 'draft':
                name = '* ' + str(move.id)
            else:
                name = move.name
            result.append((move.id, name))
        return result
    @api.multi
    @api.depends('line_ids.debit', 'line_ids.credit')
    def _amount_compute(self):
        """Total amount of the move: the sum of its lines' debits."""
        for move in self:
            total = 0.0
            for line in move.line_ids:
                total += line.debit
            move.amount = total
    @api.depends('line_ids.debit', 'line_ids.credit', 'line_ids.matched_debit_ids.amount', 'line_ids.matched_credit_ids.amount', 'line_ids.account_id.user_type_id.type')
    def _compute_matched_percentage(self):
        """Compute the percentage to apply for cash basis method. This value is relevant only for moves that
        involve journal items on receivable or payable accounts.
        """
        for move in self:
            total_amount = 0.0
            total_reconciled = 0.0
            for line in move.line_ids:
                if line.account_id.user_type_id.type in ('receivable', 'payable'):
                    amount = abs(line.debit - line.credit)
                    total_amount += amount
                    for partial_line in (line.matched_debit_ids + line.matched_credit_ids):
                        total_reconciled += partial_line.amount
            if float_is_zero(total_amount, precision_rounding=move.currency_id.rounding):
                # No receivable/payable amount: treat the move as fully matched.
                move.matched_percentage = 1.0
            else:
                move.matched_percentage = total_reconciled / total_amount
    @api.one
    @api.depends('company_id')
    def _compute_currency(self):
        # Company currency, falling back to the current user's company.
        self.currency_id = self.company_id.currency_id or self.env.user.company_id.currency_id
    @api.multi
    def _get_default_journal(self):
        """Default journal: first journal of the type given in context."""
        if self.env.context.get('default_journal_type'):
            return self.env['account.journal'].search([('type', '=', self.env.context['default_journal_type'])], limit=1).id
    @api.multi
    @api.depends('line_ids.partner_id')
    def _compute_partner_id(self):
        """Partner of the move when all its lines agree on one, else empty."""
        for move in self:
            partner = move.line_ids.mapped('partner_id')
            move.partner_id = partner.id if len(partner) == 1 else False
    name = fields.Char(string='Number', required=True, copy=False, default='/')
    ref = fields.Char(string='Reference', copy=False)
    date = fields.Date(required=True, states={'posted': [('readonly', True)]}, index=True, default=fields.Date.context_today)
    journal_id = fields.Many2one('account.journal', string='Journal', required=True, states={'posted': [('readonly', True)]}, default=_get_default_journal)
    currency_id = fields.Many2one('res.currency', compute='_compute_currency', store=True, string="Currency")
    state = fields.Selection([('draft', 'Unposted'), ('posted', 'Posted')], string='Status',
      required=True, readonly=True, copy=False, default='draft',
      help='All manually created new journal entries are usually in the status \'Unposted\', '
           'but you can set the option to skip that status on the related journal. '
           'In that case, they will behave as journal entries automatically created by the '
           'system on document validation (invoices, bank statements...) and will be created '
           'in \'Posted\' status.')
    line_ids = fields.One2many('account.move.line', 'move_id', string='Journal Items',
        states={'posted': [('readonly', True)]}, copy=True)
    partner_id = fields.Many2one('res.partner', compute='_compute_partner_id', string="Partner", store=True, readonly=True)
    amount = fields.Monetary(compute='_amount_compute', store=True)
    narration = fields.Text(string='Internal Note')
    company_id = fields.Many2one('res.company', related='journal_id.company_id', string='Company', store=True, readonly=True,
        default=lambda self: self.env.user.company_id)
    matched_percentage = fields.Float('Percentage Matched', compute='_compute_matched_percentage', digits=0, store=True, readonly=True, help="Technical field used in cash basis method")
    statement_line_id = fields.Many2one('account.bank.statement.line', index=True, string='Bank statement line reconciled with this entry', copy=False, readonly=True)
    # Dummy Account field to search on account.move by account_id
    dummy_account_id = fields.Many2one('account.account', related='line_ids.account_id', string='Account', store=False)
    @api.model
    def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
        """Restrict the tax_line_id domain when a 'vat_domain' tag reference
        is passed through the context.
        """
        res = super(AccountMove, self).fields_view_get(
            view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
        if self._context.get('vat_domain'):
            res['fields']['line_ids']['views']['tree']['fields']['tax_line_id']['domain'] = [('tag_ids', 'in', [self.env.ref(self._context.get('vat_domain')).id])]
        return res
    @api.model
    def create(self, vals):
        """Create the move with per-line validity checks deferred, then
        assert the whole entry balances.
        """
        move = super(AccountMove, self.with_context(check_move_validity=False, partner_id=vals.get('partner_id'))).create(vals)
        move.assert_balanced()
        return move
    @api.multi
    def write(self, vals):
        """Write, re-asserting balance only when the lines are modified."""
        if 'line_ids' in vals:
            res = super(AccountMove, self.with_context(check_move_validity=False)).write(vals)
            self.assert_balanced()
        else:
            res = super(AccountMove, self).write(vals)
        return res
    @api.multi
    def post(self):
        """Validate the moves, create analytic lines, assign a sequence
        number to unnamed moves and set their state to 'posted'.
        """
        invoice = self._context.get('invoice', False)
        self._post_validate()
        for move in self:
            move.line_ids.create_analytic_lines()
            if move.name == '/':
                new_name = False
                journal = move.journal_id
                if invoice and invoice.move_name and invoice.move_name != '/':
                    # Reuse the number reserved on the invoice (e.g. when
                    # re-posting a cancelled invoice).
                    new_name = invoice.move_name
                else:
                    if journal.sequence_id:
                        # If invoice is actually refund and journal has a refund_sequence then use that one or use the regular one
                        sequence = journal.sequence_id
                        if invoice and invoice.type in ['out_refund', 'in_refund'] and journal.refund_sequence:
                            if not journal.refund_sequence_id:
                                raise UserError(_('Please define a sequence for the refunds'))
                            sequence = journal.refund_sequence_id
                        new_name = sequence.with_context(ir_sequence_date=move.date).next_by_id()
                    else:
                        raise UserError(_('Please define a sequence on the journal.'))
                if new_name:
                    move.name = new_name
        return self.write({'state': 'posted'})
    @api.multi
    def button_cancel(self):
        """Set posted entries back to draft, if their journal allows it and
        the lock date permits.
        """
        for move in self:
            if not move.journal_id.update_posted:
                raise UserError(_('You cannot modify a posted entry of this journal.\nFirst you should set the journal to allow cancelling entries.'))
        if self.ids:
            self._check_lock_date()
            # Direct SQL update; cache is invalidated just below.
            self._cr.execute('UPDATE account_move '\
                       'SET state=%s '\
                       'WHERE id IN %s', ('draft', tuple(self.ids),))
            self.invalidate_cache()
        self._check_lock_date()
        return True
    @api.multi
    def unlink(self):
        """Delete the moves after the lock-date/reconciliation checks on
        their lines pass.
        """
        for move in self:
            #check the lock date + check if some entries are reconciled
            move.line_ids._update_check()
            move.line_ids.unlink()
        return super(AccountMove, self).unlink()
    @api.multi
    def _post_validate(self):
        """Validate company consistency, balance and lock date before post."""
        for move in self:
            if move.line_ids:
                if not all([x.company_id.id == move.company_id.id for x in move.line_ids]):
                    raise UserError(_("Cannot create moves for different companies."))
        self.assert_balanced()
        return self._check_lock_date()
    @api.multi
    def _check_lock_date(self):
        """Raise if any move is dated on or before the applicable lock date.

        Advisers are only bound by the fiscal-year lock date; other users
        are also bound by the period lock date.
        """
        for move in self:
            lock_date = max(move.company_id.period_lock_date, move.company_id.fiscalyear_lock_date)
            if self.user_has_groups('account.group_account_manager'):
                lock_date = move.company_id.fiscalyear_lock_date
            if move.date <= lock_date:
                if self.user_has_groups('account.group_account_manager'):
                    message = _("You cannot add/modify entries prior to and inclusive of the lock date %s") % (lock_date)
                else:
                    message = _("You cannot add/modify entries prior to and inclusive of the lock date %s. Check the company settings or ask someone with the 'Adviser' role") % (lock_date)
                raise UserError(message)
        return True
    @api.multi
    def assert_balanced(self):
        """Raise unless every move's debits and credits sum to zero within
        the account decimal precision.
        """
        if not self.ids:
            return True
        prec = self.env['decimal.precision'].precision_get('Account')
        self._cr.execute("""\
            SELECT      move_id
            FROM        account_move_line
            WHERE       move_id in %s
            GROUP BY    move_id
            HAVING      abs(sum(debit) - sum(credit)) > %s
            """, (tuple(self.ids), 10 ** (-max(5, prec))))
        if len(self._cr.fetchall()) != 0:
            raise UserError(_("Cannot create unbalanced journal entry."))
        return True
    @api.multi
    def _reverse_move(self, date=None, journal_id=None):
        """Copy the move with debit/credit (and currency amount) swapped.

        :param date: date of the reversal move (defaults to the copy's).
        :param journal_id: optional journal record for the reversal.
        :return: the new (unposted) reversal move.
        """
        self.ensure_one()
        reversed_move = self.copy(default={
            'date': date,
            'journal_id': journal_id.id if journal_id else self.journal_id.id,
            'ref': _('reversal of: ') + self.name})
        for acm_line in reversed_move.line_ids.with_context(check_move_validity=False):
            acm_line.write({
                'debit': acm_line.credit,
                'credit': acm_line.debit,
                'amount_currency': -acm_line.amount_currency
            })
        return reversed_move
    @api.multi
    def reverse_moves(self, date=None, journal_id=None):
        """Create, validate and post a reversal for each move in the set.

        :return: list of ids of the created reversal moves.
        """
        date = date or fields.Date.today()
        reversed_moves = self.env['account.move']
        for ac_move in self:
            reversed_move = ac_move._reverse_move(date=date,
                                                  journal_id=journal_id)
            reversed_moves |= reversed_move
        if reversed_moves:
            reversed_moves._post_validate()
            reversed_moves.post()
            return [x.id for x in reversed_moves]
        return []
    @api.multi
    def open_reconcile_view(self):
        """Open the reconciliation view for this move's lines."""
        return self.line_ids.open_reconcile_view()
class AccountMoveLine(models.Model):
_name = "account.move.line"
_description = "Journal Item"
_order = "date desc, id desc"
@api.model_cr
def init(self):
""" change index on partner_id to a multi-column index on (partner_id, ref), the new index will behave in the
same way when we search on partner_id, with the addition of being optimal when having a query that will
search on partner_id and ref at the same time (which is the case when we open the bank reconciliation widget)
"""
cr = self._cr
cr.execute('DROP INDEX IF EXISTS account_move_line_partner_id_index')
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('account_move_line_partner_id_ref_idx',))
if not cr.fetchone():
cr.execute('CREATE INDEX account_move_line_partner_id_ref_idx ON account_move_line (partner_id, ref)')
@api.depends('debit', 'credit', 'amount_currency', 'currency_id', 'matched_debit_ids', 'matched_credit_ids', 'matched_debit_ids.amount', 'matched_credit_ids.amount', 'account_id.currency_id', 'move_id.state')
    def _amount_residual(self):
        """ Computes the residual amount of a move line from a reconciliable account in the company currency and the line's currency.
            This amount will be 0 for fully reconciled lines or lines from a non-reconciliable account, the original line amount
            for unreconciled lines, and something in-between for partially reconciled lines.
        """
        for line in self:
            # Lines on non-reconciliable accounts never carry a residual.
            if not line.account_id.reconcile:
                line.reconciled = False
                line.amount_residual = 0
                line.amount_residual_currency = 0
                continue
            #amounts in the partial reconcile table aren't signed, so we need to use abs()
            amount = abs(line.debit - line.credit)
            amount_residual_currency = abs(line.amount_currency) or 0.0
            sign = 1 if (line.debit - line.credit) > 0 else -1
            if not line.debit and not line.credit and line.amount_currency and line.currency_id:
                #residual for exchange rate entries
                sign = 1 if float_compare(line.amount_currency, 0, precision_rounding=line.currency_id.rounding) == 1 else -1
            for partial_line in (line.matched_debit_ids + line.matched_credit_ids):
                # If line is a credit (sign = -1) we:
                #  - subtract matched_debit_ids (partial_line.credit_move_id == line)
                #  - add matched_credit_ids (partial_line.credit_move_id != line)
                # If line is a debit (sign = 1), do the opposite.
                sign_partial_line = sign if partial_line.credit_move_id == line else (-1 * sign)
                amount += sign_partial_line * partial_line.amount
                #getting the date of the matched item to compute the amount_residual in currency
                if line.currency_id:
                    if partial_line.currency_id and partial_line.currency_id == line.currency_id:
                        amount_residual_currency += sign_partial_line * partial_line.amount_currency
                    else:
                        # Partial is in another currency (or none): convert its company-currency
                        # amount into the line's currency, preferring the line's own implicit
                        # rate (amount_currency / balance) when available.
                        if line.balance and line.amount_currency:
                            rate = line.amount_currency / line.balance
                        else:
                            # Use the rate at the date of the matched counterpart line.
                            date = partial_line.credit_move_id.date if partial_line.debit_move_id == line else partial_line.debit_move_id.date
                            rate = line.currency_id.with_context(date=date).rate
                        amount_residual_currency += sign_partial_line * line.currency_id.round(partial_line.amount * rate)
            #computing the `reconciled` field. As we book exchange rate difference on each partial matching,
            #we can only check the amount in company currency
            reconciled = False
            digits_rounding_precision = line.company_id.currency_id.rounding
            if float_is_zero(amount, precision_rounding=digits_rounding_precision):
                # For multi-currency lines the currency residual must be zero too.
                if line.currency_id and line.amount_currency:
                    if float_is_zero(amount_residual_currency, precision_rounding=line.currency_id.rounding):
                        reconciled = True
                else:
                    reconciled = True
            line.reconciled = reconciled
            line.amount_residual = line.company_id.currency_id.round(amount * sign)
            line.amount_residual_currency = line.currency_id and line.currency_id.round(amount_residual_currency * sign) or 0.0
@api.depends('debit', 'credit')
def _store_balance(self):
for line in self:
line.balance = line.debit - line.credit
@api.model
def _get_currency(self):
currency = False
context = self._context or {}
if context.get('default_journal_id', False):
currency = self.env['account.journal'].browse(context['default_journal_id']).currency_id
return currency
@api.depends('debit', 'credit', 'move_id.matched_percentage', 'move_id.journal_id')
def _compute_cash_basis(self):
for move_line in self:
if move_line.journal_id.type in ('sale', 'purchase'):
move_line.debit_cash_basis = move_line.debit * move_line.move_id.matched_percentage
move_line.credit_cash_basis = move_line.credit * move_line.move_id.matched_percentage
else:
move_line.debit_cash_basis = move_line.debit
move_line.credit_cash_basis = move_line.credit
move_line.balance_cash_basis = move_line.debit_cash_basis - move_line.credit_cash_basis
@api.one
@api.depends('move_id.line_ids')
def _get_counterpart(self):
counterpart = set()
for line in self.move_id.line_ids:
if (line.account_id.code != self.account_id.code):
counterpart.add(line.account_id.code)
if len(counterpart) > 2:
counterpart = list(counterpart)[0:2] + ["..."]
self.counterpart = ",".join(counterpart)
name = fields.Char(required=True, string="Label")
quantity = fields.Float(digits=dp.get_precision('Product Unit of Measure'),
help="The optional quantity expressed by this line, eg: number of product sold. The quantity is not a legal requirement but is very useful for some reports.")
product_uom_id = fields.Many2one('product.uom', string='Unit of Measure')
product_id = fields.Many2one('product.product', string='Product')
debit = fields.Monetary(default=0.0, currency_field='company_currency_id')
credit = fields.Monetary(default=0.0, currency_field='company_currency_id')
balance = fields.Monetary(compute='_store_balance', store=True, currency_field='company_currency_id',
help="Technical field holding the debit - credit in order to open meaningful graph views from reports")
debit_cash_basis = fields.Monetary(currency_field='company_currency_id', compute='_compute_cash_basis', store=True)
credit_cash_basis = fields.Monetary(currency_field='company_currency_id', compute='_compute_cash_basis', store=True)
balance_cash_basis = fields.Monetary(compute='_compute_cash_basis', store=True, currency_field='company_currency_id',
help="Technical field holding the debit_cash_basis - credit_cash_basis in order to open meaningful graph views from reports")
amount_currency = fields.Monetary(default=0.0, help="The amount expressed in an optional other currency if it is a multi-currency entry.")
company_currency_id = fields.Many2one('res.currency', related='company_id.currency_id', string="Company Currency", readonly=True,
help='Utility field to express amount currency', store=True)
currency_id = fields.Many2one('res.currency', string='Currency', default=_get_currency,
help="The optional other currency if it is a multi-currency entry.")
amount_residual = fields.Monetary(compute='_amount_residual', string='Residual Amount', store=True, currency_field='company_currency_id',
help="The residual amount on a journal item expressed in the company currency.")
amount_residual_currency = fields.Monetary(compute='_amount_residual', string='Residual Amount in Currency', store=True,
help="The residual amount on a journal item expressed in its currency (possibly not the company currency).")
account_id = fields.Many2one('account.account', string='Account', required=True, index=True,
ondelete="cascade", domain=[('deprecated', '=', False)], default=lambda self: self._context.get('account_id', False))
move_id = fields.Many2one('account.move', string='Journal Entry', ondelete="cascade",
help="The move of this entry line.", index=True, required=True, auto_join=True)
narration = fields.Text(related='move_id.narration', string='Narration')
ref = fields.Char(related='move_id.ref', string='Reference', store=True, copy=False, index=True)
payment_id = fields.Many2one('account.payment', string="Originator Payment", help="Payment that created this entry")
statement_id = fields.Many2one('account.bank.statement', string='Statement',
help="The bank statement used for bank reconciliation", index=True, copy=False)
reconciled = fields.Boolean(compute='_amount_residual', store=True)
full_reconcile_id = fields.Many2one('account.full.reconcile', string="Matching Number", copy=False)
matched_debit_ids = fields.One2many('account.partial.reconcile', 'credit_move_id', String='Matched Debits',
help='Debit journal items that are matched with this journal item.')
matched_credit_ids = fields.One2many('account.partial.reconcile', 'debit_move_id', String='Matched Credits',
help='Credit journal items that are matched with this journal item.')
journal_id = fields.Many2one('account.journal', related='move_id.journal_id', string='Journal',
index=True, store=True, copy=False) # related is required
blocked = fields.Boolean(string='No Follow-up', default=False,
help="You can check this box to mark this journal item as a litigation with the associated partner")
date_maturity = fields.Date(string='Due date', index=True, required=True,
help="This field is used for payable and receivable journal entries. You can put the limit date for the payment of this line.")
date = fields.Date(related='move_id.date', string='Date', index=True, store=True, copy=False) # related is required
analytic_line_ids = fields.One2many('account.analytic.line', 'move_id', string='Analytic lines', oldname="analytic_lines")
tax_ids = fields.Many2many('account.tax', string='Taxes')
tax_line_id = fields.Many2one('account.tax', string='Originator tax', ondelete='restrict')
analytic_account_id = fields.Many2one('account.analytic.account', string='Analytic Account')
analytic_tag_ids = fields.Many2many('account.analytic.tag', string='Analytic tags')
company_id = fields.Many2one('res.company', related='account_id.company_id', string='Company', store=True)
counterpart = fields.Char("Counterpart", compute='_get_counterpart', help="Compute the counter part accounts of this journal item for this journal entry. This can be needed in reports.")
# TODO: put the invoice link and partner_id on the account_move
invoice_id = fields.Many2one('account.invoice', oldname="invoice")
partner_id = fields.Many2one('res.partner', string='Partner', ondelete='restrict')
user_type_id = fields.Many2one('account.account.type', related='account_id.user_type_id', index=True, store=True, oldname="user_type")
tax_exigible = fields.Boolean(string='Appears in VAT report', default=True,
help="Technical field used to mark a tax line as exigible in the vat report or not (only exigible journal items are displayed). By default all new journal items are directly exigible, but with the module account_tax_cash_basis, some will become exigible only when the payment is recorded.")
_sql_constraints = [
('credit_debit1', 'CHECK (credit*debit=0)', 'Wrong credit or debit value in accounting entry !'),
('credit_debit2', 'CHECK (credit+debit>=0)', 'Wrong credit or debit value in accounting entry !'),
]
@api.multi
@api.constrains('currency_id', 'account_id')
def _check_currency(self):
for line in self:
if line.account_id.currency_id:
if not line.currency_id or line.currency_id.id != line.account_id.currency_id.id:
raise ValidationError(_('The selected account of your Journal Entry forces to provide a secondary currency. You should remove the secondary currency on the account.'))
@api.multi
@api.constrains('currency_id', 'amount_currency')
def _check_currency_and_amount(self):
for line in self:
if (line.amount_currency and not line.currency_id):
raise ValidationError(_("You cannot create journal items with a secondary currency without filling both 'currency' and 'amount currency' field."))
@api.multi
@api.constrains('amount_currency')
def _check_currency_amount(self):
for line in self:
if line.amount_currency:
if (line.amount_currency > 0.0 and line.credit > 0.0) or (line.amount_currency < 0.0 and line.debit > 0.0):
raise ValidationError(_('The amount expressed in the secondary currency must be positive when account is debited and negative when account is credited.'))
####################################################
# Reconciliation interface methods
####################################################
@api.model
def get_data_for_manual_reconciliation_widget(self, partner_ids, account_ids):
""" Returns the data required for the invoices & payments matching of partners/accounts.
If an argument is None, fetch all related reconciliations. Use [] to fetch nothing.
"""
return {
'customers': self.get_data_for_manual_reconciliation('partner', partner_ids, 'receivable'),
'suppliers': self.get_data_for_manual_reconciliation('partner', partner_ids, 'payable'),
'accounts': self.get_data_for_manual_reconciliation('account', account_ids),
}
    @api.model
    def get_data_for_manual_reconciliation(self, res_type, res_ids=None, account_type=None):
        """ Returns the data required for the invoices & payments matching of partners/accounts (list of dicts).
            If no res_ids is passed, returns data for all partners/accounts that can be reconciled.
            :param res_type: either 'partner' or 'account'
            :param res_ids: ids of the partners/accounts to reconcile, use None to fetch data indiscriminately
                of the id, use [] to prevent from fetching any data at all.
            :param account_type: if a partner is both customer and vendor, you can use 'payable' to reconcile
                the vendor-related journal entries and 'receivable' for the customer-related entries.
        """
        if res_ids is not None and len(res_ids) == 0:
            # Note : this short-circuiting is better for performances, but also required
            # since postgresql doesn't implement empty list (so 'AND id in ()' is useless)
            return []
        res_ids = res_ids and tuple(res_ids)
        assert res_type in ('partner', 'account')
        assert account_type in ('payable', 'receivable', None)
        is_partner = res_type == 'partner'
        res_alias = is_partner and 'p' or 'a'
        # The query selects reconciliable accounts (optionally joined to partners)
        # that have at least one positive AND one negative residual line, skipping
        # those already fully checked (last_time_entries_checked newer than the
        # latest line write). The interpolated {n} fragments are constant SQL
        # snippets chosen by is_partner/account_type; the only values spliced in
        # are the company id (an integer from the environment, not user input) —
        # actual parameters go through psycopg2 placeholders via locals().
        query = ("""
            SELECT {0} account_id, account_name, account_code, max_date,
                   to_char(last_time_entries_checked, 'YYYY-MM-DD') AS last_time_entries_checked
            FROM (
                    SELECT {1}
                        {res_alias}.last_time_entries_checked AS last_time_entries_checked,
                        a.id AS account_id,
                        a.name AS account_name,
                        a.code AS account_code,
                        MAX(l.write_date) AS max_date
                    FROM
                        account_move_line l
                        RIGHT JOIN account_account a ON (a.id = l.account_id)
                        RIGHT JOIN account_account_type at ON (at.id = a.user_type_id)
                        {2}
                    WHERE
                        a.reconcile IS TRUE
                        {3}
                        {4}
                        {5}
                        AND l.company_id = {6}
                        AND EXISTS (
                            SELECT NULL
                            FROM account_move_line l
                            WHERE l.account_id = a.id
                            {7}
                            AND l.amount_residual > 0
                        )
                        AND EXISTS (
                            SELECT NULL
                            FROM account_move_line l
                            WHERE l.account_id = a.id
                            {7}
                            AND l.amount_residual < 0
                        )
                    GROUP BY {8} a.id, a.name, a.code, {res_alias}.last_time_entries_checked
                    ORDER BY {res_alias}.last_time_entries_checked
                ) as s
            WHERE (last_time_entries_checked IS NULL OR max_date > last_time_entries_checked)
        """.format(
                is_partner and 'partner_id, partner_name,' or ' ',
                is_partner and 'p.id AS partner_id, p.name AS partner_name,' or ' ',
                is_partner and 'RIGHT JOIN res_partner p ON (l.partner_id = p.id)' or ' ',
                is_partner and ' ' or "AND at.type <> 'payable' AND at.type <> 'receivable'",
                account_type and "AND at.type = %(account_type)s" or '',
                res_ids and 'AND ' + res_alias + '.id in %(res_ids)s' or '',
                self.env.user.company_id.id,
                is_partner and 'AND l.partner_id = p.id' or ' ',
                is_partner and 'l.partner_id, p.id,' or ' ',
                res_alias=res_alias
            ))
        self.env.cr.execute(query, locals())
        # Apply ir_rules by filtering out
        # (browse(...).ids re-reads through the ORM so record rules apply —
        #  presumably dropping rows the user may not see; verify against ORM behavior)
        rows = self.env.cr.dictfetchall()
        ids = [x['account_id'] for x in rows]
        allowed_ids = set(self.env['account.account'].browse(ids).ids)
        rows = [row for row in rows if row['account_id'] in allowed_ids]
        if is_partner:
            ids = [x['partner_id'] for x in rows]
            allowed_ids = set(self.env['res.partner'].browse(ids).ids)
            rows = [row for row in rows if row['partner_id'] in allowed_ids]
        # Fetch other data
        for row in rows:
            account = self.env['account.account'].browse(row['account_id'])
            row['currency_id'] = account.currency_id.id or account.company_id.currency_id.id
            partner_id = is_partner and row['partner_id'] or None
            row['reconciliation_proposition'] = self.get_reconciliation_proposition(account.id, partner_id)
        return rows
    @api.model
    def get_reconciliation_proposition(self, account_id, partner_id=False):
        """ Returns two lines whose amount are opposite """
        # Get pairs
        # The condition fragment spliced with str.format() is a constant string;
        # account_id/partner_id themselves are bound through psycopg2
        # %(name)s placeholders (execute(query, locals())), not interpolated.
        partner_id_condition = partner_id and 'AND a.partner_id = %(partner_id)s AND b.partner_id = %(partner_id)s' or ''
        query = """
                SELECT a.id, b.id
                FROM account_move_line a, account_move_line b
                WHERE a.amount_residual = -b.amount_residual
                AND NOT a.reconciled AND NOT b.reconciled
                AND a.account_id = %(account_id)s AND b.account_id = %(account_id)s
                {partner_id_condition}
                ORDER BY a.date asc
                LIMIT 10
            """.format(**locals())
        self.env.cr.execute(query, locals())
        pairs = self.env.cr.fetchall()
        # Apply ir_rules by filtering out
        all_pair_ids = [element for tupl in pairs for element in tupl]
        allowed_ids = set(self.env['account.move.line'].browse(all_pair_ids).ids)
        pairs = [pair for pair in pairs if pair[0] in allowed_ids and pair[1] in allowed_ids]
        # Return lines formatted
        if len(pairs) > 0:
            # NOTE(review): under @api.model `self` is an empty recordset, so
            # currency_id/amount_currency are falsy and the company currency is
            # always chosen here — confirm this is the intended behavior.
            target_currency = (self.currency_id and self.amount_currency) and self.currency_id or self.company_id.currency_id
            lines = self.browse(list(pairs[0]))
            return lines.prepare_move_lines_for_reconciliation_widget(target_currency=target_currency)
        return []
@api.model
def domain_move_lines_for_reconciliation(self, excluded_ids=None, str=False):
""" Returns the domain which is common to both manual and bank statement reconciliation.
:param excluded_ids: list of ids of move lines that should not be fetched
:param str: search string
"""
context = (self._context or {})
if excluded_ids is None:
excluded_ids = []
domain = []
if excluded_ids:
domain = expression.AND([domain, [('id', 'not in', excluded_ids)]])
if str:
str_domain = [
'|', ('move_id.name', 'ilike', str),
'|', ('move_id.ref', 'ilike', str),
'|', ('date_maturity', 'like', str),
'&', ('name', '!=', '/'), ('name', 'ilike', str)
]
try:
amount = float(str)
amount_domain = [
'|', ('amount_residual', '=', amount),
'|', ('amount_residual_currency', '=', amount),
'|', ('amount_residual', '=', -amount),
'|', ('amount_residual_currency', '=', -amount),
'&', ('account_id.internal_type', '=', 'liquidity'),
'|', '|', ('debit', '=', amount), ('credit', '=', amount), ('amount_currency', '=', amount),
]
str_domain = expression.OR([str_domain, amount_domain])
except:
pass
# When building a domain for the bank statement reconciliation, if there's no partner
# and a search string, search also a match in the partner names
if 'bank_statement_line' in context and not context['bank_statement_line'].partner_id.id:
str_domain = expression.OR([str_domain, [('partner_id.name', 'ilike', str)]])
domain = expression.AND([domain, str_domain])
return domain
def _domain_move_lines_for_manual_reconciliation(self, account_id, partner_id=False, excluded_ids=None, str=False):
""" Create domain criteria that are relevant to manual reconciliation. """
domain = ['&', ('reconciled', '=', False), ('account_id', '=', account_id)]
if partner_id:
domain = expression.AND([domain, [('partner_id', '=', partner_id)]])
generic_domain = self.domain_move_lines_for_reconciliation(excluded_ids=excluded_ids, str=str)
return expression.AND([generic_domain, domain])
@api.model
def get_move_lines_for_manual_reconciliation(self, account_id, partner_id=False, excluded_ids=None, str=False, offset=0, limit=None, target_currency_id=False):
""" Returns unreconciled move lines for an account or a partner+account, formatted for the manual reconciliation widget """
domain = self._domain_move_lines_for_manual_reconciliation(account_id, partner_id, excluded_ids, str)
lines = self.search(domain, offset=offset, limit=limit, order="date_maturity asc, id asc")
if target_currency_id:
target_currency = self.env['res.currency'].browse(target_currency_id)
else:
account = self.env['account.account'].browse(account_id)
target_currency = account.currency_id or account.company_id.currency_id
return lines.prepare_move_lines_for_reconciliation_widget(target_currency=target_currency)
    @api.multi
    def prepare_move_lines_for_reconciliation_widget(self, target_currency=False, target_date=False):
        """ Returns move lines formatted for the manual/bank reconciliation widget
            :param target_currency: currency (browse_record or ID) you want the move line debit/credit converted into
            :param target_date: date to use for the monetary conversion
        """
        context = dict(self._context or {})
        ret = []
        if target_currency:
            # re-browse in case we were passed a currency ID via RPC call
            target_currency = self.env['res.currency'].browse(int(target_currency))
        for line in self:
            company_currency = line.account_id.company_id.currency_id
            ret_line = {
                'id': line.id,
                'name': line.name != '/' and line.move_id.name + ': ' + line.name or line.move_id.name,
                'ref': line.move_id.ref or '',
                # For reconciliation between statement transactions and already registered payments (eg. checks)
                # NB : we don't use the 'reconciled' field because the line we're selecting is not the one that gets reconciled
                'already_paid': line.account_id.internal_type == 'liquidity',
                'account_code': line.account_id.code,
                'account_name': line.account_id.name,
                'account_type': line.account_id.internal_type,
                'date_maturity': line.date_maturity,
                'date': line.date,
                'journal_name': line.journal_id.name,
                'partner_id': line.partner_id.id,
                'partner_name': line.partner_id.name,
                'currency_id': (line.currency_id and line.amount_currency) and line.currency_id.id or False,
            }
            debit = line.debit
            credit = line.credit
            amount = line.amount_residual
            amount_currency = line.amount_residual_currency
            # For already reconciled lines, don't use amount_residual(_currency)
            if line.account_id.internal_type == 'liquidity':
                amount = abs(debit - credit)
                amount_currency = abs(line.amount_currency)
            # Get right debit / credit:
            # NOTE: once set here, target_currency keeps this value for the
            # remaining lines of the loop (falsy argument only affects the first
            # iteration's fallback to the company currency).
            target_currency = target_currency or company_currency
            line_currency = (line.currency_id and line.amount_currency) and line.currency_id or company_currency
            amount_currency_str = ""
            total_amount_currency_str = ""
            if line_currency != company_currency and target_currency == line_currency:
                # The payment currency is the invoice currency, but they are different than the company currency
                # We use the `amount_currency` computed during the invoice validation, at the invoice date
                # to avoid exchange gain/loss
                # e.g. an invoice of 100€ must be paid with 100€, whatever the company currency and the exchange rates
                total_amount = line.amount_currency
                actual_debit = debit > 0 and amount_currency or 0.0
                actual_credit = credit > 0 and -amount_currency or 0.0
                currency = line_currency
            else:
                # Either:
                #  - the invoice, payment, company currencies are all the same,
                #  - the payment currency is the company currency, but the invoice currency is different,
                #  - the invoice currency is the company currency, but the payment currency is different,
                #  - the invoice, payment and company currencies are all different.
                # For the two first cases, we can simply use the debit/credit of the invoice move line, which are always in the company currency,
                # and this is what the target need.
                # For the two last cases, we can use the debit/credit which are in the company currency, and then change them to the target currency
                total_amount = abs(debit - credit)
                actual_debit = debit > 0 and amount or 0.0
                actual_credit = credit > 0 and -amount or 0.0
                currency = company_currency
            if line_currency != target_currency:
                # Keep a human-readable amount in the line's own currency too.
                amount_currency_str = formatLang(self.env, abs(actual_debit or actual_credit), currency_obj=line_currency)
                total_amount_currency_str = formatLang(self.env, total_amount, currency_obj=line_currency)
            if currency != target_currency:
                # Convert the working amounts into the display currency at
                # target_date (or the line's date when no target_date is given).
                ctx = context.copy()
                ctx.update({'date': target_date or line.date})
                total_amount = currency.with_context(ctx).compute(total_amount, target_currency)
                actual_debit = currency.with_context(ctx).compute(actual_debit, target_currency)
                actual_credit = currency.with_context(ctx).compute(actual_credit, target_currency)
            amount_str = formatLang(self.env, abs(actual_debit or actual_credit), currency_obj=target_currency)
            total_amount_str = formatLang(self.env, total_amount, currency_obj=target_currency)
            ret_line['debit'] = abs(actual_debit)
            ret_line['credit'] = abs(actual_credit)
            ret_line['amount_str'] = amount_str
            ret_line['total_amount_str'] = total_amount_str
            ret_line['amount_currency_str'] = amount_currency_str
            ret_line['total_amount_currency_str'] = total_amount_currency_str
            ret.append(ret_line)
        return ret
@api.model
def process_reconciliations(self, data):
""" Used to validate a batch of reconciliations in a single call
:param data: list of dicts containing:
- 'type': either 'partner' or 'account'
- 'id': id of the affected res.partner or account.account
- 'mv_line_ids': ids of exisiting account.move.line to reconcile
- 'new_mv_line_dicts': list of dicts containing values suitable for account_move_line.create()
"""
for datum in data:
if len(datum['mv_line_ids']) >= 1 or len(datum['mv_line_ids']) + len(datum['new_mv_line_dicts']) >= 2:
self.env['account.move.line'].browse(datum['mv_line_ids']).process_reconciliation(datum['new_mv_line_dicts'])
if datum['type'] == 'partner':
partners = self.env['res.partner'].browse(datum['id'])
partners.mark_as_reconciled()
if datum['type'] == 'account':
accounts = self.env['account.account'].browse(datum['id'])
accounts.mark_as_reconciled()
@api.multi
def process_reconciliation(self, new_mv_line_dicts):
""" Create new move lines from new_mv_line_dicts (if not empty) then call reconcile_partial on self and new move lines
:param new_mv_line_dicts: list of dicts containing values suitable fot account_move_line.create()
"""
if len(self) < 1 or len(self) + len(new_mv_line_dicts) < 2:
raise UserError(_('A reconciliation must involve at least 2 move lines.'))
# Create writeoff move lines
if len(new_mv_line_dicts) > 0:
writeoff_lines = self.env['account.move.line']
company_currency = self[0].account_id.company_id.currency_id
writeoff_currency = self[0].currency_id or company_currency
for mv_line_dict in new_mv_line_dicts:
if writeoff_currency != company_currency:
mv_line_dict['debit'] = writeoff_currency.compute(mv_line_dict['debit'], company_currency)
mv_line_dict['credit'] = writeoff_currency.compute(mv_line_dict['credit'], company_currency)
writeoff_lines += self._create_writeoff(mv_line_dict)
(self + writeoff_lines).reconcile()
else:
self.reconcile()
####################################################
# Reconciliation methods
####################################################
    def _get_pair_to_reconcile(self):
        """Return ``(debit_line, credit_line)``: the oldest line of self with a
        positive residual and the oldest one with a negative residual, compared
        on the field relevant to the reconciliation currency. Either element is
        False when no such line exists.
        """
        #field is either 'amount_residual' or 'amount_residual_currency' (if the reconciled account has a secondary currency set)
        field = self[0].account_id.currency_id and 'amount_residual_currency' or 'amount_residual'
        rounding = self[0].company_id.currency_id.rounding
        if self[0].currency_id and all([x.amount_currency and x.currency_id == self[0].currency_id for x in self]):
            #or if all lines share the same currency
            field = 'amount_residual_currency'
            rounding = self[0].currency_id.rounding
        # The context key forces the comparison field regardless of the above.
        if self._context.get('skip_full_reconcile_check') == 'amount_currency_excluded':
            field = 'amount_residual'
        elif self._context.get('skip_full_reconcile_check') == 'amount_currency_only':
            field = 'amount_residual_currency'
        #target the pair of move in self that are the oldest
        sorted_moves = sorted(self, key=lambda a: a.date)
        debit = credit = False
        for aml in sorted_moves:
            if credit and debit:
                break
            if float_compare(aml[field], 0, precision_rounding=rounding) == 1 and not debit:
                debit = aml
            elif float_compare(aml[field], 0, precision_rounding=rounding) == -1 and not credit:
                credit = aml
        return debit, credit
    def auto_reconcile_lines(self):
        """ This function iterates recursively on the recordset given as parameter as long as it
            can find a debit and a credit to reconcile together. It returns the recordset of the
            account move lines that were not reconciled during the process.
        """
        if not self.ids:
            return self
        sm_debit_move, sm_credit_move = self._get_pair_to_reconcile()
        #there is no more pair to reconcile so return what move_line are left
        if not sm_credit_move or not sm_debit_move:
            return self
        field = self[0].account_id.currency_id and 'amount_residual_currency' or 'amount_residual'
        if not sm_debit_move.debit and not sm_debit_move.credit:
            #both debit and credit field are 0, consider the amount_residual_currency field because it's an exchange difference entry
            field = 'amount_residual_currency'
        if self[0].currency_id and all([x.currency_id == self[0].currency_id for x in self]):
            #all the lines have the same currency, so we consider the amount_residual_currency field
            field = 'amount_residual_currency'
        # Context override takes precedence over all the heuristics above.
        if self._context.get('skip_full_reconcile_check') == 'amount_currency_excluded':
            field = 'amount_residual'
        elif self._context.get('skip_full_reconcile_check') == 'amount_currency_only':
            field = 'amount_residual_currency'
        #Reconcile the pair together
        amount_reconcile = min(sm_debit_move[field], -sm_credit_move[field])
        #Remove from recordset the one(s) that will be totally reconciled
        if amount_reconcile == sm_debit_move[field]:
            self -= sm_debit_move
        if amount_reconcile == -sm_credit_move[field]:
            self -= sm_credit_move
        #Check for the currency and amount_currency we can set
        currency = False
        amount_reconcile_currency = 0
        if sm_debit_move.currency_id == sm_credit_move.currency_id and sm_debit_move.currency_id.id:
            # Both lines share a real secondary currency: record the partial in
            # that currency as well.
            currency = sm_credit_move.currency_id.id
            amount_reconcile_currency = min(sm_debit_move.amount_residual_currency, -sm_credit_move.amount_residual_currency)
            amount_reconcile = min(sm_debit_move.amount_residual, -sm_credit_move.amount_residual)
        if self._context.get('skip_full_reconcile_check') == 'amount_currency_excluded':
            amount_reconcile_currency = 0.0
            currency = self._context.get('manual_full_reconcile_currency')
        elif self._context.get('skip_full_reconcile_check') == 'amount_currency_only':
            currency = self._context.get('manual_full_reconcile_currency')
        self.env['account.partial.reconcile'].create({
            'debit_move_id': sm_debit_move.id,
            'credit_move_id': sm_credit_move.id,
            'amount': amount_reconcile,
            'amount_currency': amount_reconcile_currency,
            'currency_id': currency,
        })
        #Iterate process again on self
        return self.auto_reconcile_lines()
    @api.multi
    def reconcile(self, writeoff_acc_id=False, writeoff_journal_id=False):
        """Reconcile the lines in self together. All lines must be unreconciled,
        belong to the same company and the same reconciliable account, and (for
        receivable/payable accounts) the same partner. When ``writeoff_acc_id``
        and ``writeoff_journal_id`` are provided, a write-off move is created
        for the remaining residual and its reconcilable line is returned;
        otherwise the method returns True.
        """
        #Perform all checks on lines
        company_ids = set()
        all_accounts = []
        partners = set()
        for line in self:
            company_ids.add(line.company_id.id)
            all_accounts.append(line.account_id)
            if (line.account_id.internal_type in ('receivable', 'payable')):
                partners.add(line.partner_id.id)
            if line.reconciled:
                raise UserError(_('You are trying to reconcile some entries that are already reconciled!'))
        if len(company_ids) > 1:
            raise UserError(_('To reconcile the entries company should be the same for all entries!'))
        if len(set(all_accounts)) > 1:
            raise UserError(_('Entries are not of the same account!'))
        if not all_accounts[0].reconcile:
            raise UserError(_('The account %s (%s) is not marked as reconciliable !') % (all_accounts[0].name, all_accounts[0].code))
        if len(partners) > 1:
            raise UserError(_('The partner has to be the same on all lines for receivable and payable accounts!'))
        #reconcile everything that can be
        remaining_moves = self.auto_reconcile_lines()
        #if writeoff_acc_id specified, then create write-off move with value the remaining amount from move in self
        if writeoff_acc_id and writeoff_journal_id and remaining_moves:
            all_aml_share_same_currency = all([x.currency_id == self[0].currency_id for x in self])
            writeoff_vals = {
                'account_id': writeoff_acc_id.id,
                'journal_id': writeoff_journal_id.id
            }
            if not all_aml_share_same_currency:
                writeoff_vals['amount_currency'] = False
            writeoff_to_reconcile = remaining_moves._create_writeoff(writeoff_vals)
            #add writeoff line to reconcile algo and finish the reconciliation
            remaining_moves = (remaining_moves + writeoff_to_reconcile).auto_reconcile_lines()
            return writeoff_to_reconcile
        return True
    def _create_writeoff(self, vals):
        """ Create a writeoff move for the account.move.lines in self. If debit/credit is not specified in vals,
            the writeoff amount will be computed as the sum of amount_residual of the given recordset.
            :param vals: dict containing values suitable for account_move_line.create(). The data in vals will
                be processed to create both writeoff account.move.line and their enclosing account.move.
        """
        # Check and complete vals
        if 'account_id' not in vals or 'journal_id' not in vals:
            raise UserError(_("It is mandatory to specify an account and a journal to create a write-off."))
        if ('debit' in vals) ^ ('credit' in vals):
            raise UserError(_("Either pass both debit and credit or none."))
        if 'date' not in vals:
            vals['date'] = self._context.get('date_p') or time.strftime('%Y-%m-%d')
        if 'name' not in vals:
            vals['name'] = self._context.get('comment') or _('Write-Off')
        if 'analytic_account_id' not in vals:
            vals['analytic_account_id'] = self.env.context.get('analytic_id', False)
        #compute the writeoff amount if not given
        if 'credit' not in vals and 'debit' not in vals:
            amount = sum([r.amount_residual for r in self])
            vals['credit'] = amount > 0 and amount or 0.0
            vals['debit'] = amount < 0 and abs(amount) or 0.0
        vals['partner_id'] = self.env['res.partner']._find_accounting_partner(self[0].partner_id).id
        company_currency = self[0].account_id.company_id.currency_id
        writeoff_currency = self[0].currency_id or company_currency
        # Carry the secondary currency on the write-off when applicable (unless
        # the caller explicitly excluded currency amounts via context).
        if not self._context.get('skip_full_reconcile_check') == 'amount_currency_excluded' and 'amount_currency' not in vals and writeoff_currency != company_currency:
            vals['currency_id'] = writeoff_currency.id
            sign = 1 if vals['debit'] > 0 else -1
            vals['amount_currency'] = sign * abs(sum([r.amount_residual_currency for r in self]))
        # Writeoff line in the account of self
        first_line_dict = vals.copy()
        first_line_dict['account_id'] = self[0].account_id.id
        if 'analytic_account_id' in first_line_dict:
            # The analytic account only belongs on the write-off-account side.
            del first_line_dict['analytic_account_id']
        if 'tax_ids' in first_line_dict:
            tax_ids = []
            #vals['tax_ids'] is a list of commands [[4, tax_id, None], ...]
            for tax_id in vals['tax_ids']:
                tax_ids.append(tax_id[1])
            amount = first_line_dict['credit'] - first_line_dict['debit']
            amount_tax = self.env['account.tax'].browse(tax_ids).compute_all(amount)['total_included']
            first_line_dict['credit'] = amount_tax > 0 and amount_tax or 0.0
            first_line_dict['debit'] = amount_tax < 0 and abs(amount_tax) or 0.0
            del first_line_dict['tax_ids']
        # Writeoff line in specified writeoff account
        second_line_dict = vals.copy()
        # The second line mirrors the first: swapped sides, opposite currency amount.
        second_line_dict['debit'], second_line_dict['credit'] = second_line_dict['credit'], second_line_dict['debit']
        if 'amount_currency' in vals:
            second_line_dict['amount_currency'] = -second_line_dict['amount_currency']
        # Create the move
        writeoff_move = self.env['account.move'].with_context(apply_taxes=True).create({
            'journal_id': vals['journal_id'],
            'date': vals['date'],
            'state': 'draft',
            'line_ids': [(0, 0, first_line_dict), (0, 0, second_line_dict)],
        })
        writeoff_move.post()
        # Return the writeoff move.line which is to be reconciled
        return writeoff_move.line_ids.filtered(lambda r: r.account_id == self[0].account_id)
@api.model
def compute_full_after_batch_reconcile(self):
    """ After running the manual reconciliation wizard and making full reconciliation, we need to run this method to create
        potentially an exchange rate entry that will balance the remaining amount_residual_currency (possibly several aml).
        This ensure that all aml in the full reconciliation are reconciled (amount_residual = amount_residual_currency = 0).
    """
    # Running totals over every journal item in `self`.
    total_debit = 0
    total_credit = 0
    total_amount_currency = 0
    currency = False  # first secondary currency seen; used for the exchange rate entry
    aml_to_balance_currency = self.env['account.move.line']  # lines that still carry a residual in currency
    partial_rec_set = self.env['account.partial.reconcile']
    aml_id = False
    partial_rec_id = False
    maxdate = None  # latest line date; becomes the exchange move date
    for aml in self:
        total_debit += aml.debit
        total_credit += aml.credit
        if aml.amount_residual_currency:
            aml_to_balance_currency |= aml
        # NOTE(review): max(date, None) relies on Python 2 mixed-type ordering; raises on Python 3 — confirm runtime.
        maxdate = max(aml.date, maxdate)
        if not currency and aml.currency_id:
            currency = aml.currency_id
        if aml.currency_id and aml.currency_id == currency:
            total_amount_currency += aml.amount_currency
        # Collect every partial reconciliation touching the lines.
        partial_rec_set |= aml.matched_debit_ids | aml.matched_credit_ids
    if currency and aml_to_balance_currency:
        aml = aml_to_balance_currency[0]
        # eventually create journal entries to book the difference due to foreign currency's exchange rate that fluctuates
        partial_rec = aml.credit and aml.matched_debit_ids[0] or aml.matched_credit_ids[0]
        aml_id, partial_rec_id = partial_rec.with_context(skip_full_reconcile_check=True).create_exchange_rate_entry(aml_to_balance_currency, 0.0, total_amount_currency, currency, maxdate)
        # Include the newly booked line/partial in the totals so the full-reconcile test below sees them.
        self |= aml_id
        partial_rec_set |= partial_rec_id
        total_amount_currency += aml_id.amount_currency
    partial_rec_ids = [x.id for x in list(partial_rec_set)]
    # if the total debit and credit are equal, and the total amount in currency is 0, the reconciliation is full
    digits_rounding_precision = self[0].company_id.currency_id.rounding
    if float_compare(total_debit, total_credit, precision_rounding=digits_rounding_precision) == 0 \
            and (not currency or float_is_zero(total_amount_currency, precision_rounding=currency.rounding)):
        # in that case, mark the reference on the partial reconciliations and the entries
        self.env['account.full.reconcile'].with_context(check_move_validity=False).create({
            'partial_reconcile_ids': [(6, 0, partial_rec_ids)],
            'reconciled_line_ids': [(6, 0, self.ids)],
            'exchange_move_id': aml_id.move_id.id if aml_id else False,
            'exchange_partial_rec_id': partial_rec_id.id if partial_rec_id else False})
@api.multi
def remove_move_reconcile(self):
    """ Undo a reconciliation """
    if not self:
        return True
    partials = self.env['account.partial.reconcile']
    ctx_invoice_id = self.env.context.get('invoice_id')
    for line in self:
        # Detach the invoice from its payment when we are unreconciling the
        # very line that linked them (context carries the invoice id).
        for invoice in line.payment_id.invoice_ids:
            if invoice.id == ctx_invoice_id and line in invoice.payment_move_line_ids:
                line.payment_id.write({'invoice_ids': [(3, invoice.id, None)]})
        # Gather every partial reconciliation this line participates in.
        partials += line.matched_debit_ids
        partials += line.matched_credit_ids
    return partials.unlink()
####################################################
# CRUD methods
####################################################
#TODO: to check/refactor
@api.model
def create(self, vals):
    """ :context's key apply_taxes: set to True if you want vals['tax_ids'] to result in the creation of move lines for taxes and eventual
            adjustment of the line amount (in case of a tax included in price).

        :context's key `check_move_validity`: check data consistency after move line creation. Eg. set to false to disable verification that the move
            debit-credit == 0 while creating the move lines composing the move.
    """
    context = dict(self._context or {})
    # Signed line amount: positive means debit, negative means credit.
    amount = vals.get('debit', 0.0) - vals.get('credit', 0.0)
    if not vals.get('partner_id') and context.get('partner_id'):
        vals['partner_id'] = context.get('partner_id')
    move = self.env['account.move'].browse(vals['move_id'])
    account = self.env['account.account'].browse(vals['account_id'])
    if account.deprecated:
        raise UserError(_('You cannot use deprecated account.'))
    # Propagate journal/date into the context, falling back to the move's values.
    if 'journal_id' in vals and vals['journal_id']:
        context['journal_id'] = vals['journal_id']
    if 'date' in vals and vals['date']:
        context['date'] = vals['date']
    if 'journal_id' not in context:
        context['journal_id'] = move.journal_id.id
        context['date'] = move.date
    # we need to treat the case where a value is given in the context for period_id as a string
    if not context.get('journal_id', False) and context.get('search_default_journal_id', False):
        context['journal_id'] = context.get('search_default_journal_id')
    if 'date' not in context:
        context['date'] = fields.Date.context_today(self)
    journal = vals.get('journal_id') and self.env['account.journal'].browse(vals['journal_id']) or move.journal_id
    vals['date_maturity'] = vals.get('date_maturity') or vals.get('date') or move.date
    # Journal entry controls: the account must match either an allowed account
    # type or an explicitly allowed account on the journal, if any are set.
    ok = not (journal.type_control_ids or journal.account_control_ids)
    if journal.type_control_ids:
        type = account.user_type_id
        for t in journal.type_control_ids:
            if type == t:
                ok = True
                break
    if journal.account_control_ids and not ok:
        for a in journal.account_control_ids:
            if a.id == vals['account_id']:
                ok = True
                break
    # Automatically convert in the account's secondary currency if there is one and
    # the provided values were not already multi-currency
    if account.currency_id and 'amount_currency' not in vals and account.currency_id.id != account.company_id.currency_id.id:
        vals['currency_id'] = account.currency_id.id
        if self._context.get('skip_full_reconcile_check') == 'amount_currency_excluded':
            vals['amount_currency'] = 0.0
        else:
            ctx = {}
            if 'date' in vals:
                ctx['date'] = vals['date']
            vals['amount_currency'] = account.company_id.currency_id.with_context(ctx).compute(amount, account.currency_id)
    if not ok:
        raise UserError(_('You cannot use this general account in this journal, check the tab \'Entry Controls\' on the related journal.'))
    # Create tax lines
    tax_lines_vals = []
    if context.get('apply_taxes') and vals.get('tax_ids'):
        # Get ids from triplets : https://www.odoo.com/documentation/master/reference/orm.html#openerp.models.Model.write
        tax_ids = [tax['id'] for tax in self.resolve_2many_commands('tax_ids', vals['tax_ids']) if tax.get('id')]
        # Since create() receives ids instead of recordset, let's just use the old-api bridge
        taxes = self.env['account.tax'].browse(tax_ids)
        currency = self.env['res.currency'].browse(vals.get('currency_id'))
        partner = self.env['res.partner'].browse(vals.get('partner_id'))
        res = taxes.with_context(round=True).compute_all(amount,
            currency, 1, vals.get('product_id'), partner)
        # Adjust line amount if any tax is price_include
        if abs(res['total_excluded']) < abs(amount):
            if vals['debit'] != 0.0: vals['debit'] = res['total_excluded']
            if vals['credit'] != 0.0: vals['credit'] = -res['total_excluded']
            if vals.get('amount_currency'):
                # Scale the currency amount by the same tax-exclusion ratio.
                vals['amount_currency'] = self.env['res.currency'].browse(vals['currency_id']).round(vals['amount_currency'] * (res['total_excluded']/amount))
        # Create tax lines
        for tax_vals in res['taxes']:
            if tax_vals['amount']:
                tax = self.env['account.tax'].browse([tax_vals['id']])
                # Pick the invoice or refund tax account depending on the sign;
                # fall back to the base line's account if none is configured.
                account_id = (amount > 0 and tax_vals['account_id'] or tax_vals['refund_account_id'])
                if not account_id: account_id = vals['account_id']
                temp = {
                    'account_id': account_id,
                    'name': vals['name'] + ' ' + tax_vals['name'],
                    'tax_line_id': tax_vals['id'],
                    'move_id': vals['move_id'],
                    'partner_id': vals.get('partner_id'),
                    'statement_id': vals.get('statement_id'),
                    'debit': tax_vals['amount'] > 0 and tax_vals['amount'] or 0.0,
                    'credit': tax_vals['amount'] < 0 and -tax_vals['amount'] or 0.0,
                    'analytic_account_id': vals.get('analytic_account_id') if tax.analytic else False,
                }
                # If the line comes from a bank statement in a foreign currency,
                # express the tax amount in that currency too.
                bank = self.env["account.bank.statement"].browse(vals.get('statement_id'))
                if bank.currency_id != bank.company_id.currency_id:
                    ctx = {}
                    if 'date' in vals:
                        ctx['date'] = vals['date']
                    temp['currency_id'] = bank.currency_id.id
                    temp['amount_currency'] = bank.company_id.currency_id.with_context(ctx).compute(tax_vals['amount'], bank.currency_id, round=True)
                tax_lines_vals.append(temp)
    new_line = super(AccountMoveLine, self).create(vals)
    for tax_line_vals in tax_lines_vals:
        # TODO: remove .with_context(context) once this context nonsense is solved
        self.with_context(context).create(tax_line_vals)
    if self._context.get('check_move_validity', True):
        move.with_context(context)._post_validate()
    return new_line
@api.multi
def unlink(self):
    """ Delete the journal items and re-validate the impacted moves.

    Collects the id of every move the deleted lines belong to, performs the
    regular unlink, then runs ``_post_validate`` on those moves unless the
    ``check_move_validity`` context key is explicitly set to False.
    """
    self._update_check()
    # Idiom: a set comprehension replaces the manual loop with a redundant
    # membership test — set insertion already deduplicates.
    move_ids = {line.move_id.id for line in self}
    result = super(AccountMoveLine, self).unlink()
    if self._context.get('check_move_validity', True) and move_ids:
        self.env['account.move'].browse(list(move_ids))._post_validate()
    return result
@api.multi
def write(self, vals):
    """ Guard legally-relevant field changes, log expected-payment-date notes on the
    related invoice, flag payments as reconciled when matched with a statement,
    then perform the write and re-validate the impacted moves.
    """
    if ('account_id' in vals) and self.env['account.account'].browse(vals['account_id']).deprecated:
        raise UserError(_('You cannot use deprecated account.'))
    # Changing any of these fields is only allowed on draft/unreconciled lines.
    if any(key in vals for key in ('account_id', 'journal_id', 'date', 'move_id', 'debit', 'credit')):
        self._update_check()
    if not self._context.get('allow_amount_currency') and any(key in vals for key in ('amount_currency', 'currency_id')):
        # hackish workaround to write the amount_currency when assigning a payment to an invoice through the 'add' button
        # this is needed to compute the correct amount_residual_currency and potentially create an exchange difference entry
        self._update_check()
    # when we set the expected payment date, log a note on the invoice_id related (if any)
    if vals.get('expected_pay_date') and self.invoice_id:
        msg = _('New expected payment date: ') + vals['expected_pay_date'] + '.\n' + vals.get('internal_note', '')
        self.invoice_id.message_post(body=msg) #TODO: check it is an internal note (not a regular email)!
    # when making a reconciliation on an existing liquidity journal item, mark the payment as reconciled
    for record in self:
        if 'statement_id' in vals and record.payment_id:
            # In case of an internal transfer, there are 2 liquidity move lines to match with a bank statement
            if all(line.statement_id for line in record.payment_id.move_line_ids.filtered(lambda r: r.id != record.id and r.account_id.internal_type=='liquidity')):
                record.payment_id.state = 'reconciled'
    result = super(AccountMoveLine, self).write(vals)
    if self._context.get('check_move_validity', True):
        # Re-run the balance check once per impacted move, not per line.
        move_ids = set()
        for line in self:
            if line.move_id.id not in move_ids:
                move_ids.add(line.move_id.id)
        self.env['account.move'].browse(list(move_ids))._post_validate()
    return result
@api.multi
def _update_check(self):
    """ Raise Warning to cause rollback if the move is posted, some entries are reconciled or the move is older than the lock date"""
    move_ids = set()
    for line in self:
        # Human-readable identification of the offending move for the error message.
        err_msg = _('Move name (id): %s (%s)') % (line.move_id.name, str(line.move_id.id))
        if line.move_id.state != 'draft':
            raise UserError(_('You cannot do this modification on a posted journal entry, you can just change some non legal fields. You must revert the journal entry to cancel it.\n%s.') % err_msg)
        # Zero-amount lines may still be edited even when flagged as reconciled.
        if line.reconciled and not (line.debit == 0 and line.credit == 0):
            raise UserError(_('You cannot do this modification on a reconciled entry. You can just change some non legal fields or you must unreconcile first.\n%s.') % err_msg)
        if line.move_id.id not in move_ids:
            move_ids.add(line.move_id.id)
    # Finally verify none of the impacted moves predates the fiscal lock date.
    self.env['account.move'].browse(list(move_ids))._check_lock_date()
    return True
####################################################
# Misc / utility methods
####################################################
@api.multi
@api.depends('ref', 'move_id')
def name_get(self):
    """ Display each line as its move name, with the reference appended in
    parentheses when a reference is set.
    """
    names = []
    for line in self:
        if line.ref:
            label = (line.move_id.name or '') + '(' + line.ref + ')'
        else:
            label = line.move_id.name
        names.append((line.id, label))
    return names
@api.model
def compute_amount_fields(self, amount, src_currency, company_currency, invoice_currency=False):
    """ Helper function to compute value for fields debit/credit/amount_currency based on an amount and the currencies given in parameter"""
    amount_currency = False
    currency_id = False
    if src_currency and src_currency != company_currency:
        # Keep the original amount in the source currency and convert the
        # booked amount into the company currency.
        amount_currency = amount
        amount = src_currency.with_context(self._context).compute(amount, company_currency)
        currency_id = src_currency.id
    debit = amount if amount > 0 else 0.0
    credit = -amount if amount < 0 else 0.0
    if invoice_currency and invoice_currency != company_currency and not amount_currency:
        amount_currency = src_currency.with_context(self._context).compute(amount, invoice_currency)
        currency_id = invoice_currency.id
    return debit, credit, amount_currency, currency_id
@api.multi
def create_analytic_lines(self):
    """ Create analytic items upon validation of an account.move.line having an analytic account. This
        method first remove any existing analytic item related to the line before creating any new one.
    """
    # Drop stale analytic items before regenerating them.
    self.mapped('analytic_line_ids').unlink()
    for obj_line in self:
        if obj_line.analytic_account_id:
            # _prepare_analytic_line is @api.one, hence returns a one-element list.
            vals_line = obj_line._prepare_analytic_line()[0]
            self.env['account.analytic.line'].create(vals_line)
@api.one
def _prepare_analytic_line(self):
    """ Prepare the values used to create() an account.analytic.line upon validation of an account.move.line having
        an analytic account. This method is intended to be extended in other modules.
    """
    # Analytic amounts are signed the opposite way of the journal item:
    # credits increase the analytic amount, debits decrease it.
    amount = (self.credit or 0.0) - (self.debit or 0.0)
    return {
        'name': self.name,
        'date': self.date,
        'account_id': self.analytic_account_id.id,
        'tag_ids': [(6, 0, self.analytic_tag_ids.ids)],
        'unit_amount': self.quantity,
        'product_id': self.product_id and self.product_id.id or False,
        'product_uom_id': self.product_uom_id and self.product_uom_id.id or False,
        # Convert to the analytic account's currency at the line date when it differs.
        'amount': self.company_currency_id.with_context(date=self.date or fields.Date.context_today(self)).compute(amount, self.analytic_account_id.currency_id) if self.analytic_account_id.currency_id else amount,
        'general_account_id': self.account_id.id,
        'ref': self.ref,
        'move_id': self.id,
        'user_id': self.invoice_id.user_id.id or self._uid,
    }
@api.model
def _query_get(self, domain=None):
    """ Build the SQL (tables, where clause, params) selecting the journal items
    matching `domain` plus every filter carried by the context (date range,
    journals, state, companies, reconciliation, analytic filters...).

    :param domain: optional search domain, possibly given as its string repr.
    :return: (tables, where_clause, where_clause_params) as produced by the ORM.
    """
    context = dict(self._context or {})
    # The domain may arrive serialized as a string; safe_eval normalizes it.
    domain = domain and safe_eval(str(domain)) or []

    date_field = 'date'
    if context.get('aged_balance'):
        date_field = 'date_maturity'
    if context.get('date_to'):
        domain += [(date_field, '<=', context['date_to'])]
    if context.get('date_from'):
        if not context.get('strict_range'):
            # Outside strict range, balance-sheet accounts keep their initial balance.
            domain += ['|', (date_field, '>=', context['date_from']), ('account_id.user_type_id.include_initial_balance', '=', True)]
        elif context.get('initial_bal'):
            domain += [(date_field, '<', context['date_from'])]
        else:
            domain += [(date_field, '>=', context['date_from'])]

    if context.get('journal_ids'):
        domain += [('journal_id', 'in', context['journal_ids'])]

    state = context.get('state')
    if state and state.lower() != 'all':
        domain += [('move_id.state', '=', state)]

    if context.get('company_id'):
        domain += [('company_id', '=', context['company_id'])]

    if 'company_ids' in context:
        domain += [('company_id', 'in', context['company_ids'])]

    if context.get('reconcile_date'):
        # Unreconciled lines, or lines whose reconciliation happened after the given date.
        domain += ['|', ('reconciled', '=', False), '|', ('matched_debit_ids.create_date', '>', context['reconcile_date']), ('matched_credit_ids.create_date', '>', context['reconcile_date'])]

    if context.get('account_tag_ids'):
        domain += [('account_id.tag_ids', 'in', context['account_tag_ids'].ids)]

    if context.get('analytic_tag_ids'):
        domain += ['|', ('analytic_account_id.tag_ids', 'in', context['analytic_tag_ids'].ids), ('analytic_tag_ids', 'in', context['analytic_tag_ids'].ids)]

    if context.get('analytic_account_ids'):
        domain += [('analytic_account_id', 'in', context['analytic_account_ids'].ids)]

    where_clause = ""
    where_clause_params = []
    tables = ''
    if domain:
        query = self._where_calc(domain)
        tables, where_clause, where_clause_params = query.get_sql()
    return tables, where_clause, where_clause_params
@api.multi
def open_reconcile_view(self):
    """ Return the 'all journal items' action restricted to these lines and
    their reconciliation counterparts.
    """
    [action] = self.env.ref('account.action_account_moves_all_a').read()
    line_ids = []
    for aml in self:
        if not aml.account_id.reconcile:
            continue
        # A credit line was matched against debit lines and vice versa.
        if aml.credit > 0:
            counterparts = [r.debit_move_id.id for r in aml.matched_debit_ids]
        else:
            counterparts = [r.credit_move_id.id for r in aml.matched_credit_ids]
        line_ids.extend(counterparts)
        line_ids.append(aml.id)
    action['domain'] = [('id', 'in', line_ids)]
    return action
class AccountPartialReconcile(models.Model):
    """ Link between one debit and one credit journal item for a (possibly partial)
    matched amount. Full reconciliations are detected on creation and recorded as
    account.full.reconcile records, booking an exchange rate entry when needed.
    """
    _name = "account.partial.reconcile"
    _description = "Partial Reconcile"

    debit_move_id = fields.Many2one('account.move.line', index=True, required=True)
    credit_move_id = fields.Many2one('account.move.line', index=True, required=True)
    amount = fields.Monetary(currency_field='company_currency_id', help="Amount concerned by this matching. Assumed to be always positive")
    amount_currency = fields.Monetary(string="Amount in Currency")
    currency_id = fields.Many2one('res.currency', string='Currency')
    company_currency_id = fields.Many2one('res.currency', related='company_id.currency_id', readonly=True,
        help='Utility field to express amount currency')
    # NOTE(review): the label 'Currency' on a res.company field looks like a
    # copy-paste mistake — confirm before relabeling (translations depend on it).
    company_id = fields.Many2one('res.company', related='debit_move_id.company_id', store=True, string='Currency')
    full_reconcile_id = fields.Many2one('account.full.reconcile', string="Full Reconcile", copy=False)

    def create_exchange_rate_entry(self, aml_to_fix, amount_diff, diff_in_currency, currency, move_date):
        """ Automatically create a journal entry to book the exchange rate difference.
        That new journal entry is made in the company `currency_exchange_journal_id` and one of its journal
        items is matched with the other lines to balance the full reconciliation.
        """
        for rec in self:
            if not rec.company_id.currency_exchange_journal_id:
                raise UserError(_("You should configure the 'Exchange Rate Journal' in the accounting settings, to manage automatically the booking of accounting entries related to differences between exchange rates."))
            if not rec.company_id.income_currency_exchange_account_id.id:
                raise UserError(_("You should configure the 'Gain Exchange Rate Account' in the accounting settings, to manage automatically the booking of accounting entries related to differences between exchange rates."))
            if not rec.company_id.expense_currency_exchange_account_id.id:
                raise UserError(_("You should configure the 'Loss Exchange Rate Account' in the accounting settings, to manage automatically the booking of accounting entries related to differences between exchange rates."))
            move_vals = {'journal_id': rec.company_id.currency_exchange_journal_id.id}

            # The move date should be the maximum date between payment and invoice (in case
            # of payment in advance). However, we should make sure the move date is not
            # recorded after the end of year closing.
            # NOTE(review): the comparison assumes a set lock date; with a falsy lock
            # date this relies on Python 2 mixed-type ordering — confirm runtime.
            if move_date > rec.company_id.fiscalyear_lock_date:
                move_vals['date'] = move_date
            move = rec.env['account.move'].create(move_vals)
            amount_diff = rec.company_id.currency_id.round(amount_diff)
            diff_in_currency = currency.round(diff_in_currency)

            # First line: booked on the partner/receivable account so it can be
            # matched with the residual amounts of the lines to fix.
            line_to_reconcile = rec.env['account.move.line'].with_context(check_move_validity=False).create({
                'name': _('Currency exchange rate difference'),
                'debit': amount_diff < 0 and -amount_diff or 0.0,
                'credit': amount_diff > 0 and amount_diff or 0.0,
                'account_id': rec.debit_move_id.account_id.id,
                'move_id': move.id,
                'currency_id': currency.id,
                'amount_currency': -diff_in_currency,
                'partner_id': rec.debit_move_id.partner_id.id,
            })
            # Counterpart line: booked on the gain or loss account of the exchange journal.
            rec.env['account.move.line'].create({
                'name': _('Currency exchange rate difference'),
                'debit': amount_diff > 0 and amount_diff or 0.0,
                'credit': amount_diff < 0 and -amount_diff or 0.0,
                'account_id': amount_diff > 0 and rec.company_id.currency_exchange_journal_id.default_debit_account_id.id or rec.company_id.currency_exchange_journal_id.default_credit_account_id.id,
                'move_id': move.id,
                'currency_id': currency.id,
                'amount_currency': diff_in_currency,
                'partner_id': rec.debit_move_id.partner_id.id,
            })
            # Match the new exchange line with each line still carrying a residual.
            for aml in aml_to_fix:
                partial_rec = rec.env['account.partial.reconcile'].create({
                    'debit_move_id': aml.credit and line_to_reconcile.id or aml.id,
                    'credit_move_id': aml.debit and line_to_reconcile.id or aml.id,
                    'amount': abs(aml.amount_residual),
                    'amount_currency': abs(aml.amount_residual_currency),
                    'currency_id': currency.id,
                })
            move.post()
        # NOTE(review): returns the values produced by the last iterations of the
        # loops above; callers appear to invoke this on single records.
        return line_to_reconcile, partial_rec

    @api.model
    def create(self, vals):
        """ Create the partial reconciliation and, unless skipped via context,
        detect whether the whole reconciliation chain is now full; if so, create
        the account.full.reconcile (booking an exchange rate entry when needed).
        """
        res = super(AccountPartialReconcile, self).create(vals)
        if self._context.get('skip_full_reconcile_check'):
            # when running the manual reconciliation wizard, don't check the partials separately for full
            # reconciliation or exchange rate because it is handled manually after the whole processing
            return res
        # check if the reconcilation is full
        # first, gather all journal items involved in the reconciliation just created
        # (OrderedDict is used as an ordered set of partial reconciliations)
        partial_rec_set = OrderedDict.fromkeys([x for x in res])
        aml_set = self.env['account.move.line']
        total_debit = 0
        total_credit = 0
        total_amount_currency = 0
        # make sure that all partial reconciliations share the same secondary currency otherwise it's not
        # possible to compute the exchange difference entry and it has to be done manually.
        currency = list(partial_rec_set)[0].currency_id
        maxdate = None
        aml_to_balance = None
        for partial_rec in partial_rec_set:
            if partial_rec.currency_id != currency:
                # no exchange rate entry will be created
                currency = None
            for aml in [partial_rec.debit_move_id, partial_rec.credit_move_id]:
                if aml not in aml_set:
                    if aml.amount_residual or aml.amount_residual_currency:
                        aml_to_balance = aml
                    # NOTE(review): max(date, None) relies on Python 2 ordering semantics.
                    maxdate = max(aml.date, maxdate)
                    total_debit += aml.debit
                    total_credit += aml.credit
                    aml_set |= aml
                    if aml.currency_id and aml.currency_id == currency:
                        total_amount_currency += aml.amount_currency
                    elif partial_rec.currency_id and partial_rec.currency_id == currency:
                        # if the aml has no secondary currency but is reconciled with other journal item(s) in secondary currency, the amount
                        # currency is recorded on the partial rec and in order to check if the reconciliation is total, we need to convert the
                        # aml.balance in that foreign currency
                        total_amount_currency += aml.company_id.currency_id.with_context(date=aml.date).compute(aml.balance, partial_rec.currency_id)
                # Walk the whole reconciliation chain: queue every partial touching this line.
                for x in aml.matched_debit_ids | aml.matched_credit_ids:
                    partial_rec_set[x] = None
        partial_rec_ids = [x.id for x in partial_rec_set.keys()]
        aml_ids = aml_set.ids
        # then, if the total debit and credit are equal, or the total amount in currency is 0, the reconciliation is full
        digits_rounding_precision = aml_set[0].company_id.currency_id.rounding
        if (currency and float_is_zero(total_amount_currency, precision_rounding=currency.rounding)) or float_compare(total_debit, total_credit, precision_rounding=digits_rounding_precision) == 0:
            exchange_move_id = False
            exchange_partial_rec_id = False
            if currency and aml_to_balance:
                # eventually create a journal entry to book the difference due to foreign currency's exchange rate that fluctuates
                rate_diff_aml, rate_diff_partial_rec = partial_rec.create_exchange_rate_entry(aml_to_balance, total_debit - total_credit, total_amount_currency, currency, maxdate)
                aml_ids.append(rate_diff_aml.id)
                partial_rec_ids.append(rate_diff_partial_rec.id)
                exchange_move_id = rate_diff_aml.move_id.id
                exchange_partial_rec_id = rate_diff_partial_rec.id
            # mark the reference of the full reconciliation on the partial ones and on the entries
            self.env['account.full.reconcile'].with_context(check_move_validity=False).create({
                'partial_reconcile_ids': [(4, p_id) for p_id in partial_rec_ids],
                'reconciled_line_ids': [(4, a_id) for a_id in aml_ids],
                'exchange_move_id': exchange_move_id,
                'exchange_partial_rec_id': exchange_partial_rec_id,
            })
        return res

    @api.multi
    def unlink(self):
        """ When removing a partial reconciliation, also unlink its full reconciliation if it exists """
        to_unlink = self
        full_to_unlink = self.env['account.full.reconcile']
        res = True
        if self._context.get('full_rec_lookup', True):
            for rec in self:
                # exclude partial reconciliations related to an exchange rate entry, because the unlink of the full reconciliation will already do it
                if self.env['account.full.reconcile'].search([('exchange_partial_rec_id', '=', rec.id)]):
                    to_unlink = to_unlink - rec
                # without the deleted partial reconciliations, the full reconciliation won't be full anymore
                if rec.full_reconcile_id:
                    full_to_unlink |= rec.full_reconcile_id
        if to_unlink:
            res = super(AccountPartialReconcile, to_unlink).unlink()
        if full_to_unlink:
            full_to_unlink.unlink()
        return res
class AccountFullReconcile(models.Model):
    """ Groups the partial reconciliations and journal items forming a complete
    reconciliation (every line fully matched, residuals at zero).
    """
    _name = "account.full.reconcile"
    _description = "Full Reconcile"

    name = fields.Char(string='Number', required=True, copy=False, default=lambda self: self.env['ir.sequence'].next_by_code('account.reconcile'))
    partial_reconcile_ids = fields.One2many('account.partial.reconcile', 'full_reconcile_id', string='Reconciliation Parts')
    reconciled_line_ids = fields.One2many('account.move.line', 'full_reconcile_id', string='Matched Journal Items')
    # Exchange rate entry (and its partial reconciliation) created to balance
    # the currency difference, if any.
    exchange_move_id = fields.Many2one('account.move')
    exchange_partial_rec_id = fields.Many2one('account.partial.reconcile')

    @api.multi
    def unlink(self):
        """ When removing a full reconciliation, we need to revert the eventual journal entries we created to book the
            fluctuation of the foreign currency's exchange rate.
            We need also to reconcile together the origin currency difference line and its reversal in order to completly
            cancel the currency difference entry on the partner account (otherwise it will still appear on the aged balance
            for example).
        """
        for rec in self:
            if not rec.exchange_move_id or not rec.exchange_partial_rec_id:
                continue
            # reverse the exchange rate entry
            reversed_move_id = rec.exchange_move_id.reverse_moves()[0]
            reversed_move = self.env['account.move'].browse(reversed_move_id)
            # search the original line and its newly created reversal
            # NOTE(review): the loop variable is reused after the loop — `aml` ends
            # up as the first reconcilable line, or the last line if none matched.
            for aml in reversed_move.line_ids:
                if aml.account_id.reconcile:
                    break
            if aml:
                precision = aml.currency_id and aml.currency_id.rounding or aml.company_id.currency_id.rounding
                # Pick the counterpart on the opposite side of the exchange entry.
                if aml.debit or float_compare(aml.amount_currency, 0, precision_rounding=precision) == 1:
                    pair_to_rec = aml | rec.exchange_partial_rec_id.credit_move_id
                else:
                    pair_to_rec = aml | rec.exchange_partial_rec_id.debit_move_id
                # remove the partial reconciliation of the exchange rate entry as well
                rec.exchange_partial_rec_id.with_context(full_rec_lookup=False).unlink()
                # reconcile together the original exchange rate line and its reversal
                pair_to_rec.reconcile()
        return super(AccountFullReconcile, self).unlink()
| 56.229712 | 298 | 0.633527 |
620fde81a535cf281d66de88ecec22aad0c313cb | 418 | py | Python | g12.py | pictographer/pi-i2c | d3b4c9faf5cd17b37f9e0863c4559c2ea06bdd0e | [
"MIT"
] | 1 | 2018-04-08T13:41:33.000Z | 2018-04-08T13:41:33.000Z | g12.py | pictographer/pi-i2c | d3b4c9faf5cd17b37f9e0863c4559c2ea06bdd0e | [
"MIT"
] | null | null | null | g12.py | pictographer/pi-i2c | d3b4c9faf5cd17b37f9e0863c4559c2ea06bdd0e | [
"MIT"
] | null | null | null | #
#
# Blink a load attached to BCM GPIO pin 12 at roughly 10 Hz (runs until interrupted).
# Import the required modules.
import time
import RPi.GPIO as GPIO
# Use the Broadcom (BCM) pin-numbering scheme.
GPIO.setmode(GPIO.BCM)
# Configure BCM pin 12 as an output.
GPIO.setup(12, GPIO.OUT)
# Start with the pin driven low.
GPIO.output(12, False)
# Toggle the pin forever: 50 ms high, then 50 ms low.
while 1:
    print("on")
    GPIO.output(12, True)
    time.sleep(0.05)
    print("off")
    GPIO.output(12, False)
    time.sleep(0.05)
536fccbac2faafc94b22be805dbb15305b1cc6bd | 12,993 | py | Python | mesonbuild/mcompile.py | xclaesse/meson | 0a6b6dfe02f10fdb0ad6b9b5298c20aa25e1e879 | [
"Apache-2.0"
] | null | null | null | mesonbuild/mcompile.py | xclaesse/meson | 0a6b6dfe02f10fdb0ad6b9b5298c20aa25e1e879 | [
"Apache-2.0"
] | 5 | 2021-07-28T00:47:18.000Z | 2022-03-14T00:38:45.000Z | mesonbuild/mcompile.py | xclaesse/meson | 0a6b6dfe02f10fdb0ad6b9b5298c20aa25e1e879 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint script for backend agnostic compile."""
import os
import json
import re
import sys
import shutil
import typing as T
from collections import defaultdict
from pathlib import Path
from . import mlog
from . import mesonlib
from . import coredata
from .mesonlib import MesonException, RealPathAction, setup_vsenv
from mesonbuild.environment import detect_ninja
from mesonbuild.coredata import UserArrayOption
from mesonbuild import build
if T.TYPE_CHECKING:
import argparse
def array_arg(value: str) -> T.List[str]:
    """ Parse a command-line string into a list using meson's array-option rules
    (duplicates allowed, treated as raw user input).
    """
    option = UserArrayOption(None, value, allow_dups=True, user_input=True)
    return option.value
def validate_builddir(builddir: Path) -> None:
    """ Ensure `builddir` looks like a configured meson build directory.

    A successful `meson setup` leaves a coredata file behind; raise a
    MesonException when it is absent.
    """
    coredata_file = builddir / 'meson-private' / 'coredata.dat'
    if coredata_file.is_file():
        return
    raise MesonException(f'Current directory is not a meson build directory: `{builddir}`.\n'
                         'Please specify a valid build dir or change the working directory to it.\n'
                         'It is also possible that the build directory was generated with an old\n'
                         'meson version. Please regenerate it in this case.')
def parse_introspect_data(builddir: Path) -> T.Dict[str, T.List[dict]]:
    """
    Converts a List of name-to-dict to a dict of name-to-dicts (since names are not unique)
    """
    intro_file = builddir / 'meson-info' / 'intro-targets.json'
    if not intro_file.exists():
        raise MesonException(f'`{intro_file.name}` is missing! Directory is not configured yet?')
    with intro_file.open(encoding='utf-8') as fp:
        schema = json.load(fp)
    # Several targets may share a name; group their entries per name.
    by_name = defaultdict(list)  # type: T.Dict[str, T.List[dict]]
    for target in schema:
        by_name[target['name']].append(target)
    return by_name
class ParsedTargetName:
    """ Splits a command-line target string `[PATH/]NAME[:TYPE]` into its parts. """
    full_name = ''
    name = ''
    type = ''
    path = ''

    def __init__(self, target: str):
        self.full_name = target
        # Separate the optional `:TYPE` suffix (split on the last colon only).
        before, colon, after = target.rpartition(':')
        if colon:
            self.type = after
            if not self._is_valid_type(self.type):
                raise MesonException(f'Can\'t invoke target `{target}`: unknown target type: `{self.type}`')
            head = before
        else:
            head = after
        # Separate the optional leading path (split on the last slash only).
        self.path, _, self.name = head.rpartition('/')

    @staticmethod
    def _is_valid_type(type: str) -> bool:
        # Amend docs in Commands.md when editing this list
        return type in {
            'executable',
            'static_library',
            'shared_library',
            'shared_module',
            'custom',
            'run',
            'jar',
        }
def get_target_from_intro_data(target: ParsedTargetName, builddir: Path, introspect_data: T.Dict[str, T.Any]) -> T.Dict[str, T.Any]:
    """ Resolve a parsed target name to exactly one introspection entry.

    Raises MesonException when no entry matches or when the name is ambiguous
    (several entries match and neither a type nor a path disambiguates them).
    """
    if target.name not in introspect_data:
        raise MesonException(f'Can\'t invoke target `{target.full_name}`: target not found')

    candidates = introspect_data[target.name]
    resolved_bdir = builddir.resolve()

    if not target.type and not target.path:
        matches = candidates
    else:
        matches = []
        for candidate in candidates:
            # Subproject targets are never addressable by plain name.
            if candidate['subproject']:
                continue
            # Introspection reports types with spaces ('static library').
            if target.type and target.type != candidate['type'].replace(' ', '_'):
                continue
            if (target.path
                    and candidate['filename'] != 'no_name'
                    and Path(target.path) != Path(candidate['filename'][0]).relative_to(resolved_bdir).parent):
                continue
            matches.append(candidate)

    if not matches:
        raise MesonException(f'Can\'t invoke target `{target.full_name}`: target not found')
    if len(matches) > 1:
        raise MesonException(f'Can\'t invoke target `{target.full_name}`: ambiguous name. Add target type and/or path: `PATH/NAME:TYPE`')
    return matches[0]
def generate_target_names_ninja(target: ParsedTargetName, builddir: Path, introspect_data: dict) -> T.List[str]:
    """ Return the name(s) ninja knows the target by: the bare target name for
    `run` targets, otherwise the output file paths relative to the build dir.
    """
    intro_target = get_target_from_intro_data(target, builddir, introspect_data)
    if intro_target['type'] == 'run':
        return [target.name]
    base = builddir.resolve()
    return [str(Path(outfile).relative_to(base)) for outfile in intro_target['filename']]
def get_parsed_args_ninja(options: 'argparse.Namespace', builddir: Path) -> T.Tuple[T.List[str], T.Optional[T.Dict[str, str]]]:
    """Build the ninja/samu command line for `meson compile`.

    Returns the argv list plus an environment override, which is always
    None for this backend.
    """
    ninja = detect_ninja()
    if ninja is None:
        raise MesonException('Cannot find ninja.')
    cmd = ninja
    if not builddir.samefile('.'):
        cmd.extend(['-C', builddir.as_posix()])
    # If the value is set to < 1 then don't set anything, which let's
    # ninja/samu decide what to do.
    if options.jobs > 0:
        cmd.extend(['-j', str(options.jobs)])
    if options.load_average > 0:
        cmd.extend(['-l', str(options.load_average)])
    if options.verbose:
        cmd.append('-v')
    cmd += options.ninja_args
    # operands must be processed after options/option-arguments
    if options.targets:
        intro_data = parse_introspect_data(builddir)
        for target_name in options.targets:
            cmd.extend(generate_target_names_ninja(ParsedTargetName(target_name), builddir, intro_data))
    if options.clean:
        cmd.append('clean')
    return cmd, None
def generate_target_name_vs(target: ParsedTargetName, builddir: Path, introspect_data: dict) -> str:
    """Return the msbuild `-target:` name for *target* (never a `run` target)."""
    found = get_target_from_intro_data(target, builddir, introspect_data)
    assert found['type'] != 'run', 'Should not reach here: `run` targets must be handle above'
    # Normalize project name
    # Source: https://docs.microsoft.com/en-us/visualstudio/msbuild/how-to-build-specific-targets-in-solutions-by-using-msbuild-exe
    name = re.sub(r"[\%\$\@\;\.\(\)']", '_', found['id']) # type: str
    subdir = Path(found['filename'][0]).relative_to(builddir.resolve()).parent
    if subdir != Path('.'):
        name = str(subdir / name)
    return name
def get_parsed_args_vs(options: 'argparse.Namespace', builddir: Path) -> T.Tuple[T.List[str], T.Optional[T.Dict[str, str]]]:
    """Build the msbuild command line for `meson compile` on the vs backend.

    Returns the argv list and the environment to run it in (a copy of
    os.environ with PLATFORM removed).
    """
    slns = list(builddir.glob('*.sln'))
    assert len(slns) == 1, 'More than one solution in a project?'
    sln = slns[0]
    cmd = ['msbuild']
    if options.targets:
        intro_data = parse_introspect_data(builddir)
        has_run_target = any(
            get_target_from_intro_data(ParsedTargetName(t), builddir, intro_data)['type'] == 'run'
            for t in options.targets)
        if has_run_target:
            # `run` target can't be used the same way as other targets on `vs` backend.
            # They are defined as disabled projects, which can't be invoked as `.sln`
            # target and have to be invoked directly as project instead.
            # Issue: https://github.com/microsoft/msbuild/issues/4772
            if len(options.targets) > 1:
                raise MesonException('Only one target may be specified when `run` target type is used on this backend.')
            intro_target = get_target_from_intro_data(ParsedTargetName(options.targets[0]), builddir, intro_data)
            proj_dir = Path(intro_target['filename'][0]).parent
            proj = proj_dir/'{}.vcxproj'.format(intro_target['id'])
            cmd += [str(proj.resolve())]
        else:
            cmd += [str(sln.resolve())]
            cmd.extend(['-target:{}'.format(generate_target_name_vs(ParsedTargetName(t), builddir, intro_data)) for t in options.targets])
    else:
        cmd += [str(sln.resolve())]
    if options.clean:
        cmd.extend(['-target:Clean'])
    # In msbuild `-maxCpuCount` with no number means "detect cpus", the default is `-maxCpuCount:1`
    if options.jobs > 0:
        cmd.append(f'-maxCpuCount:{options.jobs}')
    else:
        cmd.append('-maxCpuCount')
    if options.load_average:
        mlog.warning('Msbuild does not have a load-average switch, ignoring.')
    if not options.verbose:
        cmd.append('-verbosity:minimal')
    cmd += options.vs_args
    # Remove platform from env so that msbuild does not pick x86 platform when solution platform is Win32
    env = os.environ.copy()
    # Bug fix: `del env['PLATFORM']` raised KeyError whenever PLATFORM was not
    # set in the environment; pop() with a default tolerates its absence.
    env.pop('PLATFORM', None)
    return cmd, env
def get_parsed_args_xcode(options: 'argparse.Namespace', builddir: Path) -> T.Tuple[T.List[str], T.Optional[T.Dict[str, str]]]:
    """Build the xcodebuild command line for `meson compile`."""
    runner = 'xcodebuild'
    if not shutil.which(runner):
        raise MesonException('Cannot find xcodebuild, did you install XCode?')
    # xcodebuild has no argument to switch directory, so chdir instead.
    os.chdir(str(builddir))
    cmd = [runner, '-parallelizeTargets']
    for target_name in options.targets or []:
        cmd += ['-target', target_name]
    if options.clean:
        cmd += ['clean'] if options.targets else ['-alltargets', 'clean']
        # Otherwise xcodebuild tries to delete the builddir and fails
        cmd += ['-UseNewBuildSystem=FALSE']
    if options.jobs > 0:
        cmd.extend(['-jobs', str(options.jobs)])
    if options.load_average > 0:
        mlog.warning('xcodebuild does not have a load-average switch, ignoring')
    # --verbose is intentionally a no-op: xcodebuild is already quite
    # verbose, and -quiet doesn't print any status messages.
    cmd += options.xcode_args
    return cmd, None
def add_arguments(parser: 'argparse.ArgumentParser') -> None:
    """Add compile specific arguments."""
    # Positional operands: zero or more targets to build.
    parser.add_argument(
        'targets',
        metavar='TARGET',
        nargs='*',
        default=None,
        help='Targets to build. Target has the following format: [PATH_TO_TARGET/]TARGET_NAME[:TARGET_TYPE].')
    parser.add_argument(
        '--clean',
        action='store_true',
        help='Clean the build directory.'
    )
    parser.add_argument('-C', dest='wd', action=RealPathAction,
                        help='directory to cd into before running')
    # Values <= 0 for -j / -l mean "let the backend decide" (see the
    # per-backend get_parsed_args_* helpers).
    parser.add_argument(
        '-j', '--jobs',
        action='store',
        default=0,
        type=int,
        help='The number of worker jobs to run (if supported). If the value is less than 1 the build program will guess.'
    )
    parser.add_argument(
        '-l', '--load-average',
        action='store',
        default=0,
        type=float,
        help='The system load average to try to maintain (if supported).'
    )
    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        help='Show more verbose output.'
    )
    # Backend-specific pass-through arguments; each is only applied when
    # the matching backend is in use.
    parser.add_argument(
        '--ninja-args',
        type=array_arg,
        default=[],
        help='Arguments to pass to `ninja` (applied only on `ninja` backend).'
    )
    parser.add_argument(
        '--vs-args',
        type=array_arg,
        default=[],
        help='Arguments to pass to `msbuild` (applied only on `vs` backend).'
    )
    parser.add_argument(
        '--xcode-args',
        type=array_arg,
        default=[],
        help='Arguments to pass to `xcodebuild` (applied only on `xcode` backend).'
    )
def run(options: 'argparse.Namespace') -> int:
    """Entry point for `meson compile`.

    Validates the build directory, builds the backend-specific command
    line and runs it, returning the build tool's exit code.
    """
    cdata = coredata.load(options.wd)
    bdir = Path(options.wd)
    # Presence of meson-private/build.dat is the marker of a configured
    # meson build directory.
    buildfile = bdir / 'meson-private' / 'build.dat'
    if not buildfile.is_file():
        raise MesonException(f'Directory {options.wd!r} does not seem to be a Meson build directory.')
    b = build.load(options.wd)
    setup_vsenv(b.need_vsenv)
    cmd = [] # type: T.List[str]
    env = None # type: T.Optional[T.Dict[str, str]]
    if options.targets and options.clean:
        raise MesonException('`TARGET` and `--clean` can\'t be used simultaneously')
    backend = cdata.get_option(mesonlib.OptionKey('backend'))
    assert isinstance(backend, str)
    # Dispatch to the backend-specific argv builder.
    if backend == 'ninja':
        cmd, env = get_parsed_args_ninja(options, bdir)
    elif backend.startswith('vs'):
        cmd, env = get_parsed_args_vs(options, bdir)
    elif backend == 'xcode':
        cmd, env = get_parsed_args_xcode(options, bdir)
    else:
        raise MesonException(
            f'Backend `{backend}` is not yet supported by `compile`. Use generated project files directly instead.')
    # Stream the build tool's output straight through to our stdout/stderr.
    p, *_ = mesonlib.Popen_safe(cmd, stdout=sys.stdout.buffer, stderr=sys.stderr.buffer, env=env)
    return p.returncode
| 36.807365 | 138 | 0.63919 |
2fd8c3b1ac3dcc9370ba73a0d161f6324de573dc | 5,238 | py | Python | Contrib-Inspur/openbmc/poky/meta/lib/oe/gpg_sign.py | opencomputeproject/Rack-Manager | e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a | [
"MIT"
] | 5 | 2019-11-11T07:57:26.000Z | 2022-03-28T08:26:53.000Z | Contrib-Inspur/openbmc/poky/meta/lib/oe/gpg_sign.py | opencomputeproject/Rack-Manager | e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a | [
"MIT"
] | 3 | 2019-09-05T21:47:07.000Z | 2019-09-17T18:10:45.000Z | Contrib-Inspur/openbmc/poky/meta/lib/oe/gpg_sign.py | opencomputeproject/Rack-Manager | e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a | [
"MIT"
] | 11 | 2019-07-20T00:16:32.000Z | 2022-01-11T14:17:48.000Z | #
# SPDX-License-Identifier: GPL-2.0-only
#
"""Helper module for GPG signing"""
import os
import bb
import oe.utils
import subprocess
import shlex
class LocalSigner(object):
    """Class for handling local (on the build host) signing"""

    def __init__(self, d):
        # Prefer an explicitly configured binary; fall back to PATH lookup.
        self.gpg_bin = d.getVar('GPG_BIN') or \
                  bb.utils.which(os.getenv('PATH'), 'gpg')
        self.gpg_cmd = [self.gpg_bin]
        self.gpg_agent_bin = bb.utils.which(os.getenv('PATH'), "gpg-agent")
        # Without this we see "Cannot allocate memory" errors when running processes in parallel
        # It needs to be set for any gpg command since any agent launched can stick around in memory
        # and this parameter must be set.
        if self.gpg_agent_bin:
            self.gpg_cmd += ["--agent-program=%s|--auto-expand-secmem" % (self.gpg_agent_bin)]
        self.gpg_path = d.getVar('GPG_PATH')
        self.rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmsign")
        self.gpg_version = self.get_gpg_version()

    def export_pubkey(self, output_file, keyid, armor=True):
        """Export GPG public key to a file"""
        cmd = self.gpg_cmd + ["--no-permission-warning", "--batch", "--yes", "--export", "-o", output_file]
        if self.gpg_path:
            cmd += ["--homedir", self.gpg_path]
        if armor:
            cmd += ["--armor"]
        cmd += [keyid]
        subprocess.check_output(cmd, stderr=subprocess.STDOUT)

    def sign_rpms(self, files, keyid, passphrase, digest, sign_chunk, fsk=None, fsk_password=None):
        """Sign RPM files"""
        cmd = self.rpm_bin + " --addsign --define '_gpg_name %s' " % keyid
        gpg_args = '--no-permission-warning --batch --passphrase=%s --agent-program=%s|--auto-expand-secmem' % (passphrase, self.gpg_agent_bin)
        if self.gpg_version > (2,1,):
            gpg_args += ' --pinentry-mode=loopback'
        cmd += "--define '_gpg_sign_cmd_extra_args %s' " % gpg_args
        cmd += "--define '_binary_filedigest_algorithm %s' " % digest
        if self.gpg_bin:
            cmd += "--define '__gpg %s' " % self.gpg_bin
        if self.gpg_path:
            cmd += "--define '_gpg_path %s' " % self.gpg_path
        if fsk:
            cmd += "--signfiles --fskpath %s " % fsk
            if fsk_password:
                cmd += "--define '_file_signing_key_password %s' " % fsk_password
        # Sign in chunks
        for i in range(0, len(files), sign_chunk):
            subprocess.check_output(shlex.split(cmd + ' '.join(files[i:i+sign_chunk])), stderr=subprocess.STDOUT)

    def detach_sign(self, input_file, keyid, passphrase_file, passphrase=None, armor=True):
        """Create a detached signature of a file"""
        if passphrase_file and passphrase:
            # Message fixed: previously read "passphrase_file of passphrase".
            raise Exception("You should use either passphrase_file or passphrase, not both")
        cmd = self.gpg_cmd + ['--detach-sign', '--no-permission-warning', '--batch',
                              '--no-tty', '--yes', '--passphrase-fd', '0', '-u', keyid]
        if self.gpg_path:
            cmd += ['--homedir', self.gpg_path]
        if armor:
            cmd += ['--armor']
        #gpg > 2.1 supports password pipes only through the loopback interface
        #gpg < 2.1 errors out if given unknown parameters
        if self.gpg_version > (2,1,):
            cmd += ['--pinentry-mode', 'loopback']
        cmd += [input_file]
        try:
            if passphrase_file:
                with open(passphrase_file) as fobj:
                    passphrase = fobj.readline()
            # The passphrase is fed through stdin (--passphrase-fd 0).
            job = subprocess.Popen(cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
            (_, stderr) = job.communicate(passphrase.encode("utf-8"))
            if job.returncode:
                raise bb.build.FuncFailed("GPG exited with code %d: %s" %
                                          (job.returncode, stderr.decode("utf-8")))
        except IOError as e:
            bb.error("IO error (%s): %s" % (e.errno, e.strerror))
            raise Exception("Failed to sign '%s'" % input_file)
        except OSError as e:
            bb.error("OS error (%s): %s" % (e.errno, e.strerror))
            # Message fixed: the quote around %s was previously unbalanced.
            raise Exception("Failed to sign '%s'" % input_file)

    def get_gpg_version(self):
        """Return the gpg version as a tuple of ints"""
        try:
            cmd = self.gpg_cmd + ["--version", "--no-permission-warning"]
            ver_str = subprocess.check_output(cmd).split()[2].decode("utf-8")
            return tuple([int(i) for i in ver_str.split("-")[0].split('.')])
        except subprocess.CalledProcessError as e:
            raise bb.build.FuncFailed("Could not get gpg version: %s" % e)

    def verify(self, sig_file):
        """Verify signature"""
        # Bug fix: the option used to be written as " --verify" (note the
        # leading space), which gpg does not recognize as an option.
        cmd = self.gpg_cmd + ["--verify", "--no-permission-warning"]
        if self.gpg_path:
            cmd += ["--homedir", self.gpg_path]
        cmd += [sig_file]
        status = subprocess.call(cmd)
        ret = False if status else True
        return ret
def get_signer(d, backend):
    """Get signer object for the specified backend"""
    # Only local (build-host) signing is implemented.
    if backend != 'local':
        bb.fatal("Unsupported signing backend '%s'" % backend)
    return LocalSigner(d)
| 39.383459 | 143 | 0.58362 |
789786ed168a8e406a9d835c141ea5ce1bd2ba80 | 9,167 | py | Python | powerprotect/protectionpolicy.py | EMC-Underground/powerprotect | 3e599fc80c474765266bd98693d3712978d380b2 | [
"MIT"
] | null | null | null | powerprotect/protectionpolicy.py | EMC-Underground/powerprotect | 3e599fc80c474765266bd98693d3712978d380b2 | [
"MIT"
] | null | null | null | powerprotect/protectionpolicy.py | EMC-Underground/powerprotect | 3e599fc80c474765266bd98693d3712978d380b2 | [
"MIT"
] | null | null | null | from powerprotect.ppdm import Ppdm
from powerprotect import exceptions
from powerprotect import get_module_logger
from powerprotect import helpers
# Module logger; propagation is disabled so records are emitted only via
# handlers attached directly to this logger.
protectionpolicy_logger = get_module_logger(__name__)
protectionpolicy_logger.propagate = False
class ProtectionPolicy(Ppdm):
    """A PPDM protection policy, exposing Ansible-style state flags.

    Instances track ``exists``/``changed``/``failure``/``msg`` so callers
    can report idempotent create/update/delete results. ``check_mode``
    suppresses all write operations.
    """

    def __init__(self, **kwargs):
        """Connect to PPDM and load the policy named ``kwargs['name']``.

        Raises PpdmException when a required keyword is missing.
        """
        try:
            self.exists = False
            self.changed = False
            self.check_mode = kwargs.get('check_mode', False)
            self.msg = ""
            self.failure = False
            self.fail_msg = ""
            self.name = kwargs['name']
            self.body = {}
            self.target_body = {}
            self.url = ""
            super().__init__(**kwargs)
            # Only log in when no pre-existing token was supplied.
            if 'token' not in kwargs:
                super().login()
            self.get_policy()
        except KeyError as e:
            protectionpolicy_logger.error(f"Missing required field: {e}")
            raise exceptions.PpdmException(f"Missing required field: {e}")

    def get_policy(self):
        """Refresh self.exists/self.body from the server by name."""
        protection_policy = self.__get_protection_policy_by_name()
        # An empty dict response means "not found"; anything truthy exists.
        if bool(protection_policy.response) is not False:
            self.exists = True
            self.body = protection_policy.response

    def delete_policy(self):
        """Delete the policy when it exists (no-op in check mode)."""
        if self.exists:
            if not self.check_mode:
                return_body = self.__delete_protection_policy(self.body['id'])
                self.exists = False
            if self.check_mode:
                protectionpolicy_logger.info("check mode enabled, "
                                             "no action taken")
                return_body = helpers.ReturnBody()
                return_body.success = True
            if return_body.success:
                self.changed = True
                self.body = {}
                self.msg = f"Protection rule {self.name} deleted"
            elif return_body.success is False:
                self.failure = True
                self.fail_msg = return_body.fail_msg

    def create_rule(self, **kwargs):
        """Create a protection rule attached to policy ``policy_name``.

        Required kwargs: policy_name, inventory_type, label.
        """
        policy_name = kwargs['policy_name']
        inventory_type = kwargs['inventory_type']
        label = kwargs['label']
        if not self.exists:
            if not self.check_mode:
                return_body = self.__create_protection_policy(
                               rule_name=self.name,
                               policy_name=policy_name,
                               inventory_type=inventory_type,
                               label=label)
                # NOTE(review): get_rule() is not defined in this class
                # (only get_policy is) — presumably inherited from Ppdm;
                # verify it exists before relying on this path.
                self.get_rule()
            if self.check_mode:
                protectionpolicy_logger.info("check mode enabled, "
                                             "no action taken")
                return_body = helpers.ReturnBody()
                return_body.success = True
            if return_body.success:
                self.changed = True
                self.msg = f"Protection Rule {self.name} created"
            elif return_body.success is False:
                self.failure = True
                self.fail_msg = return_body.fail_msg
        elif self.exists:
            self.msg = f"Protection Rule {self.name} already exists"

    def update_rule(self):
        """Apply self.target_body on top of self.body when they differ."""
        if (self.exists and
                helpers._body_match(self.body, self.target_body) is False):
            self.body.update(self.target_body)
            if not self.check_mode:
                return_body = self.__update_protection_policy(self.body)
                # NOTE(review): see create_rule — get_rule() is assumed to
                # come from the Ppdm base class.
                self.get_rule()
            if self.check_mode:
                protectionpolicy_logger.info("check mode enabled, "
                                             "no action taken")
                return_body = helpers.ReturnBody()
                return_body.success = True
            if return_body.success:
                self.changed = True
                self.target_body = {}
                self.msg = f"Protection Rule {self.name} updated"
            elif return_body.success is False:
                self.failure = True
                self.fail_msg = return_body.fail_msg

    def __create_protection_policy(self, policy_name, rule_name, inventory_type,
                                   label, **kwargs):
        """POST a new protection rule; returns a helpers.ReturnBody."""
        protectionpolicy_logger.debug("Method: create_protection_policy")
        return_body = helpers.ReturnBody()
        # Closed set of inventory types accepted by the PPDM API.
        inventory_types = ["KUBERNETES",
                           "VMWARE_VIRTUAL_MACHINE",
                           "FILE_SYSTEM",
                           "MICROSOFT_SQL_DATABASE",
                           "ORACLE_DATABASE"]
        if inventory_type not in inventory_types:
            err_msg = "Protection Rule not Created. Inventory Type not valid"
            protectionpolicy_logger.error(err_msg)
            return_body.success = False
            return_body.fail_msg = err_msg
        if return_body.success is None:
            # NOTE(review): this calls the public (non-mangled) lookup —
            # presumably provided by the Ppdm base class; verify.
            protection_policy = (self.get_protection_policy_by_name(
                                 policy_name))
            if protection_policy.success is False:
                err_msg = f"Protection Policy not found: {policy_name}"
                protectionpolicy_logger.error(err_msg)
                return_body.success = False
                return_body.fail_msg = (err_msg)
                return_body.status_code = protection_policy.status_code
        if return_body.success is None:
            body = {'action': kwargs.get('action', 'MOVE_TO_GROUP'),
                    'name': rule_name,
                    'actionResult': (protection_policy.response['id']),
                    'conditions': [{
                        'assetAttributeName': 'userTags',
                        'operator': 'EQUALS',
                        'assetAttributeValue': label
                    }],
                    # NOTE(review): key is spelled 'connditionConnector' —
                    # looks like a typo for 'conditionConnector'; confirm
                    # against the PPDM API before changing.
                    'connditionConnector': 'AND',
                    'inventorySourceType': inventory_type,
                    'priority': kwargs.get('priority', 1),
                    'tenant': {
                        'id': '00000000-0000-4000-a000-000000000000'
                    }
                    }
            response = self._rest_post("/protection-policies", body)
            if response.ok is False:
                protectionpolicy_logger.error("Protection Rule not Created")
                return_body.success = False
                return_body.fail_msg = response.json()
                return_body.status_code = response.status_code
            elif response.ok is True:
                return_body.success = True
                return_body.response = response.json()
                return_body.status_code = response.status_code
        return return_body

    def __get_protection_policy_by_name(self):
        """GET the policy by self.name; empty response dict == not found."""
        protectionpolicy_logger.debug("Method: get_protection_policy_by_name")
        return_body = helpers.ReturnBody()
        response = super()._rest_get("/protection-policies"
                                     f"?filter=name%20eq%20%22{self.name}%22")
        if response.ok is False:
            return_body.success = False
            return_body.fail_msg = response.json()
            return_body.status_code = response.status_code
        if response.ok:
            if not response.json()['content']:
                err_msg = f"Protection rule not found: {self.name}"
                protectionpolicy_logger.debug(err_msg)
                return_body.success = True
                return_body.status_code = response.status_code
                return_body.response = {}
            else:
                return_body.success = True
                return_body.response = response.json()['content'][0]
                return_body.status_code = response.status_code
        return return_body

    def __update_protection_policy(self, body):
        """PUT the full policy body back to the server."""
        protectionpolicy_logger.debug("Method: update_protection_policy")
        return_body = helpers.ReturnBody()
        protection_policy_id = body["id"]
        response = self._rest_put("/protection-policies"
                                  f"/{protection_policy_id}", body)
        if not response.ok:
            protectionpolicy_logger.error("Protection Rule not Updated")
            return_body.success = False
            return_body.fail_msg = response.json()
            return_body.status_code = response.status_code
        if return_body.success is None:
            return_body.success = True
            return_body.response = response.json()
            return_body.status_code = response.status_code
        return return_body

    def __delete_protection_policy(self, id):
        """DELETE the policy with the given id."""
        protectionpolicy_logger.debug("Method: delete_protection_policy")
        return_body = helpers.ReturnBody()
        response = self._rest_delete(f"/protection-policies/{id}")
        if not response.ok:
            protectionpolicy_logger.error(f"Protection Rule id \"{id}\" "
                                          "not deleted")
            return_body.success = False
            return_body.fail_msg = response.json()
        if return_body.success is None:
            return_body.success = True
            return_body.response = f"Protection Rule id \"{id}\" "\
                                   "successfully deleted"
        return_body.status_code = response.status_code
        return return_body
| 44.285024 | 80 | 0.565507 |
7452bd73642331896e721892fbc64fccf2c300a7 | 22,258 | py | Python | src/wx.py | jack139/fair | fe0ff64f8edbd794c3fb951ab6af420054e9e585 | [
"BSD-3-Clause"
] | 1 | 2019-07-16T09:46:39.000Z | 2019-07-16T09:46:39.000Z | src/wx.py | jack139/fair | fe0ff64f8edbd794c3fb951ab6af420054e9e585 | [
"BSD-3-Clause"
] | null | null | null | src/wx.py | jack139/fair | fe0ff64f8edbd794c3fb951ab6af420054e9e585 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import web
import time, json, urllib, urllib3
import gc
from bson.objectid import ObjectId
from config.url_wx import urls
from config import setting
from config.mongosession import MongoStore
import app_helper, sms
from app_helper import time_str, get_token
# Prefer the C-accelerated ElementTree (Python 2); fall back to pure Python.
try:
    import xml.etree.cElementTree as ET
except ImportError:
    import xml.etree.ElementTree as ET
db = setting.db_web # default DB: the local "web" database
#file_db = setting.db_file1
app = web.application(urls, globals())
application = app.wsgifunc()
#--session---------------------------------------------------
#web.config.session_parameters['cookie_name'] = 'uwx_session'
#web.config.session_parameters['secret_key'] = 'f6102bff8452386b8ca1'
#web.config.session_parameters['timeout'] = 86400
#web.config.session_parameters['ignore_expiry'] = True
#if setting.debug_mode==False:
#    ### for production
#    session = web.session.Session(app, MongoStore(db, 'sessions'),
#        initializer={'login': 0, 'privilege': 0, 'uname':'', 'openid':''})
#else:
#    ### for staging,
#    if web.config.get('_session') is None:
#        session = web.session.Session(app, MongoStore(db, 'sessions'),
#            initializer={'login': 0, 'privilege': 0, 'uname':'', 'openid':''})
#        web.config._session = session
#    else:
#        session = web.config._session
#----------------------------------------
# Custom GC thresholds (gen0=300): collections are triggered more often
# than the default to keep memory in check in this long-running process.
gc.set_threshold(300,5,5)
# U-Fresh, production credentials
#wx_appid='wx2527355bfd909dbe'
#wx_secret='49e8eb83c3fce102215a92047e8e9290'
# F8KAM, testing credentials
#wx_appid='wxb920ef74b6a20e69'
#wx_secret='ddace9d14b3413c65991278f09a03896'
# Live credentials are read from the deployment configuration.
wx_appid=setting.wx_setting['wx_appid']
wx_secret=setting.wx_setting['wx_appsecret']
##############################################
def create_render(plain=False):
    """Build a web.py template renderer for templates/wx.

    With plain=True no layout wrapper is applied (raw output, e.g. XML).
    """
    render = web.template.render('templates/wx',
                                 base=None if plain else 'layout')
    return render
def check_wx_user(wx_user):
    """Look up a WeChat user; register an unbound record when unseen.

    Returns the bound owner account name, or '' for new/unbound users.
    """
    record = db.wx_user.find_one({'wx_user': wx_user}, {'owner': 1})
    if record is not None:  # already registered
        return record['owner']
    # first contact: create an unbound record
    db.wx_user.insert_one({'wx_user': wx_user, 'owner': '', 'time': time_str()})
    return ''
def bind_wx_user(wx_user, fair_user):
    """Bind a WeChat openid to a fair account, creating the record first."""
    check_wx_user(wx_user)  # ensure the record exists before updating
    db.wx_user.update_one({'wx_user': wx_user},
                          {'$set': {'owner': fair_user}})
def reply_none():
    """Send an empty plain-text body (WeChat treats it as "no reply")."""
    web.header("Content-Type", "text/plain")
    return ""
class PostMsg:
    """Parsed WeChat push message plus the reply helpers for it."""

    def __init__(self, str_xml):
        # Parse the incoming XML payload once and cache the common fields.
        self.xml=ET.fromstring(str_xml)
        self.fromUser=self.xml.find("FromUserName").text
        self.toUser=self.xml.find("ToUserName").text
        self.msgType=self.xml.find("MsgType").text
        self.key=''

    def reply_text(self, content):
        """Render a text reply addressed back to the sender."""
        render = create_render(plain=True)
        return render.xml_reply(self.fromUser, self.toUser, int(time.time()), content)

    def reply_media(self, content):
        """Render a news/media reply; content is a list of
        (title, description, image url, page url) tuples."""
        #content = [(u'标题2', u'', u'', u'http://wx.f8geek.com/live2')]
        render = create_render(plain=True)
        return render.xml_media(self.fromUser, self.toUser, int(time.time()), content)

    def text_process(self): # handle text-message replies
        content=self.xml.find("Content").text
        cmd0 = content.split()
        if u'免单' in cmd0[0].lower(): # test keyword
            return self.reply_media([
                (u'魔都U粉会专享免单,人人有份!',u'',u'',
                u'http://mp.weixin.qq.com/s?__biz=MzI3OTAwODMyOQ==&mid=400211195&idx=1&sn=f3e41093c3b4594d8746fac4dd3f3106#rd')
                ])
        if setting.region_id=='003': # East China region
            return self.reply_text( u"亲爱的客官你好,感谢您对我们的支持。上海外环内U掌柜鲜果零食均可享受19.9元包邮,1小时送达的服务。如有任何问题可拨打客服电话:400-966-9966,对果品不满意可立即退货或退款。")
        else:
            return self.reply_text( u"亲爱的客官你好,感谢您对我们的支持。如有任何问题可拨打客服电话:400-966-9966。")

    def event_process(self): # handle event pushes
        event=self.xml.find("Event").text
        if event=='CLICK':
            self.key=self.xml.find("EventKey").text
            #print self.key
            if self.key=='CLICK_WAIT':
                return self.reply_text(u"敬请期待!")
            elif self.key=='CLICK_SERVICE':
                return self.reply_text(u"亲爱的客官,感谢您关注U掌柜拼团。在线客服服务每天9:00-20:00真人值守,全心全意为您服务!请客官概述一下您遇到的问题,我们将对应问题安排客服快速、有效的帮助您,谢谢")
            elif self.key=='CLICK_SUGGEST':
                return self.reply_text(u"谢谢您对我们的支持。在下方聊天框中输入“我有建议+您的建议+姓名+联系电话”即可。我们会认真考虑您的每一个宝贵的意见和建议,我们会在您的帮助下做得越来越好,再次感谢您的支持。")
        elif event=='subscribe':
            #print "NEW: %s" % self.fromUser
            bind_wx_user(self.fromUser, '')
            # fetch user info
            #info = get_info(self.fromUser)
            #print info
            #return self.reply_text(u"欢迎使用U掌柜微信服务号!")
            if setting.region_id=='003': # East China region
                ret_media = [
                    (u'优鲜美味,掌上专柜!',u'',u'http://urfresh.cn/static/home/images/logo.png',u'http://app.urfresh.cn/u'),
                    (u'魔都U粉会专享免单,人人有份!',u'',u'',u'http://mp.weixin.qq.com/s?__biz=MzI3OTAwODMyOQ==&mid=400211195&idx=1&sn=f3e41093c3b4594d8746fac4dd3f3106#rd'),
                    (u'下载App',u'',u'http://urfresh.cn/static/home/images/download1.jpg',u'http://app.urfresh.cn/u')
                ]
            # Bug fix: this was a plain `if`, whose `else` branch then
            # unconditionally overwrote the region 003 list built above.
            elif setting.region_id=='001': # Southeast region
                ret_media = [
                    (u'轻松几步,玩转掌柜拼团',u'',u'http://img.urfresh.cn/image/product/wx_pt.jpg',u'http://mp.weixin.qq.com/s?__biz=MzA5NjUzNjQ0Ng==&mid=400701472&idx=1&sn=5b7c75fe4bc6a3a6657e05b210694645&scene=1&srcid=1117o9j4kbTC6Lr6OgsrxvFl&key=d4b25ade3662d643c704f637869cf7388cd7bd3cbf54480a99109929c68b9ba2a0c202f7e706cd057840d427d8f84be6&ascene=0&uin=MjY2ODIxNjE4MQ%3D%3D&devicetype=iMac+MacBookPro5%2C4+OSX+OSX+10.11.1+build(15B42)&version=11020201&pass_ticket=gO%2F06k1awC23cLIXWWX61aeouLubsV%2B%2BXmEVvu8quX2Dvog526AXCfBfOEDf1L6e'),
                    #(u'U掌柜东南还未上线正在测试中,所显示商品为测试环境不能成单不发货,敬请期待哦~~',u'',u'',u''),
                ]
            else: # other regions: no app-download prompt
                ret_media = [
                    (u'优鲜美味,掌上专柜!',u'',u'http://urfresh.cn/static/home/images/logo.png',u'http://app.urfresh.cn/u'),
                    (u'魔都U粉会专享免单,人人有份!',u'',u'',u'http://mp.weixin.qq.com/s?__biz=MzI3OTAwODMyOQ==&mid=400211195&idx=1&sn=f3e41093c3b4594d8746fac4dd3f3106#rd'),
                ]
            return self.reply_media(ret_media)
        elif event=='unsubscribe':
            #print "LEFT: %s" % self.fromUser
            bind_wx_user(self.fromUser, 'N/A')
            return reply_none()

    def do_process(self):
        """Dispatch the message to the handler for its type."""
        if self.msgType=='text':
            return self.text_process()
        elif self.msgType=='event':
            return self.event_process()
        else:
            return reply_none()
class First:
    """WeChat server endpoint: URL verification (GET) and message webhook (POST)."""

    def GET(self):
        """Echo handshake: verify WeChat's signature and return echostr."""
#        test1='<xml><ToUserName><![CDATA[gh_96ef24d64c49]]></ToUserName>' \
#            '<FromUserName><![CDATA[ogQxxuBJi1KR_BLn86aRIKTHrcPM]]></FromUserName>' \
#            '<CreateTime>1411443827</CreateTime>' \
#            '<MsgType><![CDATA[event]]></MsgType>' \
#            '<Event><![CDATA[CLICK]]></Event>' \
#            '<EventKey><![CDATA[KAM_SNAPSHOT]]></EventKey>' \
#            '</xml>'
#        pm=PostMsg(test1)
#        return pm.do_process()
        import hashlib
        user_data=web.input(signature='', timestamp='', nonce='', echostr='')
        if '' in (user_data.signature, user_data.timestamp, user_data.nonce, user_data.echostr):
            return reply_none()
        token1='7a710d7955acb49fbf1a' # hashlib.sha1('ilovekam').hexdigest()[5:25]
        # WeChat signature scheme: sha1 of the sorted (token, timestamp, nonce).
        tmp=[token1, user_data.timestamp, user_data.nonce]
        tmp.sort()
        tmp1=tmp[0]+tmp[1]+tmp[2]
        tmp2=hashlib.sha1(tmp1).hexdigest()
        #print "%s %s %s" % (tmp1, tmp2, user_data.signature)
        web.header("Content-Type", "text/plain") # Set the Header
        if tmp2==user_data.signature:
            return user_data.echostr
        else:
            return "fail!"

    def POST(self):
        """Incoming message/event: verify the signature, then dispatch."""
        import hashlib
        user_data=web.input(signature='', timestamp='', nonce='')
        if '' in (user_data.signature, user_data.timestamp, user_data.nonce):
            return reply_none()
        token1='7a710d7955acb49fbf1a' # hashlib.sha1('ilovekam').hexdigest()[5:25]
        tmp=[token1, user_data.timestamp, user_data.nonce]
        tmp.sort()
        tmp1=tmp[0]+tmp[1]+tmp[2]
        tmp2=hashlib.sha1(tmp1).hexdigest()
        if tmp2!=user_data.signature:
            return reply_none()
        # Build the XML DOM tree from the posted payload.
        str_xml=web.data()
        #print str_xml
        pm=PostMsg(str_xml)
        return pm.do_process()
# Fetch (and cache) the WeChat JS-API ticket.
def get_ticket(force=False): # force=True bypasses the 1-hour cache
    """Return a jsapi ticket, using the per-region DB cache when fresh.

    Returns '' when the WeChat API did not hand back a ticket.
    """
    if not force:
        db_ticket = db.jsapi_ticket.find_one({'region_id':setting.region_id})
        # Cached ticket is reused for up to one hour.
        if db_ticket and int(time.time())-db_ticket.get('tick', 0)<3600:
            if db_ticket.get('ticket', '')!='':
                print db_ticket['ticket']
                return db_ticket['ticket']
    token = get_token(force)
    url='https://api.weixin.qq.com/cgi-bin/ticket/getticket?access_token=%s&type=jsapi' % token
    f=urllib.urlopen(url)
    data = f.read()
    f.close()
    print data
    t=json.loads(data)
    if t.has_key('ticket'):
        print t
        db.jsapi_ticket.update_one({'region_id':setting.region_id},
            {'$set':{'tick':int(time.time()), 'ticket':t['ticket']}},upsert=True)
        return t['ticket']
    else:
        # Cache the failure too, so the next caller retries after the TTL.
        db.jsapi_ticket.update_one({'region_id':setting.region_id},
            {'$set':{'tick':int(time.time()), 'ticket':''}},upsert=True)
        return ''
# Fetch a user's basic WeChat profile.
def get_info(openid):
    """Return the WeChat user-info JSON dict for *openid*.

    On API error the returned dict contains an 'errcode' key (see callers).
    """
    token = get_token()
    url='https://api.weixin.qq.com/cgi-bin/user/info?access_token=%s&openid=%s&lang=zh_CN' % (token, openid)
    f=urllib.urlopen(url)
    data = f.read()
    f.close()
    print data
    t=json.loads(data)
    return t
# WeChat OAuth entry helper.
def get_redirect_loc(redirect_uri):
    """Build the WeChat OAuth2 authorize URL that redirects back to
    *redirect_uri* with scope snsapi_base."""
    params = (wx_appid, urllib.quote_plus(redirect_uri))
    return ('https://open.weixin.qq.com/connect/oauth2/authorize?'
            'appid=%s&'
            'redirect_uri=%s&'
            'response_type=code&'
            'scope=snsapi_base&'
            'state=1#wechat_redirect' % params)
# Shop entry; test URL: http://wx-test.urfresh.cn/wx/fair?code=test
def init_job(code):
    """Exchange a WeChat OAuth *code* for an openid and create a session.

    Registers first-time users (with welcome coupons), backfills missing
    WeChat profile data, then inserts an app_sessions record and returns
    its session_id. Returns None on any OAuth failure.
    """
    if code=='':
        #return render.info('参数错误',goto='/') # info页面要做微信端优化
        #raise web.seeother('/wx/init_fair')
        return None
    if code=='test':
        # Test hook: skip the OAuth exchange entirely.
        openid = code
    else:
        urllib3.disable_warnings()
        http = urllib3.PoolManager(num_pools=2, timeout=180, retries=False)
        url = 'https://api.weixin.qq.com/sns/oauth2/access_token?' \
              'appid=%s&' \
              'secret=%s&' \
              'code=%s&' \
              'grant_type=authorization_code' % \
              (wx_appid, wx_secret, code, )
        r = http.request('GET', url)
        if r.status==200:
            data = r.data
            t=json.loads(data)
            #print t
            if t.has_key('openid'):
                openid = t['openid']
            else:
                #return render.info('授权失败',goto='/')
                #raise web.seeother('/wx/init_fair')
                return None
        else:
            #raise web.seeother('/wx/init_fair')
            return None
    # fetch the jsapi ticket
    ticket = get_ticket()
    if ticket=='':
        print 'get ticket fail!'
        #raise web.seeother('/wx/init_fair')
        #return None
        ticket = get_ticket(True)
    #session.login = 1
    #session.uname = ''
    #session.openid = openid
    #session.privilege = helper.PRIV_WX
    uname = ''
    # check whether the user is already registered
    db_user = db.app_user.find_one({'openid':openid})
    if db_user==None:
        # not registered: create a new user record
        # basic WeChat profile (retry once after refreshing the token)
        info = get_info(openid)
        if info.has_key('errcode'):
            get_ticket(True)
            info = get_info(openid)
        print info
        coupon = []
        valid = app_helper.time_str(time.time()+3600*24*10, 1) # valid for 10 days 2015-11-22
        # issue sign-up coupons, v3
        for i in app_helper.reginster_coupon:
            coupon.append((app_helper.my_rand(), valid, '%.2f' % float(i[0]), 1, i[1], i[2]))
        db.app_user.insert_one({
            'openid' : openid,
            'address' : [],
            'coupon' : coupon, # welcome coupons
            'app_id' : '', # registered via WeChat first, no app_id yet
            'reg_time' : app_helper.time_str(),
            'wx_nickname' : info.get('nickname','游客'),
            'wx_headimgurl' : info.get('headimgurl', ''),
            'wx_info' : info,
        })
    else:
        if db_user.get('wx_headimgurl', '')=='':
            # basic WeChat profile (retry once after refreshing the token)
            info = get_info(openid)
            if info.has_key('errcode'):
                get_ticket(True)
                info = get_info(openid)
            print info
            # backfill the WeChat profile info
            db.app_user.update_one({'openid':openid}, {'$set':{
                'wx_nickname' : info.get('nickname','游客'),
                'wx_headimgurl' : info.get('headimgurl', ''),
                'wx_info' : info,
            }})
        uname = db_user.get('uname','')
    # create session ------------------
    import hashlib
    rand2 = app_helper.my_rand(16)
    now = time.time()
    secret_key = 'f6102bff8451236b8ca1'
    session_id = hashlib.sha1("%s%s%s%s" %(rand2, now, web.ctx.ip.encode('utf-8'), secret_key))
    session_id = session_id.hexdigest()
    db.app_sessions.insert_one({
        'session_id' : session_id,
        'openid' : openid,
        'ticket' : ticket,
        'uname' : uname,
        'login' : 1,
        'rand' : rand2,
        'ip' : web.ctx.ip,
        'attime' : now,
    })
    # clean sessions: WeChat sessions older than 12h ---- risky, disabled
    #db.app_sessions.remove({'openid':{'$exists':True},'attime':{'$lt':(now-3600*12)}})
    # clean sessions: not-logged-in sessions older than 12h
    db.app_sessions.delete_many({'login':0,'attime':{'$lt':(now-3600*12)}})
    # clean sessions: unused sessions older than 30 days
    db.app_sessions.delete_many({'attime':{'$lt':(now-3600*24*30)}})
    # -------------------------------
    print session_id, openid, uname
    render = create_render(plain=True)
    #return render.fair(session_id, uname)
    #raise web.seeother('/static/wx/fair.html?session_id=%s' % session_id)
    return session_id
# Ordering entry point.
class InitFair:
    """Redirect through WeChat OAuth; callback is /wx/fair (class Fair)."""
    def GET(self):
        raise web.redirect(get_redirect_loc('http://%s/wx/fair' % setting.wx_host))
class Fair:
    """OAuth callback for the ordering page: create a session, then jump."""
    def GET(self):
        oauth_code = web.input(code='').code
        sid = init_job(oauth_code)
        if sid is None:
            # No session could be created: retry through the OAuth entry.
            raise web.seeother('/wx/init_fair')
        raise web.seeother('/static/wx/fair.html?session_id=%s&region_id=001' % sid)
# Group-buy (pintuan) entry point.
class InitTuan:
    """Redirect through WeChat OAuth; callback is /wx/tuan (class Tuan)."""
    def GET(self):
        raise web.redirect(get_redirect_loc('http://%s/wx/tuan' % setting.wx_host))
class Tuan:
    """OAuth callback for the group-buy page: create a session, then jump."""
    def GET(self):
        oauth_code = web.input(code='').code
        sid = init_job(oauth_code)
        if sid is None:
            raise web.seeother('/wx/init_tuan')
        target = '/static/wx/tuan.html?session_id=%s&region_id=%s' % (sid, setting.region_id)
        raise web.seeother(target)
# "My group-buys" entry point.
class InitTuanList:
    """Redirect through WeChat OAuth; callback is /wx/tuan_list."""
    def GET(self):
        raise web.redirect(get_redirect_loc('http://%s/wx/tuan_list' % setting.wx_host))
class TuanList:
    """OAuth callback for the "my group-buys" list page."""
    def GET(self):
        oauth_code = web.input(code='').code
        sid = init_job(oauth_code)
        if sid is None:
            raise web.seeother('/wx/init_tuan_list')
        target = '/static/wx/pt_myList.html?session_id=%s&region_id=%s' % (sid, setting.region_id)
        raise web.seeother(target)
# Group-buy share entry point.
class InitTuanShare:
    """Redirect through WeChat OAuth, forwarding region/order identifiers."""
    def GET(self):
        user_data=web.input(region_id='', pt_order_id='')
        raise web.redirect(get_redirect_loc('http://%s/wx/tuan_share?region_id=%s&pt_order_id=%s' % \
            (setting.wx_host, user_data['region_id'], user_data['pt_order_id'])))
class TuanShare:
def GET(self):
user_data=web.input(code='', region_id='', pt_order_id='')
session_id = init_job(user_data.code)
if session_id==None:
raise web.seeother('http://%s/wx/tuan_share?region_id=%s&pt_order_id=%s' % \
(setting.wx_host, user_data['region_id'], user_data['pt_order_id']))
else:
raise web.seeother('/static/wx/pt_active.html?session_id=%s®ion_id=%s&pt_order_id=%s' % \
(session_id, user_data['region_id'], user_data['pt_order_id']))
# Group-buy detail page entry point
class InitTuanDetail:
    def GET(self):
        user_data=web.input(tuan_id='')
        # NOTE(review): region_id is hard-coded to 001 here, while TuanDetail below
        # uses setting.region_id — confirm the two should agree.
        raise web.redirect(get_redirect_loc('http://%s/wx/tuan_detail?region_id=001&tuan_id=%s' % \
            (setting.wx_host, user_data['tuan_id'])))
class TuanDetail:
    def GET(self):
        user_data=web.input(code='', tuan_id='')
        session_id = init_job(user_data.code)
        if session_id==None:
            raise web.seeother('http://%s/wx/tuan_detail?region_id=%s&tuan_id=%s' % \
                (setting.wx_host, setting.region_id, user_data['tuan_id']))
        else:
            raise web.seeother('/static/wx/pt_detail.html?session_id=%s&region_id=%s&tuan_id=%s' % \
                (session_id, setting.region_id, user_data['tuan_id']))
# "My orders" entry point
class InitMyOrder:
    def GET(self):
        raise web.redirect(get_redirect_loc('http://%s/wx/my_order' % setting.wx_host))
class MyOrder:
    def GET(self):
        user_data=web.input(code='')
        session_id = init_job(user_data.code)
        if session_id==None:
            # OAuth code missing or expired: restart the auth round-trip.
            raise web.seeother('/wx/init_my_order')
        else:
            raise web.seeother('/static/wx/orderList.html?session_id=%s&region_id=%s' % \
                (session_id, setting.region_id))
# "My address book" entry point
class InitMyAddr:
    def GET(self):
        raise web.redirect(get_redirect_loc('http://%s/wx/my_address' % setting.wx_host))
class MyAddr:
    def GET(self):
        user_data=web.input(code='')
        session_id = init_job(user_data.code)
        if session_id==None:
            # OAuth code missing or expired: restart the auth round-trip.
            raise web.seeother('/wx/init_my_address')
        else:
            raise web.seeother('/static/wx/address.html?session_id=%s&region_id=%s' % \
                (session_id, setting.region_id))
# "My coupons" entry point
class InitMyCoupon:
    def GET(self):
        raise web.redirect(get_redirect_loc('http://%s/wx/my_coupon' % setting.wx_host))
class MyCoupon:
    def GET(self):
        user_data=web.input(code='')
        session_id = init_job(user_data.code)
        if session_id==None:
            # OAuth code missing or expired: restart the auth round-trip.
            raise web.seeother('/wx/init_my_coupon')
        else:
            raise web.seeother('/static/wx/coupon.html?session_id=%s' % session_id)
# "My balance" entry point
class InitMyCredit:
    def GET(self):
        raise web.redirect(get_redirect_loc('http://%s/wx/my_credit' % setting.wx_host))
class MyCredit:
    def GET(self):
        user_data=web.input(code='')
        session_id = init_job(user_data.code)
        if session_id==None:
            # OAuth code missing or expired: restart the auth round-trip.
            raise web.seeother('/wx/init_my_credit')
        else:
            raise web.seeother('/static/wx/userInfo.html?session_id=%s' % session_id)
# "Bind phone number" entry point
class InitMyBind:
    def GET(self):
        raise web.redirect(get_redirect_loc('http://%s/wx/my_bind' % setting.wx_host))
class MyBind:
    def GET(self):
        user_data=web.input(code='')
        session_id = init_job(user_data.code)
        if session_id==None:
            # OAuth code missing or expired: restart the auth round-trip.
            raise web.seeother('/wx/init_my_bind')
        else:
            raise web.seeother('/static/wx/bind.html?session_id=%s' % session_id)
# Bind a phone number to a WeChat user: step 1, send the SMS verification code.
class WxPhone:
    def POST(self):
        web.header('Content-Type', 'application/json')
        #print web.input()
        param = web.input(openid='', session_id='', number='')
        if param.number=='':
            return json.dumps({'ret' : -2, 'msg' : '参数错误'})
        if param.openid=='' and param.session_id=='':
            return json.dumps({'ret' : -2, 'msg' : '参数错误1'})
        # Either openid or session_id may identify the caller.
        if param.openid!='':
            uname = app_helper.check_openid(param.openid)
        else:
            uname = app_helper.wx_logged(param.session_id)
        if uname:
            #print 'user_phone', uname
            if len(uname['uname'].strip())>0:
                return json.dumps({'ret' : -5, 'msg' : '已绑定手机号码,不能重复绑定'})
            number = param.number.strip()
            # NOTE(review): this rejects numbers shorter than 11 digits but
            # accepts longer ones — confirm that is intended.
            if len(number)<11 or (not number.isdigit()):
                return json.dumps({'ret' : -3, 'msg' : '手机号码格式错误'})
            # One-time verification code
            rand = app_helper.my_rand(base=1)
            register = False
            # Send the SMS verification code
            sms.send_rand(number, rand, register)
            # Stash the pending number and code on the user record until verified
            db.app_user.update({'openid':uname['openid']},{'$set':{'phone':number, 'rand':rand}})
            # Success
            return json.dumps({'ret' : 0})
        else:
            return json.dumps({'ret' : -4, 'msg' : '无效的openid'})
# Verify the SMS code and finish binding the phone number (step 2).
class WxCheckRand:
    def POST(self):
        web.header('Content-Type', 'application/json')
        #print web.input()
        param = web.input(openid='', session_id='', rand='', invitation='')
        if param.rand=='':
            return json.dumps({'ret' : -2, 'msg' : '参数错误'})
        if param.openid=='' and param.session_id=='':
            return json.dumps({'ret' : -2, 'msg' : '参数错误1'})
        # Either openid or session_id may identify the caller.
        if param.openid!='':
            uname = app_helper.check_openid(param.openid)
        else:
            uname = app_helper.wx_logged(param.session_id)
        if uname:
            if len(uname['uname'].strip())>0:
                return json.dumps({'ret' : -5, 'msg' : '已绑定手机号码,不能重复绑定'})
            # Optional invitation code handling
            if param.has_key('invitation'):
                invitation = param.invitation
                if db.invitation.find({'code': invitation}).count()==0: # invalid invitation code
                    invitation = ''
                else:
                    r = db.app_user.find_one({'openid' : uname['openid']},{'invitation':1})
                    if r.get('invitation', '')!='': # an invitation code was already recorded
                        invitation = ''
            else:
                invitation = ''
            if invitation!='':
                # Record the invitation (coupon reward currently disabled below)
                valid = app_helper.time_str(time.time()+3600*24*30, 1) # valid for 30 days
                r = db.app_user.find_one_and_update({'openid' : uname['openid']},{
                    '$set' : {'invitation' : invitation, 'last_time' : app_helper.time_str()},
                    #'$push' : {'coupon' : (app_helper.my_rand(), valid, '5.00', 1)}, # invitation grants 5 yuan
                    })
            else:
                r = db.app_user.find_one_and_update({'openid' : uname['openid']},{
                    '$set' : {'last_time' : app_helper.time_str()}
                    })
            # Check the SMS verification code against what WxPhone stored
            if param.rand.strip()!=r['rand']:
                return json.dumps({'ret' : -5, 'msg' : '短信验证码错误'})
            if len(r['address'])>0: # TODO: should return the most recently used address !!!!
                addr = {
                    'id' : r['address'][0][0],
                    'name' : r['address'][0][1],
                    'tel' : r['address'][0][2],
                    'addr' : r['address'][0][3],
                }
            else:
                addr = {}
            # Binding: merge accounts if the phone number already registered via the app
            r2 = db.app_user.find_one({'uname':r['phone']})
            #print r['phone'],r2
            if r2:
                # Phone number already registered in the app: merge the app user and the WeChat user
                #print db.app_user.update_one({'openid':uname['openid']},{'$set':{
                #    'uname' : r['phone'],
                #    'address' : r['address']+r2['address'],
                #    'coupon' : r['coupon']+r2['coupon'],
                #    'app_id' : r2['app_id']
                #}})
                # (disabled) make the phone-number account unusable
                #db.app_user.update_one({'_id':r2['_id']},{'$set' : {'uname': u'~%s' % r2['uname']}})
                print '合并到app用户'
                print db.app_user.update_one({'_id':r2['_id']},{'$set':{
                    'openid' : uname['openid'],
                    'address' : r['address']+r2['address'],
                    'coupon' : r['coupon']+r2['coupon'],
                }})
                # Make the stand-alone WeChat account unusable after the merge
                db.app_user.update_one({'_id':r['_id']},{'$set' : {'openid': u'~%s' % uname['openid']}})
            else:
                # Phone number not registered yet: just record it on the WeChat user
                db.app_user.update_one({'openid':uname['openid']},{'$set':{'uname':r['phone']}})
            # Refresh uname in the session record
            db.app_sessions.update_one({'session_id':param.session_id},{'$set':{'uname':r['phone']}})
            # Response
            return json.dumps({
                'ret' : 0,
                'data' : {
                    'login' : True,
                    'addr' : addr,
                }
            })
        else:
            return json.dumps({'ret' : -4, 'msg' : '无效的openid'})
class WxSignature:
    # Produce the WeChat JS-SDK signature parameters (wx.config) for a page URL.
    def POST(self):
        # NOTE(review): shadows any module-level json/hashlib imports — harmless
        # but redundant; confirm before removing.
        import json, hashlib
        web.header('Content-Type', 'application/json')
        param = web.input(currUrl='',cross='')
        ticket = get_ticket()
        if ticket=='':
            # Retry once
            ticket = get_ticket()
            if ticket=='':
                print '---------- get ticket fail!'
                #return None
        noncestr = app_helper.my_rand()
        timestamp = str(int(time.time()))
        #url = 'http://test.urfresh.cn/static/hb/001.html'
        url = param.currUrl
        # String to sign, per the WeChat JS-SDK signature algorithm.
        string1 = 'jsapi_ticket=%s&noncestr=%s&timestamp=%s&url=%s' % (ticket, noncestr, timestamp, url)
        print string1
        if param.cross=='yes':
            # JSONP response for cross-domain callers.
            return 'jsonpcallback(%s)' % json.dumps({
                'appid' : wx_appid,
                'timestamp' : timestamp,
                'nonceStr' : noncestr,
                'sign' : hashlib.sha1(string1).hexdigest(),
            })
        else:
            return json.dumps({
                'appid' : wx_appid,
                'timestamp' : timestamp,
                'nonceStr' : noncestr,
                'sign' : hashlib.sha1(string1).hexdigest(),
            })
    def GET(self):
        # Same behaviour for GET requests.
        return self.POST()
#if __name__ == "__main__":
# web.wsgi.runwsgi = lambda func, addr=None: web.wsgi.runfcgi(func, addr)
# app.run()
| 30.159892 | 518 | 0.660796 |
f32c25a744b0b3988cd0da07baf1f62a0700a54d | 1,173 | py | Python | utils/summaries.py | fadeevla/pytorch-deeplab-xception | dd4e092ecc47ec94e851b36523d243a484f6c672 | [
"MIT"
] | null | null | null | utils/summaries.py | fadeevla/pytorch-deeplab-xception | dd4e092ecc47ec94e851b36523d243a484f6c672 | [
"MIT"
] | null | null | null | utils/summaries.py | fadeevla/pytorch-deeplab-xception | dd4e092ecc47ec94e851b36523d243a484f6c672 | [
"MIT"
] | null | null | null | import os
import torch
from torchvision.utils import make_grid
from tensorboardX import SummaryWriter
from external.dataloaders.utils import decode_seg_map_sequence
class TensorboardSummary(object):
    """Helper that owns a TensorBoard log directory and writes
    segmentation visualisations to it."""
    def __init__(self, directory):
        # Directory where TensorBoard event files will be written.
        self.directory = directory
    def create_summary(self):
        """Create and return a tensorboardX SummaryWriter for self.directory."""
        writer = SummaryWriter(logdir=self.directory)
        return writer
    def visualize_image(self, writer, dataset, image, target, output, global_step):
        """Log the first three samples of a batch: input images, predicted
        label maps and ground-truth label maps (decoded to RGB grids)."""
        grid_image = make_grid(image[:3].clone().cpu().data, 3, normalize=True)
        writer.add_image('Image', grid_image, global_step)
        # argmax over the class dimension gives per-pixel predicted labels.
        grid_image = make_grid(decode_seg_map_sequence(torch.max(output[:3], 1)[1].detach().cpu().numpy(),
                                                       dataset=dataset), 3, normalize=False, range=(0, 255))
        writer.add_image('Predicted label', grid_image, global_step)
        grid_image = make_grid(decode_seg_map_sequence(torch.squeeze(target[:3], 1).detach().cpu().numpy(),
                                                       dataset=dataset), 3, normalize=False, range=(0, 255))
        writer.add_image('Groundtruth label', grid_image, global_step)
4013d9d184d31b317359da69e70bf3b8b8b6a2f3 | 3,951 | py | Python | client/commands/stop.py | MedRedha/pyre-check | 1e1aaceb1bfd98de5fabe67d3839e20e5ed0cd31 | [
"MIT"
] | 1 | 2019-12-31T01:08:13.000Z | 2019-12-31T01:08:13.000Z | client/commands/stop.py | MedRedha/pyre-check | 1e1aaceb1bfd98de5fabe67d3839e20e5ed0cd31 | [
"MIT"
] | null | null | null | client/commands/stop.py | MedRedha/pyre-check | 1e1aaceb1bfd98de5fabe67d3839e20e5ed0cd31 | [
"MIT"
] | null | null | null | # Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
import time
from logging import Logger
from typing import List, Optional
from .. import configuration_monitor
from ..analysis_directory import AnalysisDirectory
from ..configuration import Configuration
from ..project_files_monitor import ProjectFilesMonitor
from ..watchman_subscriber import WatchmanSubscriber
from .command import ClientException, Command, State
from .kill import Kill
LOG: Logger = logging.getLogger(__name__)
class Stop(Command):
    """`pyre stop`: ask the running server to shut down, falling back to
    killing leftover processes, then stop the watchman subscribers."""

    NAME = "stop"

    def __init__(
        self,
        arguments: argparse.Namespace,
        original_directory: str,
        configuration: Optional[Configuration] = None,
        analysis_directory: Optional[AnalysisDirectory] = None,
    ) -> None:
        super(Stop, self).__init__(
            arguments, original_directory, configuration, analysis_directory
        )

    @classmethod
    def add_subparser(cls, parser: argparse._SubParsersAction) -> None:
        # Register the `stop` subcommand on the top-level CLI parser.
        stop = parser.add_parser(cls.NAME, epilog="Signals the Pyre server to stop.")
        stop.set_defaults(command=cls)

    def _flags(self) -> List[str]:
        # Flags forwarded to the server client binary.
        log_directory = self._log_directory
        flags = []
        if log_directory:
            flags.extend(["-log-directory", log_directory])
        return flags

    def _run(self) -> None:
        def _kill() -> None:
            # Forcibly clean up any leftover Pyre processes via the Kill command.
            arguments = self._arguments
            # pyre-fixme[16]: `argparse.Namespace` has no attribute `with_fire`.
            arguments.with_fire = False
            Kill(
                arguments,
                self._original_directory,
                self._configuration,
                self._analysis_directory,
            ).run()

        if self._state() == State.DEAD:
            LOG.warning("No server running, cleaning up any left over Pyre processes.")
            _kill()
        else:
            try:
                with open(
                    os.path.join(self._log_directory, "server", "server.pid")
                ) as pid_file:
                    pid_to_poll = int(pid_file.read())
            except (OSError, ValueError):
                # Missing or malformed pid file: skip the polling below.
                pid_to_poll = None
            try:
                stopped = False
                # If this call fails, check() will throw a ClientException.
                self._call_client(command=self.NAME).check()
                # Poll for a second to ensure that the server has a chance to exit.
                if pid_to_poll is not None:
                    stop_time = time.time() + 1.0
                    LOG.info("Polling for server's process to stop...")
                    while time.time() < stop_time:
                        # send a null signal to validate the process's existence. If the
                        # process has terminated, a ProcessLookupError will be thrown.
                        os.kill(pid_to_poll, 0)
                        time.sleep(0.1)
                # NOTE(review): `stopped` becomes True even when the polled process
                # survived the full 1s window — confirm that is intended rather
                # than leaving it False so the kill fallback runs.
                stopped = True
            except ClientException:
                # An error was encountered when running `pyre stop`.
                stopped = False
            except ProcessLookupError:
                LOG.info("The server process has stopped.")
                stopped = True

            if not stopped:
                LOG.warning("Could not stop server, attempting to kill.")
                _kill()
            else:
                LOG.info("Stopped server at `%s`", self._analysis_directory.get_root())

        # Always shut down the watchman-backed file and configuration monitors.
        WatchmanSubscriber.stop_subscriber(
            ProjectFilesMonitor.base_path(self._configuration), ProjectFilesMonitor.NAME
        )
        WatchmanSubscriber.stop_subscriber(
            configuration_monitor.ConfigurationMonitor.base_path(self._configuration),
            configuration_monitor.ConfigurationMonitor.NAME,
        )
9155189ca19616e0422aa10e9f9c35cbd97278c9 | 1,168 | py | Python | Meiduo/apps/payment/migrations/0001_initial.py | wanglijing615/python46 | 2c5f84ce79bf352b7a3c57be32f3210ce204c8c0 | [
"MIT"
] | null | null | null | Meiduo/apps/payment/migrations/0001_initial.py | wanglijing615/python46 | 2c5f84ce79bf352b7a3c57be32f3210ce204c8c0 | [
"MIT"
] | null | null | null | Meiduo/apps/payment/migrations/0001_initial.py | wanglijing615/python46 | 2c5f84ce79bf352b7a3c57be32f3210ce204c8c0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-10-29 02:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations): creates the tb_payment table
    # recording payment transactions linked to orders. Avoid hand-editing.

    initial = True

    dependencies = [
        ('orders', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Payment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
                ('trade_id', models.CharField(blank=True, max_length=100, null=True, unique=True, verbose_name='支付编号')),
                ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orders.OrderInfo', verbose_name='订单')),
            ],
            options={
                'db_table': 'tb_payment',
                'verbose_name': '支付信息',
                'verbose_name_plural': '支付信息',
            },
        ),
    ]
83cd1e4219e28efd5831ca5daadee99c293f58b3 | 2,595 | py | Python | greedy/assignment1/one.py | kylepw/alg_spec | 653a4492aed87439fb3d46938c9a57e69efb16a5 | [
"MIT"
] | null | null | null | greedy/assignment1/one.py | kylepw/alg_spec | 653a4492aed87439fb3d46938c9a57e69efb16a5 | [
"MIT"
] | null | null | null | greedy/assignment1/one.py | kylepw/alg_spec | 653a4492aed87439fb3d46938c9a57e69efb16a5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""File describes a set of jobs with positive and integral weights and
lengths. It has the format:
[number_of_jobs]
[job_1_weight] [job_1_length]
[job_2_weight] [job_2_length]
...
For example, the third line of the file is "74 59", indicating that the
second job has weight 74 and length 59.
You should NOT assume that edge weights or lengths are distinct.
Your task in this problem is to run the greedy algorithm that schedules
jobs in decreasing order of the difference (weight - length). Recall from
lecture that this algorithm is not always optimal. IMPORTANT: if two jobs
have equal difference (weight - length), you should schedule the job with
higher weight first. Beware: if you break ties in a different way, you are
likely to get the wrong answer. You should report the sum of weighted
completion times of the resulting schedule --- a positive integer --- in
the box below.
"""
import argparse
def extract_jobs(filename):
    """Read the job file: first line is the job count, each following line
    holds one job as "[weight] [length]".

    Args:
        filename: path to the text file with job data

    Returns:
        (int, list of tuples) -> (total num of jobs, [(weight, length), ...])
    """
    with open(filename) as handle:
        total = int(handle.readline())
        jobs = [tuple(int(token) for token in line.split()) for line in handle]
    return total, jobs
def sort_jobs(jobs, key, reverse=False):
    """Sort the job list in place.

    Args:
        jobs: list of (job weight, length) tuples, mutated in place
        key: key function to sort by
        reverse: sort in descending order (default is False)
    """
    jobs[:] = sorted(jobs, key=key, reverse=reverse)
def sum_weighted_completion_times(jobs):
    """Return the sum of weight * completion-time over jobs in schedule order."""
    weighted_total = 0
    clock = 0
    for job in jobs:
        # Completion time of this job is the running sum of lengths so far.
        clock = clock + job[1]
        weighted_total = weighted_total + job[0] * clock
    return weighted_total
def get_parser(description):
    """Build the CLI parser with the single positional jobs-file argument."""
    cli = argparse.ArgumentParser(description=description)
    cli.add_argument('filename', metavar='jobs.txt',
                     help='file with one job [weight] [length] per line such as "74 39"')
    return cli
def main():
    """CLI entry point: parse the jobs file, schedule greedily, print the
    sum of weighted completion times."""
    parser = get_parser(
        description='Greedy algorithm that schedules jobs in decreasing order by (weight - length).'
    )
    parsed = vars(parser.parse_args())
    _, jobs = extract_jobs(parsed['filename'])
    # Sort by decreasing (weight - length); ties broken by larger weight first.
    sort_jobs(jobs, key=lambda j: (j[0] - j[1], j[0]), reverse=True)
    print(sum_weighted_completion_times(jobs))


if __name__ == '__main__':
    main()
| 26.479592 | 100 | 0.664355 |
270b2911c7fc5b1283bd57d0ef29a4a04bfdd555 | 2,430 | py | Python | train.py | TakLee96/discriminant | 630010e03dcdad0b7ccaf4733309ff452896cc89 | [
"MIT"
] | null | null | null | train.py | TakLee96/discriminant | 630010e03dcdad0b7ccaf4733309ff452896cc89 | [
"MIT"
] | null | null | null | train.py | TakLee96/discriminant | 630010e03dcdad0b7ccaf4733309ff452896cc89 | [
"MIT"
] | null | null | null | import numpy as np
from os import path
from scipy.io import loadmat
from timer import timer
from classifier import LDAClassifier, QDAClassifier
""" TODO: choose either mnist or spam >>HERE<< """
which = "spam.mat"
which = "mnist.mat"
""" TODO: choose either mnist or spam >>HERE<< """
timer.start("reading", which, "data from matlab file")
raw = loadmat(path.join(path.dirname(__file__), "data", which))
raw_data = raw['data']
raw_labl = raw['label'][0]
timer.end("done")
timer.start("permuting data randomly")
np.random.seed(0)
ordering = np.random.permutation(len(raw_data))
data = np.ndarray(shape=raw_data.shape, dtype=raw_data.dtype)
labl = np.ndarray(shape=raw_labl.shape, dtype=raw_labl.dtype)
for old, new in enumerate(ordering):
data[new] = raw_data[old]
labl[new] = raw_labl[old]
del raw, raw_data, raw_labl, ordering
timer.end("done")
def cross_validation(method, k=5):
if method == "lda":
Classifier = LDAClassifier
elif method == "qda":
Classifier = QDAClassifier
else:
raise Exception("lda or qda only")
timer.start("folding data into", k, "copies")
data_slice = [ None ] * k
labl_slice = [ None ] * k
train_rate = [ 0.0 ] * k
valid_rate = [ 0.0 ] * k
n = len(labl)
m = n / k
for i in range(k):
data_slice[i] = data[(i*m):min((i+1)*m,n)]
labl_slice[i] = labl[(i*m):min((i+1)*m,n)]
timer.end("done")
for j in range(k):
timer.start("validation iteration #", j)
training_data = np.concatenate(tuple(data_slice[i] for i in range(k) if i != j))
training_labl = np.concatenate(tuple(labl_slice[i] for i in range(k) if i != j))
print ".... data formating done"
c = LDAClassifier(training_data, training_labl)
print ".... classifier training done"
train_rate[j] = c.score(c.classify_all(training_data), training_labl)
print ".... training accuracy computation done"
valid_rate[j] = c.score(c.classify_all(data_slice[j]), labl_slice[j])
print ".... validation accuracy computation done"
timer.end("done; training accuracy =", train_rate[j], "; validation accuracy =", valid_rate[j])
print k, "fold cross validation for", method, "on dataset", which, "complete"
print ".... overall training accuracy =", np.mean(train_rate)
print ".... overall validation accuracy =", np.mean(valid_rate)
cross_validation("qda")
| 34.225352 | 103 | 0.651852 |
26138f625a435a81afee21d011c1929317172ac7 | 2,054 | py | Python | tests/conftest.py | mmatoscom/kopf | 0141999e8be3522fa67a372dba4b0f6fbaea6ea1 | [
"MIT"
] | null | null | null | tests/conftest.py | mmatoscom/kopf | 0141999e8be3522fa67a372dba4b0f6fbaea6ea1 | [
"MIT"
] | null | null | null | tests/conftest.py | mmatoscom/kopf | 0141999e8be3522fa67a372dba4b0f6fbaea6ea1 | [
"MIT"
] | null | null | null | import asyncio
import time
import asynctest
import pytest
import pytest_mock
from kopf.reactor.registry import Resource
# Make all tests in this directory and below asyncio-compatible by default.
def pytest_collection_modifyitems(items):
    """Add the 'asyncio' marker to every collected coroutine test."""
    for collected in items:
        if not asyncio.iscoroutinefunction(collected.function):
            continue
        collected.add_marker('asyncio')
# Substitute the regular mock with the async-aware mock in the `mocker` fixture.
@pytest.fixture(scope='session', autouse=True)
def enforce_asyncio_mocker():
    # pytest-mock resolves its mock module through this hook; point it at
    # asynctest so coroutine functions get awaitable mocks.
    pytest_mock._get_mock_module = lambda config: asynctest
@pytest.fixture()
def resource():
    """ The resource used in the tests. Usually mocked, so it does not matter. """
    # Fixed sample resource coordinates (group/version/plural).
    return Resource('zalando.org', 'v1', 'kopfexamples')
@pytest.fixture()
def timer():
    # Fresh Timer instance per test (Timer is defined in this module).
    return Timer()
class Timer(object):
    """
    Context manager that measures the wall-clock time of a code block.

    The elapsed time is available via ``seconds`` (``None`` before the timer
    has been entered, live while running, frozen once finished), and the
    object converts directly to ``int``/``float`` seconds.

    Usage:

        with Timer() as timer:
            do_something()
            print(f"Executing for {timer.seconds}s already.")
            do_something_else()
        print(f"Executed in {timer.seconds}s.")
        assert timer < 5.0
    """

    def __init__(self):
        super().__init__()
        self._started = None
        self._ended = None

    @property
    def seconds(self):
        # Not yet entered -> None; running -> live delta; finished -> frozen delta.
        if self._started is None:
            return None
        end = self._ended if self._ended is not None else time.perf_counter()
        return end - self._started

    def __repr__(self):
        if self._started is None:
            status = 'new'
        elif self._ended is None:
            status = 'running'
        else:
            status = 'finished'
        return f'<Timer: {self.seconds}s ({status})>'

    def __enter__(self):
        self._started = time.perf_counter()
        self._ended = None
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._ended = time.perf_counter()

    def __int__(self):
        return int(self.seconds)

    def __float__(self):
        return float(self.seconds)
c9cc385b6a77bbdce42933cb4d4bcf8440410c3a | 571 | py | Python | var/spack/repos/builtin.mock/packages/singlevalue-variant-dependent/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2018-11-27T03:39:44.000Z | 2021-09-06T15:50:35.000Z | var/spack/repos/builtin.mock/packages/singlevalue-variant-dependent/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2019-01-11T20:11:52.000Z | 2019-01-11T20:11:52.000Z | var/spack/repos/builtin.mock/packages/singlevalue-variant-dependent/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2020-10-14T14:20:17.000Z | 2020-10-14T14:20:17.000Z | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class SinglevalueVariantDependent(Package):
    """Simple package with one optional dependency"""
    homepage = "http://www.example.com"
    url = "http://www.example.com/archive-1.0.tar.gz"

    version('1.0', '0123456789abcdef0123456789abcdef')

    # Requires the multivalue_variant mock package built with fee=baz.
    depends_on('multivalue_variant fee=baz')

    def install(self, spec, prefix):
        # Mock package used by Spack's test suite; install is a no-op.
        pass
ece0602ccf211beecf528a4d2376afb04862f209 | 784 | py | Python | app036.1.py | ChloeRuan/HelloWorld | e1297ee871c9a84a6e7c50e0d3aa1c332daef27f | [
"MIT"
] | null | null | null | app036.1.py | ChloeRuan/HelloWorld | e1297ee871c9a84a6e7c50e0d3aa1c332daef27f | [
"MIT"
] | null | null | null | app036.1.py | ChloeRuan/HelloWorld | e1297ee871c9a84a6e7c50e0d3aa1c332daef27f | [
"MIT"
] | null | null | null | # automate repetive tasks
import openpyxl as xl
from openpyxl.chart import BarChart, Reference
def process_workbook(filename, output_filename='transaction2.xlsx'):
    """Apply a 10% discount to the prices in column 3 of Sheet1, write the
    corrected prices into column 4, add a bar chart of them, and save.

    Args:
        filename: path of the workbook to read
        output_filename: where to save the result (default 'transaction2.xlsx')
    """
    # BUG FIX: previously passed the literal string 'filename' instead of the
    # parameter, so the intended workbook was never opened.
    wb = xl.load_workbook(filename)
    sheet = wb['Sheet1']  # sheet names are case sensitive
    # Row 1 holds headers; range() excludes the stop, hence max_row + 1.
    for row in range(2, sheet.max_row + 1):
        cell = sheet.cell(row, 3)  # column 3 holds the original price
        correct_price = cell.value * 0.9
        corrected_price_cell = sheet.cell(row, 4)
        corrected_price_cell.value = correct_price
    # Chart the corrected-price column (column 4, data rows only).
    values = Reference(sheet, min_row=2, max_row=sheet.max_row, min_col=4, max_col=4)
    chart = BarChart()
    chart.add_data(values)
    sheet.add_chart(chart, 'e2')  # anchor the chart at cell E2
    wb.save(output_filename)
| 31.36 | 103 | 0.69898 |
9babd04b0027c867a0736019378dd2cdd420dcc0 | 537 | py | Python | catvae/models/__init__.py | flatironinstitute/catvae | 4bfdce83a24c0fb0e55215dd24cda5dcaa9d418a | [
"BSD-3-Clause"
] | 6 | 2021-05-23T18:50:48.000Z | 2022-02-23T20:57:36.000Z | catvae/models/__init__.py | flatironinstitute/catvae | 4bfdce83a24c0fb0e55215dd24cda5dcaa9d418a | [
"BSD-3-Clause"
] | 24 | 2021-05-19T17:43:33.000Z | 2022-03-03T21:41:13.000Z | catvae/models/__init__.py | flatironinstitute/catvae | 4bfdce83a24c0fb0e55215dd24cda5dcaa9d418a | [
"BSD-3-Clause"
] | 2 | 2021-05-19T16:21:13.000Z | 2021-09-23T01:11:29.000Z | from catvae.models.linear_cat_vae import LinearCatVAE
from catvae.models.linear_cat_vae import LinearBatchCatVAE
from catvae.models.linear_vae import LinearVAE
from catvae.models.linear_vae import LinearDLRVAE
from catvae.models.linear_vae import LinearBatchVAE
from catvae.models.batch_classifier import Q2BatchClassifier
from catvae.models.triplet_net import TripletNet
__all__ = ['LinearCatVAE', 'LinearBatchCatVAE',
'LinearVAE', 'LinearDLRVAE',
'LinearBatchVAE', 'TripletNet',
'Q2BatchClassifier']
| 38.357143 | 60 | 0.79702 |
7a9b337636c545a73a6f7798c3c1fa864adf6695 | 299 | py | Python | hooks/pre_gen_project.py | ihumphrey/cookiecutter-pypackage | 23088f77465652d0cbb028a485611f2ece90360b | [
"BSD-3-Clause"
] | null | null | null | hooks/pre_gen_project.py | ihumphrey/cookiecutter-pypackage | 23088f77465652d0cbb028a485611f2ece90360b | [
"BSD-3-Clause"
] | null | null | null | hooks/pre_gen_project.py | ihumphrey/cookiecutter-pypackage | 23088f77465652d0cbb028a485611f2ece90360b | [
"BSD-3-Clause"
] | null | null | null | import re
import sys
MODULE_REGEX = r'^[_a-zA-Z][_a-zA-Z0-9]+$'
package_name = '{{ cookiecutter.package_name}}'
if not re.match(MODULE_REGEX, package_name):
print('ERROR: %s is not a valid Python package name!' % package_name)
# exits with status 1 to indicate failure
sys.exit(1)
| 19.933333 | 73 | 0.688963 |
22c64f13d908cf76967a60895b973f1163ff8aac | 6,777 | py | Python | abtools/utils/ssh_tunnel.py | menis/abtools | bfc7c6c508b174bb3b74d8f152156242ddd2ee77 | [
"MIT"
] | 9 | 2016-06-13T20:00:04.000Z | 2022-03-19T19:07:23.000Z | abtools/utils/ssh_tunnel.py | menis/abtools | bfc7c6c508b174bb3b74d8f152156242ddd2ee77 | [
"MIT"
] | null | null | null | abtools/utils/ssh_tunnel.py | menis/abtools | bfc7c6c508b174bb3b74d8f152156242ddd2ee77 | [
"MIT"
] | 4 | 2018-04-10T09:05:21.000Z | 2022-01-27T21:23:06.000Z | #!/usr/bin/env python
# filename: ssh_tunnel.py
#
# Copyright (c) 2015 Bryan Briney
# License: The MIT license (http://opensource.org/licenses/MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Borrowed heavily from Paramiko:
# https://github.com/paramiko/paramiko/blob/master/demos/forward.py
import getpass
import os
import socket
import select
try:
import SocketServer
except ImportError:
import socketserver as SocketServer
import argparse
import sys
import paramiko
SSH_PORT = 22
DEFAULT_PORT = 27017
class ForwardServer(SocketServer.ThreadingTCPServer):
    # Handle each tunneled connection in its own daemon thread, and allow
    # quick rebinding of the local port between runs.
    daemon_threads = True
    allow_reuse_address = True
class Handler(SocketServer.BaseRequestHandler):
    # Relays one accepted TCP connection through an SSH channel. The class
    # attributes `chain_host`, `chain_port` and `ssh_transport` are supplied
    # by the subclass created inside forward_tunnel().

    def handle(self):
        try:
            chan = self.ssh_transport.open_channel(
                'direct-tcpip',
                (self.chain_host, self.chain_port),
                self.request.getpeername())
        except Exception as e:
            print('Incoming request to {}:{} failed: {}'.format(
                self.chain_host,
                self.chain_port,
                repr(e)))
            return
        if chan is None:
            print('Incoming request to {}:{} was rejected by the SSH server.'.format(
                self.chain_host,
                self.chain_port))
            return

        print('Connected! Tunnel open {} -> {} -> {}'.format(
            self.request.getpeername(),
            chan.getpeername(),
            (self.chain_host, self.chain_port)))
        # Shuttle bytes in both directions; an empty read means that side closed.
        while True:
            r, w, x = select.select([self.request, chan], [], [])
            if self.request in r:
                data = self.request.recv(1024)
                if len(data) == 0:
                    break
                chan.send(data)
            if chan in r:
                data = chan.recv(1024)
                if len(data) == 0:
                    break
                self.request.send(data)

        peername = self.request.getpeername()
        chan.close()
        self.request.close()
        print('Tunnel closed from {}'.format(peername,))
def forward_tunnel(local_port, remote_host, remote_port, transport):
    """Serve forever, relaying connections on local_port to remote_host:remote_port
    through the given SSH transport."""
    class _BoundHandler(Handler):
        # Handler reads these as class attributes; SocketServer gives handler
        # instances no reference to the outer server, so bind via a subclass.
        chain_host = remote_host
        chain_port = remote_port
        ssh_transport = transport

    ForwardServer(('', local_port), _BoundHandler).serve_forever()
def get_host_port(spec, default_port):
    """Split 'host[:port]' into (host, port); the port falls back to default_port."""
    host, sep, port = spec.partition(':')
    if not sep:
        port = default_port
    return host, int(port)
HELP = """\
Set up a forward tunnel across an SSH server, using paramiko. A local port
(given with -p) is forwarded across an SSH session to an address:port from
the SSH server. This is similar to the openssh -L option.
"""
def parse_arguments():
    """Parse command-line options.

    Returns:
        (args, (server_host, server_port), (remote_host, remote_port))
    """
    parser = argparse.ArgumentParser(usage='usage: ssh_tunnel [options] <ssh-server>[:<server-port>]',
                                     description=HELP)
    # BUG FIX: ArgumentParser(version=...) was removed in Python 3 (deprecated
    # in 2.7); expose the version string through an explicit option instead.
    parser.add_argument('--version', action='version', version='ssh_tunnel')
    parser.add_argument('ssh_server', nargs=1,
                        help='SSH server, as <ssh-server>[:<ssh-port>]')
    parser.add_argument('-p', '--local-port', action='store', type=int, dest='port',
                        default=DEFAULT_PORT,
                        help='local port to forward (default: {})'.format(DEFAULT_PORT))
    parser.add_argument('-u', '--user', action='store', type=str, dest='user',
                        default=getpass.getuser(),
                        help='username for SSH authentication (default: {})'.format(getpass.getuser()))
    parser.add_argument('-K', '--key', action='store', type=str, dest='keyfile',
                        default=None,
                        help='private key file to use for SSH authentication')
    parser.add_argument('--no-key', action='store_false', dest='look_for_keys', default=True,
                        help="don't look for or use a private key file")
    parser.add_argument('-P', '--password', dest='readpass', default=False, action='store_true',
                        help='Use a password for SSH.')
    parser.add_argument('-r', '--remote', action='store', required=True, type=str, dest='remote',
                        default=None, metavar='host:port',
                        help='remote host and port to forward to')
    args = parser.parse_args()

    # Defensive only: -r is required=True, so argparse already enforces it.
    if args.remote is None:
        parser.error('Remote address required (-r).')

    server_host, server_port = get_host_port(args.ssh_server[0], SSH_PORT)
    remote_host, remote_port = get_host_port(args.remote, args.port)
    return args, (server_host, server_port), (remote_host, remote_port)
def main():
    """Connect to the SSH server and forward the chosen local port."""
    args, server, remote = parse_arguments()
    # BUG FIX: 'password' was only bound when -P/--password was supplied,
    # causing a NameError at client.connect() otherwise; default it to None.
    password = None
    if args.readpass:
        password = getpass.getpass('Enter SSH password: ')
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    # Warn (rather than reject) on unknown host keys.
    client.set_missing_host_key_policy(paramiko.WarningPolicy())
    print('Connecting to ssh host {}:{} ...'.format(server[0], server[1]))
    try:
        client.connect(server[0], server[1], username=args.user,
                       key_filename=args.keyfile,
                       look_for_keys=args.look_for_keys, password=password)
    except Exception as e:
        print('*** Failed to connect to {}:{}: {}'.format(
            server[0], server[1], e))
        sys.exit(1)
    print('Now forwarding port {} to {}:{} ...'.format(
        args.port, remote[0], remote[1]))
    try:
        forward_tunnel(args.port, remote[0], remote[1],
                       client.get_transport())
    except KeyboardInterrupt:
        print('C-c: Port forwarding stopped.')
        sys.exit(0)
if __name__ == '__main__':
main()
| 38.288136 | 103 | 0.633614 |
a78a55e521984401e5b4205752918406943e63b3 | 7,161 | py | Python | audio_style_transfer/models/uylanov.py | dak-7309/time-domain-neural-audio-style-transfer | ed31a4988489f6ce6944799d2340ab5feea8e878 | [
"Apache-2.0"
] | 139 | 2017-11-28T11:23:32.000Z | 2022-03-08T18:01:23.000Z | audio_style_transfer/models/uylanov.py | dak-7309/time-domain-neural-audio-style-transfer | ed31a4988489f6ce6944799d2340ab5feea8e878 | [
"Apache-2.0"
] | 2 | 2019-03-27T04:19:28.000Z | 2020-04-12T21:12:44.000Z | audio_style_transfer/models/uylanov.py | dak-7309/time-domain-neural-audio-style-transfer | ed31a4988489f6ce6944799d2340ab5feea8e878 | [
"Apache-2.0"
] | 31 | 2017-11-28T07:08:23.000Z | 2022-03-09T02:51:08.000Z | """NIPS2017 "Time Domain Neural Audio Style Transfer" code repository
Parag K. Mital
"""
import tensorflow as tf
import librosa
import numpy as np
import argparse
import glob
import os
from audio_style_transfer import utils
def read_audio_spectum(filename, n_fft=2048, hop_length=512, sr=22050):
    """Load an audio file and return its log-magnitude spectrogram.

    :param filename: path of the audio file to load
    :param n_fft: FFT window size
    :param hop_length: hop between consecutive STFT frames
    :param sr: sample rate used by librosa.load (input is resampled)
    :returns: tuple (S, sr) where S has shape (frames, frequency bins)
    """
    x, sr = librosa.load(filename, sr=sr)
    # NOTE(review): positional librosa.stft(x, n_fft, hop_length) relies on
    # the older librosa signature -- confirm against the pinned version.
    S = librosa.stft(x, n_fft, hop_length)
    # log1p compresses dynamic range; transpose to (frames, bins).
    S = np.log1p(np.abs(S)).T
    return S, sr
def compute_features(content,
                     style,
                     stride=1,
                     n_layers=1,
                     n_filters=4096,
                     k_h=1,
                     k_w=11):
    """Compute content targets and style Gram matrices with random kernels.

    A stack of untrained conv+relu layers (random, variance-scaled kernels)
    is evaluated on both spectrograms; the raw activations become the
    content targets, and Gram matrices of the activations become the style
    targets. The random kernels are returned so the same network can be
    rebuilt during stylization.

    :param content: content spectrogram, shape (frames, bins)
    :param style: style spectrogram, shape (frames, bins)
    :returns: (content_features, style_features, kernels) per layer
    """
    n_frames = content.shape[0]
    n_samples = content.shape[1]
    content_tf = np.ascontiguousarray(content)
    style_tf = np.ascontiguousarray(style)
    g = tf.Graph()
    kernels = []
    layers = []
    content_features = []
    style_features = []
    with g.as_default(), g.device('/cpu:0'), tf.Session():
        x = tf.placeholder('float32', [None, n_samples], name="x")
        # NHWC with the spectrogram's frequency bins as input channels.
        net = tf.reshape(x, [1, 1, -1, n_samples])
        for layer_i in range(n_layers):
            if layer_i == 0:
                # Scale the random kernel by the layer's fan size.
                std = np.sqrt(2) * np.sqrt(2.0 / ((n_frames + n_filters) * k_w))
                kernel = np.random.randn(k_h, k_w, n_samples, n_filters) * std
            else:
                std = np.sqrt(2) * np.sqrt(2.0 / (
                    (n_filters + n_filters) * k_w))
                kernel = np.random.randn(k_h, k_w, n_filters, n_filters) * std
            kernels.append(kernel)
            kernel_tf = tf.constant(
                kernel, name="kernel{}".format(layer_i), dtype='float32')
            conv = tf.nn.conv2d(
                net,
                kernel_tf,
                strides=[1, stride, stride, 1],
                padding="VALID",
                name="conv{}".format(layer_i))
            net = tf.nn.relu(conv)
            layers.append(net)
            # Content target: the raw activations at this layer.
            content_feature = net.eval(feed_dict={x: content_tf})
            content_features.append(content_feature)
            # Style target: Gram matrix of this layer's activations.
            style_feature = net.eval(feed_dict={x: style_tf})
            features = np.reshape(style_feature, (-1, n_filters))
            style_gram = np.matmul(features.T, features) / n_frames
            style_features.append(style_gram)
    return content_features, style_features, kernels
def compute_stylization(kernels,
                        n_samples,
                        n_frames,
                        content_features,
                        style_features,
                        stride=1,
                        n_layers=1,
                        alpha=1e-4,
                        learning_rate=1e-3,
                        iterations=100):
    """Optimize a spectrogram to match content activations and style Grams.

    The random network from compute_features is rebuilt from ``kernels`` and
    the input variable is optimized with L-BFGS-B to minimize a weighted sum
    of content and style losses.

    :param alpha: weight of the content loss relative to the style loss
    :param learning_rate: unused; optimization is done via L-BFGS-B
    :param iterations: maximum number of L-BFGS-B iterations
    :returns: optimized input, shape (1, 1, n_frames, n_samples)
    """
    result = None
    with tf.Graph().as_default():
        # Optimization variable: a small random spectrogram 'image'.
        x = tf.Variable(
            np.random.randn(1, 1, n_frames, n_samples).astype(np.float32) *
            1e-3,
            name="x")
        net = x
        content_loss = 0
        style_loss = 0
        for layer_i in range(n_layers):
            # Rebuild the same random layers used to extract the targets.
            kernel_tf = tf.constant(
                kernels[layer_i],
                name="kernel{}".format(layer_i),
                dtype='float32')
            conv = tf.nn.conv2d(
                net,
                kernel_tf,
                strides=[1, stride, stride, 1],
                padding="VALID",
                name="conv{}".format(layer_i))
            net = tf.nn.relu(conv)
            content_loss = content_loss + \
                alpha * 2 * tf.nn.l2_loss(net - content_features[layer_i])
            _, height, width, number = map(lambda i: i.value, net.get_shape())
            feats = tf.reshape(net, (-1, number))
            gram = tf.matmul(tf.transpose(feats), feats) / n_frames
            style_loss = style_loss + 2 * tf.nn.l2_loss(gram - style_features[
                layer_i])
        loss = content_loss + style_loss
        opt = tf.contrib.opt.ScipyOptimizerInterface(
            loss, method='L-BFGS-B', options={'maxiter': iterations})
        # Optimization
        with tf.Session() as sess:
            sess.run(tf.initialize_all_variables())
            print('Started optimization.')
            opt.minimize(sess)
            print('Final loss:', loss.eval())
            result = x.eval()
    return result
def run(content_fname,
        style_fname,
        output_fname,
        n_fft=2048,
        hop_length=256,
        alpha=0.02,
        n_layers=1,
        n_filters=8192,
        k_w=15,
        stride=1,
        iterations=300,
        phase_iterations=500,
        sr=22050,
        signal_length=1, # second
        block_length=1024):
    """Run end-to-end audio style transfer and write the stylized wav.

    Loads both spectrograms, extracts features, optimizes a new magnitude
    spectrogram, then recovers a phase by Griffin-Lim-style iteration and
    writes both a pre-limiter and a limited output file.

    NOTE(review): signal_length and block_length are accepted but never
    used in this body -- presumably kept for interface parity with the
    other model scripts; confirm before removing.
    """
    content, sr = read_audio_spectum(
        content_fname, n_fft=n_fft, hop_length=hop_length, sr=sr)
    style, sr = read_audio_spectum(
        style_fname, n_fft=n_fft, hop_length=hop_length, sr=sr)
    # Truncate both inputs to the shorter one so shapes line up.
    n_frames = min(content.shape[0], style.shape[0])
    n_samples = content.shape[1]
    content = content[:n_frames, :]
    style = style[:n_frames, :]
    content_features, style_features, kernels = compute_features(
        content=content,
        style=style,
        stride=stride,
        n_layers=n_layers,
        n_filters=n_filters,
        k_w=k_w)
    result = compute_stylization(
        kernels=kernels,
        n_samples=n_samples,
        n_frames=n_frames,
        content_features=content_features,
        style_features=style_features,
        stride=stride,
        n_layers=n_layers,
        alpha=alpha,
        iterations=iterations)
    # Undo the log1p taken in read_audio_spectum to get linear magnitudes.
    mags = np.zeros_like(content.T)
    mags[:, :n_frames] = np.exp(result[0, 0].T) - 1
    # Griffin-Lim-style phase recovery: start from random phase, repeatedly
    # resynthesize and keep only the phase of the re-analyzed signal.
    p = 2 * np.pi * np.random.random_sample(mags.shape) - np.pi
    for i in range(phase_iterations):
        S = mags * np.exp(1j * p)
        x = librosa.istft(S, hop_length)
        p = np.angle(librosa.stft(x, n_fft, hop_length))
    librosa.output.write_wav('prelimiter.wav', x, sr)
    limited = utils.limiter(x)
    librosa.output.write_wav(output_fname, limited, sr)
def batch(content_path, style_path, output_path):
    """Stylize every (content, style) wav pairing, skipping existing files.

    Output names are '<content basename>+<style basename>.wav' inside
    ``output_path``.
    """
    content_files = glob.glob('{}/*.wav'.format(content_path))
    style_files = glob.glob('{}/*.wav'.format(style_path))
    for content_fname in content_files:
        content_base = content_fname.split('/')[-1]
        for style_fname in style_files:
            style_base = style_fname.split('/')[-1]
            out_fname = '{}/{}+{}.wav'.format(
                output_path, content_base, style_base)
            # Don't redo work that already produced an output file.
            if not os.path.exists(out_fname):
                run(content_fname, style_fname, out_fname)
if __name__ == '__main__':
    # Command-line entry point: -s/-c/-o name the style, content and output
    # files (or, with --mode batch, the directories to pair up).
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--style', help='style file', required=True)
    parser.add_argument('-c', '--content', help='content file', required=True)
    parser.add_argument('-o', '--output', help='output file', required=True)
    parser.add_argument(
        '-m',
        '--mode',
        help='mode for training [single] or batch',
        default='single')
    args = vars(parser.parse_args())
    if args['mode'] == 'single':
        run(args['content'], args['style'], args['output'])
    else:
        # Any mode other than 'single' falls through to batch processing.
        batch(args['content'], args['style'], args['output'])
| 34.762136 | 80 | 0.559419 |
c7f265c901ad22fbfe0107c0ab9f1d0d4f22c430 | 945 | py | Python | Chapter 06/add_coffee_record.py | nescience8/starting-out-with-python-global-4th-edition | c16f93b7cbb4c7ae7b57653a7190bf192fe6b472 | [
"MIT"
] | 35 | 2019-05-03T00:30:31.000Z | 2022-01-20T06:57:25.000Z | Chapter 06/add_coffee_record.py | nescience8/starting-out-with-python-global-4th-edition | c16f93b7cbb4c7ae7b57653a7190bf192fe6b472 | [
"MIT"
] | 1 | 2020-09-04T02:04:33.000Z | 2020-09-04T02:04:33.000Z | Chapter 06/add_coffee_record.py | nescience8/starting-out-with-python-global-4th-edition | c16f93b7cbb4c7ae7b57653a7190bf192fe6b472 | [
"MIT"
] | 22 | 2020-05-13T21:20:02.000Z | 2021-12-21T08:35:59.000Z | # This program adds coffee inventory records to
# the coffee.txt file.
def main():
    """Prompt the user for coffee records and append them to coffee.txt.

    Each record is written as two lines: the description, then the
    quantity in pounds.
    """
    # Create a variable to control the loop.
    another = 'y'
    # Open the coffee.txt file in append mode. IMPROVEMENT: using 'with'
    # guarantees the file is closed even if int() raises on bad input
    # (the original left the handle open in that case).
    with open('coffee.txt', 'a') as coffee_file:
        # Add records to the file.
        while another == 'y' or another == 'Y':
            # Get the coffee record data.
            print('Enter the following coffee data:')
            descr = input('Description: ')
            qty = int(input('Quantity (in pounds): '))
            # Append the data to the file.
            coffee_file.write(descr + '\n')
            coffee_file.write(str(qty) + '\n')
            # Determine whether the user wants to add
            # another record to the file.
            print('Do you want to add another record?')
            another = input('Y = yes, anything else = no: ')
    print('Data appended to coffee.txt.')
# Call the main function.
main()
| 27 | 56 | 0.597884 |
5fa02624304161a4a6884950af46e5b8a3503ea3 | 13,697 | py | Python | Misc/Legacy/Stats_funcs.py | BadenLab/2Panalysis | 8b23b3eaa1700f4fb5729eb7e89bfd55e7b4811a | [
"MIT"
] | null | null | null | Misc/Legacy/Stats_funcs.py | BadenLab/2Panalysis | 8b23b3eaa1700f4fb5729eb7e89bfd55e7b4811a | [
"MIT"
] | null | null | null | Misc/Legacy/Stats_funcs.py | BadenLab/2Panalysis | 8b23b3eaa1700f4fb5729eb7e89bfd55e7b4811a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 23 20:39:22 2021
@author: skrem
"""
def shapirowilk(dataname, plot = 0, **kwargs):
    """Returns Shapiro-Wilk test for normality (stat, p-value), with the null hypothesis that
    data are normally distributed.
    Parameters
    ----------
    dataname: array-like
        Input data
    plot: 0 or 1
        1 plots histogram of sampled values and Q-Q plot for visual inspection
    kwargs:
        title = , hist_x =
    """
    #for i in range(data):
    #generates probability plot for visual inspection of normality
    print("Null hypothesis: data are normally distributed")
    # Drop NaNs from pandas inputs so neither the plots nor the test choke.
    if isinstance(dataname, pd.Series) or isinstance(dataname, pd.DataFrame):
        dataname = dataname.dropna()
    if plot == 1:
        # Side-by-side histogram (ax1) and Q-Q plot (ax2) for visual checks.
        fig, (ax1, ax2) = plt.subplots(1, 2, sharex = False, sharey = False, figsize = (10, 5), dpi = 800)
        normality_plot, stat = scipy.stats.probplot(dataname, plot = ax2, rvalue= True)
        ax1.hist(dataname, color = 'black', histtype = 'stepfilled')
        # Restyle the probplot's scatter (line 0) and fit line (line 1).
        ax2.get_lines()[0].set_marker('.')
        ax2.get_lines()[0].set_color('black')
        ax2.get_lines()[1].set_color('slategray')
        plt.subplots_adjust(wspace = .1)
        ax1.set_title("Histogram")
        ax2.set_title("Q-Q plot")
        ax1.set_ylabel("Count")
        ax2.set_ylabel("Ordered values")
        if 'title' in kwargs:
            the_title = kwargs["title"]
            plt.suptitle(the_title)
        if 'hist_x' in kwargs:
            the_xlabel = kwargs["hist_x"]
            ax1.set_xlabel(the_xlabel)
        plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=.2, hspace=None)
        plt.show()
    #Runs Shapiro-Wilkns test for normality
    SW_res=scipy.stats.shapiro(dataname)
    print(SW_res)
    # NOTE(review): at p == 0.05 exactly, both messages below are printed;
    # confirm whether that boundary behaviour is intended.
    if SW_res[1] <= 0.05:
        print("Shapiro-Wilk test indicates rejection of null hypothesis: data are NOT normally distributed.")
    if SW_res[1] >= 0.05:
        print("Shapiro-Wilk test indicates null hypothesis is correct: data ARE normally distributed.")
    return SW_res
#_____________________________________________________________________________
def Levenes(*sample_n):  # assesses homogeneity of variances
    """Levene's test for equality of variances across the given samples.

    Pandas Series/DataFrame inputs have NaNs dropped first; anything else
    is used as-is. Prints the result plus its degrees of freedom and
    returns the scipy result. Works best if passing DataFrames.
    """
    cleaned = []
    for sample in sample_n:
        if isinstance(sample, pd.Series) or isinstance(sample, pd.DataFrame):
            cleaned.append(sample.dropna())
        else:
            cleaned.append(sample)
    test = scipy.stats.levene(*cleaned)
    print(test)
    # Denominator degrees of freedom: total cases N minus group count k.
    k = len(cleaned)
    N = sum(sum(1 for _ in sample) for sample in cleaned)
    levenes_dof = N - k
    print(f"Degrees of Freedom = {levenes_dof:.4f}")
    if test[1] < 0.05:
        print("Hypothesis of homogeniety of variance is rejected.")
    return test
#_____________________________________________________________________________
def WelchTtest(x, y):
    """Welch's unequal-variances t-test between samples x and y.

    NaNs are dropped from pandas inputs. Prints the scipy result and the
    Welch-Satterthwaite degrees of freedom, and returns the scipy result.
    """
    if isinstance(x, pd.Series) or isinstance(x, pd.DataFrame):
        x = x.dropna()
    # BUG FIX: the original tested isinstance(y, y.DataFrame) -- an
    # attribute of the sample itself instead of pd.DataFrame -- raising
    # AttributeError for any non-DataFrame input.
    if isinstance(y, pd.Series) or isinstance(y, pd.DataFrame):
        y = y.dropna()
    Welchstat = scipy.stats.ttest_ind(x, y, equal_var=False)
    print(Welchstat)
    # BUG FIX: the original mis-parenthesised the Welch-Satterthwaite
    # formula (it divided the squared numerator by the first term only and
    # used population variances). Correct form:
    #   dof = (vx/nx + vy/ny)^2 / ((vx/nx)^2/(nx-1) + (vy/ny)^2/(ny-1))
    # with sample variances (ddof=1).
    vx = np.var(x, ddof=1) / x.size
    vy = np.var(y, ddof=1) / y.size
    welch_dof = (vx + vy) ** 2 / (
        vx ** 2 / (x.size - 1) + vy ** 2 / (y.size - 1))
    print(f"Welch-Sattertwaite Degrees of Freedom = {welch_dof:.4f}")
    return Welchstat
#_____________________________________________________________________________
def StudentTtest(x, y):
    """Student's pooled-variance t-test between samples x and y.

    NaNs are dropped from pandas inputs. Prints the scipy result and its
    degrees of freedom (nx + ny - 2), and returns the scipy result.
    """
    if isinstance(x, pd.Series) or isinstance(x, pd.DataFrame):
        x = x.dropna()
    # BUG FIX: the original tested isinstance(y, y.DataFrame) instead of
    # pd.DataFrame, raising AttributeError for any non-DataFrame input.
    if isinstance(y, pd.Series) or isinstance(y, pd.DataFrame):
        y = y.dropna()
    Studstat = scipy.stats.ttest_ind(x, y, equal_var=True)
    print(Studstat)
    stud_dof = x.size + y.size - 2
    print("Degrees of freedom df = nx+ny-2 =", stud_dof)
    return Studstat
#--> TO DO: Insert scipy.stats.ANOVA (or equivalent) for s to execute non/parametric test (EZ PZ)
#_____________________________________________________________________________
def kruskal(*sample_n):
    """a non-parametric method for testing whether samples originate from the same distribution
    """
    cleaned = []
    for sample in sample_n:
        is_pandas = isinstance(sample, (pd.Series, pd.DataFrame))
        cleaned.append(sample.dropna() if is_pandas else sample)
    return scipy.stats.kruskal(*cleaned, nan_policy='omit')
def mann_whit(x, y, **kwargs):
    """Mann-Whitney U test via scipy plus pingouin's detailed report.

    Optional kwargs: 'hypothesis' (alternative hypothesis, default
    'two-sided') and 'cont_corr' (continuity correction flag, default
    True). Returns (scipy result, pingouin result table).
    """
    if isinstance(x, (pd.Series, pd.DataFrame)):
        x = x.dropna()
    if isinstance(y, (pd.Series, pd.DataFrame)):
        y = y.dropna()
    hypth = kwargs.get('hypothesis', 'two-sided')
    cont_binary = kwargs.get('cont_corr', True)
    basic_result = scipy.stats.mannwhitneyu(
        x, y, alternative=hypth, use_continuity=cont_binary)
    advanced_result = pingouin.mwu(x, y, tail=hypth)
    return basic_result, advanced_result
#_____________________________________________________________________________
def wilcoxon_SR(x, y, **kwargs):
    """Wilcoxon signed-rank test (non-parametric, paired samples).

    Optional kwargs: 'method' (zero_method, default 'wilcox'),
    'correction' (continuity correction True/False, default False),
    'alternative' (default 'two-sided') and 'mode' (default 'auto').
    """
    if isinstance(x, (pd.Series, pd.DataFrame)):
        x = x.dropna()
    if isinstance(y, (pd.Series, pd.DataFrame)):
        y = y.dropna()
    zero_method = kwargs.get('method', "wilcox")
    correction = kwargs.get('correction', False)
    alternative = kwargs.get('alternative', 'two-sided')
    mode = kwargs.get('mode', 'auto')
    return scipy.stats.wilcoxon(x, y, zero_method=zero_method,
                                correction=correction,
                                alternative=alternative, mode=mode)
#_____________________________________________________________________________
def wilcoxon_RS(x, y, **kwargs):
    """Wilcoxon rank-sum test (non-parametric, unpaired samples)."""
    if isinstance(x, (pd.Series, pd.DataFrame)):
        x = x.dropna()
    if isinstance(y, (pd.Series, pd.DataFrame)):
        y = y.dropna()
    return scipy.stats.ranksums(x, y)
def posthoc_dunn(x):
    """Dunn's post-hoc pairwise test via scikit-posthocs.

    :param x: samples to compare (list of array-likes or a DataFrame)
    :returns: DataFrame of pairwise p-values
    """
    # BUG FIX: the original computed the result but discarded it; return
    # it so callers can actually use the p-value matrix.
    return scikit_posthocs.posthoc_dunn(x)
# condition_pallette = {"AZ": '#414288', "Peripheral": '#304A39', "AZ w CNQX": '#6A8D92', "Peripheral w CNQX": '#8DB99D'}
condition_pallette = {"AZ": '#274C77', "Peripheral": '#F85A3E', "AZ (CNQX)": '#5BC0EB', "Peripheral (CNQX)": '#D0768D'}
#____________________________________________________________________________
"Statistical plotting_________________________________________________________"
def boxplot(data_df, group_list, **kwargs):
    """Plots a boxplot.

    Parameters
    -------------
    data_df: DataFrame
        Which df to plot from
    group_list: list
        List of columns in data_df to plot as groups
    **kwargs:
        *x_label = (str) label of x axis
        *y_label = (str) label for y axis
        *title = (str) set suptitle
        *plotpoints = overlay a swarmplot of the raw observations
        *showXaxis = (bool) hide the x axis when False/0
        *saveas = (str) filename to save the figure under
        *sig_bar = (list of tuples) specify which groups to sig test and
            set bar for (and do prep stuff)
        *test_type = (str) choose: 'Mann-Whitney', 't-test_ind',
            't-test_paired', 't-test_welch', 'Levene', 'Kruskal',
            'Wilcoxon', 'Mann-Whitney-ls', 'Mann-Whitney-gt'
        *pvals = (list) passes own statistics and skips test
        *correction = (str) multiple-comparison correction for statannot

    Example: boxplot(ellipses_areas, ['AZ', 'Peripheral', 'AZ w CNQX',
    'Peripheral w CNQX'], x_label = 'x', y_label = 'y', plotpoints = 1,
    sig_bar = [("AZ", "Peripheral"), ("AZ", "AZ w CNQX")],
    test_type = 't-test_ind')
    """
    plt.figure(dpi = 800)
    fig = sns.boxplot(data=data_df[group_list], palette = condition_pallette, linewidth = .75)
    if 'x_label' in kwargs:
        plt.xlabel(kwargs['x_label'])
    if 'y_label' in kwargs:
        plt.ylabel(kwargs['y_label'])
    if 'title' in kwargs:
        plt.suptitle(kwargs['title'])
    if 'sig_bar' in kwargs:
        pairs = kwargs['sig_bar'] #[(("AZ", "Peripheral"))]
        # BUG FIX: 'run_test' was only bound when 'test_type' or 'pvals'
        # was supplied, so passing sig_bar alone raised NameError below;
        # default to not computing a test.
        run_test = False
        if 'test_type' in kwargs:
            test_type = kwargs['test_type'] #'t-test_ind'
            run_test = True
        else:
            test_type = None
        if 'pvals' in kwargs:
            pvals = kwargs['pvals']
            run_test = False
        else:
            pvals = None
        if 'correction' in kwargs:
            corr = kwargs['correction']
            add_stat_annotation(fig, data = data_df[group_list], box_pairs = pairs, test = test_type,
                                perform_stat_test = run_test, text_format = 'star', verbose=2, pvalues = pvals, comparisons_correction = corr)
        else:
            add_stat_annotation(fig, data = data_df[group_list], box_pairs = pairs, test = test_type,
                                perform_stat_test = run_test, text_format = 'star', verbose=2, pvalues = pvals) #alternatively use text_format = 'simple'
    if 'plotpoints' in kwargs:
        sns.swarmplot(data=data_df[group_list], color = "w", edgecolor = 'gray', linewidth = 1 , size = 3)
    plt.grid()
    if 'showXaxis' in kwargs:
        if kwargs['showXaxis'] == False or kwargs['showXaxis'] == 0:
            fig.axes.get_xaxis().set_visible(False)
    if 'saveas' in kwargs:
        plt.figure(dpi = 2000)
        plt.savefig(r'C://Users//skrem//OneDrive//Universitet//MSc//Experimental project//Figures//Python generated//{}'.format(kwargs['saveas']), dpi = 2000, bbox_inches='tight')
    plt.show()
"See: https://seaborn.pydata.org/tutorial/aesthetics.html"
def violinplot(data_df, group_list, **kwargs):
    """Plots a violinplot.

    Parameters
    -------------
    data_df: DataFrame
        Which df to plot from
    group_list: list
        List of columns in data_df to plot as groups
    **kwargs:
        *x_label = (str) label of x axis
        *y_label = (str) label for y axis
        *title = (str) set suptitle
        *cut = (float) seaborn 'cut': trims the density beyond the data
        *saveas = (str) filename to save the figure under
        *sig_bar = (list of tuples) specify which groups to sig test and
            set bar for (and do prep stuff)
        *test_type = (str) choose: 'Mann-Whitney', 't-test_ind',
            't-test_paired', 't-test_welch', 'Levene', 'Kruskal',
            'Wilcoxon', 'Mann-Whitney-ls', 'Mann-Whitney-gt'
        *pvals = (list) passes own statistics and skips test
    """
    plt.figure(dpi = 800)
    if 'cut' in kwargs:
        fig = sns.violinplot(data=data_df[group_list], palette = condition_pallette, linewidth = .75, scale = "width", cut = kwargs['cut'])
    else:
        fig = sns.violinplot(data=data_df[group_list], palette = condition_pallette, linewidth = .75, scale = "count")
    if 'title' in kwargs:
        plt.suptitle(kwargs['title'])
    if 'x_label' in kwargs:
        plt.xlabel(kwargs['x_label'])
    if 'y_label' in kwargs:
        plt.ylabel(kwargs['y_label'])
    if 'sig_bar' in kwargs:
        pairs = kwargs['sig_bar'] #[(("AZ", "Peripheral"))]
        # BUG FIX: 'run_test' was unbound when neither 'test_type' nor
        # 'pvals' was supplied, raising NameError at add_stat_annotation;
        # default to not computing a test.
        run_test = False
        if 'test_type' in kwargs:
            test_type = kwargs['test_type'] #'t-test_ind'
            run_test = True
        else:
            test_type = None
        if 'pvals' in kwargs:
            pvals = kwargs['pvals']
            run_test = False
        else:
            pvals = None
        add_stat_annotation(fig, data = data_df[group_list], box_pairs = pairs, test = test_type,
                            perform_stat_test = run_test, text_format = 'star', verbose=2, pvalues = pvals) #alternatively use text_format = 'simple'
    plt.grid()
    if 'saveas' in kwargs:
        plt.savefig(r'C://Users//skrem//OneDrive//Universitet//MSc//Experimental project//Figures//Python generated//{}'.format(kwargs['saveas']), dpi = 2000, bbox_inches='tight')
    plt.show()
"Data import/export___________________________________________________________"
Cond_order = ['AZ', 'AZ (CNQX)', 'Peripheral', 'Peripheral (CNQX)']
#_____________________________________________________________________________
def export_data(input_data, file_name, *col_label_list, path = r'C:\Users\skrem\OneDrive\Universitet\MSc\Experimental project\Data export'):
    """Write input_data to '<path>\\<file_name>.xlsx' via pandas.

    The table is transposed when there are more columns than rows, and any
    positional column labels are applied before writing.
    """
    frame = pd.DataFrame.from_records(input_data)
    # Orient the table so observations run down the rows.
    if frame.shape[0] < frame.shape[1]:
        frame = frame.transpose()
    if col_label_list:
        frame.columns = col_label_list
    # Build the Excel writer and flush the frame to disk.
    writer = pd.ExcelWriter(path + '\{}.xlsx'.format(file_name))
    frame.to_excel(writer)
    writer.save()
    print('DataFrame is written successfully to Excel File.')
#_____________________________________________________________________________
def import_data(filename, original_formatting = 0, path = r'C:\Users\skrem\OneDrive\Universitet\MSc\Experimental project\Data export'):
    """Read '<path>\\<filename>.xlsx' back into a DataFrame.

    With original_formatting=1 every column is compacted by dropping its
    NaN padding before the columns are reassembled (mirrors export_data).
    """
    raw = pd.read_excel(path + '\{}.xlsx'.format(filename))
    # The first column is the saved index; drop it.
    trimmed = raw.drop(raw.columns[0], axis = 1)
    if original_formatting != 1:
        return pd.DataFrame(trimmed)
    columns = [trimmed[col].tolist() for col in trimmed.columns]
    compacted = [[x for x in col if np.isnan(x) == False] for col in columns]
    return pd.DataFrame(compacted).T
def sci_not(n):
    """Simple function for converting scientific notation to decimal-point. Forces str instead of float."""
    # Render with 17 fixed decimals, then strip trailing zeros and any
    # dangling decimal point.
    fixed = "%.17f" % n
    decimal = fixed.rstrip('0').rstrip('.')
    print("Scientific notation: ", n)
    print("In decimal notation: ", decimal)
    return decimal
11a2fbe6979effe1904d5387cfd6a9dee9af6510 | 2,881 | py | Python | Stock_Data/green_line_test.py | vhn0912/Finance | 39cf49d4d778d322537531cee4ce3981cc9951f9 | [
"MIT"
] | 441 | 2020-04-22T02:21:19.000Z | 2022-03-29T15:00:24.000Z | Stock_Data/green_line_test.py | happydasch/Finance | 4f6c5ea8f60fb0dc3b965ffb9628df83c2ecef35 | [
"MIT"
] | 5 | 2020-07-06T15:19:58.000Z | 2021-07-23T18:32:29.000Z | Stock_Data/green_line_test.py | happydasch/Finance | 4f6c5ea8f60fb0dc3b965ffb9628df83c2ecef35 | [
"MIT"
] | 111 | 2020-04-21T11:40:39.000Z | 2022-03-20T07:26:17.000Z | import datetime as dt
import pandas as pd
from pandas_datareader import DataReader
import matplotlib.pyplot as plt
from pylab import rcParams
import yahoo_fin.stock_info as si
start = dt.datetime(1980,12,1)
now = dt.datetime.now()
# Interactive loop: one 'green line test' per ticker until the user
# types 'quit'.
stock = input('enter a ticker: ')
while stock != 'quit':
    price = si.get_live_price(stock)
    df = DataReader(stock, 'yahoo', start, now)
    # Discard illiquid days (volume below 1000 shares).
    df.drop(df[df["Volume"]<1000].index, inplace=True)
    # Monthly maximum of the daily highs.
    dfmonth=df.groupby(pd.Grouper(freq="M"))["High"].max()
    glDate=0
    lastGLV=0
    currentDate=""
    curentGLV=0
    # Scan monthly highs: a candidate high becomes a confirmed 'green line'
    # once three lower months follow it (excluding the current month).
    # NOTE(review): 'counter' is first assigned inside the loop when a new
    # high is seen -- presumably the first monthly high is always positive;
    # confirm it cannot be referenced before assignment.
    for index, value in dfmonth.items():
        if value > curentGLV:
            curentGLV=value
            currentDate=index
            counter=0
        if value < curentGLV:
            counter=counter+1
            if counter==3 and ((index.month != now.month) or (index.year != now.year)):
                if curentGLV != lastGLV:
                    print(curentGLV)
                glDate=currentDate
                lastGLV=curentGLV
                counter=0
    if lastGLV==0:
        message=stock+" has not formed a green line yet"
    else:
        # Price far below the green line: just report the gap, no plot.
        if lastGLV > 1.15 * price:
            diff = price/lastGLV
            diff = round(diff - 1, 3)
            diff = diff*100
            message = f"\n{stock.upper()}'s current price ({round(price, 2)}) is {diff}% away from it's last green line value ({round(lastGLV, 2)})"
        else:
            # Price near or above the green line: plot close prices with
            # the green line drawn as a horizontal reference.
            if lastGLV < 1.05 * price:
                diff = lastGLV/price
                diff = round(diff - 1, 3)
                diff = diff*100
                print (f"\n{stock.upper()}'s last green line value ({round(lastGLV, 2)}) is {diff}% greater than it's current price ({round(price, 2)})")
                message=("Last Green Line: "+str(round(lastGLV, 2))+" on "+str(glDate.strftime('%Y-%m-%d')))
                fig, ax = plt.subplots()
                rcParams['figure.figsize'] = 15, 10
                # Zoom to the last 120 sessions when price is close above.
                ax.plot(df['Close'].tail(120))
                ax.axhline(lastGLV, color='g')
                plt.title(f"{stock.upper()}'s Close Price Green Line Test")
                plt.xlabel('Dates')
                plt.ylabel('Close Price')
                plt.show()
            else:
                message=("Last Green Line: "+str(round(lastGLV, 2))+" on "+str(glDate.strftime('%Y-%m-%d')))
                fig, ax = plt.subplots()
                rcParams['figure.figsize'] = 15, 10
                # Full history view for the intermediate case.
                ax.plot(df['Close'])
                ax.axhline(lastGLV, color='g')
                plt.title(f"{stock.upper()}'s Close Price Green Line Test")
                plt.xlabel('Dates')
                plt.ylabel('Close Price')
                plt.show()
    print(message)
stock = input('enter another ticker: ') | 35.567901 | 154 | 0.508157 |
5bfb21804e6aad4148486f735f8bbdcdc5a528fd | 194 | py | Python | Listas_do_Lop/Lista_06/Testes.py | Teuszin/Calculo-Numerico | 3e8fc03166df5476b8e207e2877b02d47b81c5bc | [
"MIT"
] | null | null | null | Listas_do_Lop/Lista_06/Testes.py | Teuszin/Calculo-Numerico | 3e8fc03166df5476b8e207e2877b02d47b81c5bc | [
"MIT"
] | null | null | null | Listas_do_Lop/Lista_06/Testes.py | Teuszin/Calculo-Numerico | 3e8fc03166df5476b8e207e2877b02d47b81c5bc | [
"MIT"
] | null | null | null |
# Build the alternating 2/4 weight vector with both endpoints forced to 1
# (the composite-Simpson-style coefficient pattern 1, 4, 2, 4, ..., 1).
num_de_n = int(input())
pesos = [2 if i % 2 == 0 else 4 for i in range(num_de_n)]
pesos[0] = 1
pesos[num_de_n - 1] = 1
print(pesos) | 14.923077 | 28 | 0.546392 |
7bf97fe32d769cc5c201e2a68429507cc2bac068 | 130,901 | py | Python | PyU4V/provisioning.py | travatine/PyU4V | fce08e8fab3578d95e53e3a2200a81f4debf57da | [
"Apache-2.0"
] | null | null | null | PyU4V/provisioning.py | travatine/PyU4V | fce08e8fab3578d95e53e3a2200a81f4debf57da | [
"Apache-2.0"
] | null | null | null | PyU4V/provisioning.py | travatine/PyU4V | fce08e8fab3578d95e53e3a2200a81f4debf57da | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019 Dell Inc. or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""provisioning.py."""
import logging
import math
import random
import re
from PyU4V.common import CommonFunctions
from PyU4V.utils import constants
from PyU4V.utils import decorators
from PyU4V.utils import exception
from PyU4V.utils import file_handler
LOG = logging.getLogger(__name__)
# Resource constants
SLOPROVISIONING = constants.SLOPROVISIONING
CREATE_VOL_STRING = constants.CREATE_VOL_STRING
ASYNC_UPDATE = constants.ASYNC_UPDATE
SYSTEM = constants.SYSTEM
SYMMETRIX = constants.SYMMETRIX
DIRECTOR = constants.DIRECTOR
PORT = constants.PORT
HOST = constants.HOST
HOSTGROUP = constants.HOSTGROUP
INITIATOR = constants.INITIATOR
MASKINGVIEW = constants.MASKINGVIEW
CONNECTIONS = constants.CONNECTIONS
PORTGROUP = constants.PORTGROUP
SLO = constants.SLO
SRP = constants.SRP
COMPRESSIBILITY_REPORT = constants.COMPRESSIBILITY_REPORT
STORAGEGROUP = constants.STORAGEGROUP
SG_DEMAND_REPORT = constants.SG_DEMAND_REPORT
VOLUME = constants.VOLUME
WORKLOADTYPE = constants.WORKLOADTYPE
FICON_SPLIT = constants.FICON_SPLIT
CU_IMAGE = constants.CU_IMAGE
class ProvisioningFunctions(object):
"""ProvisioningFunctions."""
    def __init__(self, array_id, rest_client):
        """__init__.

        :param array_id: serial number of the array this instance targets
        :param rest_client: initialised REST client used for all requests
        """
        self.array_id = array_id
        self.common = CommonFunctions(rest_client)
        # Shortcuts to the shared CRUD helpers so methods can call
        # self.get_resource(...) etc. directly.
        self.get_resource = self.common.get_resource
        self.create_resource = self.common.create_resource
        self.modify_resource = self.common.modify_resource
        self.delete_resource = self.common.delete_resource
def get_array(self, array_id=None):
"""Query for details of an array from SLOPROVISIONING endpoint.
:param array_id: array serial number -- str
:returns: array details -- dict
"""
array_id = array_id if array_id else self.array_id
response = self.get_resource(
category=SLOPROVISIONING, resource_level=SYMMETRIX,
resource_level_id=array_id)
return response if response else dict()
def get_director(self, director):
"""Query for details of a director for a symmetrix.
:param director: the director ID e.g. FA-1D -- str
:returns: director details -- dict
"""
return self.get_resource(
category=SYSTEM,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=DIRECTOR, resource_type_id=director)
def get_director_list(self):
"""Query for details of Symmetrix directors for a symmetrix.
:returns: directors -- list
"""
response = self.get_resource(
category=SYSTEM,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=DIRECTOR)
return response.get('directorId', list()) if response else list()
    def get_director_port(self, director, port_no):
        """Get details of the symmetrix director port.

        :param director: the director ID e.g. FA-1D -- str
        :param port_no: the port number e.g. 1 -- str
        :returns: director port details -- dict
        """
        # Delegates to the shared GET helper with the
        # system/director/port resource hierarchy.
        return self.get_resource(
            category=SYSTEM,
            resource_level=SYMMETRIX, resource_level_id=self.array_id,
            resource_type=DIRECTOR, resource_type_id=director,
            resource=PORT, resource_id=port_no)
def get_director_port_list(self, director, filters=None):
"""Get list of the ports on a particular director.
Can be filtered by optional parameters, please see documentation.
:param director: the director ID e.g. FA-1D -- str
:param filters: optional filters - dict
:returns: port key dicts -- list
"""
response = self.get_resource(
category=SYSTEM,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=DIRECTOR, resource_type_id=director,
resource=PORT, params=filters)
port_key_list = (
response.get('symmetrixPortKey', list()) if response else list())
return port_key_list
def get_port_identifier(self, director, port_no):
"""Get the identifier (wwn) of the physical port.
:param director: the id of the director -- str
:param port_no: the number of the port -- str
:returns: wwn (FC) or iqn (iscsi) -- str or None
"""
wwn = None
port_info = self.get_director_port(director, port_no)
if port_info:
try:
wwn = port_info['symmetrixPort']['identifier']
except KeyError:
LOG.error('Cannot retrieve port information.')
return wwn
    def get_host(self, host_id):
        """Get details on a host on the array.

        :param host_id: the name of the host -- str
        :returns: host details -- dict
        """
        # Delegates to the shared GET helper for the host resource.
        return self.get_resource(
            category=SLOPROVISIONING,
            resource_level=SYMMETRIX, resource_level_id=self.array_id,
            resource_type=HOST, resource_type_id=host_id)
def get_host_list(self, filters=None):
"""Get list of the hosts on the array.
See documentation for applicable filters.
:param filters: optional list of filters -- dict
:returns: hosts -- list
"""
response = self.get_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=HOST, params=filters)
return response.get('hostId', list()) if response else list()
def create_host(self, host_name, initiator_list=None,
host_flags=None, init_file=None, _async=False):
"""Create a host with the given initiators.
Accepts either initiator_list, e.g.
[10000000ba873cbf, 10000000ba873cba], or file.
The initiators must not be associated with another host.
An empty host can also be created by not passing any initiator ids.
:param host_name: name of the new host -- str
:param initiator_list: list of initiators -- list
:param host_flags: optional host flags to apply -- dict
:param init_file: path to file containing initiator names -- str
:param _async: if call should be _async -- bool
:returns: new host details -- dict
"""
if init_file:
initiator_list = file_handler.create_list_from_file(init_file)
new_ig_data = {'hostId': host_name}
if initiator_list and len(initiator_list) > 0:
new_ig_data.update({'initiatorId': initiator_list})
if host_flags:
new_ig_data.update({'hostFlags': host_flags})
if _async:
new_ig_data.update(ASYNC_UPDATE)
return self.create_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=HOST, payload=new_ig_data)
def modify_host(self, host_id, host_flag_dict=None,
remove_init_list=None, add_init_list=None, new_name=None):
"""Modify an existing host.
Only one parameter can be modified at a time.
:param host_id: host name -- str
:param host_flag_dict: host flags -- dict
:param remove_init_list: initiators to be removed -- list
:param add_init_list: initiators to be added -- list
:param new_name: new host name -- str
:returns: modified host details -- dict
"""
if host_flag_dict:
edit_host_data = ({'editHostActionParam': {
'setHostFlagsParam': {'hostFlags': host_flag_dict}}})
elif remove_init_list:
edit_host_data = ({'editHostActionParam': {
'removeInitiatorParam': {'initiator': remove_init_list}}})
elif add_init_list:
edit_host_data = ({'editHostActionParam': {
'addInitiatorParam': {'initiator': add_init_list}}})
elif new_name:
edit_host_data = ({'editHostActionParam': {
'renameHostParam': {'new_host_name': new_name}}})
else:
msg = ('No modify host parameters chosen - please supply one '
'of the following: host_flag_dict, remove_init_list, '
'add_init_list, or new_name.')
raise exception.InvalidInputException(data=msg)
return self.modify_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=HOST, resource_type_id=host_id,
payload=edit_host_data)
def delete_host(self, host_id):
"""Delete a given host.
Cannot delete if associated with a masking view.
:param host_id: name of the host -- str
"""
self.delete_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=HOST, resource_type_id=host_id)
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.get_masking_views_from_host', 9.1, 10.0)
def get_mvs_from_host(self, host_id):
"""Retrieve masking view information for a specified host.
DEPRECATION NOTICE: ProvisioningFunctions.get_mvs_from_host() will be
refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.get_masking_views_from_host(). For further
information please consult PyU4V 9.1 release notes.
:param host_id: name of the host -- str
:returns: masking views -- list
"""
return self.get_masking_views_from_host(host_id)
def get_masking_views_from_host(self, host_id):
"""Retrieve masking view information for a specified host.
:param host_id: name of the host -- str
:returns: masking views -- list
"""
host = self.get_host(host_id)
return host.get('maskingview', list()) if host else list()
def get_initiator_ids_from_host(self, host_id):
"""Get initiator details from a host.
:param host_id: name of the host -- str
:returns: initiator IDs -- list
"""
host = self.get_host(host_id)
return host.get('initiator', list()) if host else list()
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.get_host_group', 9.1, 10.0)
def get_hostgroup(self, hostgroup_id):
"""Get details on a host group on the array.
DEPRECATION NOTICE: ProvisioningFunctions.get_hostgroup() will be
refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.get_host_group(). For further information please
consult PyU4V 9.1 release notes.
:param hostgroup_id: name of the host group -- str
:returns: host group details -- dict
"""
return self.get_host_group(hostgroup_id)
def get_host_group(self, host_group_id):
"""Get details on a host group on the array.
:param host_group_id: name of the host group -- str
:returns: host group details -- dict
"""
return self.get_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=HOSTGROUP, resource_type_id=host_group_id)
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.get_host_group_list', 9.1, 10.0)
def get_hostgroup_list(self, filters=None):
"""Get list of host group(s) on the array.
DEPRECATION NOTICE: ProvisioningFunctions.get_hostgroup_list() will be
refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.get_host_group_list(). For further information
please consult PyU4V 9.1 release notes.
See unisphere documentation for applicable filters.
:param filters: optional list of filters -- dict
:returns: host group list -- list
"""
return self.get_host_group_list(filters)
def get_host_group_list(self, filters=None):
"""Get list of host group(s) on the array.
See unisphere documentation for applicable filters.
:param filters: optional list of filters -- dict
:returns: host group list -- list
"""
response = self.get_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=HOSTGROUP, params=filters)
return response.get('hostGroupId', list()) if response else list()
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.create_host_group', 9.1, 10.0)
def create_hostgroup(self, hostgroup_id, host_list,
host_flags=None, _async=False):
"""Create a host group containing the given hosts.
DEPRECATION NOTICE: ProvisioningFunctions.create_hostgroup() will be
refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.create_host_group(). For further information
please consult PyU4V 9.1 release notes.
:param hostgroup_id: name of the new host group -- str
:param host_list: hosts -- list
:param host_flags: optional host flags to apply -- dict
:param _async: if call should be async -- bool
:returns: new host group details -- dict
"""
return self.create_host_group(
hostgroup_id, host_list, host_flags, _async)
def create_host_group(self, host_group_id, host_list,
host_flags=None, _async=False):
"""Create a host group containing the given hosts.
:param host_group_id: name of the new host group -- str
:param host_list: hosts -- list
:param host_flags: optional host flags to apply -- dict
:param _async: if call should be async -- bool
:returns: new host group details -- dict
"""
new_ig_data = {'hostId': host_list, 'hostGroupId': host_group_id}
if host_flags:
new_ig_data.update({'hostFlags': host_flags})
if _async:
new_ig_data.update(ASYNC_UPDATE)
return self.create_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=HOSTGROUP, payload=new_ig_data)
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.modify_host_group', 9.1, 10.0)
def modify_hostgroup(self, hostgroup_id, host_flag_dict=None,
remove_host_list=None, add_host_list=None,
new_name=None):
"""Modify an existing host group.
DEPRECATION NOTICE: ProvisioningFunctions.modify_hostgroup() will be
refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.modify_host_group(). For further information
please consult PyU4V 9.1 release notes.
Only one parameter can be modified at a time.
:param hostgroup_id: name of the host group -- str
:param host_flag_dict: host flags -- dict
:param remove_host_list: hosts to be removed -- list
:param add_host_list: hosts to be added -- list
:param new_name: new name of the host group -- str
:returns: modified host group details -- dict
"""
return self.modify_host_group(
hostgroup_id, host_flag_dict, remove_host_list, add_host_list,
new_name)
def modify_host_group(self, host_group_id, host_flag_dict=None,
remove_host_list=None, add_host_list=None,
new_name=None):
"""Modify an existing host group.
Only one parameter can be modified at a time.
:param host_group_id: name of the host group -- str
:param host_flag_dict: host flags -- dict
:param remove_host_list: hosts to be removed -- list
:param add_host_list: hosts to be added -- list
:param new_name: new name of the host group -- str
:returns: modified host group details -- dict
"""
if host_flag_dict:
edit_host_data = ({'editHostGroupActionParam': {
'setHostGroupFlagsParam': {'hostFlags': host_flag_dict}}})
elif remove_host_list:
edit_host_data = ({'editHostGroupActionParam': {
'removeHostParam': {'host': remove_host_list}}})
elif add_host_list:
edit_host_data = ({'editHostGroupActionParam': {
'addHostParam': {'host': add_host_list}}})
elif new_name:
edit_host_data = ({'editHostGroupActionParam': {
'renameHostGroupParam': {'new_host_group_name': new_name}}})
else:
msg = ('No modify host group parameters chosen - please supply '
'one of the following: host_flag_dict, '
'remove_host_list, add_host_list, or new_name.')
raise exception.InvalidInputException(data=msg)
return self.modify_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=HOSTGROUP, resource_type_id=host_group_id,
payload=edit_host_data)
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.delete_host_group', 9.1, 10.0)
def delete_hostgroup(self, hostgroup_id):
"""Delete a given host group.
DEPRECATION NOTICE: ProvisioningFunctions.delete_hostgroup() will be
refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.delete_host_group(). For further information
please consult PyU4V 9.1 release notes.
Cannot delete if associated with a masking view.
:param hostgroup_id: name of the hostgroup -- str
"""
self.delete_host_group(hostgroup_id)
def delete_host_group(self, host_group_id):
"""Delete a given host group.
Cannot delete if associated with a masking view.
:param host_group_id: name of the hostgroup -- str
"""
self.delete_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=HOSTGROUP, resource_type_id=host_group_id)
def get_initiator(self, initiator_id):
"""Get details of an initiator.
:param initiator_id: initiator id -- str
:returns: initiator details -- dict
"""
return self.get_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=INITIATOR, resource_type_id=initiator_id)
def get_initiator_list(self, params=None):
"""Retrieve initiator list from the array.
:param params: optional params -- dict
:returns: initiators -- list
"""
response = self.get_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=INITIATOR, params=params)
return response.get('initiatorId', list()) if response else list()
def modify_initiator(self, initiator_id, remove_masking_entry=None,
replace_init=None, rename_alias=None,
set_fcid=None, initiator_flags=None):
"""Modify an initiator.
Only one parameter can be edited at a time.
:param initiator_id: initiator id -- str
:param remove_masking_entry: 'true' or 'false' -- str
:param replace_init: new initiator id -- str
:param rename_alias: ('new node name', 'new port name') -- tuple
:param set_fcid: fcid -- str
:param initiator_flags: initiator flags to set -- dict
:returns: modified initiator details -- dict
"""
if remove_masking_entry:
edit_init_data = ({'editInitiatorActionParam': {
'removeMaskingEntry': remove_masking_entry}})
elif replace_init:
edit_init_data = ({'editInitiatorActionParam': {
'replaceInitiatorParam': {'new_initiator': replace_init}}})
elif rename_alias:
edit_init_data = ({'editInitiatorActionParam': {
'renameAliasParam': {'node_name': rename_alias[0],
'port_name': rename_alias[1]}}})
elif set_fcid:
edit_init_data = ({'editInitiatorActionParam': {
'initiatorSetAttributesParam': {'fcidValue': set_fcid}}})
elif initiator_flags:
edit_init_data = ({'editInitiatorActionParam': {
'initiatorSetFlagsParam': {
'initiatorFlags': initiator_flags}}})
else:
msg = ('No modify initiator parameters chosen - please supply '
'one of the following: removeMaskingEntry, '
'replace_init, rename_alias, set_fcid, '
'initiator_flags.')
raise exception.InvalidInputException(data=msg)
return self.modify_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=INITIATOR, resource_type_id=initiator_id,
payload=edit_init_data)
def is_initiator_in_host(self, initiator):
"""Check to see if a given initiator is already assigned to a host.
:param initiator: the initiator ID -- str
:returns: if initiator is assigned -- bool
"""
init_list = self.get_in_use_initiator_list_from_array()
for init in init_list:
if initiator in init:
return True
return False
def get_in_use_initiator_list_from_array(self):
"""Get the list of initiators which are in-use from the array.
Gets the list of initiators from the array which are in
hosts/ initiator groups.
:returns: in-use initiators -- list
"""
return self.get_initiator_list({'in_a_host': 'true'})
def get_initiator_group_from_initiator(self, initiator):
"""Given an initiator, get its corresponding initiator group, if any.
:param initiator: the initiator id -- str
:returns: found initiator group name -- str or None
"""
init_details = self.get_initiator(initiator)
return init_details.get('host', None) if init_details else None
def get_masking_view_list(self, filters=None):
"""Get a masking view or list of masking views.
See unisphere documentation for possible filters.
:param filters: filters -- dict
:returns: masking views -- list
"""
response = self.get_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=MASKINGVIEW, params=filters)
return response.get('maskingViewId', list()) if response else list()
def get_masking_view(self, masking_view_name):
"""Get details of a masking view.
:param masking_view_name: the masking view name -- str
:returns: masking view details -- dict
"""
return self.get_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=MASKINGVIEW, resource_type_id=masking_view_name)
def create_masking_view_existing_components(
self, port_group_name, masking_view_name,
storage_group_name, host_name=None,
host_group_name=None, _async=False):
"""Create a new masking view using existing groups.
Must enter either a host name or a host group name, but not both.
:param port_group_name: name of the port group -- str
:param masking_view_name: name of the new masking view -- str
:param storage_group_name: name of the storage group -- str
:param host_name: name of the host (initiator group) -- str
:param host_group_name: name of host group -- str
:param _async: if command should be run asynchronously -- bool
:returns: masking view details -- dict
:raises: InvalidInputException
"""
if host_name:
host_details = {'useExistingHostParam': {'hostId': host_name}}
elif host_group_name:
host_details = ({'useExistingHostGroupParam': {
'hostGroupId': host_group_name}})
else:
msg = 'Must enter either a host name or a host group name.'
raise exception.InvalidInputException(data=msg)
payload = ({
'portGroupSelection': {
'useExistingPortGroupParam': {
'portGroupId': port_group_name}},
'maskingViewId': masking_view_name,
'hostOrHostGroupSelection': host_details,
'storageGroupSelection': {
'useExistingStorageGroupParam': {
'storageGroupId': storage_group_name}}})
if _async:
payload.update(ASYNC_UPDATE)
return self.create_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=MASKINGVIEW, payload=payload)
def get_masking_views_from_storage_group(self, storagegroup):
"""Return any masking views associated with a storage group.
:param storagegroup: storage group name -- str
:returns: masking view list -- list
"""
sg = self.get_storage_group(storagegroup)
return sg.get('maskingview', list()) if sg else list()
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.get_masking_views_by_initiator_group',
9.1, 10.0)
def get_masking_views_by_host(self, initiatorgroup_name):
"""Given a host (initiator group), retrieve the masking view name.
DEPRECATION NOTICE: ProvisioningFunctions.get_masking_views_by_host()
will be refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.get_masking_views_by_host(). For further
information please consult PyU4V 9.1 release notes.
Retrieve the list of masking views associated with the
given initiator group.
:param initiatorgroup_name: name of the initiator group -- str
:returns: masking view names -- list
"""
return self.get_masking_views_by_initiator_group(initiatorgroup_name)
def get_masking_views_by_initiator_group(self, initiator_group_name):
"""Given a host (initiator group), retrieve the masking view name.
Retrieve the list of masking views associated with the
given initiator group.
:param initiator_group_name: name of the initiator group -- str
:returns: masking view names -- list
"""
ig_details = self.get_host(initiator_group_name)
return ig_details.get('maskingview', list()) if ig_details else list()
def get_element_from_masking_view(
self, maskingview_name, portgroup=False, host=False,
storagegroup=False):
"""Return the name of the specified element from a masking view.
:param maskingview_name: masking view name -- str
:param portgroup: port group name -- str
:param host: the host name -- str
:param storagegroup: storage group name -- str
:returns: specified element name -- str
:raises: ResourceNotFoundException
"""
element = None
masking_view_details = self.get_masking_view(maskingview_name)
if masking_view_details:
if portgroup:
element = masking_view_details['portGroupId']
elif host:
if masking_view_details.get('hostId'):
element = masking_view_details['hostId']
elif masking_view_details.get('hostGroupId'):
element = masking_view_details['hostGroupId']
elif storagegroup:
element = masking_view_details['storageGroupId']
else:
exception_message = 'Error retrieving masking group.'
raise exception.ResourceNotFoundException(
data=exception_message)
return element
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.get_port_group_common_masking_views', 9.1, 10.0)
def get_common_masking_views(self, portgroup_name, ig_name):
"""Get common masking views for a given port group and initiator group.
DEPRECATION NOTICE: ProvisioningFunctions.get_common_masking_views()
will be refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.get_port_group_common_masking_views(). For
further information please consult PyU4V 9.1 release notes.
:param portgroup_name: port group name -- str
:param ig_name: initiator group name -- str
:returns: masking views - list
"""
return self.get_port_group_common_masking_views(portgroup_name,
ig_name)
def get_port_group_common_masking_views(self, port_group_name,
initiator_group_name):
"""Get common masking views for a given port group and initiator group.
:param port_group_name: port group name -- str
:param initiator_group_name: initiator group name -- str
:returns: masking views - list
"""
return self.get_masking_view_list(
{'port_group_name': port_group_name,
'host_or_host_group_name': initiator_group_name})
def delete_masking_view(self, maskingview_name):
"""Delete a masking view.
:param maskingview_name: masking view name -- str
"""
self.delete_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=MASKINGVIEW, resource_type_id=maskingview_name)
def rename_masking_view(self, masking_view_id, new_name):
"""Rename an existing masking view.
Currently, the only supported modification is "rename".
:param masking_view_id: current name of the masking view -- str
:param new_name: new name of the masking view -- str
:returns: modified masking view details -- dict
"""
mv_payload = {'editMaskingViewActionParam': {
'renameMaskingViewParam': {'new_masking_view_name': new_name}}}
return self.modify_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=MASKINGVIEW, resource_type_id=masking_view_id,
payload=mv_payload)
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.get_host_from_masking_view',
9.1, 10.0)
def get_host_from_maskingview(self, masking_view_id):
"""Given a masking view, get the associated host or host group.
DEPRECATION NOTICE: ProvisioningFunctions.get_host_from_maskingview()
will be refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.get_host_from_masking_view(). For further
information please consult PyU4V 9.1 release notes.
:param masking_view_id: name of the masking view -- str
:returns: host id -- str
"""
return self.get_host_from_masking_view(masking_view_id)
def get_host_from_masking_view(self, masking_view_id):
"""Given a masking view, get the associated host or host group.
:param masking_view_id: name of the masking view -- str
:returns: host id -- str
"""
mv_details = self.get_masking_view(masking_view_id)
return mv_details.get('hostId', None) if mv_details else None
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.get_storage_group_from_masking_view', 9.1, 10.0)
def get_storagegroup_from_maskingview(self, masking_view_id):
"""Given a masking view, get the associated storage group.
DEPRECATION NOTICE:
ProvisioningFunctions.get_storagegroup_from_maskingview() will be
refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.get_storage_group_from_masking_view(). For
further information please consult PyU4V 9.1 release notes.
:param masking_view_id: masking view name -- str
:returns: name of the storage group -- str
"""
return self.get_storage_group_from_masking_view(masking_view_id)
def get_storage_group_from_masking_view(self, masking_view_id):
"""Given a masking view, get the associated storage group.
:param masking_view_id: masking view name -- str
:returns: name of the storage group -- str
"""
mv_details = self.get_masking_view(masking_view_id)
return mv_details.get('storageGroupId') if mv_details else None
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.get_port_group_from_masking_view', 9.1, 10.0)
def get_portgroup_from_maskingview(self, masking_view_id):
"""Given a masking view, get the associated port group.
DEPRECATION NOTICE:
ProvisioningFunctions.get_portgroup_from_maskingview() will be
refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.get_port_group_from_masking_view(). For further
information please consult PyU4V 9.1 release notes.
:param masking_view_id: masking view name -- str
:returns: name of the port group -- str
"""
return self.get_port_group_from_masking_view(masking_view_id)
def get_port_group_from_masking_view(self, masking_view_id):
"""Given a masking view, get the associated port group.
:param masking_view_id: masking view name -- str
:returns: name of the port group -- str
"""
mv_details = self.get_masking_view(masking_view_id)
return mv_details.get('portGroupId', None) if mv_details else None
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.get_masking_view_connections', 9.1, 10.0)
def get_maskingview_connections(self, mv_name, filters=None):
"""Get all connection information for a given masking view.
DEPRECATION NOTICE: ProvisioningFunctions.get_maskingview_connections()
will be refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.get_masking_view_connections(). For further
information please consult PyU4V 9.1 release notes.
:param mv_name: name of the masking view -- str
:param filters: optional filter parameters -- dict
:returns: masking view connection dicts -- list
"""
return self.get_masking_view_connections(mv_name, filters)
def get_masking_view_connections(self, masking_view_id, filters=None):
"""Get all connection information for a given masking view.
:param masking_view_id: masking view id -- str
:param filters: optional filter parameters -- dict
:returns: masking view connection dicts -- list
"""
response = self.get_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=MASKINGVIEW, resource_type_id=masking_view_id,
resource=CONNECTIONS, params=filters)
return response.get(
'maskingViewConnection', list()) if response else list()
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.find_host_lun_id_for_volume', 9.1, 10.0)
def find_host_lun_id_for_vol(self, maskingview, device_id):
"""Find the host_lun_id for a volume in a masking view.
DEPRECATION NOTICE: ProvisioningFunctions.find_host_lun_id_for_vol()
will be refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.find_host_lun_id_for_volume(). For further
information please consult PyU4V 9.1 release notes.
:param maskingview: masking view name -- str
:param device_id: the device id -- str
:returns: host lun id -- str
"""
return self.find_host_lun_id_for_volume(maskingview, device_id)
def find_host_lun_id_for_volume(self, masking_view_id, device_id):
"""Find the host_lun_id for a volume in a masking view.
:param masking_view_id: masking view id -- str
:param device_id: the device id -- str
:returns: host lun id -- str
"""
host_lun_id = None
filters = {'volume_id': device_id}
connection_info = self.get_maskingview_connections(masking_view_id,
filters)
if len(connection_info) == 0:
LOG.error(
'Cannot retrieve masking view connection information for '
'{dev} in {mv}'.format(dev=device_id, mv=masking_view_id))
else:
try:
host_lun_id = (connection_info[0]['host_lun_address'])
host_lun_id = int(host_lun_id, 16)
except Exception as e:
LOG.error(
'Unable to retrieve connection information for volume '
'{vol} in masking view {mv}. Exception received: '
'{exc}'.format(vol=device_id, mv=masking_view_id, exc=e))
return host_lun_id
def get_port_list(self, filters=None):
"""Query for a list of Symmetrix port keys.
Note a mixture of Front end, back end and RDF port specific values
are not allowed. See UniSphere documentation for possible values.
:param filters: optional filters e.g. {'vnx_attached': 'true'} -- dict
:returns: port key dicts -- list
"""
response = self.get_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=PORT, params=filters)
return response.get('symmetrixPortKey', list()) if response else list()
@decorators.refactoring_notice(
'ProvisioningFunctions', 'ProvisioningFunctions.get_port_group',
9.1, 10.0)
def get_portgroup(self, portgroup_id):
"""Get port group details.
DEPRECATION NOTICE: ProvisioningFunctions.get_portgroup() will be
refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.get_port_group(). For further information please
consult PyU4V 9.1 release notes.
:param portgroup_id: name of the portgroup -- str
:returns: port group details -- dict
"""
return self.get_port_group(portgroup_id)
def get_port_group(self, port_group_id):
"""Get port group details.
:param port_group_id: name of the portgroup -- str
:returns: port group details -- dict
"""
return self.get_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=PORTGROUP, resource_type_id=port_group_id)
@decorators.refactoring_notice(
'ProvisioningFunctions', 'ProvisioningFunctions.get_port_group_list',
9.1, 10.0)
def get_portgroup_list(self, filters=None):
"""Get port group details.
DEPRECATION NOTICE: ProvisioningFunctions.get_portgroup_list() will be
refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.get_port_group_list(). For further information
please consult PyU4V 9.1 release notes.
:param filters: optional filters -- dict
:returns: port groups -- list
"""
return self.get_port_group_list(filters)
def get_port_group_list(self, filters=None):
"""Get port group details.
:param filters: optional filters -- dict
:returns: port groups -- list
"""
response = self.get_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=PORTGROUP, params=filters)
return response.get('portGroupId', list()) if response else list()
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.get_ports_from_port_group', 9.1, 10.0)
def get_ports_from_pg(self, portgroup):
"""Get a list of port identifiers from a port group.
DEPRECATION NOTICE: ProvisioningFunctions.get_ports_from_pg() will be
refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.get_ports_from_port_group(). For further
information please consult PyU4V 9.1 release notes.
:param portgroup: name of the portgroup -- list
:returns: port ids -- list
"""
return self.get_ports_from_port_group(portgroup)
def get_ports_from_port_group(self, port_group):
"""Get a list of port identifiers from a port group.
:param port_group: name of the portgroup -- list
:returns: port ids -- list
"""
port_list = list()
port_group_info = self.get_port_group(port_group)
if port_group_info and port_group_info.get('symmetrixPortKey'):
port_key = port_group_info['symmetrixPortKey']
for key in port_key:
port = key['portId']
port_list.append(port)
return port_list
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.get_target_wwns_from_port_group', 9.1, 10.0)
def get_target_wwns_from_pg(self, portgroup_id):
"""Get the director ports' WWNs.
DEPRECATION NOTICE: ProvisioningFunctions.get_target_wwns_from_pg()
will be refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.get_target_wwns_from_port_group(). For further
information please consult PyU4V 9.1 release notes.
:param portgroup_id: the name of the port group -- str
:returns: target_wwns -- target wwns for the port group -- list
"""
return self.get_target_wwns_from_port_group(portgroup_id)
def get_target_wwns_from_port_group(self, port_group_id):
"""Get the director ports' WWNs.
:param port_group_id: the name of the port group -- str
:returns: target_wwns -- target wwns for the port group -- list
"""
target_wwns = list()
port_group_details = self.get_port_group(port_group_id)
dir_port_list = port_group_details['symmetrixPortKey']
for dir_port in dir_port_list:
dir_id = dir_port['directorId']
port_no = dir_port['portId']
wwn = self.get_port_identifier(dir_id, port_no)
target_wwns.append(wwn)
return target_wwns
def get_iscsi_ip_address_and_iqn(self, port_id):
"""Get the ip addresses from the director port.
:param port_id: director port identifier -- str
:returns: ip addresses, iqn -- list, str
"""
ip_addresses, iqn = list(), None
dir_id = port_id.split(':')[0]
port_no = port_id.split(':')[1]
port_details = self.get_director_port(dir_id, port_no)
if port_details:
try:
ip_addresses = port_details['symmetrixPort']['ip_addresses']
iqn = port_details['symmetrixPort']['identifier']
except (KeyError, TypeError):
LOG.info('Could not get IP address from director port')
return ip_addresses, iqn
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.create_port_group', 9.1, 10.0)
def create_portgroup(self, portgroup_id, director_id, port_id):
"""Create a new port group.
DEPRECATION NOTICE: ProvisioningFunctions.create_portgroup() will be
refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.create_port_group(). For further information
please consult PyU4V 9.1 release notes.
:param portgroup_id: name of the new port group - str
:param director_id: director id -- str
:param port_id: port id -- str
:returns: new port group details -- dict
"""
return self.create_port_group(portgroup_id, director_id, port_id)
def create_port_group(self, port_group_id, director_id, port_id):
"""Create a new port group.
:param port_group_id: name of the new port group - str
:param director_id: director id -- str
:param port_id: port id -- str
:returns: new port group details -- dict
"""
payload = ({'portGroupId': port_group_id,
'symmetrixPortKey': [{'directorId': director_id,
'portId': port_id}]})
result = self.create_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=PORTGROUP, payload=payload)
result = self._update_port_group_port_ids(result)
return result
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.create_multiport_port_group', 9.1, 10.0)
def create_multiport_portgroup(self, portgroup_id, ports):
"""Create a new port group.
DEPRECATION NOTICE: ProvisioningFunctions.create_multiport_portgroup()
will be refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.create_multiport_port_group(). For further
information please consult PyU4V 9.1 release notes.
:param portgroup_id: name of the new port group -- str
:param ports: port dicts Example:
[{'directorId': director_id, 'portId': port_id}] -- list
:returns: new port group details -- dict
"""
return self.create_multiport_port_group(portgroup_id, ports)
def create_multiport_port_group(self, port_group_id, ports):
"""Create a new port group.
:param port_group_id: name of the new port group -- str
:param ports: port dicts Example:
[{'directorId': director_id, 'portId': port_id}] -- list
:returns: new port group details -- dict
"""
payload = ({'portGroupId': port_group_id,
'symmetrixPortKey': ports})
result = self.create_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=PORTGROUP, payload=payload)
result = self._update_port_group_port_ids(result)
return result
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.create_port_group_from_file', 9.1, 10.0)
def create_portgroup_from_file(self, file_name, portgroup_id):
"""Given a file with director:port pairs, create a port group.
DEPRECATION NOTICE: ProvisioningFunctions.create_portgroup_from_file()
will be refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.create_port_group_from_file(). For further
information please consult PyU4V 9.1 release notes.
Each director:port pair must be on a new line.
Example director:port - FA-1D:4.
:param file_name: path to the file -- str
:param portgroup_id: name for the port group -- str
:returns: new port group details -- dict
"""
return self.create_port_group_from_file(file_name, portgroup_id)
def create_port_group_from_file(self, file_name, port_group_id):
"""Given a file with director:port pairs, create a portgroup.
Each director:port pair must be on a new line.
Example director:port - FA-1D:4.
:param file_name: path to the file -- str
:param port_group_id: name for the port group -- str
:returns: new port group details -- dict
"""
port_list = file_handler.create_list_from_file(file_name)
combined_payload = list()
for i in port_list:
current_director_id, current_port_id = i.split(':')
temp_list = {'directorId': current_director_id,
'portId': current_port_id}
combined_payload.append(temp_list)
return self.create_multiport_portgroup(port_group_id, combined_payload)
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.modify_port_group', 9.1, 10.0)
def modify_portgroup(self, portgroup_id, remove_port=None, add_port=None,
rename_portgroup=None):
"""Modify an existing port group.
DEPRECATION NOTICE: ProvisioningFunctions.modify_portgroup() will be
refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.modify_port_group(). For further information
please consult PyU4V 9.1 release notes.
Only one parameter can be modified at a time.
:param portgroup_id: name of the port group -- str
:param remove_port: port details (director_id, port_id) -- tuple
:param add_port: port details (director_id, port_id) -- tuple
:param rename_portgroup: new port group name -- str
:returns: modified port group details -- dict
"""
return self.modify_port_group(portgroup_id, remove_port, add_port,
rename_portgroup)
def modify_port_group(self, port_group_id, remove_port=None, add_port=None,
rename_port_group=None):
"""Modify an existing port group.
Only one parameter can be modified at a time.
:param port_group_id: name of the port group -- str
:param remove_port: port details (director_id, port_id) -- tuple
:param add_port: port details (director_id, port_id) -- tuple
:param rename_port_group: new port group name -- str
:returns: modified port group details -- dict
"""
if remove_port:
edit_pg_data = ({'editPortGroupActionParam': {'removePortParam': {
'port': [{'directorId': remove_port[0],
'portId': remove_port[1]}]}}})
elif add_port:
edit_pg_data = ({'editPortGroupActionParam': {'addPortParam': {
'port': [{'directorId': add_port[0],
'portId': add_port[1]}]}}})
elif rename_port_group:
edit_pg_data = ({'editPortGroupActionParam': {
'renamePortGroupParam': {
'new_port_group_name': rename_port_group}}})
else:
message = ('No modify portgroup parameters set - please set one '
'of the following: remove_port, add_port, or '
'rename_portgroup.')
raise exception.InvalidInputException(data=message)
result = self.modify_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=PORTGROUP, resource_type_id=port_group_id,
payload=edit_pg_data)
if add_port or remove_port:
result = self._update_port_group_port_ids(result)
return result
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.delete_port_group', 9.1, 10.0)
def delete_portgroup(self, portgroup_id):
"""Delete a port group.
DEPRECATION NOTICE: ProvisioningFunctions.delete_portgroup() will be
refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.delete_port_group(). For further information
please consult PyU4V 9.1 release notes.
:param portgroup_id: name of the port group -- str
"""
self.delete_port_group(portgroup_id)
def delete_port_group(self, port_group_id):
"""Delete a port group.
:param port_group_id: name of the port group -- str
"""
self.delete_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=PORTGROUP, resource_type_id=port_group_id)
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.get_service_level_list', 9.1, 10.0)
def get_slo_list(self, filters=None):
"""Retrieve the list of service levels from the array.
DEPRECATION NOTICE: ProvisioningFunctions.get_slo_list() will be
refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.get_service_level_list(). For further information
please consult PyU4V 9.1 release notes.
:param filters: optional filters -- dict
:returns: service level names -- list
"""
return self.get_service_level_list(filters)
def get_service_level_list(self, filters=None):
"""Retrieve the list of service levels from the array.
:param filters: optional filters -- dict
:returns: service level names -- list
"""
slo_dict = self.get_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=SLO, params=filters)
return slo_dict.get('sloId', list()) if slo_dict else list()
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.get_service_level', 9.1, 10.0)
def get_slo(self, slo_id):
"""Get details on a specific service level.
DEPRECATION NOTICE: ProvisioningFunctions.get_slo() will be refactored
in PyU4V version 10.0 in favour of
ProvisioningFunctions.get_service_level(). For further information
please consult PyU4V 9.1 release notes.
:param slo_id: service level agreement -- str
:returns: service level details -- dict
"""
return self.get_service_level(slo_id)
def get_service_level(self, service_level_id):
"""Get details on a specific service level.
:param service_level_id: service level agreement -- str
:returns: service level details -- dict
"""
return self.get_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=SLO, resource_type_id=service_level_id)
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.modify_service_level', 9.1, 10.0)
def modify_slo(self, slo_id, new_name):
"""Modify an SLO.
DEPRECATION NOTICE: ProvisioningFunctions.modify_slo() will be
refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.modify_service_level(). For further information
please consult PyU4V 9.1 release notes.
Currently, the only modification permitted is renaming.
:param slo_id: current name of the service level -- str
:param new_name: new name for the -- str
:returns: modified service level details -- dict
"""
return self.modify_service_level(slo_id, new_name)
def modify_service_level(self, service_level_id, new_name):
"""Modify an SLO.
Currently, the only modification permitted is renaming.
:param service_level_id: current name of the service level -- str
:param new_name: new name for the -- str
:returns: modified service level details -- dict
"""
edit_slo_data = ({'editSloActionParam': {
'renameSloParam': {'sloId': new_name}}})
return self.modify_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=SLO, resource_type_id=service_level_id,
payload=edit_slo_data)
def get_srp(self, srp):
"""Get details on a specific SRP.
:param srp: storage resource pool id -- str
:returns: srp details -- dict
"""
return self.get_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=SRP, resource_type_id=srp)
def get_srp_list(self, filters=None):
"""Get a list of available SRPs on a given array.
:param filters: filter parameters -- dict
:returns: SRPs -- list
"""
response = self.get_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=SRP, params=filters)
return response.get('srpId', list()) if response else list()
def get_compressibility_report(self, srp_id):
"""Get a specified SRP Compressibility Report.
:param srp_id: srp id -- str
:returns: compressibility reports -- list
"""
response = self.get_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=SRP, resource_type_id=srp_id,
resource=COMPRESSIBILITY_REPORT)
return response.get(
'storageGroupCompressibility', list()) if response else list()
def is_compression_capable(self):
"""Check if array is compression capable.
:returns: bool
"""
array_list = self.common.get_v3_or_newer_array_list(
filters={'compressionCapable': 'true'})
return self.array_id in array_list
def get_storage_group(self, storage_group_name):
"""Given a name, return storage group details.
:param storage_group_name: name of the storage group -- str
:returns: storage group details -- dict
"""
return self.get_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=STORAGEGROUP, resource_type_id=storage_group_name)
def get_storage_group_demand_report(self, srp_id=None):
"""Get the storage group demand report.
Get the storage group demand report from Unisphere.
:param srp_id: id of the Storage Resource Pool -- str
:returns: demand report -- dict
"""
if not srp_id:
srp_id = 'SRP_1'
return self.get_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=SRP, resource_type_id=srp_id,
resource=SG_DEMAND_REPORT)
def get_storage_group_list(self, filters=None):
"""Return a list of storage groups.
:param filters: filter parameters -- dict
:returns: storage groups -- list
"""
sg = self.get_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=STORAGEGROUP, params=filters)
return sg.get('storageGroupId', list()) if sg else list()
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.get_masking_view_from_storage_group', 9.1, 10.0)
def get_mv_from_sg(self, storage_group):
"""Get the associated masking views from a given storage group.
DEPRECATION NOTICE: ProvisioningFunctions.get_mv_from_sg() will be
refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.get_masking_view_from_storage_group(). For
further information please consult PyU4V 9.1 release notes.
:param storage_group: name of the storage group -- str
:returns: Masking views -- list
"""
return self.get_masking_view_from_storage_group(storage_group)
def get_masking_view_from_storage_group(self, storage_group):
"""Get the associated masking views from a given storage group.
:param storage_group: name of the storage group -- str
:returns: Masking views -- list
"""
response = self.get_storage_group(storage_group)
return response.get('maskingview', list()) if response else list()
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.get_num_vols_in_storage_group', 9.1, 10.0)
def get_num_vols_in_sg(self, storage_group_name):
"""Get the number of volumes in a storage group.
DEPRECATION NOTICE: ProvisioningFunctions.get_num_vols_in_sg() will be
refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.get_num_vols_in_storage_group(). For further
information please consult PyU4V 9.1 release notes.
:param storage_group_name: storage group name -- str
:returns: number of volumes -- int
"""
return self.get_num_vols_in_storage_group(storage_group_name)
def get_num_vols_in_storage_group(self, storage_group_name):
"""Get the number of volumes in a storage group.
:param storage_group_name: storage group name -- str
:returns: number of volumes -- int
"""
sg = self.get_storage_group(storage_group_name)
return int(sg.get('num_of_vols', 0)) if sg else 0
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.is_child_storage_group_in_parent_storage_group',
9.1, 10.0)
def is_child_sg_in_parent_sg(self, child_name, parent_name):
"""Check if a child storage group is a member of a parent group.
DEPRECATION NOTICE: ProvisioningFunctions.is_child_sg_in_parent_sg()
will be refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.is_child_storage_group_in_parent_storage_group().
For further information please consult PyU4V 9.1 release notes.
:param child_name: child sg name -- str
:param parent_name: parent sg name -- str
:returns: bool
"""
return self.is_child_storage_group_in_parent_storage_group(
child_name, parent_name)
def is_child_storage_group_in_parent_storage_group(self, child_name,
parent_name):
"""Check if a child storage group is a member of a parent group.
:param child_name: child sg name -- str
:param parent_name: parent sg name -- str
:returns: bool
"""
parent_sg = self.get_storage_group(parent_name)
if parent_sg and parent_sg.get('child_storage_group'):
child_sg_list = parent_sg['child_storage_group']
if child_name in child_sg_list:
return True
return False
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.get_child_storage_groups_from_parent',
9.1, 10.0)
def get_child_sg_from_parent(self, parent_name):
"""Get child storage group list from parent storage group.
DEPRECATION NOTICE: ProvisioningFunctions.get_child_sg_from_parent()
will be refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.get_child_storage_groups_from_parent(). For
further information please consult PyU4V 9.1 release notes.
:param parent_name: parent sg name -- str
:returns: child sg details -- list
"""
return self.get_child_storage_groups_from_parent(parent_name)
def get_child_storage_groups_from_parent(self, parent_name):
"""Get child storage group list from parent storage group.
:param parent_name: parent sg name -- str
:returns: child sg details -- list
"""
sg = self.get_storage_group(parent_name)
return sg.get('child_storage_group', list()) if sg else list()
def create_storage_group(
self, srp_id, sg_id, slo=None, workload=None,
do_disable_compression=False, num_vols=0, vol_size=0,
cap_unit='GB', allocate_full=False, _async=False,
vol_name=None, snapshot_policy_ids=None, enable_mobility_id=False, emulation_type='FBA'):
"""Create a storage group with optional volumes on create operation.
:param srp_id: SRP id -- str
:param sg_id: storage group id -- str
:param slo: service level id -- str
:param workload: workload id -- str
:param do_disable_compression: disable compression -- bool
:param num_vols: number of volumes to be created -- int
:param vol_size: the volume size -- int
:param cap_unit: capacity unit (MB, GB, TB, CYL) -- str
:param allocate_full: allocate full capacity -- bool
:param _async: if call should be async -- bool
:param vol_name: name to give to the volume, optional -- str
:param snapshot_policy_ids: list of one or more snapshot policies
to associate with storage group -- list
:param enable_mobility_id: enables unique volume WWN not tied to array
serial number -- bool
:param emulation_type: device emulation type (CKD, FBA) -- str
:returns: storage group details -- dict
"""
srp_id = srp_id if srp_id else 'None'
slo = slo if slo else 'None'
workload = workload if workload else 'None'
payload = ({'srpId': srp_id,
'storageGroupId': sg_id,
'emulation': emulation_type})
volume_attributes = {'volume_size': str(vol_size),
'capacityUnit': cap_unit,
'num_of_vols': num_vols}
if vol_name:
volume_identifier = {'identifier_name': vol_name,
'volumeIdentifierChoice': 'identifier_name'}
volume_attributes.update({'volumeIdentifier': volume_identifier})
slo_param = {'sloId': slo,
'workloadSelection': workload,
'volumeAttributes': [volume_attributes]}
if do_disable_compression:
slo_param.update({'noCompression': 'true'})
if allocate_full:
# If case of full volume allocation, we must set the
# noCompression parameter at true because fully
# allocations and compression are exclusive parameters
slo_param.update({'noCompression': 'true'})
slo_param.update({'allocate_capacity_for_each_vol': 'true'})
slo_param.update({'persist_preallocated_capacity_through_'
'reclaim_or_copy': 'true'})
if snapshot_policy_ids:
payload.update({'snapshot_policies': snapshot_policy_ids})
payload.update({'sloBasedStorageGroupParam': [slo_param]})
if _async:
payload.update(ASYNC_UPDATE)
if enable_mobility_id:
slo_param.update({'enable_mobility_id': enable_mobility_id})
return self.create_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=STORAGEGROUP, payload=payload)
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.create_non_empty_storage_group', 9.1, 10.0)
def create_non_empty_storagegroup(
self, srp_id, sg_id, slo, workload, num_vols, vol_size,
cap_unit, disable_compression=False, _async=False):
"""Create a new storage group with the specified volumes.
DEPRECATION NOTICE:
ProvisioningFunctions.create_non_empty_storagegroup() will be
refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.create_non_empty_storage_group(). For further
information please consult PyU4V 9.1 release notes.
Generates a dictionary for json formatting and calls the create_sg
function to create a new storage group with the specified volumes. Set
the disable_compression flag for disabling compression on an All Flash
array (where compression is on by default).
:param srp_id: SRP id -- str
:param sg_id: storage group id -- str
:param slo: service level id -- str
:param workload: workload id -- str
:param num_vols: number of volumes to be created -- int
:param vol_size: the volume size -- str
:param cap_unit: capacity unit (MB, GB, TB, CYL) -- str
:param disable_compression: disable compression -- bool
:param _async: if call should be async -- bool
:returns: storage group details -- dict
"""
return self.create_non_empty_storage_group(
srp_id, sg_id, slo, workload, num_vols, vol_size, cap_unit,
disable_compression, _async)
def create_non_empty_storage_group(
self, srp_id, storage_group_id, service_level, workload, num_vols,
vol_size, cap_unit, disable_compression=False, _async=False,
vol_name=None, snapshot_policy_ids=None, enable_mobility_id=False, emulation_type='FBA'):
"""Create a new storage group with the specified volumes.
Generates a dictionary for json formatting and calls the create_sg
function to create a new storage group with the specified volumes. Set
the disable_compression flag for disabling compression on an All Flash
array (where compression is on by default).
:param srp_id: SRP id -- str
:param storage_group_id: storage group id -- str
:param service_level: service level id -- str
:param workload: workload id -- str
:param num_vols: number of volumes to be created -- int
:param vol_size: the volume size -- str
:param cap_unit: capacity unit (MB, GB, TB, CYL) -- str
:param disable_compression: disable compression -- bool
:param _async: if call should be async -- bool
:param vol_name: name to give to the volume, optional -- str
:param snapshot_policy_ids: list of one or more snapshot policies
to associate with storage group -- list
:param enable_mobility_id: enables unique volume WWN not tied to array
serial number -- bool
:param emulation_type: device emulation type (CKD, FBA) -- str
:returns: storage group details -- dict
"""
return self.create_storage_group(
srp_id, storage_group_id, service_level, workload,
do_disable_compression=disable_compression,
num_vols=num_vols, vol_size=vol_size, cap_unit=cap_unit,
_async=_async, vol_name=vol_name,
snapshot_policy_ids=snapshot_policy_ids,
enable_mobility_id=enable_mobility_id,
emulation_type=emulation_type)
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.create_empty_storage_group', 9.1, 10.0)
def create_empty_sg(self, srp_id, sg_id, slo, workload,
disable_compression=False, _async=False):
"""Create an empty storage group.
DEPRECATION NOTICE: ProvisioningFunctions.create_empty_sg() will be
refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.create_empty_storage_group(). For further
information please consult PyU4V 9.1 release notes.
Set the disable_compression flag for disabling compression on an All
Flash array (where compression is on by default).
:param srp_id: SRP id -- str
:param sg_id: storage group id -- str
:param slo: service level id -- str
:param workload: workload id -- str
:param disable_compression: disable compression -- bool
:param _async: if call should be async -- bool
:returns: storage group details -- dict
"""
return self.create_empty_storage_group(
srp_id, sg_id, slo, workload, disable_compression, _async)
def create_empty_storage_group(
self, srp_id, storage_group_id, service_level, workload,
disable_compression=False, _async=False,
snapshot_policy_ids=None, emulation_type='FBA'):
"""Create an empty storage group.
Set the disable_compression flag for disabling compression on an All
Flash array (where compression is on by default).
:param srp_id: SRP id -- str
:param storage_group_id: storage group id -- str
:param service_level: service level id -- str
:param workload: workload id -- str
:param disable_compression: disable compression -- bool
:param _async: if call should be async -- bool
:param snapshot_policy_ids: list of one or more snapshot policies
to associate with storage group -- list
:param emulation_type: device emulation type (CKD, FBA) -- str
:returns: storage group details -- dict
"""
return self.create_storage_group(
srp_id, storage_group_id, service_level, workload,
do_disable_compression=disable_compression, _async=_async,
snapshot_policy_ids=snapshot_policy_ids, emulation_type=emulation_type)
def modify_storage_group(self, storage_group_id, payload):
"""Modify a storage group.
:param storage_group_id: storage group id -- str
:param payload: request payload -- dict
:returns: modified storage group details -- dict
"""
return self.modify_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=STORAGEGROUP, resource_type_id=storage_group_id,
payload=payload)
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.add_existing_volume_to_storage_group',
9.1, 10.0)
def add_existing_vol_to_sg(self, sg_id, vol_ids, _async=False):
"""Expand an existing storage group by adding existing volumes.
DEPRECATION NOTICE: ProvisioningFunctions.add_existing_vol_to_sg() will
be refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.add_existing_volume_to_storage_group(). For
further information please consult PyU4V 9.1 release notes.
:param sg_id: storage group id -- str
:param vol_ids: volume device id(s) -- str or list
:param _async: if call should be async -- bool
:returns: storage group details -- dict
"""
return self.add_existing_volume_to_storage_group(
sg_id, vol_ids, _async)
    def add_existing_volume_to_storage_group(
            self, storage_group_id, vol_ids, _async=False,
            remote_array_1_id=None, remote_array_1_sgs=None,
            remote_array_2_id=None, remote_array_2_sgs=None):
        """Expand an existing storage group by adding existing volumes.
        :param storage_group_id: storage group id -- str
        :param vol_ids: volume device id(s) -- str or list
        :param _async: if call should be async -- bool
        :param remote_array_1_id: 12 digit serial number of remote array,
                                  optional -- str
        :param remote_array_1_sgs: list of storage groups on remote array to
                                   add Remote device, Unisphere instance must
                                   be local to R1 storage group otherwise
                                   volumes will only be added to the local
                                   group -- str or list
        :param remote_array_2_id: optional digit serial number of remote array,
                                  only used in multihop SRDF, e.g. R11, or
                                  R1 - R21 - R2 optional -- str
        :param remote_array_2_sgs: storage groups on remote array, optional
                                   -- str or list
        :returns: storage group details -- dict
        """
        # Normalise a single device id to the list form the payload expects.
        if not isinstance(vol_ids, list):
            vol_ids = [vol_ids]
        add_vol_data = {'editStorageGroupActionParam': {
            'expandStorageGroupParam': {
                'addSpecificVolumeParam': {
                    'volumeId': vol_ids}}}}
        if _async:
            add_vol_data.update(ASYNC_UPDATE)
        if remote_array_1_id and remote_array_1_sgs:
            if not isinstance(remote_array_1_sgs, list):
                remote_array_1_sgs = [remote_array_1_sgs]
            # Attach the first-hop remote (SRDF) storage group information
            # to the add-volume parameters.
            add_vol_data['editStorageGroupActionParam'][
                'expandStorageGroupParam']['addSpecificVolumeParam'].update(
                {'remoteSymmSGInfoParam': {
                    'remote_symmetrix_1_id': remote_array_1_id,
                    'remote_symmetrix_1_sgs': remote_array_1_sgs}})
        if remote_array_2_id and remote_array_2_sgs:
            if not isinstance(remote_array_2_sgs, list):
                remote_array_2_sgs = [remote_array_2_sgs]
            # NOTE(review): this branch rewrites remoteSymmSGInfoParam in
            # full, re-using the array-1 values; if remote_array_2_* are
            # supplied without remote_array_1_* the array-1 fields are sent
            # as None -- confirm callers always supply the first hop.
            add_vol_data['editStorageGroupActionParam'][
                'expandStorageGroupParam'][
                'addSpecificVolumeParam'].update(
                {'remoteSymmSGInfoParam': {
                    'remote_symmetrix_1_id': remote_array_1_id,
                    'remote_symmetrix_1_sgs': remote_array_1_sgs,
                    'remote_symmetrix_2_id': remote_array_2_id,
                    'remote_symmetrix_2_sgs': remote_array_2_sgs}})
        return self.modify_storage_group(storage_group_id, add_vol_data)
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.add_new_volume_to_storage_group', 9.1, 10.0)
def add_new_vol_to_storagegroup(self, sg_id, num_vols, vol_size,
cap_unit, _async=False, vol_name=None,
create_new_volumes=None):
"""Expand an existing storage group by adding new volumes.
DEPRECATION NOTICE: ProvisioningFunctions.add_new_vol_to_storagegroup()
will be refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.add_new_volume_to_storage_group(). For further
information please consult PyU4V 9.1 release notes.
:param sg_id: storage group id -- str
:param num_vols: number of volumes to be created -- int
:param vol_size: the volume size -- str
:param cap_unit: capacity unit (MB, GB, TB, CYL) -- str
:param _async: if call should be async -- bool
:param vol_name: name to give to the volume, optional -- str
:param create_new_volumes: new volumes only, no re-use -- bool
:returns: storage group details -- dict
"""
return self.add_new_volume_to_storage_group(
sg_id, num_vols, vol_size, cap_unit, _async, vol_name,
create_new_volumes)
    def add_new_volume_to_storage_group(
            self, storage_group_id, num_vols, vol_size, cap_unit, _async=False,
            vol_name=None, create_new_volumes=None,
            remote_array_1_id=None, remote_array_1_sgs=None,
            remote_array_2_id=None, remote_array_2_sgs=None,
            enable_mobility_id=False, emulation_type='FBA'):
        """Expand an existing storage group by adding new volumes.
        :param storage_group_id: storage group id -- str
        :param num_vols: number of volumes to be created -- int
        :param vol_size: the volume size -- str
        :param cap_unit: capacity unit (MB, GB, TB, CYL) -- str
        :param _async: if call should be async -- bool
        :param vol_name: name to give to the volume, optional -- str
        :param create_new_volumes: new volumes only, no re-use -- bool
        :param remote_array_1_id: 12 digit serial number of remote array,
                                  optional -- str
        :param remote_array_1_sgs: list of storage groups on remote array to
                                   add Remote device, Unisphere instance must
                                   be local to R1 storage group otherwise
                                   volumes will only be added to the local
                                   group -- str or list
        :param remote_array_2_id: optional digit serial number of remote array,
                                  only used in multihop SRDF, e.g. R11, or
                                  R1 - R21 - R2 optional -- str
        :param remote_array_2_sgs: storage groups on remote array, optional
                                   -- str or list
        :param enable_mobility_id: enables unique volume WWN not tied to array
                                   serial number -- bool
        :param emulation_type: device emulation type (CKD, FBA) -- str
        :returns: storage group details -- dict
        """
        add_volume_param = {'emulation': emulation_type }
        if not create_new_volumes:
            # Explicitly allow Unisphere to satisfy the request with
            # suitable existing volumes rather than always creating new ones.
            add_volume_param.update({'create_new_volumes': False})
        volume_attributes = ({
            'num_of_vols': num_vols,
            'volume_size': vol_size,
            'capacityUnit': cap_unit})
        if vol_name:
            volume_identifier = ({
                'identifier_name': vol_name,
                'volumeIdentifierChoice': 'identifier_name'
            })
            volume_attributes.update({
                'volumeIdentifier': volume_identifier})
        add_volume_param.update({'volumeAttributes': [volume_attributes]})
        if enable_mobility_id:
            add_volume_param.update({'enable_mobility_id': enable_mobility_id})
        # add_volume_param is referenced (not copied) inside expand_sg_data,
        # so the remote-array updates below still land in the payload.
        expand_sg_data = ({'editStorageGroupActionParam': {
            'expandStorageGroupParam': {
                'addVolumeParam': add_volume_param
            }}})
        if remote_array_1_id and remote_array_1_sgs:
            if not isinstance(remote_array_1_sgs, list):
                remote_array_1_sgs = [remote_array_1_sgs]
            add_volume_param.update({'remoteSymmSGInfoParam': {
                'remote_symmetrix_1_id': remote_array_1_id,
                'remote_symmetrix_1_sgs': remote_array_1_sgs}})
        if remote_array_2_id and remote_array_2_sgs:
            if not isinstance(remote_array_2_sgs, list):
                remote_array_2_sgs = [remote_array_2_sgs]
            # NOTE(review): assumes the array-1 branch above already created
            # 'remoteSymmSGInfoParam'; supplying remote_array_2_* without
            # remote_array_1_* raises KeyError here -- confirm whether that
            # combination should be validated explicitly.
            add_volume_param['remoteSymmSGInfoParam'].update({
                'remote_symmetrix_2_id': remote_array_2_id,
                'remote_symmetrix_2_sgs': remote_array_2_sgs})
        if _async:
            expand_sg_data.update(ASYNC_UPDATE)
        return self.modify_storage_group(storage_group_id, expand_sg_data)
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.remove_volume_from_storage_group', 9.1, 10.0)
def remove_vol_from_storagegroup(self, sg_id, vol_id, _async=False):
"""Remove a volume from a given storage group.
DEPRECATION NOTICE:
ProvisioningFunctions.remove_vol_from_storagegroup() will be refactored
in PyU4V version 10.0 in favour of
ProvisioningFunctions.remove_volume_from_storage_group(). For further
information please consult PyU4V 9.1 release notes.
:param sg_id: storage group id -- str
:param vol_id: device id -- str
:param _async: if call should be async -- bool
:returns: storage group details -- dict
"""
return self.remove_volume_from_storage_group(sg_id, vol_id, _async)
    def remove_volume_from_storage_group(
            self, storage_group_id, vol_id, _async=False,
            remote_array_1_id=None, remote_array_1_sgs=None,
            remote_array_2_id=None, remote_array_2_sgs=None):
        """Remove a volume from a given storage group.
        :param storage_group_id: storage group id -- str
        :param vol_id: device id -- str
        :param _async: if call should be async -- bool
        :param remote_array_1_id: 12 digit serial number of remote array,
            optional -- str
        :param remote_array_1_sgs: list of storage groups on remote array to
            add Remote device, Unisphere instance must be local to R1
            storage group otherwise volumes will only be added to the
            local group -- str or list
        :param remote_array_2_id: optional digit serial number of remote array,
            only used in multihop SRDF, e.g. R11, or R1 - R21 - R2 optional
            -- str
        :param remote_array_2_sgs: storage groups on remote array, optional
            -- str or list
        :returns: storage group details -- dict
        """
        # Normalise a single device id to the list form the payload expects.
        if not isinstance(vol_id, list):
            vol_id = [vol_id]
        payload = ({'editStorageGroupActionParam': {
            'removeVolumeParam': {'volumeId': vol_id}}})
        if remote_array_1_id and remote_array_1_sgs:
            if not isinstance(remote_array_1_sgs, list):
                remote_array_1_sgs = [remote_array_1_sgs]
            # Replace the payload wholesale with a version that carries the
            # first-hop remote (SRDF) storage group information.
            payload.update(
                {'editStorageGroupActionParam': {
                    'removeVolumeParam': {
                        'volumeId': vol_id,
                        'remoteSymmSGInfoParam': {
                            'remote_symmetrix_1_id': remote_array_1_id,
                            'remote_symmetrix_1_sgs': remote_array_1_sgs}}}})
        if remote_array_2_id and remote_array_2_sgs:
            if not isinstance(remote_array_2_sgs, list):
                remote_array_2_sgs = [remote_array_2_sgs]
            # NOTE(review): this branch rebuilds the payload including the
            # array-1 fields; if remote_array_2_* are supplied without
            # remote_array_1_* those fields are sent as None -- confirm
            # callers always supply the first hop.
            payload.update({'editStorageGroupActionParam': {
                'removeVolumeParam': {
                    'volumeId': vol_id,
                    'remoteSymmSGInfoParam': {
                        'remote_symmetrix_1_id': remote_array_1_id,
                        'remote_symmetrix_1_sgs': remote_array_1_sgs,
                        'remote_symmetrix_2_id': remote_array_2_id,
                        'remote_symmetrix_2_sgs': remote_array_2_sgs}}}})
        if _async:
            payload.update(ASYNC_UPDATE)
        return self.modify_storage_group(storage_group_id, payload)
def move_volumes_between_storage_groups(
self, device_ids, source_storagegroup_name,
target_storagegroup_name, force=False, _async=False):
"""Move volumes to a different storage group.
Requires force set to True if volume is in a masking view.
:param device_ids: volume device id(s) -- str or list
:param source_storagegroup_name: originating storage group name -- str
:param target_storagegroup_name: destination storage group name -- str
:param force: force flag -- bool
:param _async: if call should be async -- bool
:returns: storage group details -- dict
"""
force_flag = 'true' if force else 'false'
if not isinstance(device_ids, list):
device_ids = [device_ids]
payload = ({
'editStorageGroupActionParam': {
'moveVolumeToStorageGroupParam': {
'volumeId': device_ids,
'storageGroupId': target_storagegroup_name,
'force': force_flag}}})
if _async:
payload.update(ASYNC_UPDATE)
return self.modify_storage_group(source_storagegroup_name, payload)
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.create_volume_from_storage_group_return_id',
9.1, 10.0)
def create_volume_from_sg_return_dev_id(
self, volume_name, storagegroup_name, vol_size, cap_unit='GB'):
"""Create a new volume in the given storage group.
DEPRECATION NOTICE:
ProvisioningFunctions.create_volume_from_sg_return_dev_id() will be
refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.create_volume_from_storage_group_return_id(). For
further information please consult PyU4V 9.1 release notes.
:param volume_name: volume name -- str
:param storagegroup_name: storage group id -- str
:param vol_size: volume size -- str
:param cap_unit: capacity unit (MB, GB, TB, CYL) -- str
:returns: device id -- str
"""
return self.create_volume_from_storage_group_return_id(
volume_name, storagegroup_name, vol_size, cap_unit)
    def create_volume_from_storage_group_return_id(
            self, volume_name, storage_group_id, vol_size, cap_unit='GB',
            enable_mobility_id=False, emulation_type='FBA'):
        """Create a new volume in the given storage group.

        The volume is created via an async job and the new device id is
        scraped from the job's task description; if that fails the id is
        looked up by the volume identifier instead.

        :param volume_name: volume name -- str
        :param storage_group_id: storage group id -- str
        :param vol_size: volume size -- str
        :param cap_unit: capacity unit (MB, GB, TB, CYL) -- str
        :param enable_mobility_id: enables unique volume WWN not tied to array
                                   serial number -- bool
        :param emulation_type: device emulation type (CKD, FBA) -- str
        :returns: device id -- str
        """
        # Always run async so the returned job payload can be mined for the
        # newly created device id.
        job = self.add_new_volume_to_storage_group(
            storage_group_id, 1, vol_size, cap_unit,
            _async=True, vol_name=volume_name,
            enable_mobility_id=enable_mobility_id,
            emulation_type=emulation_type)
        task = self.common.wait_for_job('Create volume from storage group',
                                        202, job)
        # Find the newly created volume.
        device_id = None
        if task:
            for t in task:
                try:
                    desc = t['description']
                    if CREATE_VOL_STRING in desc:
                        # The device id is the last whitespace-delimited
                        # token of the description, wrapped in brackets that
                        # are stripped by the [1:-1] slice.
                        t_list = desc.split()
                        device_id = t_list[(len(t_list) - 1)]
                        device_id = device_id[1:-1]
                        break
                except Exception as e:
                    # Best effort: fall through to the identifier lookup.
                    LOG.info(
                        'Could not retrieve device id from job. Exception '
                        'received was {exc}. Attempting retrieval by '
                        'volume_identifier.'.format(exc=e))
        if not device_id:
            device_id = self.find_volume_device_id(volume_name)
        return device_id
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.add_child_storage_group_to_parent_group',
9.1, 10.0)
def add_child_sg_to_parent_sg(self, child_sg, parent_sg):
"""Add a storage group to a parent storage group.
DEPRECATION NOTICE: ProvisioningFunctions.add_child_sg_to_parent_sg()
will be refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.add_child_storage_group_to_parent_group(). For
further information please consult PyU4V 9.1 release notes.
This method adds an existing storage group to another storage
group, i.e. cascaded storage groups.
:param child_sg: child storage group id -- str
:param parent_sg: parent storage group id -- str
:returns: storage group details -- dict
"""
return self.add_child_storage_group_to_parent_group(child_sg,
parent_sg)
def add_child_storage_group_to_parent_group(self, child_storage_group,
parent_storage_group):
"""Add a storage group to a parent storage group.
This method adds an existing storage group to another storage
group, i.e. cascaded storage groups.
:param child_storage_group: child storage group id -- str
:param parent_storage_group: parent storage group id -- str
:returns: storage group details -- dict
"""
payload = ({'editStorageGroupActionParam': {
'expandStorageGroupParam': {
'addExistingStorageGroupParam': {
'storageGroupId': [child_storage_group]}}}})
return self.modify_storage_group(parent_storage_group, payload)
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.remove_child_storage_group_from_parent_group',
9.1, 10.0)
def remove_child_sg_from_parent_sg(self, child_sg, parent_sg):
"""Remove a storage group from its parent storage group.
DEPRECATION NOTICE:
ProvisioningFunctions.remove_child_sg_from_parent_sg()
will be refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.remove_child_storage_group_from_parent_group().
For further information please consult PyU4V 9.1 release notes.
This method removes a child storage group from its parent group.
:param child_sg: child storage group id -- str
:param parent_sg: parent storage group id -- str
:returns: storage group details -- dict
"""
return self.remove_child_storage_group_from_parent_group(child_sg,
parent_sg)
def remove_child_storage_group_from_parent_group(self, child_storage_group,
parent_storage_group):
"""Remove a storage group from its parent storage group.
This method removes a child storage group from its parent group.
:param child_storage_group: child storage group id -- str
:param parent_storage_group: parent storage group id -- str
:returns: storage group details -- dict
"""
payload = ({'editStorageGroupActionParam': {
'removeStorageGroupParam': {
'storageGroupId': [child_storage_group], 'force': 'true'}}})
return self.modify_storage_group(parent_storage_group, payload)
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.update_storage_group_qos', 9.1, 10.0)
def update_storagegroup_qos(self, storage_group_name, qos_specs):
"""Update the storage group instance with QoS details.
DEPRECATION NOTICE: ProvisioningFunctions.update_storagegroup_qos()
will be refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.update_storage_group_qos(). For further
information please consult PyU4V 9.1 release notes.
If maxIOPS or maxMBPS is in qos_specs, then DistributionType can be
modified in addition to maxIOPs or/and maxMBPS.
If maxIOPS or maxMBPS is NOT in qos_specs, we check to see if either
is set in Storage Group. If so, then DistributionType can be modified.
Example qos specs:
{'maxIOPS': '4000', 'maxMBPS': '4000', 'DistributionType': 'Dynamic'}
:param storage_group_name: storage group id -- str
:param qos_specs: qos specifications -- dict
:returns: storage group details -- dict
"""
return self.update_storage_group_qos(storage_group_name, qos_specs)
    def update_storage_group_qos(self, storage_group_id, qos_specs):
        """Update the storage group instance with QoS details.

        If maxIOPS or maxMBPS is in qos_specs, then DistributionType can be
        modified in addition to maxIOPs or/and maxMBPS.

        If maxIOPS or maxMBPS is NOT in qos_specs, we check to see if either
        is set in Storage Group. If so, then DistributionType can be modified.

        Example qos specs:
        {'maxIOPS': '4000', 'maxMBPS': '4000', 'DistributionType': 'Dynamic'}

        :param storage_group_id: storage group id -- str
        :param qos_specs: qos specifications -- dict
        :returns: storage group details -- dict, or None if no setting
            differed from the storage group's current values
        """
        message = None
        sg_details = self.get_storage_group(storage_group_id)
        sg_qos_details = None
        sg_max_iops = None
        sg_max_mbps = None
        sg_distribution_type = None
        # Values sent to the array when a setting is absent from qos_specs.
        max_iops = 'nolimit'
        max_mbps = 'nolimit'
        distribution_type = 'Never'
        # Collects settings that actually differ; the API call is only made
        # when this list is non-empty.
        property_list = list()
        try:
            sg_qos_details = sg_details['hostIOLimit']
            sg_max_iops = sg_qos_details['host_io_limit_io_sec']
            sg_max_mbps = sg_qos_details['host_io_limit_mb_sec']
            sg_distribution_type = sg_qos_details['dynamicDistribution']
        except KeyError:
            # No host IO limit currently configured on the storage group.
            LOG.debug('Unable to get storage group QoS details.')
        if 'maxIOPS' in qos_specs:
            max_iops = qos_specs['maxIOPS']
            if max_iops != sg_max_iops:
                property_list.append(max_iops)
        if 'maxMBPS' in qos_specs:
            max_mbps = qos_specs['maxMBPS']
            if max_mbps != sg_max_mbps:
                property_list.append(max_mbps)
        # DistributionType is only honoured when another limit changed or a
        # limit is already set on the storage group.
        if 'DistributionType' in qos_specs and (
                property_list or sg_qos_details):
            dynamic_list = ['never', 'onfailure', 'always']
            if (qos_specs.get('DistributionType').lower() not
                    in dynamic_list):
                exception_message = (
                    'Wrong Distribution type value {dt} entered. '
                    'Please enter one of: {dl}'.format(
                        dt=qos_specs.get('DistributionType'),
                        dl=dynamic_list))
                LOG.error(exception_message)
                raise exception.InvalidInputException(
                    data=exception_message)
            distribution_type = qos_specs['DistributionType']
            if distribution_type != sg_distribution_type:
                property_list.append(distribution_type)
        if property_list:
            # The payload always carries all three settings, including the
            # 'nolimit'/'Never' defaults for any not supplied.
            payload = {'editStorageGroupActionParam': {
                'setHostIOLimitsParam': {
                    'host_io_limit_io_sec': max_iops,
                    'host_io_limit_mb_sec': max_mbps,
                    'dynamicDistribution': distribution_type}}}
            message = (
                self.modify_storage_group(storage_group_id, payload))
        return message
def set_host_io_limit_iops_or_mbps(
self, storage_group, iops, dynamic_distribution, mbps=None):
"""Set the Host IO Limits on an existing storage group.
:param storage_group: storage group id -- str
:param iops: IO per second, min Value 100, must be specified as
multiple of 100 -- int
:param dynamic_distribution: 'Always', 'Never', 'OnFailure' -- str
:param mbps: MB per second, min Value 100, must be specified as
multiple of 100 -- int
:returns: storage group details -- dict
"""
qos_specs = {'maxIOPS': iops,
'DistributionType': dynamic_distribution}
if mbps:
qos_specs.update({'maxMBPS': mbps})
return self.update_storagegroup_qos(storage_group, qos_specs)
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.delete_storage_group', 9.1, 10.0)
def delete_storagegroup(self, storagegroup_id):
"""Delete a given storage group.
DEPRECATION NOTICE: ProvisioningFunctions.delete_storagegroup() will
be refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.delete_storage_group(). For further information
please consult PyU4V 9.1 release notes.
A storage group cannot be deleted if it is associated with a masking
view.
:param storagegroup_id: storage group id -- str
"""
self.delete_storage_group(storagegroup_id)
def delete_storage_group(self, storage_group_id):
"""Delete a given storage group.
A storage group cannot be deleted if it is associated with a masking
view.
:param storage_group_id: storage group id -- str
"""
self.delete_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=STORAGEGROUP, resource_type_id=storage_group_id)
def get_volume(self, device_id):
"""Get a volume from array.
:param device_id: device id -- str
:returns: volume details -- dict
"""
return self.get_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=VOLUME, resource_type_id=device_id)
    def get_volume_list(self, filters=None):
        """Get list of volumes from array.

        Transparently pages through the server-side iterator when the result
        count exceeds the maximum page size.

        :param filters: filters parameters -- dict
        :returns: device ids -- list
        """
        vol_id_list = list()
        response = self.get_resource(
            category=SLOPROVISIONING,
            resource_level=SYMMETRIX, resource_level_id=self.array_id,
            resource_type=VOLUME, params=filters)
        if (response and response.get('count') and (
                int(response.get('count')) > 0)):
            count = response['count']
            max_page_size = response['maxPageSize']
            if int(count) > int(max_page_size):
                # More results than fit in one page: fetch each page from
                # the server-side iterator (ranges are 1-based inclusive).
                total_iterations = int(math.ceil(count / float(max_page_size)))
                iterator_id = response['id']
                for x in range(0, total_iterations):
                    start = x * max_page_size + 1
                    end = (x + 1) * max_page_size
                    if end > count:
                        end = count
                    vol_page = self.common.get_iterator_page_list(
                        iterator_id, start, end)
                    for vol in vol_page:
                        vol_id_list.append(vol['volumeId'])
            else:
                # Single page: results are embedded in the first response.
                for vol in response['resultList']['result']:
                    vol_id_list.append(vol['volumeId'])
        return vol_id_list
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.get_volume_effective_wwn_details', 9.1, 10.0)
def get_vol_effective_wwn_details_84(self, vol_list,
output_file_name='wwn_data.csv'):
"""Get the effective wwn for a list of vols.
DEPRECATION NOTICE:
ProvisioningFunctions.get_vol_effective_wwn_details_84() will be
refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.get_volume_effective_wwn_details(). For further
information please consult PyU4V 9.1 release notes.
Get volume details for a list of volume device ids, and write results
to a csv file.
:param vol_list: device id(s) -- list
:param output_file_name: name of the output file -- csv
"""
data = list()
data.append(['volumeId', 'effective_wwn', 'wwn', 'has_effective_wwn',
'storageGroupId'])
for device_id in vol_list:
vol_details = self.get_volume(device_id)
data.append([device_id,
vol_details.get('effective_wwn'),
vol_details.get('wwn'),
vol_details.get('has_effective_wwn'),
vol_details.get('storageGroupId')])
file_handler.write_to_csv_file(file_name=output_file_name, data=data)
def get_volume_effective_wwn_details(self, vol_list,
output_file_name=None):
"""Get the effective wwn for a list of vols.
Get volume details for a list of volume device ids.
:param vol_list: device id(s) -- list
:param output_file_name: name of the output file -- str
:returns: volume details list (nested) -- list
"""
data = list()
for device_id in vol_list:
vol_details = self.get_volume(device_id)
data.append([device_id,
vol_details.get('effective_wwn'),
vol_details.get('wwn'),
vol_details.get('has_effective_wwn'),
vol_details.get('storageGroupId')])
if output_file_name:
data.insert(0, ['volume_id', 'effective_wwn', 'wwn',
'has_effective_wwn', 'storage_group_id'])
file_handler.write_to_csv_file(file_name=output_file_name,
data=data)
else:
return data
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.get_volumes_from_storage_group', 9.1, 10.0)
def get_vols_from_storagegroup(self, storagegroup_id):
"""Retrieve volume information associated with a given storage group.
DEPRECATION NOTICE: ProvisioningFunctions.get_vols_from_storagegroup()
will be refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.get_volumes_from_storage_group(). For further
information please consult PyU4V 9.1 release notes.
:param storagegroup_id: storage group id -- name
:returns: device ids -- list
"""
return self.get_volumes_from_storage_group(storagegroup_id)
def get_volumes_from_storage_group(self, storage_group_id):
"""Retrieve volume information associated with a given storage group.
:param storage_group_id: storage group id -- name
:returns: device ids -- list
"""
params = {'storageGroupId': storage_group_id}
volume_list = self.get_volume_list(params)
if len(volume_list) == 0:
LOG.debug('Cannot find record for storage group {sg}'.format(
sg=storage_group_id))
return volume_list
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.get_storage_group_from_volume', 9.1, 10.0)
def get_storagegroup_from_vol(self, vol_id):
"""Retrieve storage group information for a specified volume.
DEPRECATION NOTICE: ProvisioningFunctions.get_storagegroup_from_vol()
will be refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.get_storage_group_from_volume(). For further
information please consult PyU4V 9.1 release notes.
:param vol_id: device id -- str
:returns: storage groups -- list
"""
return self.get_storage_group_from_volume(vol_id)
def get_storage_group_from_volume(self, volume_id):
"""Retrieve storage group information for a specified volume.
:param volume_id: device id -- str
:returns: storage groups -- list
"""
vol = self.get_volume(volume_id)
return vol.get('storageGroupId', list()) if vol else list()
@decorators.refactoring_notice(
'ProvisioningFunctions',
'ProvisioningFunctions.is_volume_in_storage_group', 9.1, 10.0)
def is_volume_in_storagegroup(self, device_id, storagegroup):
"""See if a volume is a member of the given storage group.
DEPRECATION NOTICE: ProvisioningFunctions.is_volume_in_storagegroup()
will be refactored in PyU4V version 10.0 in favour of
ProvisioningFunctions.is_volume_in_storage_group(). For further
information please consult PyU4V 9.1 release notes.
:param device_id: device id -- str
:param storagegroup: storage group id -- str
:returns: bool
"""
return self.is_volume_in_storage_group(device_id, storagegroup)
def is_volume_in_storage_group(self, device_id, storage_group_id):
"""See if a volume is a member of the given storage group.
:param device_id: device id -- str
:param storage_group_id: storage group id -- name
:returns: bool
"""
is_vol_in_sg = False
sg_list = self.get_storagegroup_from_vol(device_id)
if storage_group_id in sg_list:
is_vol_in_sg = True
return is_vol_in_sg
def find_volume_device_id(self, volume_name):
"""Given a volume identifier, find the corresponding device_id.
:param volume_name: the volume name -- str
:returns: device id -- str
"""
device_id = None
params = {'volume_identifier': volume_name}
volume_list = self.get_volume_list(params)
if not volume_list:
LOG.debug('Cannot find record for volume {vol}'.format(
vol=volume_name))
else:
if len(volume_list) == 1:
device_id = volume_list[0]
else:
device_id = volume_list
LOG.warning('{vol} volume name is not unique, returning '
'a list of device ids'.format(vol=volume_name))
return device_id
def find_volume_identifier(self, device_id):
"""Get the volume identifier of a volume.
:param device_id: device id -- str
:returns: volume identifier -- str
"""
vol = self.get_volume(device_id)
return vol.get('volume_identifier', None) if vol else None
def get_size_of_device_on_array(self, device_id):
"""Get the size of the volume from the array.
:param device_id: device id -- str
:returns: size -- float
"""
vol = self.get_volume(device_id)
if vol and vol.get('cap_gb'):
cap = vol['cap_gb']
else:
exception_message = (
'Unable to retrieve size of device {device_id} on the '
'array'.format(device_id=device_id))
raise exception.ResourceNotFoundException(data=exception_message)
return cap
def _modify_volume(self, device_id, payload):
"""Modify a volume.
:param device_id: device id -- str
:param payload: request payload -- dict
:returns: volume details -- dict
"""
return self.modify_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=VOLUME, resource_type_id=device_id,
payload=payload)
def extend_volume(self, device_id, new_size, _async=False,
rdf_group_num=None):
"""Extend a volume.
:param device_id: device id -- str
:param new_size: the new size for the device -- int
:param _async: if call should be async -- bool
:param rdf_group_num: RDF group number to extend R2 device in same
operation -- int
:returns: volume details -- dict
"""
LOG.info('Extending device {dev} to {num}GB.'.format(
dev=device_id, num=new_size))
vol_attributes = ({'expandVolumeParam': {'volumeAttribute': {
'volume_size': str(new_size), 'capacityUnit': 'GB'}}})
if rdf_group_num:
LOG.info('Extending {dev} RDF paired device using online device '
'expansion.'.format(dev=device_id))
vol_attributes['expandVolumeParam']['rdfGroupNumber'] = (
rdf_group_num)
payload = {'editVolumeActionParam': vol_attributes}
if _async:
payload.update(ASYNC_UPDATE)
return self._modify_volume(device_id, payload)
def rename_volume(self, device_id, new_name):
"""Rename a volume.
:param device_id: device id -- str
:param new_name: new name for the volume -- str
"""
if new_name is not None:
vol_identifier_dict = ({
'identifier_name': new_name,
'volumeIdentifierChoice': 'identifier_name'})
else:
vol_identifier_dict = {'volumeIdentifierChoice': 'none'}
rename_vol_payload = ({'editVolumeActionParam': {
'modifyVolumeIdentifierParam': {
'volumeIdentifier': vol_identifier_dict}}})
return self._modify_volume(device_id, rename_vol_payload)
def deallocate_volume(self, device_id):
"""Deallocate all tracks on a volume.
Necessary before deletion. Please note that it is not possible
to know exactly when a de-allocation is complete. This method
will return when the array has accepted the request for de-allocation;
the de-allocation itself happens as a background task on the array.
:param device_id: device id -- str
:returns: volume details -- dict
"""
payload = ({'editVolumeActionParam': {
'freeVolumeParam': {'free_volume': 'true'}}})
return self._modify_volume(device_id, payload)
@decorators.retry(exception.VolumeBackendAPIException)
def delete_volume(self, device_id):
"""Delete a volume.
:param device_id: device id -- str
"""
self.delete_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=VOLUME, resource_type_id=device_id)
def find_low_volume_utilization(self, low_utilization_percentage, csvname):
"""Find volumes under a certain utilization threshold.
Function to find volumes under a specified percentage, (e.g. find
volumes with utilization less than 10%) - may be long running as will
check all sg on array and all storage group. Only identifies volumes
in storage group, note if volume is in more than one sg it may show up
more than once.
:param low_utilization_percentage: low utilization percent -- int
:param csvname: filename for CSV output file -- str
"""
sg_list = self.get_storage_group_list()
data = list()
data.append(['sg_name', 'volume_id', 'identifier', 'capacity',
'allocated_percent'])
for sg in sg_list:
vol_list = self.get_vols_from_storagegroup(sg)
for vol in vol_list:
volume = self.get_volume(vol)
if volume['allocated_percent'] < low_utilization_percentage:
if volume.get('volume_identifier'):
vol_identifier = volume.get('volume_identifier')
else:
vol_identifier = 'None'
data.append([
sg, vol, vol_identifier, volume['cap_gb'],
volume['allocated_percent']])
file_handler.write_to_csv_file(csvname, data)
def get_workload_settings(self):
"""Get valid workload options from array.
:returns: workload settings -- list
"""
wl_details = self.get_resource(
category=SLOPROVISIONING,
resource_level=SYMMETRIX, resource_level_id=self.array_id,
resource_type=WORKLOADTYPE)
return wl_details.get('workloadId', list()) if wl_details else list()
def get_any_director_port(self, director, filters=None):
"""Get a non-GuestOS port from a director.
:param director: director to search for ports with -- str
:param filters: filters to apply when search for port -- str
:returns: port -- int
"""
selected_port = None
if director and re.match(constants.DIRECTOR_SEARCH_PATTERN, director):
port_list = self.get_director_port_list(
director, filters=filters)
# Avoid GOS ports
port_list = [
p for p in port_list if int(p[constants.PORT_ID]) < 30]
if port_list:
selected_port = port_list[0][constants.PORT_ID]
return selected_port
@staticmethod
def format_director_port(director, port):
"""Format separate director port into single string.
:param director: director e.g. FA-2D -- str
:param port: port e.g. 4 -- str
:returns: formatted director:port string --str
"""
return '{d}:{p}'.format(d=director, p=port)
def get_active_masking_view_connections(self):
"""Get list of active connections from any masking view.
:returns: masking view name, connection details -- str, list
"""
masking_view_list = self.get_masking_view_list()
selected_masking_view = None
active_connections = None
for masking_view in masking_view_list:
masking_view_connections = (
self.get_masking_view_connections(masking_view))
if masking_view_connections:
selected_masking_view = masking_view
active_connections = masking_view_connections
break
return selected_masking_view, active_connections
def get_fa_directors(self):
"""Get all FA directors on the array.
:returns: fa director strings -- list
"""
directors = self.get_director_list()
fa_directors = set()
for director in directors:
if 'FA-' in director:
fa_directors.add(director)
return list(fa_directors)
def get_available_initiator(self, director_type=None):
"""Get an available initiator.
:param director_type: director type filter -- str
:returns: single available initiator -- str
"""
all_initiators_set = set(self.get_initiator_list())
in_use_initiators_set = set(
self.get_in_use_initiator_list_from_array())
available_initiators = list(all_initiators_set.difference(
in_use_initiators_set))
if len(available_initiators) > 0:
return_initiator = None
if director_type:
for initiator in available_initiators:
if director_type in initiator:
return_initiator = initiator
break
else:
return_initiator = random.choice(available_initiators)
return return_initiator
def get_in_use_initiator(self, director_type=None):
"""Get an initiator that is in use.
:param director_type: director type filter -- str
:returns: single in-use initiator -- str
"""
# Set manipulation introduces some unordered sorting
# rather than directly passing back first item in in_use list.
initiators = self.get_in_use_initiator_list_from_array()
if len(initiators) > 0:
return_initiator = None
if director_type:
for initiator in initiators:
if director_type in initiator:
return_initiator = initiator
break
else:
return_initiator = random.choice(initiators)
return return_initiator
def get_available_initiator_wwn_as_list(self):
"""Get an available initiator wwn string in a list.
:returns: single available initiator wwn -- list
"""
available_initiator = self.get_available_initiator(
director_type='FA')
if available_initiator:
available_initiator_wwn = available_initiator.split(':')[2]
return [available_initiator_wwn]
@staticmethod
def _update_port_group_port_ids(port_group_details):
"""Given port_group_details, update the port id values if needed
:param port_group_details: results from port group operations -- dict
:returns: port_group_details with corrected symmetrix_port_key -- dict
"""
key = constants.SYMMETRIX_PORT_KEY
if port_group_details and key in port_group_details:
symmetrix_port_key = port_group_details[
constants.SYMMETRIX_PORT_KEY]
for index, port_key in enumerate(symmetrix_port_key):
port_id = port_key[constants.PORT_ID]
split_port_id = port_id.split(':')
if len(split_port_id) > 1:
corrected_port_id = split_port_id[-1]
symmetrix_port_key[index][
constants.PORT_ID] = corrected_port_id
port_group_details[
constants.SYMMETRIX_PORT_KEY] = symmetrix_port_key
return port_group_details
def get_split_list(self):
"""Get list of FICON splits from array.
:returns: split ids -- list
"""
split_id_list = list()
response = self.common.get_resource(category=SLOPROVISIONING,
resource_level=SYMMETRIX,
resource_level_id=self.array_id,
resource_type=FICON_SPLIT)
if (response and response.get('splitId')):
split_id_list = response['splitId']
return split_id_list
def get_split(self, split_id: str):
"""Get details of a specified FICON split.
:param split_id: split id -- str
:returns: split details -- dict
"""
return self.common.get_resource(category=SLOPROVISIONING,
resource_level=SYMMETRIX,
resource_level_id=self.array_id,
resource_type=FICON_SPLIT,
resource_type_id=split_id)
def get_cu_image_list(self, split_id: str):
"""Get list of CU Image SSIDs within a specific FICON Split.
:param split_id: split id -- str
:returns: CU Image ssids -- list
"""
cu_image_ssid_list = list()
response = self.common.get_resource(category=SLOPROVISIONING,
resource_level=SYMMETRIX,
resource_level_id=self.array_id,
resource_type=FICON_SPLIT,
resource_type_id=split_id,
resource=CU_IMAGE)
if (response and response.get('cuImageSSID')):
cu_image_ssid_list = response['cuImageSSID']
return cu_image_ssid_list
def get_cu_image(self, split_id: str, cu_ssid: str):
"""Get details of a specified CU Image.
:param split_id: split id -- str
:param cu_ssid: cu image ssid -- str
:returns: CU Image details -- dict
"""
return self.common.get_resource(category=SLOPROVISIONING,
resource_level=SYMMETRIX,
resource_level_id=self.array_id,
resource_type=FICON_SPLIT,
resource_type_id=split_id,
resource=CU_IMAGE,
resource_id=cu_ssid)
def create_cu_image(self, split_id: str,
cu_number: str, cu_ssid: str, cu_base_address: str, vol_id: str):
"""Creates a new CU image under the specified split.
:param split_id: split id -- str
:param cu_number: cu image number -- str
:param cu_ssid: cu image ssid -- str
:param cu_base_address: cu image ssid -- str
:pamam vol_id volume device id be mapped to the cu -- str
:param _async: if call should be async -- bool
:returns: None
"""
new_cu_data = {"cuImageSSID": cu_ssid,
"cuImageNumber": cu_number,
"startBaseAddress": cu_base_address,
"volumeId": [
vol_id
]
}
# FIXME This call takes over 5 minutes on my powermax 8000 - so need to force async call
new_cu_data.update(ASYNC_UPDATE)
create_cu_async_job = (self.common.create_resource(category=SLOPROVISIONING,
resource_level=SYMMETRIX,
resource_level_id=self.array_id,
resource_type=FICON_SPLIT,
resource_type_id=split_id,
resource=CU_IMAGE,
payload=new_cu_data))
return self.common.wait_for_job(
operation='Create CU Image with volume', status_code=constants.STATUS_202,
job=create_cu_async_job)
def get_cu_image_volumes(self, split_id: str, cu_ssid: str):
"""Get list of Volumes from a specified CU Image.
:param split_id: split id -- str
:param cu_ssid: cu image ssid -- str
:returns: Volume ids -- list
"""
volume_id_list = list()
response = self.common.get_resource(category=SLOPROVISIONING,
resource_level=SYMMETRIX,
resource_level_id=self.array_id,
resource_type=FICON_SPLIT,
resource_type_id=split_id,
resource=CU_IMAGE,
resource_id=cu_ssid,
object_type=VOLUME)
if (response and response.get('volumeId')):
volume_id_list = response['volumeId']
return volume_id_list
    def get_cu_image_volume(self, split_id: str, cu_ssid: str, vol_id: str):
        """Get details of a volume mapped to a specified CU Image.

        :param split_id: split id -- str
        :param cu_ssid: cu image ssid -- str
        :param vol_id: volume device id mapped to the cu -- str
        :returns: volume details -- dict
        """
        return self.common.get_resource(category=SLOPROVISIONING,
                                        resource_level=SYMMETRIX,
                                        resource_level_id=self.array_id,
                                        resource_type=FICON_SPLIT,
                                        resource_type_id=split_id,
                                        resource=CU_IMAGE,
                                        resource_id=cu_ssid,
                                        object_type=VOLUME,
                                        object_type_id=vol_id)
    def modify_cu_image(self, split_id: str, cu_ssid: str,
                        assign_alias_dict=None,
                        remove_alias_dict=None,
                        map_start_address=None,
                        map_volume_list=None,
                        unmap_volume_list=None):
        """Modify an existing cu image.

        Exactly one action is applied per call, selected in this priority
        order: assign alias, remove alias, map volume(s), unmap volume(s).

        :param split_id: split id -- str
        :param cu_ssid: cu image ssid -- str
        :param assign_alias_dict: alias range to be assigned -- dict
        :param remove_alias_dict: alias range to be removed -- dict
        :param map_start_address: start base address used when mapping
            volumes -- str
        :param map_volume_list: volumes to be mapped -- list
        :param unmap_volume_list: volumes to be unmapped -- list
        :raises InvalidInputException: if no action parameter is supplied
        """
        if assign_alias_dict:
            operation = 'Edit CU Image - Assign Alias'
            edit_cu_data = {
                'editCUImageActionParam': {
                    'assignAliasRangeParam': assign_alias_dict
                }
            }
        elif remove_alias_dict:
            operation = 'Edit CU Image - Remove Alias'
            edit_cu_data = {
                'editCUImageActionParam': {
                    'removeAliasRangeParam': remove_alias_dict
                }
            }
        elif map_volume_list:
            operation = 'Edit CU Image - Map Volume(s)'
            edit_cu_data = {
                'editCUImageActionParam': {
                    'mapVolumeParam': {
                        'startBaseAddress': map_start_address,
                        'volumeId': map_volume_list
                    }
                }
            }
        elif unmap_volume_list:
            operation = 'Edit CU Image - Unmap Volume(s)'
            edit_cu_data = {
                'editCUImageActionParam': {
                    'unmapVolumeParam': {
                        'volumeId': unmap_volume_list
                    }
                }
            }
        else:
            msg = ('No modify cu image parameters chosen - please supply '
                   'one of the following: assign_alias_dict, '
                   'remove_alias_dict, map_volume_list, or unmap_volume_list.')
            raise exception.InvalidInputException(data=msg)
        # NOTE: this call can run for several minutes on large arrays, so the
        # request is always forced async and then awaited.
        edit_cu_data.update(ASYNC_UPDATE)
        edit_cu_async_job = (self.common.modify_resource(category=SLOPROVISIONING,
                                                         resource_level=SYMMETRIX,
                                                         resource_level_id=self.array_id,
                                                         resource_type=FICON_SPLIT,
                                                         resource_type_id=split_id,
                                                         resource=CU_IMAGE,
                                                         resource_id=cu_ssid,
                                                         payload=edit_cu_data
                                                         ))
        return self.common.wait_for_job(
            operation=operation, status_code=constants.STATUS_202,
            job=edit_cu_async_job)
| 43.532092 | 101 | 0.628047 |
9dec9e6c6761397e1d5eaf426ceb8b29b72bf828 | 15,758 | py | Python | env/lib/python3.6/site-packages/pip/_vendor/html5lib/serializer.py | 724686158/NosqlEXP3 | e29f2807f075831377456b47cf8c9ce0c8d65c30 | [
"BSD-3-Clause"
] | null | null | null | env/lib/python3.6/site-packages/pip/_vendor/html5lib/serializer.py | 724686158/NosqlEXP3 | e29f2807f075831377456b47cf8c9ce0c8d65c30 | [
"BSD-3-Clause"
] | null | null | null | env/lib/python3.6/site-packages/pip/_vendor/html5lib/serializer.py | 724686158/NosqlEXP3 | e29f2807f075831377456b47cf8c9ce0c8d65c30 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
import re
from codecs import register_error, xmlcharrefreplace_errors
from .constants import voidElements, booleanAttributes, spaceCharacters
from .constants import rcdataElements, entities, xmlEntities
from . import treewalkers, _utils
from xml.sax.saxutils import escape
_quoteAttributeSpecChars = "".join(spaceCharacters) + "\"'=<>`"
_quoteAttributeSpec = re.compile("[" + _quoteAttributeSpecChars + "]")
_quoteAttributeLegacy = re.compile("[" + _quoteAttributeSpecChars +
"\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n"
"\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15"
"\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
"\x20\x2f\x60\xa0\u1680\u180e\u180f\u2000"
"\u2001\u2002\u2003\u2004\u2005\u2006\u2007"
"\u2008\u2009\u200a\u2028\u2029\u202f\u205f"
"\u3000]")
# Reverse map from codepoint -> entity name, used by the
# "htmlentityreplace" codec error handler to emit named entities.
_encode_entity_map = {}
# On narrow Python builds a non-BMP char is a 2-code-unit surrogate pair,
# so len("\U0010FFFF") == 2 there; this flag distinguishes the two cases.
_is_ucs4 = len("\U0010FFFF") == 1
for k, v in list(entities.items()):
    # skip multi-character entities
    if ((_is_ucs4 and len(v) > 1) or
            (not _is_ucs4 and len(v) > 2)):
        continue
    if v != "&":
        if len(v) == 2:
            # narrow build: surrogate pair -> real codepoint
            v = _utils.surrogatePairToCodepoint(v)
        else:
            v = ord(v)
        if v not in _encode_entity_map or k.islower():
            # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
            _encode_entity_map[v] = k
def htmlentityreplace_errors(exc):
    """Codec error handler that replaces unencodable characters with
    HTML entities.

    For encode/translate errors, each offending character (including
    surrogate pairs on narrow builds) is replaced with its named entity
    from ``_encode_entity_map`` when one exists, otherwise with a numeric
    ``&#x...;`` reference.  Other error types are delegated to
    ``xmlcharrefreplace_errors``.
    """
    if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
        res = []
        codepoints = []
        skip = False
        for i, c in enumerate(exc.object[exc.start:exc.end]):
            if skip:
                # second half of a surrogate pair already consumed below
                skip = False
                continue
            index = i + exc.start
            if _utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
                codepoint = _utils.surrogatePairToCodepoint(exc.object[index:index + 2])
                skip = True
            else:
                codepoint = ord(c)
            codepoints.append(codepoint)
        for cp in codepoints:
            e = _encode_entity_map.get(cp)
            if e:
                res.append("&")
                res.append(e)
                # some legacy entity names lack a trailing semicolon
                if not e.endswith(";"):
                    res.append(";")
            else:
                # no named entity: fall back to a hex character reference
                res.append("&#x%s;" % (hex(cp)[2:]))
        return ("".join(res), exc.end)
    else:
        return xmlcharrefreplace_errors(exc)
register_error("htmlentityreplace", htmlentityreplace_errors)
def serialize(input, tree="etree", encoding=None, **serializer_opts):
    """Serialize a token stream to a string with the named treewalker.

    :arg input: the token stream to serialize
    :arg tree: name of the treewalker implementation to use
    :arg encoding: the output encoding, or None for text output
    :arg serializer_opts: options forwarded to
        :py:class:`html5lib.serializer.HTMLSerializer`
    :returns: the tree serialized as a string

    Example:

    >>> from html5lib.html5parser import parse
    >>> from html5lib.serializer import serialize
    >>> token_stream = parse('<html><body><p>Hi!</p></body></html>')
    >>> serialize(token_stream, omit_optional_tags=False)
    '<html><head></head><body><p>Hi!</p></body></html>'
    """
    serializer = HTMLSerializer(**serializer_opts)
    # XXX: Should we cache the walker lookup?
    walker_factory = treewalkers.getTreeWalker(tree)
    return serializer.render(walker_factory(input), encoding)
class HTMLSerializer(object):
    """Serializes a stream of treewalker tokens back into (X)HTML text.

    Behaviour is controlled by the options listed in ``options``; each may
    be passed to :meth:`__init__` as a keyword argument and otherwise
    defaults to the class attribute of the same name.
    """

    # attribute quoting options
    quote_attr_values = "legacy"  # be secure by default
    quote_char = '"'
    use_best_quote_char = True

    # tag syntax options
    omit_optional_tags = True
    minimize_boolean_attributes = True
    use_trailing_solidus = False
    space_before_trailing_solidus = True

    # escaping options
    escape_lt_in_attrs = False
    escape_rcdata = False
    resolve_entities = True

    # miscellaneous options
    alphabetical_attributes = False
    inject_meta_charset = True
    strip_whitespace = False
    sanitize = False

    options = ("quote_attr_values", "quote_char", "use_best_quote_char",
               "omit_optional_tags", "minimize_boolean_attributes",
               "use_trailing_solidus", "space_before_trailing_solidus",
               "escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
               "alphabetical_attributes", "inject_meta_charset",
               "strip_whitespace", "sanitize")

    def __init__(self, **kwargs):
        """Initialize HTMLSerializer.

        :arg inject_meta_charset: whether to inject the meta charset.
            Defaults to ``True``.
        :arg quote_attr_values: quote attribute values that don't require
            quoting per legacy browser behaviour (``"legacy"``), when
            required by the standard (``"spec"``), or always
            (``"always"``).  Defaults to ``"legacy"``.
        :arg quote_char: quote character for attribute quoting.  Defaults
            to ``"`` which uses double quotes unless the attribute value
            contains a double quote, in which case single quotes are used.
        :arg escape_lt_in_attrs: whether to escape ``<`` in attribute
            values.  Defaults to ``False``.
        :arg escape_rcdata: whether to escape characters that need to be
            escaped within normal elements within rcdata elements such as
            style.  Defaults to ``False``.
        :arg resolve_entities: whether to resolve named character entities
            that appear in the source tree.  The XML predefined entities
            &lt; &gt; &amp; &quot; &apos; are unaffected by this setting.
            Defaults to ``True``.
        :arg strip_whitespace: whether to remove semantically meaningless
            whitespace (compresses all whitespace to a single space except
            within ``pre``).  Defaults to ``False``.
        :arg minimize_boolean_attributes: shortens boolean attributes,
            e.g. ``<input disabled="disabled">`` becomes
            ``<input disabled>``.  Defaults to ``True``.
        :arg use_trailing_solidus: include a close-tag slash at the end of
            the start tag of void elements (empty elements whose end tag
            is forbidden), e.g. ``<hr/>``.  Defaults to ``False``.
        :arg space_before_trailing_solidus: place a space immediately
            before the closing slash in a tag using a trailing solidus,
            e.g. ``<hr />``.  Requires ``use_trailing_solidus=True``.
            Defaults to ``True``.
        :arg sanitize: strip all unsafe or unknown constructs from output.
            See :py:class:`html5lib.filters.sanitizer.Filter`.
            Defaults to ``False``.
        :arg omit_optional_tags: omit start/end tags that are optional.
            Defaults to ``True``.
        :arg alphabetical_attributes: reorder attributes to be in
            alphabetical order.  Defaults to ``False``.
        """
        unexpected_args = frozenset(kwargs) - frozenset(self.options)
        if len(unexpected_args) > 0:
            raise TypeError("__init__() got an unexpected keyword argument '%s'" % next(iter(unexpected_args)))
        if 'quote_char' in kwargs:
            self.use_best_quote_char = False
        for attr in self.options:
            setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
        self.errors = []
        self.strict = False

    def encode(self, string):
        """Encode *string*, mapping unencodable characters to entities."""
        assert(isinstance(string, text_type))
        if self.encoding:
            return string.encode(self.encoding, "htmlentityreplace")
        else:
            return string

    def encodeStrict(self, string):
        """Encode *string*; raise on characters the encoding cannot express."""
        assert(isinstance(string, text_type))
        if self.encoding:
            return string.encode(self.encoding, "strict")
        else:
            return string

    def serialize(self, treewalker, encoding=None):
        """Yield the serialized output chunk by chunk for *treewalker*."""
        # pylint:disable=too-many-nested-blocks
        self.encoding = encoding
        in_cdata = False
        self.errors = []

        if encoding and self.inject_meta_charset:
            from .filters.inject_meta_charset import Filter
            treewalker = Filter(treewalker, encoding)
        # Alphabetical attributes is here under the assumption that none of
        # the later filters add or change order of attributes; it needs to be
        # before the sanitizer so escaped elements come out correctly
        if self.alphabetical_attributes:
            from .filters.alphabeticalattributes import Filter
            treewalker = Filter(treewalker)
        # WhitespaceFilter should be used before OptionalTagFilter
        # for maximum efficiency of the latter filter
        if self.strip_whitespace:
            from .filters.whitespace import Filter
            treewalker = Filter(treewalker)
        if self.sanitize:
            from .filters.sanitizer import Filter
            treewalker = Filter(treewalker)
        if self.omit_optional_tags:
            from .filters.optionaltags import Filter
            treewalker = Filter(treewalker)

        for token in treewalker:
            # renamed from `type` to avoid shadowing the builtin
            token_type = token["type"]
            if token_type == "Doctype":
                doctype = "<!DOCTYPE %s" % token["name"]

                if token["publicId"]:
                    doctype += ' PUBLIC "%s"' % token["publicId"]
                elif token["systemId"]:
                    doctype += " SYSTEM"
                if token["systemId"]:
                    if token["systemId"].find('"') >= 0:
                        if token["systemId"].find("'") >= 0:
                            self.serializeError("System identifer contains both single and double quote characters")
                        quote_char = "'"
                    else:
                        quote_char = '"'
                    doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)

                doctype += ">"
                yield self.encodeStrict(doctype)

            elif token_type in ("Characters", "SpaceCharacters"):
                if token_type == "SpaceCharacters" or in_cdata:
                    if in_cdata and token["data"].find("</") >= 0:
                        self.serializeError("Unexpected </ in CDATA")
                    yield self.encode(token["data"])
                else:
                    yield self.encode(escape(token["data"]))

            elif token_type in ("StartTag", "EmptyTag"):
                name = token["name"]
                yield self.encodeStrict("<%s" % name)
                if name in rcdataElements and not self.escape_rcdata:
                    in_cdata = True
                elif in_cdata:
                    self.serializeError("Unexpected child element of a CDATA element")
                for (_, attr_name), attr_value in token["data"].items():
                    # TODO: Add namespace support here
                    k = attr_name
                    v = attr_value
                    yield self.encodeStrict(' ')

                    yield self.encodeStrict(k)
                    if not self.minimize_boolean_attributes or \
                        (k not in booleanAttributes.get(name, tuple()) and
                         k not in booleanAttributes.get("", tuple())):
                        yield self.encodeStrict("=")
                        if self.quote_attr_values == "always" or len(v) == 0:
                            quote_attr = True
                        elif self.quote_attr_values == "spec":
                            quote_attr = _quoteAttributeSpec.search(v) is not None
                        elif self.quote_attr_values == "legacy":
                            quote_attr = _quoteAttributeLegacy.search(v) is not None
                        else:
                            raise ValueError("quote_attr_values must be one of: "
                                             "'always', 'spec', or 'legacy'")
                        # NOTE(fix): these replacements were corrupted (the
                        # entity names had been HTML-unescaped, turning them
                        # into no-ops and a syntax error); restored to the
                        # proper HTML escapes.
                        v = v.replace("&", "&amp;")
                        if self.escape_lt_in_attrs:
                            v = v.replace("<", "&lt;")
                        if quote_attr:
                            quote_char = self.quote_char
                            if self.use_best_quote_char:
                                # pick whichever quote char the value does
                                # not already contain
                                if "'" in v and '"' not in v:
                                    quote_char = '"'
                                elif '"' in v and "'" not in v:
                                    quote_char = "'"
                            if quote_char == "'":
                                v = v.replace("'", "&#39;")
                            else:
                                v = v.replace('"', "&quot;")
                            yield self.encodeStrict(quote_char)
                            yield self.encode(v)
                            yield self.encodeStrict(quote_char)
                        else:
                            yield self.encode(v)
                if name in voidElements and self.use_trailing_solidus:
                    if self.space_before_trailing_solidus:
                        yield self.encodeStrict(" /")
                    else:
                        yield self.encodeStrict("/")
                yield self.encode(">")

            elif token_type == "EndTag":
                name = token["name"]
                if name in rcdataElements:
                    in_cdata = False
                elif in_cdata:
                    self.serializeError("Unexpected child element of a CDATA element")
                yield self.encodeStrict("</%s>" % name)

            elif token_type == "Comment":
                data = token["data"]
                if data.find("--") >= 0:
                    self.serializeError("Comment contains --")
                yield self.encodeStrict("<!--%s-->" % token["data"])

            elif token_type == "Entity":
                name = token["name"]
                key = name + ";"
                if key not in entities:
                    self.serializeError("Entity %s not recognized" % name)
                if self.resolve_entities and key not in xmlEntities:
                    data = entities[key]
                else:
                    data = "&%s;" % name
                yield self.encodeStrict(data)

            else:
                self.serializeError(token["data"])

    def render(self, treewalker, encoding=None):
        """Serializes the stream from the treewalker into a string.

        :arg treewalker: the treewalker to serialize
        :arg encoding: the string encoding to use, or None for text output
        :returns: bytes when *encoding* is given, otherwise str

        Example:

        >>> from html5lib import parse, getTreeWalker
        >>> from html5lib.serializer import HTMLSerializer
        >>> token_stream = parse('<html><body>Hi!</body></html>')
        >>> walker = getTreeWalker('etree')
        >>> serializer = HTMLSerializer(omit_optional_tags=False)
        >>> serializer.render(walker(token_stream))
        '<html><head></head><body>Hi!</body></html>'
        """
        if encoding:
            return b"".join(self.serialize(treewalker, encoding))
        else:
            return "".join(self.serialize(treewalker))

    def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
        """Record a serialization error; raise in strict mode."""
        # XXX The idea is to make data mandatory.
        self.errors.append(data)
        if self.strict:
            raise SerializeError
class SerializeError(Exception):
    """Error in serialized tree; raised by serializeError() in strict mode."""
    pass
| 38.434146 | 116 | 0.551402 |
7cb33090f862fdd7ca04c9103ee8d79d4d175309 | 810 | py | Python | investors/migrations/0065_auto_20200625_1223.py | bizeasy17/investtrack | 3840948896573f3906a5df80ea80859a492f4133 | [
"MIT"
] | null | null | null | investors/migrations/0065_auto_20200625_1223.py | bizeasy17/investtrack | 3840948896573f3906a5df80ea80859a492f4133 | [
"MIT"
] | 3 | 2021-07-15T13:23:28.000Z | 2021-12-09T03:32:16.000Z | investors/migrations/0065_auto_20200625_1223.py | bizeasy17/investtrack | 3840948896573f3906a5df80ea80859a492f4133 | [
"MIT"
] | 1 | 2021-08-19T14:42:59.000Z | 2021-08-19T14:42:59.000Z | # Generated by Django 3.0.7 on 2020-06-25 04:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('investors', '0064_auto_20200621_2226'),
]
operations = [
migrations.AlterField(
model_name='tradestrategy',
name='applied_period',
field=models.CharField(blank=True, choices=[('W', '周线'), ('60', '60分钟'), ('15', '15分钟'), ('M', '月线'), ('30', '30分钟'), ('D', '日线')], default='60', max_length=2, null=True, verbose_name='应用周期'),
),
migrations.AlterField(
model_name='tradestrategy',
name='category',
field=models.CharField(blank=True, choices=[('B', '买'), ('H', '持仓'), ('S', '卖')], max_length=50, null=True, verbose_name='策略分类'),
),
]
| 33.75 | 204 | 0.564198 |
4086dc10f0b8e9e803e1342420a9b049af24e0db | 4,690 | py | Python | test/unit/tools/test_tool_shed_repository_cache.py | michaelcraige/galaxy | 9f872e20dab3f5841db8d123cdbdf9273534c3a5 | [
"CC-BY-3.0"
] | 1 | 2020-09-02T23:26:12.000Z | 2020-09-02T23:26:12.000Z | test/unit/tools/test_tool_shed_repository_cache.py | michaelcraige/galaxy | 9f872e20dab3f5841db8d123cdbdf9273534c3a5 | [
"CC-BY-3.0"
] | null | null | null | test/unit/tools/test_tool_shed_repository_cache.py | michaelcraige/galaxy | 9f872e20dab3f5841db8d123cdbdf9273534c3a5 | [
"CC-BY-3.0"
] | null | null | null | import pytest
from galaxy.model import tool_shed_install
from galaxy.model.tool_shed_install import mapping
from galaxy.tools.cache import ToolShedRepositoryCache
from galaxy.tools.toolbox.base import ToolConfRepository
from galaxy.util import bunch
@pytest.fixture
def mock_app():
    """Minimal stand-in app exposing an in-memory install model."""
    application = bunch.Bunch()
    application.install_model = mapping.init(
        "sqlite:///:memory:", create_tables=True
    )
    return application
@pytest.fixture
def tool_shed_repository_cache(mock_app):
    # Fresh cache bound to the in-memory install model from mock_app.
    tool_shed_repository_cache = ToolShedRepositoryCache(app=mock_app)
    return tool_shed_repository_cache
@pytest.fixture
def repos(mock_app):
    """Ten installed repositories with consecutive changeset revisions."""
    created = [
        create_repo(mock_app, changeset=index + 1, installed_changeset=index)
        for index in range(10)
    ]
    mock_app.install_model.context.flush()
    return created
@pytest.fixture
def tool_conf_repos(tool_shed_repository_cache):
    """Register ten local (tool_conf-defined) repositories in the cache."""
    for revision in range(10, 20):
        local_repo = ToolConfRepository(
            'github.com',
            'example',
            'galaxyproject',
            str(revision),
            str(revision + 1),
            None,
        )
        tool_shed_repository_cache.add_local_repository(local_repo)
def create_repo(app, changeset, installed_changeset, config_filename=None):
    """Persist and return a ToolShedRepository row for
    github.com/galaxyproject/example at the given revisions."""
    tool_entry = {
        # add_to_tool_panel=False so that
        # repository.includes_tools_for_display_in_tool_panel is False in
        # InstalledRepositoryManager.activate_repository()
        'add_to_tool_panel': False,
        'guid': "github.com/galaxyproject/example/test_tool/0.%s" % changeset,
        'tool_config': 'tool.xml',
    }
    metadata = {'tools': [tool_entry]}
    if config_filename:
        metadata['shed_config_filename'] = config_filename
    repository = tool_shed_install.ToolShedRepository(metadata=metadata)
    repository.tool_shed = "github.com"
    repository.owner = "galaxyproject"
    repository.name = "example"
    repository.changeset_revision = str(changeset)
    repository.installed_changeset_revision = str(installed_changeset)
    repository.deleted = False
    repository.uninstalled = False
    app.install_model.context.add(repository)
    return repository
def test_empty_repo_cache(tool_shed_repository_cache):
    # A newly built cache has neither installed nor local repositories.
    assert len(tool_shed_repository_cache.repositories) == 0
    assert len(tool_shed_repository_cache.local_repositories) == 0
def test_add_repository_to_repository_cache(tool_shed_repository_cache, repos):
    # rebuild() picks up the 10 installed repos; no local ones were added.
    tool_shed_repository_cache.rebuild()
    assert len(tool_shed_repository_cache.repositories) == 10
    assert len(tool_shed_repository_cache.local_repositories) == 0
def test_add_repository_and_tool_conf_repository_to_repository_cache(tool_shed_repository_cache, repos, tool_conf_repos):
    # Both installed and tool_conf-defined repos are visible after rebuild.
    tool_shed_repository_cache.rebuild()
    assert len(tool_shed_repository_cache.repositories) == 10
    assert len(tool_shed_repository_cache.local_repositories) == 10
    # rebuild() is idempotent when nothing changed.
    tool_shed_repository_cache.rebuild()
    assert len(tool_shed_repository_cache.repositories) == 10
    assert len(tool_shed_repository_cache.local_repositories) == 10
    # A newly installed repo shows up after the next rebuild.
    create_repo(tool_shed_repository_cache.app, '21', '20')
    tool_shed_repository_cache.app.install_model.context.flush()
    tool_shed_repository_cache.rebuild()
    assert len(tool_shed_repository_cache.repositories) == 11
    assert len(tool_shed_repository_cache.local_repositories) == 10
# Columns: tool_shed, name, owner, changeset_revision,
# installed_changeset_revision, repository_id, and whether a match is
# expected.  Fixtures create installed revisions 0-9 (installed) / 1-10
# (changeset) and local repos 10-19 / 11-20.
@pytest.mark.parametrize('tool_shed,name,owner,changeset_revision,installed_changeset_revision,repository_id,repo_exists', [
    ('github.com', 'example', 'galaxyproject', None, None, None, True),
    ('github.com', 'example', 'noone', None, None, None, False),
    ('github.com', 'example', 'galaxyproject', '1', None, None, True),
    ('github.com', 'example', 'galaxyproject', None, '1', None, True),
    ('github.com', 'example', 'galaxyproject', '2', '1', None, True),
    ('github.com', 'example', 'galaxyproject', '500', '1', None, False),
    ('github.com', 'example', 'galaxyproject', '1', '500', None, False),
    ('github.com', 'example', 'galaxyproject', '2', '1', 1, True),
    ('github.com', 'example', 'galaxyproject', '2', '1', 500, False),
    ('github.com', 'example', 'galaxyproject', '19', '18', None, True),
])
def test_get_installed_repository(tool_shed_repository_cache, repos, tool_conf_repos, tool_shed, name, owner, changeset_revision, installed_changeset_revision, repository_id, repo_exists):
    tool_shed_repository_cache.rebuild()
    repo = tool_shed_repository_cache.get_installed_repository(
        tool_shed=tool_shed,
        name=name,
        owner=owner,
        installed_changeset_revision=installed_changeset_revision,
        changeset_revision=changeset_revision,
        repository_id=repository_id
    )
    if repo_exists:
        assert repo
    else:
        assert repo is None
| 40.08547 | 188 | 0.730917 |
e075818ea1dc563feab52d6f8491b4ccd94cf3e8 | 2,677 | py | Python | sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2018_06_17_preview/_application_insights_management_client.py | aiven/azure-sdk-for-python | 8764dc07423beca46ed0b51212d81289d9e52c60 | [
"MIT"
] | null | null | null | sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2018_06_17_preview/_application_insights_management_client.py | aiven/azure-sdk-for-python | 8764dc07423beca46ed0b51212d81289d9e52c60 | [
"MIT"
] | 1 | 2021-02-23T23:11:26.000Z | 2021-02-23T23:11:26.000Z | sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2018_06_17_preview/_application_insights_management_client.py | aiven/azure-sdk-for-python | 8764dc07423beca46ed0b51212d81289d9e52c60 | [
"MIT"
] | 1 | 2021-05-19T02:55:10.000Z | 2021-05-19T02:55:10.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from ._configuration import ApplicationInsightsManagementClientConfiguration
from .operations import WorkbooksOperations
from . import models
class ApplicationInsightsManagementClient(object):
    """Composite Swagger for Application Insights Management Client.

    NOTE: this class is AutoRest-generated (see file header); regenerate
    rather than hand-editing.

    :ivar workbooks: WorkbooksOperations operations
    :vartype workbooks: azure.mgmt.applicationinsights.v2018_06_17_preview.operations.WorkbooksOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: The ID of the target subscription.
    :type subscription_id: str
    :param str base_url: Service URL
    """

    def __init__(
        self,
        credential,  # type: "TokenCredential"
        subscription_id,  # type: str
        base_url=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        if not base_url:
            # default to the public Azure Resource Manager endpoint
            base_url = 'https://management.azure.com'
        self._config = ApplicationInsightsManagementClientConfiguration(credential, subscription_id, **kwargs)
        self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # collect the generated model classes for (de)serialization
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

        self.workbooks = WorkbooksOperations(
            self._client, self._config, self._serialize, self._deserialize)

    def close(self):
        # type: () -> None
        self._client.close()

    def __enter__(self):
        # type: () -> ApplicationInsightsManagementClient
        self._client.__enter__()
        return self

    def __exit__(self, *exc_details):
        # type: (Any) -> None
        self._client.__exit__(*exc_details)
| 38.242857 | 110 | 0.67613 |
dad4e68f163b4a7d048dc5f59cf5c6760746dda8 | 2,733 | py | Python | app/routers/session.py | coolexplorer/py-session | 86ffbd3f1744df18d2deeaa77eebe5dd9980c6ae | [
"MIT"
] | 2 | 2021-08-17T07:15:46.000Z | 2021-08-17T07:18:15.000Z | app/routers/session.py | coolexplorer/py-session | 86ffbd3f1744df18d2deeaa77eebe5dd9980c6ae | [
"MIT"
] | 1 | 2022-02-24T06:52:08.000Z | 2022-02-24T06:52:08.000Z | app/routers/session.py | coolexplorer/py-session | 86ffbd3f1744df18d2deeaa77eebe5dd9980c6ae | [
"MIT"
] | null | null | null | import logging
from os import stat
from fastapi import APIRouter, Header, Depends, status, HTTPException
from fastapi_versioning import version
from requests.api import get
from starlette.status import HTTP_404_NOT_FOUND, HTTP_500_INTERNAL_SERVER_ERROR
from config import config
from const.url import *
from dependencies.token import validate_token, get_session_id
import schemas.session as session_schema
import schemas.base_response as base_response_schema
from redis.redis_account import redis_account
from utils.url import *
logger = logging.getLogger(__name__)
router = APIRouter(
tags=['session'],
dependencies=[Depends(validate_token)]
)
@router.post('/session')
@version(1)
async def create_session(sessionIn: session_schema.SessionIn, session_id: str = Depends(get_session_id)):
    """Store the posted session payload in Redis under the caller's session id."""
    stored = await redis_account.session_crud.set_dict(session_id, sessionIn.data)
    if not stored:
        raise HTTPException(
            status_code=HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Redis set command failed."
        )
    return session_schema.SessionOut(data=sessionIn.data)
@router.get('/session')
@version(1)
async def get_session(session_id: str = Depends(get_session_id)):
    """Return the full session payload stored for the caller's session id.

    Responds 404 when no session data exists for this id; previously this
    was reported as a 500 even though HTTP_404_NOT_FOUND was imported
    (and unused) for exactly this case.
    """
    session_data = await redis_account.session_crud.get_all(session_id)
    if session_data:
        return session_schema.SessionOut(data=session_data)
    raise HTTPException(
        status_code=HTTP_404_NOT_FOUND,
        detail="Session data is not exist."
    )
@router.put('/session')
@version(1)
async def update_session(sessionIn: session_schema.SessionIn, session_id: str = Depends(get_session_id)):
    """Overwrite the stored session payload for the caller's session id.

    Responds 404 when no session exists yet (a missing session is a client
    condition, not a server error — HTTP_404_NOT_FOUND was imported but
    previously unused); 500 is reserved for an actual Redis write failure.
    """
    session_data = await redis_account.session_crud.get_all(session_id)
    if not session_data:
        raise HTTPException(
            status_code=HTTP_404_NOT_FOUND,
            detail="Session data is not exist."
        )
    response = await redis_account.session_crud.set_dict(session_id, sessionIn.data)
    if response:
        return session_schema.SessionOut(data=sessionIn.data)
    raise HTTPException(
        status_code=HTTP_500_INTERNAL_SERVER_ERROR,
        detail="Redis set command failed."
    )
@router.put('/session/touch')
@version(1)
async def touch_session(session_id: str = Depends(get_session_id)):
    """Refresh the TTL of the caller's session entry in Redis."""
    touched = await redis_account.session_crud.touch(session_id)
    if not touched:
        raise HTTPException(
            status_code=HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Redis set command failed."
        )
    return base_response_schema.BaseResponse(result=touched)
e404e0b370ed12bf85f62c880a9655558e15613c | 463 | py | Python | dev_scripts/scrape.py | ayuhsya/kanji-flashcards | 3369e46797f07721c8f4bd666c96e6d5354cb8af | [
"MIT"
] | null | null | null | dev_scripts/scrape.py | ayuhsya/kanji-flashcards | 3369e46797f07721c8f4bd666c96e6d5354cb8af | [
"MIT"
] | null | null | null | dev_scripts/scrape.py | ayuhsya/kanji-flashcards | 3369e46797f07721c8f4bd666c96e6d5354cb8af | [
"MIT"
] | null | null | null | import json
import urllib
import urllib.parse
import urllib.request
from pprint import pprint
data=json.load(open('kanji.json'))
pretty_data=[]
#print (data)
for entry in data:
query={
'kanji': entry[1][u'content'],
'kunyomi': entry[2][u'content'],
'onyomi': entry[3][u'content'],
'meaning': entry[4][u'content']
}
pretty_data.append(query)
with open('kanji_parsed.json', 'w') as outfile:
json.dump(pretty_data, outfile, ensure_ascii=False) | 18.52 | 55 | 0.699784 |
27b4a9b872f95dee2cb75bf9ba721769dd95803f | 3,853 | py | Python | seed/depends/auth/types.py | h4wldev/seed | 2febcb39edb6086128022e40d8734b0e3f93ebb1 | [
"MIT"
] | 3 | 2020-12-24T12:01:13.000Z | 2021-06-01T06:23:41.000Z | seed/depends/auth/types.py | h4wldev/seed | 2febcb39edb6086128022e40d8734b0e3f93ebb1 | [
"MIT"
] | null | null | null | seed/depends/auth/types.py | h4wldev/seed | 2febcb39edb6086128022e40d8734b0e3f93ebb1 | [
"MIT"
] | null | null | null | import arrow
import jwt
import uuid
import orjson
from typing import Any, Dict, Union, Optional
from seed.depends.redis import RedisContextManager
from seed.utils.convert import units_to_seconds
from seed.utils.crypto import AESCipher
from seed.setting import setting
class JWTTokenType:
    """Constants naming the two supported JWT token kinds."""
    ACCESS_TOKEN: str = 'access'
    REFRESH_TOKEN: str = 'refresh'
class JWTToken(JWTTokenType):
    """A JWT access/refresh token with server-side revocation via Redis.

    ``create()`` mints a token and stores its ``jti`` in Redis under
    ``token:<subject>``; ``verify()`` later checks a presented token's
    ``jti`` against that stored value, allowing tokens to be revoked
    server-side.  The ``secrets`` claim is embedded AES-encrypted.
    """

    aes_cipher: AESCipher = AESCipher()

    def __init__(
        self,
        credential: str,
        algorithm: Optional[str] = None,
        claims: Optional[Dict[str, Any]] = None
    ) -> None:
        self.credential: str = credential
        self.algorithm: str = algorithm or setting.jwt.algorithm
        self.claims: Dict[str, Any] = claims or self.decode(
            credential=credential,
            algorithm=self.algorithm
        )

        self.id: str = self.claims['jti']
        self.subject: str = self.claims['sub']
        self.payload: Dict[str, Any] = self.claims['payload']
        # 'secrets' is stored as an AES-encrypted JSON document (see
        # create()); decrypt AND json-decode it so callers really get a
        # dict, matching the declared type.  Previously the raw JSON
        # string was assigned here.
        self.secrets: Dict[str, Any] = orjson.loads(
            self.aes_cipher.decrypt(self.claims['secrets'])
        )

        self.token_type: str = self.claims['type']
        # 'exp'/'exp_in' are only present on expiring tokens (create()
        # with expires=None omits them); use .get() so a non-expiring
        # token does not raise KeyError here.
        self.expires_in: Optional[int] = self.claims.get('exp_in')
        expires_at: Optional[int] = self.claims.get('exp')
        self.expires: Optional['Arrow'] = (
            arrow.get(expires_at).to(setting.timezone)
            if expires_at is not None else None
        )
        self.created_at: 'Arrow' = arrow.get(self.claims['iat']).to(setting.timezone)

        self.redis_name: str = f'token:{self.subject}'

    def verify(self) -> bool:
        """Return True when this token's jti matches the uuid stored in Redis."""
        with RedisContextManager() as r:
            stored_uuid: Optional[bytes] = r.hget(
                name=self.redis_name,
                key=self.token_type,
            )

        return stored_uuid is not None and \
            self.id == stored_uuid.decode()

    @classmethod
    def create(
        cls,
        subject: str,
        payload: Optional[Dict[str, Any]] = None,
        secrets: Optional[Dict[str, Any]] = None,
        token_type: Optional[str] = 'access',
        expires: Union[int, str, None] = None,
        algorithm: Optional[str] = None
    ) -> 'JWTToken':
        """Mint a new token, register its jti in Redis, and return it.

        :param subject: token subject (``sub`` claim) and Redis key suffix.
        :param payload: public claims embedded verbatim.
        :param secrets: private claims, AES-encrypted before embedding.
        :param token_type: ``'access'`` or ``'refresh'``.
        :param expires: lifetime in seconds or a unit string; ``None``
            falls back to the ``<token_type>_token_expires`` setting and,
            when that is also unset, creates a non-expiring token.
        :param algorithm: JWT signing algorithm; defaults from settings.
        """
        # Avoid shared mutable default arguments; treat None as "empty".
        payload = payload if payload is not None else {}
        secrets = secrets if secrets is not None else {}

        token_type = token_type or JWTTokenType.ACCESS_TOKEN
        algorithm = algorithm or setting.jwt.algorithm
        expires = expires or (
            setting.jwt.get(f'{token_type}_token_expires', None)
        )

        uuid_: str = str(uuid.uuid4())
        now: int = arrow.now(setting.timezone).int_timestamp

        claims: Dict[str, Any] = {
            'sub': subject,
            'iat': now,
            'nbf': now,
            'jti': uuid_,
            'type': token_type,
            'payload': payload,
            'secrets': cls.aes_cipher.encrypt(
                orjson.dumps(secrets).decode('utf-8')
            ),
        }

        if expires is not None:
            if isinstance(expires, str):
                expires = units_to_seconds(expires)

            claims['exp'] = now + expires
            claims['exp_in'] = expires

        with RedisContextManager() as r:
            r.hset(
                name=f'token:{subject}',
                key=token_type,
                value=uuid_,
            )

            # Only bound the Redis entry's lifetime when the refresh token
            # actually expires; previously a non-expiring refresh token
            # raised KeyError on claims['exp_in'] here.
            if token_type == JWTTokenType.REFRESH_TOKEN and 'exp_in' in claims:  # pragma: no cover
                r.expire(
                    name=f'token:{subject}',
                    time=claims['exp_in'],
                )

        return cls(
            credential=jwt.encode(
                claims,
                setting.secret_key.jwt_secret_key,
                algorithm=algorithm,
                headers={'typ': 'JWT', 'alg': algorithm}
            ),
            algorithm=algorithm,
            claims=claims,
        )

    @staticmethod
    def decode(
        credential: str,
        algorithm: str = 'HS256'
    ) -> Dict[str, Any]:
        """Decode and signature-verify a JWT, returning its claims dict."""
        return jwt.decode(
            credential,
            setting.secret_key.jwt_secret_key,
            algorithms=algorithm
        )
| 29.189394 | 85 | 0.547885 |
b2d8621920a4738eca680b172944edbf6ec37295 | 1,792 | py | Python | app/exreamlystupidui/html_renderer.py | smartFreshBear/RecipeGrabber | 2efa2a621d1b7bb16efe0c027ebfdcd8fcb20599 | [
"Apache-2.0"
] | null | null | null | app/exreamlystupidui/html_renderer.py | smartFreshBear/RecipeGrabber | 2efa2a621d1b7bb16efe0c027ebfdcd8fcb20599 | [
"Apache-2.0"
] | 16 | 2021-02-04T21:01:30.000Z | 2021-09-27T23:50:10.000Z | app/exreamlystupidui/html_renderer.py | smartFreshBear/RecipeGrabber | 2efa2a621d1b7bb16efe0c027ebfdcd8fcb20599 | [
"Apache-2.0"
] | 1 | 2021-07-30T16:45:01.000Z | 2021-07-30T16:45:01.000Z | import django
from django.conf import settings
from django.template import Template, Context
simplified_recipe_response_template_html = """
<!DOCTYPE html>
<html lang="en">
<head>
<link rel='shortcut icon' type='image/x-icon' href='/favicon.ico' />
</head>
<h1 style="color: #5e9ca0; text-align: right;">:המתכון</h1>
<h2 style="color: #2e6c80; text-align: right;">:מצרכים</h2>
<ol style="list-style-type: hebrew; direction: rtl;">
<p style="text-align: right;">{{ingredients}}</p>
</ol>
<h2 style="color: #2e6c80; text-align: right;">:הוראות הכנה</h2>
<p style="text-align: right;">{{instructions}}</p>
<p><strong> </strong></p>
</html>
"""
# Static home page markup (slide-deck embed).
# Fix: the opening delimiter was previously `""""` (four quotes), which
# embedded a stray `"` as the very first character of the rendered page.
home_page_html = """
<!DOCTYPE html>
<html lang="en">
<head>
 <link rel='shortcut icon' type='image/x-icon' href='/favicon.ico' />
</head>
<iframe src="https://docs.google.com/presentation/d/e/2PACX-1vQT2Ql5NWy5w69U3sSQcSrY9VgOEc32M_neFRSd94mSiqJheBuMjfVNXm_K-7SbK3NQUf_AJYlgXrWw/embed?start=false&loop=true&delayms=3000" frameborder="0" width="1440" height="839" allowfullscreen="true" mozallowfullscreen="true" webkitallowfullscreen="true"></iframe>
</html>
"""
TEMPLATES = [{'BACKEND': 'django.template.backends.django.DjangoTemplates'}]
settings.configure(TEMPLATES=TEMPLATES)
django.setup()
simplified_recipe_template = Template(simplified_recipe_response_template_html)
home_page_recipe_template = Template(home_page_html)
def render_given_json(json_response):
    """Render the simplified recipe page from a parsed recipe dict."""
    joined_sections = {
        key: '\n '.join(json_response[key])
        for key in ("ingredients", "instructions")
    }
    return simplified_recipe_template.render(Context(joined_sections))
def render_home_page():
    """Render the static home page (no template variables needed)."""
    empty_context = Context({})
    return home_page_recipe_template.render(empty_context)
| 31.438596 | 316 | 0.698661 |
814673982a5453b6e620a1da48543240246d8d99 | 11,427 | py | Python | rest_api/apps/records/models.py | chouchouyu/django-postgres-stack | a3b0ff61d5397b97c3e1537a11b628fcf9b0848c | [
"BSD-Source-Code"
] | 2 | 2020-02-13T01:21:15.000Z | 2020-06-16T07:15:46.000Z | rest_api/apps/records/models.py | chouchouyu/django-postgres-stack | a3b0ff61d5397b97c3e1537a11b628fcf9b0848c | [
"BSD-Source-Code"
] | 6 | 2020-02-13T00:40:07.000Z | 2020-02-22T03:22:31.000Z | rest_api/apps/records/models.py | chouchouyu/django-postgres-stack | a3b0ff61d5397b97c3e1537a11b628fcf9b0848c | [
"BSD-Source-Code"
] | null | null | null | from datetime import datetime
from django.utils import timezone
from django.db import models
# Create your models here.
from machines.models import Machine
class TestBranch(models.Model):
    """A test branch: an ordered, optionally hidden branch that may or may
    not accept new test reports."""
    branch_name = models.CharField(max_length=128, unique=True, verbose_name="branch name", help_text="branch name")
    # Position of this branch among all branches (lower sorts first —
    # assumed from the help text; confirm against query ordering).
    branch_order = models.IntegerField(default=5, verbose_name="branch order", help_text="order in all the branch")
    is_show = models.BooleanField(verbose_name="branch is shown", default=True, help_text="branch is show")
    is_accept = models.BooleanField(verbose_name="branch accepts new reports", default=True,
                                    help_text="branch accepts new reports")
    add_time = models.DateTimeField(default=timezone.now, verbose_name="branch added time",
                                    help_text="branch added time")

    class Meta:
        verbose_name = "test branch"
        verbose_name_plural = "test branch"

    def __str__(self):
        return self.branch_name
class TestCategory(models.Model):
    """A test category, identified by a unique serial number (cate_sn) and
    ordered within its level."""
    cate_name = models.CharField(max_length=64, verbose_name="cate name", help_text="cate name")
    cate_sn = models.CharField(max_length=32, unique=True, verbose_name="cate sn", help_text="cate sn")
    cate_order = models.IntegerField(verbose_name="cate order", help_text="order in the current level")
    add_time = models.DateTimeField(default=timezone.now, verbose_name="add time", help_text="category added time")

    class Meta:
        verbose_name = "tests category"
        verbose_name_plural = "tests category"

    def __str__(self):
        return self.cate_name
class PGInfo(models.Model):
    """
    Snapshot of the PostgreSQL configuration (GUC settings) that a test
    ran under; referenced by TestRecord.pg_info.
    """
    checkpoint_timeout = models.CharField(max_length=32, verbose_name="checkpoint_timeout",
                                          help_text="checkpoint_timeout")
    log_temp_files = models.IntegerField(verbose_name="log_temp_files", help_text="log_temp_files")
    work_mem = models.CharField(max_length=32, verbose_name="work_mem", help_text="work_mem")
    # BUG FIX: verbose_name/help_text were copy-pasted from checkpoint_timeout;
    # they now correctly label this field (a schema migration is needed only
    # for the metadata change, not for the column itself).
    log_line_prefix = models.CharField(max_length=64, verbose_name="log_line_prefix", help_text="log_line_prefix")
    shared_buffers = models.CharField(max_length=32, verbose_name="shared_buffers", help_text="shared_buffers")
    log_autovacuum_min_duration = models.IntegerField(verbose_name="log_autovacuum_min_duration",
                                                      help_text="log_autovacuum_min_duration")
    checkpoint_completion_target = models.DecimalField(max_digits=8, decimal_places=4,
                                                       verbose_name="checkpoint_completion_target",
                                                       help_text="checkpoint_completion_target")
    maintenance_work_mem = models.CharField(max_length=32, verbose_name="maintenance_work_mem",
                                            help_text="maintenance_work_mem")
    # 1 = on, 2 = off (stored as integers, not booleans).
    SWITCH_CHOICE = (
        (1, 'on'),
        (2, 'off')
    )
    log_checkpoints = models.IntegerField(choices=SWITCH_CHOICE, verbose_name="log_checkpoints",
                                          help_text="log_checkpoints")
    max_wal_size = models.CharField(max_length=32, verbose_name="max_wal_size", help_text="max_wal_size")
    min_wal_size = models.CharField(max_length=32, verbose_name="min_wal_size", help_text="min_wal_size")
    # pg_branch = models.ForeignKey(TestBranch, verbose_name="pg branch", help_text="pg branch")
    class Meta:
        verbose_name = "pg info"
        verbose_name_plural = "pg info"
class MetaInfo(models.Model):
    """
    Free-form metadata captured with a test run (timestamp, ``uname``
    output, benchmark name, run name); referenced by TestRecord.meta_info.
    """
    date = models.DateTimeField(verbose_name="date", help_text="date")
    uname = models.TextField(verbose_name="uname", help_text="uname")
    benchmark = models.TextField(verbose_name="benchmark", help_text="benchmark")
    name = models.TextField(verbose_name="name", help_text="name")
    class Meta:
        verbose_name = "meta info"
        verbose_name_plural = "meta info"
class LinuxInfo(models.Model):
    """
    Raw Linux system information captured for a test run.
    Note: the defaults are the literal string "null", not SQL NULL.
    """
    mounts = models.TextField(verbose_name="mounts", help_text="mounts", default="null")
    cpuinfo = models.TextField(verbose_name="cpuinfo", help_text="cpuinfo", default="null")
    sysctl = models.TextField(verbose_name="sysctl", help_text="sysctl", default="null")
    meminfo = models.TextField(verbose_name="meminfo", help_text="meminfo", default="null")
    class Meta:
        verbose_name = "linux info"
        verbose_name_plural = "linux info"
    def __str__(self):
        # NOTE(review): stringifies as the full mounts dump, which can be
        # very long — presumably intentional; confirm before changing.
        return self.mounts
class TestRecord(models.Model):
    """
    One benchmark submission: ties a branch, the submitting machine and
    the captured PG/meta/Linux info together. Individual data points hang
    off it via TestDataSet.
    """
    # NOTE: ForeignKey without on_delete implies Django < 2.0 (CASCADE default).
    branch = models.ForeignKey(TestBranch, verbose_name="pg branch", help_text="pg branch")
    test_machine = models.ForeignKey(Machine, verbose_name="test owner",
                                     help_text="person who add this test item")
    pg_info = models.ForeignKey(PGInfo, verbose_name="pg info", help_text="pg info")
    meta_info = models.ForeignKey(MetaInfo, verbose_name="meta info", help_text="meta info")
    linux_info = models.ForeignKey(LinuxInfo, verbose_name="linux info", help_text="linux info")
    test_desc = models.TextField(verbose_name="test desc", help_text="test desc")
    # test_branch_id = models.ForeignKey(TestBranch, verbose_name="test category", help_text="test category")
    meta_time = models.DateTimeField(default=timezone.now, verbose_name="meta time")
    # Unique identifiers for deduplicating submissions.
    hash = models.CharField(unique=True, default='', max_length=128, verbose_name="record hash",
                            help_text="record hash")
    uuid = models.CharField(unique=True, default='', max_length=64, verbose_name="record uuid", help_text="record uuid")
    commit = models.CharField(max_length=100, verbose_name="record commit", help_text="record commit")
    add_time = models.DateTimeField(default=timezone.now, verbose_name="test added time")
    class Meta:
        verbose_name = "tests"
        verbose_name_plural = "tests"
class TestDataSet(models.Model):
    """
    One aggregated benchmark data point for a (record, category, scale,
    clients) combination. ``status``/``percentage``/``prev`` are filled in
    by the calc_status pre_save handler defined later in this module.
    """
    test_record = models.ForeignKey(TestRecord, verbose_name="test record id", help_text="test record id")
    test_cate = models.ForeignKey(TestCategory, verbose_name="test cate id", help_text="test cate id")
    clients = models.IntegerField(verbose_name="clients", help_text="clients of the test dataset")
    scale = models.IntegerField(verbose_name="scale", help_text="scale of the test dataset")
    std = models.DecimalField(max_digits=18, decimal_places=8, verbose_name="std", help_text="std of the test dataset")
    metric = models.DecimalField(max_digits=18, decimal_places=8, verbose_name="metric",
                                 help_text="metric of the test dataset")
    median = models.DecimalField(max_digits=18, decimal_places=8, verbose_name="median",
                                 help_text="median of the test dataset")
    STATUS_CHOICE = (
        (-1, 'none'),
        (1, 'improved'),
        (2, 'quo'),
        (3, 'regressive'),
    )
    status = models.IntegerField(choices=STATUS_CHOICE, verbose_name="status", help_text="status of this dataset")
    percentage = models.DecimalField(max_digits=8, decimal_places=4, verbose_name="percentage",
                                     help_text="percentage compared to previous dataset")
    # Self-reference linking each dataset to its predecessor for the same
    # machine/branch/category/scale/clients combination.
    prev = models.ForeignKey('self', blank=True, null=True, related_name='prev1',
                             verbose_name="previous test dataset id", help_text="previous test dataset id")
    # prev = models.ForeignKey('self',verbose_name="previous test dataset id", help_text="previous test dataset id")
    add_time = models.DateTimeField(default=timezone.now, verbose_name="test dataset time")
    class Meta:
        verbose_name = "test dataset"
        verbose_name_plural = "test dataset"
from django.db.models.signals import pre_save
from django.dispatch import receiver
@receiver(pre_save, sender=TestDataSet)
def calc_status(sender, instance, **kwargs):
    """
    pre_save hook: compare this dataset's metric against the matching
    dataset of the previous record (same machine/branch, earlier add_time,
    same scale/clients/category) and fill in percentage, status and prev_id.

    Status thresholds on the relative change:
        >= +5%  -> 1 (improved)
        <= -5%  -> 3 (regressive)
        else    -> 2 (status quo)
    """
    print('dataset: ' + str(instance.id))
    print('previous: ' + str(instance.prev) + ' will be saved')
    machine_id = instance.test_record.test_machine_id
    add_time = instance.test_record.add_time
    branch = instance.test_record.branch
    # Most recent earlier record for the same machine and branch.
    prev_record = TestRecord.objects.order_by('-add_time').filter(
        test_machine_id=machine_id, branch=branch,
        add_time__lt=add_time).first()
    if prev_record is None:
        print("prev record not found")
        return
    prev_dataset = TestDataSet.objects.filter(
        test_record_id=prev_record.id, scale=instance.scale,
        clients=instance.clients, test_cate_id=instance.test_cate_id).first()
    if prev_dataset is None:
        return
    if not prev_dataset.metric:
        # Guard against ZeroDivisionError when the previous metric is 0.
        return
    percentage = (instance.metric - prev_dataset.metric) / prev_dataset.metric
    if percentage >= 0.05:
        status = 1  # improved
    elif percentage <= -0.05:
        status = 3  # regressive
    else:
        status = 2  # status quo
    instance.percentage = percentage
    instance.status = status
    instance.prev_id = prev_dataset.id
class TestResult(models.Model):
    """
    A single raw benchmark run belonging to a TestDataSet (the dataset
    aggregates several runs into metric/median/std).
    """
    test_dataset = models.ForeignKey(TestDataSet, verbose_name="test dataset id", help_text="test dataset id")
    latency = models.IntegerField(verbose_name="latency", help_text="latency of the test result")
    scale = models.IntegerField(verbose_name="scale", help_text="scale of the test result")
    # Start/end are stored as high-precision decimals (epoch-like values —
    # presumably seconds; confirm against the submitting client).
    end = models.DecimalField(max_digits=32, decimal_places=12, verbose_name="end",
                              help_text="end time of the test result")
    clients = models.IntegerField(verbose_name="clients", help_text="clients of the test result")
    start = models.DecimalField(max_digits=32, decimal_places=12, verbose_name="start",
                                help_text="start time of the test result")
    tps = models.DecimalField(default=0, max_digits=18, decimal_places=6, verbose_name="tps",
                              help_text="tps of the test result")
    run = models.IntegerField(verbose_name="run", help_text="run number")
    threads = models.IntegerField(verbose_name="threads", help_text="threads of the test result")
    MODE_CHOICE = (
        (1, 'simple'),
        (2, 'other'),
        (-1, 'test')
    )
    mode = models.IntegerField(choices=MODE_CHOICE, verbose_name="mode", help_text="test mode")
    add_time = models.DateTimeField(default=timezone.now, verbose_name="test result added time")
    class Meta:
        verbose_name = "test result"
        verbose_name_plural = "test result"
class TestScript(models.Model):
    """
    Custom benchmark script file attached to a TestResult
    (defaults to the built-in "TPC-B" workload name).
    """
    test_result = models.ForeignKey(TestResult, verbose_name="test result id", help_text="test result id")
    # customScripts = models.CharField(max_length=10000, verbose_name="custom scripts",
    #                                  help_text="custom scripts", default="TPC-B")
    scriptName = models.CharField(max_length=100, verbose_name="custom scripts name",
                                  help_text="custom scripts name", default="TPC-B")
    script = models.FileField(verbose_name="custom scripts file", upload_to="scripts/")
    add_time = models.DateTimeField(default=timezone.now, verbose_name="custom scripts list added time")
    class Meta:
        verbose_name = "custom script list"
        verbose_name_plural = "custom script list"
| 46.640816 | 120 | 0.675505 |
2a8756a6b9b8c9c6d24e0bd5f6e1bc854fc838af | 4,510 | py | Python | auto_rxn/inficon_gc.py | ukurumbail/auto_rxn | c0df9138b73c6c4a53aeca83833e9fee979e8739 | [
"MIT"
] | null | null | null | auto_rxn/inficon_gc.py | ukurumbail/auto_rxn | c0df9138b73c6c4a53aeca83833e9fee979e8739 | [
"MIT"
] | null | null | null | auto_rxn/inficon_gc.py | ukurumbail/auto_rxn | c0df9138b73c6c4a53aeca83833e9fee979e8739 | [
"MIT"
] | null | null | null | import json
import time
import requests
import random
class Device():
	"""Inficon GC driven over its HTTP REST interface.

	``params`` maps subdevice names to their settings from the input file;
	``config`` supplies the instrument configuration ("IP Address",
	"Default Method", per-subdevice "Subdevices" entries). With ``mock=True``
	no network traffic is generated and Mock_Subdevice stand-ins are used.
	"""
	def __init__(self, params, config, mock=False):
		self.mock = mock
		self.config = config
		self.params = params
		self.subdevices = {}
		if self.mock:
			for subdev_name in params.keys():  # one mock subdevice per input-file entry
				self.subdevices[subdev_name] = Mock_Subdevice(subdev_name, params[subdev_name], config["Subdevices"][subdev_name])
			self.prev_run_id = self.get_last_run_id()
		else:
			self.ip = config["IP Address"]
			self.default_method = config["Default Method"]
			self.load_method_status_code = self.load_method(self.default_method)
			# BUG FIX: load_method() returns True/False, never an HTTP status
			# code, so the old ``== 500`` comparison could never detect a
			# failed load. Treat a falsy return value as failure instead.
			if not self.load_method_status_code:
				raise ValueError("Default method could not be loaded. {}".format(self.default_method))
			else:
				print("Loaded method: {}".format(self.default_method))
			self.prev_run_id = self.get_last_run_id()
			if self.prev_run_id == -999:
				raise ValueError("Run ID value = -999. Failed run id request")
			for subdev_name in params.keys():  # one real subdevice per input-file entry
				self.subdevices[subdev_name] = Subdevice(subdev_name, params[subdev_name], config["Subdevices"][subdev_name])
	def get_last_run_id(self):
		"""Return the identifier of the most recent run, or -999 on failure."""
		if self.mock:
			return 'MOCK RUN {}'.format(random.randint(0, 10000000))
		time.sleep(2)  # give the instrument time to register the latest run
		get_request = requests.get('http://' + self.ip + '/v1/lastRun').json()
		try:
			return get_request['dataLocation'].split('/')[-1]
		except Exception:  # missing/malformed response -> sentinel value
			return -999
	def ready(self):
		"""True when the GC reports the 'public:ready' state (mock is always ready)."""
		if self.mock:
			return True
		return 'public:ready' in self.get_state()
	def get_state(self):
		"""Return the system-manager public configuration as parsed JSON."""
		get_request = requests.get('http://' + self.ip + '/v1/scm/sessions/system-manager/publicConfiguration').json()
		return get_request
	def load_method(self, method_name):
		"""Load a user method on the GC; True on HTTP 200, False otherwise."""
		get_request = requests.get('http://' + self.ip + '/v1/scm/sessions/system-manager!cmd.loadMethod?methodLocation=/methods/userMethods/' + method_name)
		return get_request.status_code == 200
	def inject(self):
		"""Trigger a sample injection; bumps the sample counter on success."""
		if self.mock:
			self.subdevices["Number of Samples"].num_injections += 1
			return True
		get_request = requests.get('http://' + self.ip + '/v1/scm/sessions/system-manager!cmd.run')
		if get_request.status_code == 200:
			self.subdevices["Number of Samples"].num_injections += 1
			return True
		# Status code of 500 is returned if the injection was unsuccessful.
		return False
	def get_pv(self, subdev_name):
		return self.subdevices[subdev_name].get_pv()
	def get_sp(self, subdev_name):
		return self.subdevices[subdev_name].get_sp()
	def set_sp(self, subdev_name, sp_value):
		return self.subdevices[subdev_name].set_sp(sp_value)
	def is_emergency(self, subdev_name, pv_read_time, sp_set_time, current_sp, current_pv):
		# Extra arguments are accepted for interface parity with other
		# devices; the GC subdevices ignore them.
		return self.subdevices[subdev_name].is_emergency()
	def get_subdevice_names(self):
		return self.subdevices.keys()
	def get_emergency_sp(self, subdev_name):
		return self.subdevices[subdev_name].emergency_setting
	def all_samples_collected(self):
		"""True once the injection count reaches the requested sample count."""
		counter = self.subdevices["Number of Samples"]
		return counter.num_injections == counter.current_sp
class Mock_Subdevice():
	"""Stand-in GC subdevice used when the rig runs in mock mode.

	Tracks a setpoint; the special "Number of Samples" channel additionally
	tracks an injection counter that Device.inject() increments.
	"""
	def __init__(self, name, params, config):
		self.name = name
		self.emergency_setting = params["Emergency Setpoint"]
		self.current_sp = None
		# Only the sample-count channel behaves as an injection counter.
		self.is_injection_counter = (name == "Number of Samples")
		if self.is_injection_counter:
			self.num_injections = None
	def is_emergency(self):
		# Mock subdevices never flag an emergency; the list mirrors the
		# common device API: [flag, channel name, setpoint, process value].
		return [False, self.name, self.current_sp, self.get_pv()]
	def get_pv(self):
		# Process value: the injection count for the counter channel,
		# otherwise simply echo the setpoint back.
		return self.num_injections if self.is_injection_counter else self.current_sp
	def set_sp(self, sp_value):
		self.current_sp = sp_value
		if self.is_injection_counter:
			self.num_injections = 0  # a new sample target restarts the tally
		return True
	def get_sp(self):
		return self.current_sp
class Subdevice():
	"""A single controllable channel of the real GC.

	Deliberately identical in behaviour to Mock_Subdevice: it only
	book-keeps the requested setpoint (plus the injection tally for the
	"Number of Samples" channel); the instrument owns the hardware state.
	"""
	def __init__(self, name, params, config):
		self.name = name
		self.emergency_setting = params["Emergency Setpoint"]
		self.current_sp = None
		if name != "Number of Samples":
			self.is_injection_counter = False
		else:
			self.is_injection_counter = True
			self.num_injections = None
	def is_emergency(self):
		# Never reports an emergency; shape matches the common device API:
		# [flag, channel name, setpoint, process value].
		status = [False, self.name, self.current_sp]
		status.append(self.get_pv())
		return status
	def get_pv(self):
		if not self.is_injection_counter:
			return self.current_sp
		return self.num_injections
	def set_sp(self, sp_value):
		self.current_sp = sp_value
		if self.is_injection_counter:
			# Starting a new sample target resets the injection tally.
			self.num_injections = 0
		return True
def get_sp(self):
return self.current_sp | 28.36478 | 149 | 0.734368 |
638b0db7690b6282ce794ff73cab87d74dc3ed6b | 7,906 | py | Python | bio/Gene.py | Joaxin/GitComments | 7aa862f0ee892cbbc94b037395a6273b2654cbea | [
"MIT"
] | null | null | null | bio/Gene.py | Joaxin/GitComments | 7aa862f0ee892cbbc94b037395a6273b2654cbea | [
"MIT"
] | 88 | 2019-10-31T12:30:02.000Z | 2020-08-14T12:17:12.000Z | bio/Gene.py | Joaxin/GitComments | 7aa862f0ee892cbbc94b037395a6273b2654cbea | [
"MIT"
] | null | null | null | # 1. 加上了solid_capstyle = 'butt'这个参数,去掉了直线的‘帽子’
#
# 2.设置antialiased=False, 取消直线边缘模糊化处理
#
# 3.每段序列左右两端加了0.5
def find_target_data(gene_name, gtf_file_path, chunk=3072 * 2048):
    """Scan a GTF file and collect every row annotated with ``gene_name``.

    The file is read in ``chunk``-sized pieces. The trailing partial line of
    each chunk is always carried into the next read, so a record can no
    longer be lost on a chunk boundary (the original discarded the carry
    whenever the marker was absent from the current chunk).

    Returns a dict mapping GTF column name -> list of string values.
    Raises NameError when the gene does not occur in the file.
    """
    marker = 'gene_name "' + gene_name.upper() + '"'
    # 'tmp' appears twice on purpose: columns 6 and 8 are score/frame
    # placeholders and share one bucket.
    header = ['chr', 'db', 'record', 'start', 'end', 'tmp', 'strand', 'tmp', 'info']
    target_data = {}
    found_block = False
    carry = ''
    print('Please wait for 10 seconds: ')
    with open(gtf_file_path) as f:
        while True:
            data = f.read(chunk)
            buffer = carry + data
            # Crude progress indicator: echo the chromosome of the first full line.
            print(buffer[buffer.find('\n') + 1:buffer.find('\n') + 3], end='\r')
            lines = buffer.split('\n')
            carry = lines[-1]  # possibly a partial line; prepend to next chunk
            if marker in buffer:
                for line in lines[:-1]:
                    if marker in line:
                        found_block = True
                        for i, value in enumerate(line.split('\t')):
                            try:
                                target_data[header[i]].append(value)
                            except KeyError:
                                target_data[header[i]] = [value]
                    elif found_block:
                        # GTF rows of one gene are contiguous; stop at the
                        # first non-matching line after the block.
                        break
            if len(data) < chunk:  # end of file
                break
    if not target_data:
        print('\n\n There is some wrong with your gene name!\n')
        raise NameError('your gene_name does not exist')
    print("\nHave got the gene information!")
    return target_data
def draw_gene_structure(gene_name, target_data, png_path='', line_width=5):
    """Render a gene-structure plot from find_target_data() output.

    Draws an arrow for the gene (direction = strand), one thin baseline per
    transcript, and coloured bars for CDS/exon/UTR features; saves the
    figure as a PNG (default ``<GENE>.png``) and returns its path.
    NOTE(review): assumes the 'gene' row appears before transcript/feature
    rows so that ``ax`` is defined when they are drawn — confirm for the
    GTF at hand.
    """
    gene_symbol = gene_name.upper()
    if not png_path:
        png_path = gene_symbol + '.png'
    # Colour lookup for each GTF record type.
    tmp_colors = ['lime', 'red', 'blue', 'yellow', 'yellow', 'w']
    names_tmp_colors = ['gene', 'CDS', 'exon', 'three_prime_utr', 'five_prime_utr', 'stop_codon']
    colors_legend_name = ['gene', 'CDS_exon', 'non_CDS_exon', 'UTR_exon']
    color_dict = dict(zip(names_tmp_colors, tmp_colors))
    # Extract the distinct transcript names from the 'info' column.
    import re
    transcript_list = []
    for i in target_data['info']:
        try:
            transcript_name = re.findall('transcript_name "(.*?)"', i)[0]
            if transcript_name not in transcript_list:
                transcript_list.append(transcript_name)
        except:
            pass
    # Count the number of transcript rows (one baseline each).
    transcript_num = 0
    for i in target_data['record']:
        if i == 'transcript':
            transcript_num += 1
    import numpy as np
    import matplotlib.patches as mpatches
    import matplotlib.pyplot as plt
    import matplotlib.lines as lines
    fig = plt.figure(1)
    # Figure background: fully opaque (alpha default is 1), white.
    fig.patch.set_alpha(1)
    fig.patch.set_facecolor('w')
    num = 0  # index of the transcript currently being drawn
    warnings = []
    for i in range(len(target_data['record'])):
        if target_data['record'][i] == 'gene':
            # Choose arrow direction from the strand.
            if target_data['strand'][i] == '+':
                arr = '->'
            else:
                arr = '<-'
            # Main plot area: add_axes places it at 20% from the left,
            # 20% from the bottom, spanning 50% width and 60% height.
            ax = fig.add_axes([0.2, 0.2, 0.5, 0.6])
            # Arrow spanning the whole gene, indicating its direction.
            arrow = mpatches.FancyArrowPatch(
                (int(target_data['start'][i]), 0.1),
                (int(target_data['end'][i]), 0.1),
                arrowstyle=arr,
                mutation_scale=25, lw=1, color='lime', antialiased=True)  # antialiased defaults to True (smooth edges)
            # Draw the arrow.
            ax.add_patch(arrow)
            # Axis limits and tick labels.
            ax.set_xlim([int(target_data['start'][i]), int(target_data['end'][i])])
            ax.set_ylim([-0.5, transcript_num + 1])
            ax.set_xticks(np.linspace(int(target_data['start'][i]), int(target_data['end'][i]), 5))
            ax.set_yticks([0.1] + list(range(1, transcript_num + 1)))
            ax.set_yticklabels(['gene'] + transcript_list)
            ax.set_xticklabels(
                [int(i) for i in np.linspace(int(target_data['start'][i]), int(target_data['end'][i]), 5)])
            # Hide all spines except the bottom one.
            ax.spines['top'].set_visible(False)
            ax.spines['left'].set_visible(False)
            ax.spines['right'].set_visible(False)
            ax.get_xaxis().tick_bottom()
            ax.get_yaxis().tick_left()
            ax.get_xaxis().set_tick_params(direction='out')
            ax.tick_params(axis=u'y', which=u'both', length=0)
            # Shrink tick-label fonts.
            for tick in ax.xaxis.get_major_ticks():
                tick.label.set_fontsize(6)
            for tick in ax.yaxis.get_major_ticks():
                tick.label.set_fontsize(6)
        elif target_data['record'][i] == 'transcript':
            num += 1  # move on to the next transcript row and draw its baseline
            line1 = [(int(target_data['start'][i]), num), (int(target_data['end'][i]), num)]
            (line1_xs, line1_ys) = zip(*line1)
            ax.add_line(lines.Line2D(line1_xs, line1_ys, linewidth=0.2,
                                     solid_capstyle='butt', solid_joinstyle='miter',
                                     antialiased=False, color='black'))
        elif target_data['record'][i] in color_dict.keys():
            # Draw a feature bar (CDS/exon/UTR/...), widened by 0.5 on each end.
            line2 = [(int(target_data['start'][i]) - 0.5, num), (int(target_data['end'][i]) + 0.5, num)]
            (line2_xs, line2_ys) = zip(*line2)
            ax.add_line(lines.Line2D(line2_xs, line2_ys,
                                     solid_capstyle='butt', solid_joinstyle='miter',
                                     linewidth=int(line_width), alpha=1,
                                     color=color_dict[target_data['record'][i]],
                                     antialiased=False))
        else:
            warnings.append(target_data['record'][i])
    if warnings:
        print('\nTips: ')
        print(' and '.join([i for i in set(warnings)]) + ' is not in our consideration!!!!!!')
    # Legend area: 76% from the left, 20% from the bottom, 20% wide, 60% tall.
    ax_legend = fig.add_axes([0.76, 0.2, 0.2, 0.6])
    # ax_legend.set_xticks([])
    # ax_legend.set_yticks([])
    for i in range(len(colors_legend_name)):
        line3 = [(0, (9 - i) * 0.1), (0.1, (9 - i) * 0.1)]
        (line3_xs, line3_ys) = zip(*line3)
        ax_legend.add_line(lines.Line2D(line3_xs, line3_ys, linewidth=5,
                                        color=color_dict[names_tmp_colors[i]],
                                        solid_capstyle='butt', solid_joinstyle='miter',
                                        antialiased=False))
        ax_legend.text(0.2, (8.9 - i) * 0.1, colors_legend_name[i], fontsize=6)
    ax_legend.set_axis_off()
    # Title: chromosome + gene symbol.
    fig.suptitle('\n\n\nchr' + str(target_data['chr'][0]) + ': ' + gene_symbol, fontsize=10)
    # Save and show the figure.
    fig.savefig(png_path, dpi=150)
    plt.show()
    print('\nThe picture file is completed: ' + png_path)
    print("All transcripts of " + gene_name + ':\n' + " ".join(sorted(transcript_list)))
    return (png_path)
# GTF download location:
# [url]ftp://ftp.ensembl.org/pub/release-87/gtf/homo_sapiens/Homo_sapiens.GRCh38.87.chr.gtf.gz[/url]
gtf_file_path = 'Homo_sapiens.GRCh38.87.chr.gtf'
gene_name = input('Please enter the gene_name: ').strip()
line_width = input('Please enter the line_width: ').strip()
if not line_width:
    line_width = 5  # default stroke width for the feature bars
# BUG FIX: the original overwrote a user-supplied gene name with 'TP53'
# (the branches were inverted); the fallback now applies only when the
# prompt was left empty.
if not gene_name:
    gene_name = 'TP53'
# gene_name = 'hoxc8'
# gene_name = 'AnXA1'
# Time the whole lookup-and-plot pipeline.
import time
start = time.time()
# Pull every row of the target gene out of the GTF file.
target_data = find_target_data(gene_name=gene_name, gtf_file_path=gtf_file_path)
# Render the gene-structure plot.
draw_gene_structure(gene_name, target_data, line_width=line_width)
print('\n used %.2f s to get the target information and get its structure!' % (time.time() - start)) | 42.278075 | 108 | 0.535796 |
16a705338b67f15240f8a4c652484930b9855b58 | 3,733 | py | Python | agentpycell/utils.py | kjph/py-agentmdl | 3ea4dc483ccc689f172de408de9de55f723349a3 | [
"MIT"
] | null | null | null | agentpycell/utils.py | kjph/py-agentmdl | 3ea4dc483ccc689f172de408de9de55f723349a3 | [
"MIT"
] | null | null | null | agentpycell/utils.py | kjph/py-agentmdl | 3ea4dc483ccc689f172de408de9de55f723349a3 | [
"MIT"
] | null | null | null | """ Data model for this package
"""
class list2(object):
    """A fixed-size 2-D grid with a configurable sweep (iteration) order.

    The default linear order ('dr') walks down each column and then moves
    right to the next one, matching MATLAB-style linear indexing.
    """
    # The eight supported sweep directions. The FIRST letter names the
    # fast-moving (inner) axis: d/u = down/up over rows, r/l = right/left
    # over columns.
    _DIRECTIONS = ('dr', 'rd', 'dl', 'ld', 'ul', 'lu', 'ur', 'ru')

    def __init__(self, nrow, ncol, init=None):
        self._dim = (nrow, ncol)
        self._array = [[init for c in range(ncol)] for r in range(nrow)]
        self.set_sweep('dr')

    def __iter__(self):
        """Yield elements top-down then left-right (column-major), same as MATLAB."""
        nrow, ncol = self._dim
        for c in range(ncol):
            for r in range(nrow):
                yield self._array[r][c]

    def __getitem__(self, key):
        # Returns a row list, so grid[r][c] addresses a cell.
        return self._array[key]

    def __len__(self):
        return self._dim[0] * self._dim[1]

    def __str__(self):
        nrow, ncol = self._dim
        # Row-major rendering: one text line per grid row, values
        # space-prefixed exactly as the original concatenation produced.
        return ''.join(
            ''.join(' {}'.format(self._array[r][c]) for c in range(ncol)) + '\n'
            for r in range(nrow))

    @property
    def dim(self):
        """(nrow, ncol) tuple."""
        return self._dim

    def items(self):
        """Yield (value, (row, col)) pairs in the default column-major order."""
        nrow, ncol = self._dim
        for c in range(ncol):
            for r in range(nrow):
                yield self._array[r][c], (r, c)

    def set_sweep(self, direction):
        """Precompute the coordinate order used by sweep().

        Directions (see _DIRECTIONS): the first letter is the inner (fast)
        axis, e.g. 'dr' starts top-left and moves down, then right; 'lu'
        starts bottom-right and moves left, then up.

        The eight hand-written branches of the original are collapsed into
        a data-driven form; an unknown direction now raises ValueError
        instead of silently leaving an unusable sweep of None entries.
        """
        if direction not in self._DIRECTIONS:
            raise ValueError('unknown sweep direction: {!r}'.format(direction))
        nrow, ncol = self._dim
        rows = range(nrow) if 'd' in direction else range(nrow - 1, -1, -1)
        cols = range(ncol) if 'r' in direction else range(ncol - 1, -1, -1)
        if direction[0] in 'du':
            # Vertical axis moves fastest (inner loop over rows).
            self._sweep = [(r, c) for c in cols for r in rows]
        else:
            # Horizontal axis moves fastest (inner loop over columns).
            self._sweep = [(r, c) for r in rows for c in cols]

    def sweep(self):
        """Yield (value, (row, col)) pairs in the order chosen by set_sweep()."""
        for r, c in self._sweep:
            yield self._array[r][c], (r, c)

    def subset(self, start, nrow, ncol):
        """Return a new list2 copying an nrow x ncol window anchored at start=(row, col)."""
        subset = list2(nrow, ncol)
        sr, sc = start
        for c in range(ncol):
            for r in range(nrow):
                subset[r][c] = self._array[r + sr][c + sc]
        return subset
return subset | 28.937984 | 72 | 0.461559 |
644e0732063e472cd99178daf2342989799abf40 | 634 | py | Python | election/migrations/0002_foreign_relationships.py | everyvoter/everyvoter | 65d9b8bdf9b5c64057135c279f6e03b6c207e0fa | [
"MIT"
] | 5 | 2019-07-01T17:50:44.000Z | 2022-02-20T02:44:42.000Z | election/migrations/0002_foreign_relationships.py | everyvoter/everyvoter | 65d9b8bdf9b5c64057135c279f6e03b6c207e0fa | [
"MIT"
] | 3 | 2020-06-05T21:44:33.000Z | 2021-06-10T21:39:26.000Z | election/migrations/0002_foreign_relationships.py | everyvoter/everyvoter | 65d9b8bdf9b5c64057135c279f6e03b6c207e0fa | [
"MIT"
] | 1 | 2021-12-09T06:32:40.000Z | 2021-12-09T06:32:40.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-30 16:44
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (1.11.12); do not edit by hand — migration
    # history integrity depends on this file staying as generated.
    dependencies = [
        ('mailer', '0001_initial'),
        ('election', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='organizationelection',
            name='email_wrapper',
            # default=1 back-fills existing rows with EmailWrapper pk 1;
            # preserve_default=False drops that default after the migration.
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='mailer.EmailWrapper'),
            preserve_default=False,
        ),
    ]
| 26.416667 | 118 | 0.646688 |
cb7b7f58296c338d2049448af253802fbb7eba80 | 8,716 | py | Python | jishaku/features/python.py | danrfq/jishaku | d1d10e80a729b169c3c86eecbb0403ea30d4f414 | [
"MIT"
] | 1 | 2022-01-07T10:43:20.000Z | 2022-01-07T10:43:20.000Z | jishaku/features/python.py | danrfq/jishaku | d1d10e80a729b169c3c86eecbb0403ea30d4f414 | [
"MIT"
] | null | null | null | jishaku/features/python.py | danrfq/jishaku | d1d10e80a729b169c3c86eecbb0403ea30d4f414 | [
"MIT"
] | 1 | 2022-03-15T02:21:39.000Z | 2022-03-15T02:21:39.000Z | # -*- coding: utf-8 -*-
"""
jishaku.features.python
~~~~~~~~~~~~~~~~~~~~~~~~
The jishaku Python evaluation/execution commands.
:copyright: (c) 2021 Devon (Gorialis) R
:license: MIT, see LICENSE for more details.
"""
import io, datetime
import discord
from discord.ext import commands
from discord.embeds import Embed
from jishaku.codeblocks import codeblock_converter
from jishaku.exception_handling import ReplResponseReactor
from jishaku.features.baseclass import Feature
from jishaku.flags import Flags
from jishaku.functools import AsyncSender
from jishaku.paginators import PaginatorInterface, WrappedPaginator, use_file_check
from jishaku.repl import AsyncCodeExecutor, Scope, all_inspections, disassemble, get_var_dict_from_ctx
class PythonFeature(Feature):
    """
    Feature containing the Python-related commands
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._scope = Scope()
        self.retain = Flags.RETAIN
        self.last_result = None

    @property
    def scope(self):
        """
        Gets a scope for use in REPL.
        If retention is on, this is the internal stored scope,
        otherwise it is always a new Scope.
        """
        if self.retain:
            return self._scope
        return Scope()

    @Feature.Command(parent="jsk", name="retain")
    async def jsk_retain(self, ctx: commands.Context, *, toggle: bool = None):
        """
        Turn variable retention for REPL on or off.
        Provide no argument for current status.
        """
        if toggle is None:
            if self.retain:
                return await ctx.send("Variable retention is set to ON.")
            return await ctx.send("Variable retention is set to OFF.")
        if toggle:
            if self.retain:
                return await ctx.send("Variable retention is already set to ON.")
            self.retain = True
            self._scope = Scope()  # fresh scope so stale variables don't leak in
            return await ctx.send("Variable retention is ON. Future REPL sessions will retain their scope.")
        if not self.retain:
            return await ctx.send("Variable retention is already set to OFF.")
        self.retain = False
        return await ctx.send("Variable retention is OFF. Future REPL sessions will dispose their scope when done.")

    async def jsk_python_result_handling(self, ctx: commands.Context, result):  # pylint: disable=too-many-return-statements
        """
        Determines what is done with a result when it comes out of jsk py.
        This allows you to override how this is done without having to rewrite the command itself.
        What you return is what gets stored in the temporary _ variable.
        """
        if isinstance(result, discord.Message):
            return await ctx.send(f"<Message <{result.jump_url}>>")
        if isinstance(result, discord.File):
            return await ctx.send(file=result)
        if isinstance(result, discord.Embed):
            return await ctx.send(embed=result)
        if isinstance(result, PaginatorInterface):
            return await result.send_to(ctx)
        if not isinstance(result, str):
            # repr all non-strings
            result = repr(result)
        # Eventually the below handling should probably be put somewhere else
        if len(result) <= 2000:
            if result.strip() == '':
                result = "\u200b"
            # CONSISTENCY FIX: redact the bot token with the same placeholder
            # jsk_python_inspect uses, instead of the joke string "UrMoM".
            return await ctx.send(result.replace(self.bot.http.token, "[token omitted]"))
        if use_file_check(ctx, len(result)):  # File "full content" preview limit
            # Discord's desktop and web client now supports an interactive file content
            # display for files encoded in UTF-8.
            # Since this avoids escape issues and is more intuitive than pagination for
            # long results, it will now be prioritized over PaginatorInterface if the
            # resultant content is below the filesize threshold
            return await ctx.send(file=discord.File(
                filename="output.py",
                fp=io.BytesIO(result.encode('utf-8'))
            ))
        # inconsistency here, results get wrapped in codeblocks when they are too large
        # but don't if they're not. probably not that bad, but noting for later review
        paginator = WrappedPaginator(prefix='```py', suffix='```', max_size=1985)
        paginator.add_line(result)
        interface = PaginatorInterface(ctx.bot, paginator, owner=ctx.author)
        return await interface.send_to(ctx)

    @Feature.Command(parent="jsk", name="py", aliases=["python"])
    async def jsk_python(self, ctx: commands.Context, *, argument: codeblock_converter):
        """
        Direct evaluation of Python code.
        """
        arg_dict = get_var_dict_from_ctx(ctx, Flags.SCOPE_PREFIX)
        # Convenience names exposed to the evaluated code.
        arg_dict["_"] = self.last_result
        arg_dict["Embed"] = discord.Embed
        arg_dict["View"] = discord.ui.View
        arg_dict["ref"] = getattr(ctx.message.reference, 'resolved', None)
        arg_dict["reference"] = getattr(ctx.message.reference, 'resolved', None)
        arg_dict["dt"] = datetime
        arg_dict["now"] = discord.utils.utcnow
        arg_dict["format_dt"] = discord.utils.format_dt
        scope = self.scope
        try:
            async with ReplResponseReactor(ctx.message):
                with self.submit(ctx):
                    executor = AsyncCodeExecutor(argument.content, scope, arg_dict=arg_dict)
                    async for send, result in AsyncSender(executor):
                        if result is None:
                            continue
                        self.last_result = result
                        send(await self.jsk_python_result_handling(ctx, result))
        finally:
            # Drop the injected convenience names from a retained scope.
            scope.clear_intersection(arg_dict)

    @Feature.Command(parent="jsk", name="py_inspect", aliases=["pyi", "python_inspect", "pythoninspect"])
    async def jsk_python_inspect(self, ctx: commands.Context, *, argument: codeblock_converter):
        """
        Evaluation of Python code with inspect information.
        """
        arg_dict = get_var_dict_from_ctx(ctx, Flags.SCOPE_PREFIX)
        arg_dict["_"] = self.last_result
        scope = self.scope
        try:
            async with ReplResponseReactor(ctx.message):
                with self.submit(ctx):
                    executor = AsyncCodeExecutor(argument.content, scope, arg_dict=arg_dict)
                    async for send, result in AsyncSender(executor):
                        self.last_result = result
                        header = repr(result).replace("``", "`\u200b`").replace(self.bot.http.token, "[token omitted]")
                        if len(header) > 485:
                            header = header[0:482] + "..."
                        lines = [f"=== {header} ===", ""]
                        for name, res in all_inspections(result):
                            lines.append(f"{name:16.16} :: {res}")
                        text = "\n".join(lines)
                        if use_file_check(ctx, len(text)):  # File "full content" preview limit
                            send(await ctx.send(file=discord.File(
                                filename="inspection.prolog",
                                fp=io.BytesIO(text.encode('utf-8'))
                            )))
                        else:
                            paginator = WrappedPaginator(prefix="```prolog", max_size=1985)
                            paginator.add_line(text)
                            interface = PaginatorInterface(ctx.bot, paginator, owner=ctx.author)
                            send(await interface.send_to(ctx))
        finally:
            scope.clear_intersection(arg_dict)

    @Feature.Command(parent="jsk", name="dis", aliases=["disassemble"])
    async def jsk_disassemble(self, ctx: commands.Context, *, argument: codeblock_converter):
        """
        Disassemble Python code into bytecode.
        """
        arg_dict = get_var_dict_from_ctx(ctx, Flags.SCOPE_PREFIX)
        async with ReplResponseReactor(ctx.message):
            text = "\n".join(disassemble(argument.content, arg_dict=arg_dict))
            if use_file_check(ctx, len(text)):  # File "full content" preview limit
                await ctx.send(file=discord.File(
                    filename="dis.py",
                    fp=io.BytesIO(text.encode('utf-8'))
                ))
            else:
                paginator = WrappedPaginator(prefix='```py', max_size=1985)
                paginator.add_line(text)
                interface = PaginatorInterface(ctx.bot, paginator, owner=ctx.author)
                await interface.send_to(ctx)
| 37.568966 | 124 | 0.596948 |
434c02b8eba2f3f3b634d84388bf58920bd4f812 | 35,223 | py | Python | pandas/tests/frame/test_alter_axes.py | cclauss/pandas | 692b5eeeff9b8e8c750f3e64db0c39dc149a73e8 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"BSD-3-Clause"
] | 4 | 2016-10-05T17:38:58.000Z | 2020-08-24T16:26:37.000Z | pandas/tests/frame/test_alter_axes.py | cclauss/pandas | 692b5eeeff9b8e8c750f3e64db0c39dc149a73e8 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"BSD-3-Clause"
] | null | null | null | pandas/tests/frame/test_alter_axes.py | cclauss/pandas | 692b5eeeff9b8e8c750f3e64db0c39dc149a73e8 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"BSD-3-Clause"
] | 12 | 2017-05-23T06:01:12.000Z | 2021-08-16T05:09:46.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import datetime, timedelta
import numpy as np
from pandas.compat import lrange
from pandas import (DataFrame, Series, Index, MultiIndex,
RangeIndex, date_range, IntervalIndex,
to_datetime)
from pandas.core.dtypes.common import (
is_object_dtype,
is_categorical_dtype,
is_interval_dtype)
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameAlterAxes(TestData):
    def test_set_index(self):
        # Assigning a new index of matching length must propagate to a
        # previously-cached column Series; a wrong-length index must raise.
        idx = Index(np.arange(len(self.mixed_frame)))
        # cache it
        _ = self.mixed_frame['foo']  # noqa
        self.mixed_frame.index = idx
        assert self.mixed_frame['foo'].index is idx
        with tm.assert_raises_regex(ValueError, 'Length mismatch'):
            self.mixed_frame.index = idx[::2]
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame({'A': [1.1, 2.2, 3.3], 'B': [5.0, 6.1, 7.2]},
index=[2010, 2011, 2012])
expected = df.loc[2010]
new_index = df.index.astype(np.int32)
df.index = new_index
result = df.loc[2010]
assert_series_equal(result, expected)
def test_set_index2(self):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'],
'B': ['one', 'two', 'three', 'one', 'two'],
'C': ['a', 'b', 'c', 'd', 'e'],
'D': np.random.randn(5),
'E': np.random.randn(5)})
# new object, single-column
result = df.set_index('C')
result_nodrop = df.set_index('C', drop=False)
index = Index(df['C'], name='C')
expected = df.loc[:, ['A', 'B', 'D', 'E']]
expected.index = index
expected_nodrop = df.copy()
expected_nodrop.index = index
assert_frame_equal(result, expected)
assert_frame_equal(result_nodrop, expected_nodrop)
assert result.index.name == index.name
# inplace, single
df2 = df.copy()
df2.set_index('C', inplace=True)
assert_frame_equal(df2, expected)
df3 = df.copy()
df3.set_index('C', drop=False, inplace=True)
assert_frame_equal(df3, expected_nodrop)
# create new object, multi-column
result = df.set_index(['A', 'B'])
result_nodrop = df.set_index(['A', 'B'], drop=False)
index = MultiIndex.from_arrays([df['A'], df['B']], names=['A', 'B'])
expected = df.loc[:, ['C', 'D', 'E']]
expected.index = index
expected_nodrop = df.copy()
expected_nodrop.index = index
assert_frame_equal(result, expected)
assert_frame_equal(result_nodrop, expected_nodrop)
assert result.index.names == index.names
# inplace
df2 = df.copy()
df2.set_index(['A', 'B'], inplace=True)
assert_frame_equal(df2, expected)
df3 = df.copy()
df3.set_index(['A', 'B'], drop=False, inplace=True)
assert_frame_equal(df3, expected_nodrop)
# corner case
with tm.assert_raises_regex(ValueError,
'Index has duplicate keys'):
df.set_index('A', verify_integrity=True)
# append
result = df.set_index(['A', 'B'], append=True)
xp = df.reset_index().set_index(['index', 'A', 'B'])
xp.index.names = [None, 'A', 'B']
assert_frame_equal(result, xp)
# append to existing multiindex
rdf = df.set_index(['A'], append=True)
rdf = rdf.set_index(['B', 'C'], append=True)
expected = df.set_index(['A', 'B', 'C'], append=True)
assert_frame_equal(rdf, expected)
# Series
result = df.set_index(df.C)
assert result.index.name == 'C'
def test_set_index_nonuniq(self):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'],
'B': ['one', 'two', 'three', 'one', 'two'],
'C': ['a', 'b', 'c', 'd', 'e'],
'D': np.random.randn(5),
'E': np.random.randn(5)})
with tm.assert_raises_regex(ValueError,
'Index has duplicate keys'):
df.set_index('A', verify_integrity=True, inplace=True)
assert 'A' in df
def test_set_index_bug(self):
# GH1590
df = DataFrame({'val': [0, 1, 2], 'key': ['a', 'b', 'c']})
df2 = df.select(lambda indx: indx >= 1)
rs = df2.set_index('key')
xp = DataFrame({'val': [1, 2]},
Index(['b', 'c'], name='key'))
assert_frame_equal(rs, xp)
def test_set_index_pass_arrays(self):
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
# multiple columns
result = df.set_index(['A', df['B'].values], drop=False)
expected = df.set_index(['A', 'B'], drop=False)
# TODO should set_index check_names ?
assert_frame_equal(result, expected, check_names=False)
def test_construction_with_categorical_index(self):
ci = tm.makeCategoricalIndex(10)
# with Categorical
df = DataFrame({'A': np.random.randn(10),
'B': ci.values})
idf = df.set_index('B')
str(idf)
tm.assert_index_equal(idf.index, ci, check_names=False)
assert idf.index.name == 'B'
# from a CategoricalIndex
df = DataFrame({'A': np.random.randn(10),
'B': ci})
idf = df.set_index('B')
str(idf)
tm.assert_index_equal(idf.index, ci, check_names=False)
assert idf.index.name == 'B'
idf = df.set_index('B').reset_index().set_index('B')
str(idf)
tm.assert_index_equal(idf.index, ci, check_names=False)
assert idf.index.name == 'B'
new_df = idf.reset_index()
new_df.index = df.B
tm.assert_index_equal(new_df.index, ci, check_names=False)
assert idf.index.name == 'B'
def test_set_index_cast_datetimeindex(self):
df = DataFrame({'A': [datetime(2000, 1, 1) + timedelta(i)
for i in range(1000)],
'B': np.random.randn(1000)})
idf = df.set_index('A')
assert isinstance(idf.index, pd.DatetimeIndex)
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
i = (pd.DatetimeIndex(
to_datetime(['2013-1-1 13:00',
'2013-1-2 14:00'], errors="raise"))
.tz_localize('US/Pacific'))
df = DataFrame(np.random.randn(2, 1), columns=['A'])
expected = Series(np.array([pd.Timestamp('2013-01-01 13:00:00-0800',
tz='US/Pacific'),
pd.Timestamp('2013-01-02 14:00:00-0800',
tz='US/Pacific')],
dtype="object"))
# convert index to series
result = Series(i)
assert_series_equal(result, expected)
# assignt to frame
df['B'] = i
result = df['B']
assert_series_equal(result, expected, check_names=False)
assert result.name == 'B'
# keep the timezone
result = i.to_series(keep_tz=True)
assert_series_equal(result.reset_index(drop=True), expected)
# convert to utc
df['C'] = i.to_series().reset_index(drop=True)
result = df['C']
comp = pd.DatetimeIndex(expected.values).copy()
comp.tz = None
tm.assert_numpy_array_equal(result.values, comp.values)
# list of datetimes with a tz
df['D'] = i.to_pydatetime()
result = df['D']
assert_series_equal(result, expected, check_names=False)
assert result.name == 'D'
# GH 6785
# set the index manually
import pytz
df = DataFrame(
[{'ts': datetime(2014, 4, 1, tzinfo=pytz.utc), 'foo': 1}])
expected = df.set_index('ts')
df.index = df['ts']
df.pop('ts')
assert_frame_equal(df, expected)
# GH 3950
# reset_index with single level
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern']:
idx = pd.date_range('1/1/2011', periods=5,
freq='D', tz=tz, name='idx')
df = pd.DataFrame(
{'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = pd.DataFrame({'idx': [datetime(2011, 1, 1),
datetime(2011, 1, 2),
datetime(2011, 1, 3),
datetime(2011, 1, 4),
datetime(2011, 1, 5)],
'a': range(5),
'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx', 'a', 'b'])
expected['idx'] = expected['idx'].apply(
lambda d: pd.Timestamp(d, tz=tz))
assert_frame_equal(df.reset_index(), expected)
def test_set_index_timezone(self):
# GH 12358
# tz-aware Series should retain the tz
i = pd.to_datetime(["2014-01-01 10:10:10"],
utc=True).tz_convert('Europe/Rome')
df = DataFrame({'i': i})
assert df.set_index(i).index[0].hour == 11
assert pd.DatetimeIndex(pd.Series(df.i))[0].hour == 11
assert df.set_index(df.i).index[0].hour == 11
def test_set_index_dst(self):
di = pd.date_range('2006-10-29 00:00:00', periods=3,
req='H', tz='US/Pacific')
df = pd.DataFrame(data={'a': [0, 1, 2], 'b': [3, 4, 5]},
index=di).reset_index()
# single level
res = df.set_index('index')
exp = pd.DataFrame(data={'a': [0, 1, 2], 'b': [3, 4, 5]},
index=pd.Index(di, name='index'))
tm.assert_frame_equal(res, exp)
# GH 12920
res = df.set_index(['index', 'a'])
exp_index = pd.MultiIndex.from_arrays([di, [0, 1, 2]],
names=['index', 'a'])
exp = pd.DataFrame({'b': [3, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp)
def test_reset_index_with_intervals(self):
idx = pd.IntervalIndex.from_breaks(np.arange(11), name='x')
original = pd.DataFrame({'x': idx, 'y': np.arange(10)})[['x', 'y']]
result = original.set_index('x')
expected = pd.DataFrame({'y': np.arange(10)}, index=idx)
assert_frame_equal(result, expected)
result2 = result.reset_index()
assert_frame_equal(result2, original)
def test_set_index_multiindexcolumns(self):
columns = MultiIndex.from_tuples([('foo', 1), ('foo', 2), ('bar', 1)])
df = DataFrame(np.random.randn(3, 3), columns=columns)
rs = df.set_index(df.columns[0])
xp = df.iloc[:, 1:]
xp.index = df.iloc[:, 0].values
xp.index.names = [df.columns[0]]
assert_frame_equal(rs, xp)
def test_set_index_empty_column(self):
# #1971
df = DataFrame([
dict(a=1, p=0),
dict(a=2, m=10),
dict(a=3, m=11, p=20),
dict(a=4, m=12, p=21)
], columns=('a', 'm', 'p', 'x'))
# it works!
result = df.set_index(['a', 'x'])
repr(result)
def test_set_columns(self):
cols = Index(np.arange(len(self.mixed_frame.columns)))
self.mixed_frame.columns = cols
with tm.assert_raises_regex(ValueError, 'Length mismatch'):
self.mixed_frame.columns = cols[::2]
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range('2011/01/01', periods=6, freq='M', tz='US/Eastern')
idx2 = date_range('2013', periods=6, freq='A', tz='Asia/Tokyo')
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.reindex(idx2)
tm.assert_index_equal(df.index, idx2)
# 11314
# with tz
index = date_range(datetime(2015, 10, 1),
datetime(2015, 10, 1, 23),
freq='H', tz='US/Eastern')
df = DataFrame(np.random.randn(24, 1), columns=['a'], index=index)
new_index = date_range(datetime(2015, 10, 2),
datetime(2015, 10, 2, 23),
freq='H', tz='US/Eastern')
# TODO: unused?
result = df.set_index(new_index) # noqa
assert new_index.freq == index.freq
# Renaming
def test_rename(self):
mapping = {
'A': 'a',
'B': 'b',
'C': 'c',
'D': 'd'
}
renamed = self.frame.rename(columns=mapping)
renamed2 = self.frame.rename(columns=str.lower)
assert_frame_equal(renamed, renamed2)
assert_frame_equal(renamed2.rename(columns=str.upper),
self.frame, check_names=False)
# index
data = {
'A': {'foo': 0, 'bar': 1}
}
# gets sorted alphabetical
df = DataFrame(data)
renamed = df.rename(index={'foo': 'bar', 'bar': 'foo'})
tm.assert_index_equal(renamed.index, pd.Index(['foo', 'bar']))
renamed = df.rename(index=str.upper)
tm.assert_index_equal(renamed.index, pd.Index(['BAR', 'FOO']))
# have to pass something
pytest.raises(TypeError, self.frame.rename)
# partial columns
renamed = self.frame.rename(columns={'C': 'foo', 'D': 'bar'})
tm.assert_index_equal(renamed.columns,
pd.Index(['A', 'B', 'foo', 'bar']))
# other axis
renamed = self.frame.T.rename(index={'C': 'foo', 'D': 'bar'})
tm.assert_index_equal(renamed.index,
pd.Index(['A', 'B', 'foo', 'bar']))
# index with name
index = Index(['foo', 'bar'], name='name')
renamer = DataFrame(data, index=index)
renamed = renamer.rename(index={'foo': 'bar', 'bar': 'foo'})
tm.assert_index_equal(renamed.index,
pd.Index(['bar', 'foo'], name='name'))
assert renamed.index.name == renamer.index.name
def test_rename_axis_inplace(self):
# GH 15704
frame = self.frame.copy()
expected = frame.rename_axis('foo')
result = frame.copy()
no_return = result.rename_axis('foo', inplace=True)
assert no_return is None
assert_frame_equal(result, expected)
expected = frame.rename_axis('bar', axis=1)
result = frame.copy()
no_return = result.rename_axis('bar', axis=1, inplace=True)
assert no_return is None
assert_frame_equal(result, expected)
def test_rename_multiindex(self):
tuples_index = [('foo1', 'bar1'), ('foo2', 'bar2')]
tuples_columns = [('fizz1', 'buzz1'), ('fizz2', 'buzz2')]
index = MultiIndex.from_tuples(tuples_index, names=['foo', 'bar'])
columns = MultiIndex.from_tuples(
tuples_columns, names=['fizz', 'buzz'])
df = DataFrame([(0, 0), (1, 1)], index=index, columns=columns)
#
# without specifying level -> accross all levels
renamed = df.rename(index={'foo1': 'foo3', 'bar2': 'bar3'},
columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'})
new_index = MultiIndex.from_tuples([('foo3', 'bar1'),
('foo2', 'bar3')],
names=['foo', 'bar'])
new_columns = MultiIndex.from_tuples([('fizz3', 'buzz1'),
('fizz2', 'buzz3')],
names=['fizz', 'buzz'])
tm.assert_index_equal(renamed.index, new_index)
tm.assert_index_equal(renamed.columns, new_columns)
assert renamed.index.names == df.index.names
assert renamed.columns.names == df.columns.names
#
# with specifying a level (GH13766)
# dict
new_columns = MultiIndex.from_tuples([('fizz3', 'buzz1'),
('fizz2', 'buzz2')],
names=['fizz', 'buzz'])
renamed = df.rename(columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'},
level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'},
level='fizz')
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples([('fizz1', 'buzz1'),
('fizz2', 'buzz3')],
names=['fizz', 'buzz'])
renamed = df.rename(columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'},
level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'},
level='buzz')
tm.assert_index_equal(renamed.columns, new_columns)
# function
func = str.upper
new_columns = MultiIndex.from_tuples([('FIZZ1', 'buzz1'),
('FIZZ2', 'buzz2')],
names=['fizz', 'buzz'])
renamed = df.rename(columns=func, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level='fizz')
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples([('fizz1', 'BUZZ1'),
('fizz2', 'BUZZ2')],
names=['fizz', 'buzz'])
renamed = df.rename(columns=func, level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level='buzz')
tm.assert_index_equal(renamed.columns, new_columns)
# index
new_index = MultiIndex.from_tuples([('foo3', 'bar1'),
('foo2', 'bar2')],
names=['foo', 'bar'])
renamed = df.rename(index={'foo1': 'foo3', 'bar2': 'bar3'},
level=0)
tm.assert_index_equal(renamed.index, new_index)
def test_rename_nocopy(self):
renamed = self.frame.rename(columns={'C': 'foo'}, copy=False)
renamed['foo'] = 1.
assert (self.frame['C'] == 1.).all()
    def test_rename_inplace(self):
        # By default rename() returns a new object; the result of this first
        # call is deliberately discarded to assert the caller is untouched.
        self.frame.rename(columns={'C': 'foo'})
        assert 'C' in self.frame
        assert 'foo' not in self.frame
        c_id = id(self.frame['C'])
        frame = self.frame.copy()
        frame.rename(columns={'C': 'foo'}, inplace=True)
        assert 'C' not in frame
        assert 'foo' in frame
        # inplace rename must produce a fresh column Series, not reuse the
        # cached one from the source frame.
        assert id(frame['foo']) != c_id
def test_rename_bug(self):
# GH 5344
# rename set ref_locs, and set_index was not resetting
df = DataFrame({0: ['foo', 'bar'], 1: ['bah', 'bas'], 2: [1, 2]})
df = df.rename(columns={0: 'a'})
df = df.rename(columns={1: 'b'})
df = df.set_index(['a', 'b'])
df.columns = ['2001-01-01']
expected = DataFrame([[1], [2]],
index=MultiIndex.from_tuples(
[('foo', 'bah'), ('bar', 'bas')],
names=['a', 'b']),
columns=['2001-01-01'])
assert_frame_equal(df, expected)
def test_reorder_levels(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]],
names=['L0', 'L1', 'L2'])
df = DataFrame({'A': np.arange(6), 'B': np.arange(6)}, index=index)
# no change, position
result = df.reorder_levels([0, 1, 2])
assert_frame_equal(df, result)
# no change, labels
result = df.reorder_levels(['L0', 'L1', 'L2'])
assert_frame_equal(df, result)
# rotate, position
result = df.reorder_levels([1, 2, 0])
e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],
labels=[[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0]],
names=['L1', 'L2', 'L0'])
expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},
index=e_idx)
assert_frame_equal(result, expected)
result = df.reorder_levels([0, 0, 0])
e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],
labels=[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
names=['L0', 'L0', 'L0'])
expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},
index=e_idx)
assert_frame_equal(result, expected)
result = df.reorder_levels(['L0', 'L0', 'L0'])
assert_frame_equal(result, expected)
def test_reset_index(self):
stacked = self.frame.stack()[::2]
stacked = DataFrame({'foo': stacked, 'bar': stacked})
names = ['first', 'second']
stacked.index.names = names
deleveled = stacked.reset_index()
for i, (lev, lab) in enumerate(zip(stacked.index.levels,
stacked.index.labels)):
values = lev.take(lab)
name = names[i]
tm.assert_index_equal(values, Index(deleveled[name]))
stacked.index.names = [None, None]
deleveled2 = stacked.reset_index()
tm.assert_series_equal(deleveled['first'], deleveled2['level_0'],
check_names=False)
tm.assert_series_equal(deleveled['second'], deleveled2['level_1'],
check_names=False)
# default name assigned
rdf = self.frame.reset_index()
exp = pd.Series(self.frame.index.values, name='index')
tm.assert_series_equal(rdf['index'], exp)
# default name assigned, corner case
df = self.frame.copy()
df['index'] = 'foo'
rdf = df.reset_index()
exp = pd.Series(self.frame.index.values, name='level_0')
tm.assert_series_equal(rdf['level_0'], exp)
# but this is ok
self.frame.index.name = 'index'
deleveled = self.frame.reset_index()
tm.assert_series_equal(deleveled['index'],
pd.Series(self.frame.index))
tm.assert_index_equal(deleveled.index,
pd.Index(np.arange(len(deleveled))))
# preserve column names
self.frame.columns.name = 'columns'
resetted = self.frame.reset_index()
assert resetted.columns.name == 'columns'
# only remove certain columns
frame = self.frame.reset_index().set_index(['index', 'A', 'B'])
rs = frame.reset_index(['A', 'B'])
# TODO should reset_index check_names ?
assert_frame_equal(rs, self.frame, check_names=False)
rs = frame.reset_index(['index', 'A', 'B'])
assert_frame_equal(rs, self.frame.reset_index(), check_names=False)
rs = frame.reset_index(['index', 'A', 'B'])
assert_frame_equal(rs, self.frame.reset_index(), check_names=False)
rs = frame.reset_index('A')
xp = self.frame.reset_index().set_index(['index', 'B'])
assert_frame_equal(rs, xp, check_names=False)
# test resetting in place
df = self.frame.copy()
resetted = self.frame.reset_index()
df.reset_index(inplace=True)
assert_frame_equal(df, resetted, check_names=False)
frame = self.frame.reset_index().set_index(['index', 'A', 'B'])
rs = frame.reset_index('A', drop=True)
xp = self.frame.copy()
del xp['A']
xp = xp.set_index(['B'], append=True)
assert_frame_equal(rs, xp, check_names=False)
def test_reset_index_level(self):
df = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]],
columns=['A', 'B', 'C', 'D'])
for levels in ['A', 'B'], [0, 1]:
# With MultiIndex
result = df.set_index(['A', 'B']).reset_index(level=levels[0])
tm.assert_frame_equal(result, df.set_index('B'))
result = df.set_index(['A', 'B']).reset_index(level=levels[:1])
tm.assert_frame_equal(result, df.set_index('B'))
result = df.set_index(['A', 'B']).reset_index(level=levels)
tm.assert_frame_equal(result, df)
result = df.set_index(['A', 'B']).reset_index(level=levels,
drop=True)
tm.assert_frame_equal(result, df[['C', 'D']])
# With single-level Index (GH 16263)
result = df.set_index('A').reset_index(level=levels[0])
tm.assert_frame_equal(result, df)
result = df.set_index('A').reset_index(level=levels[:1])
tm.assert_frame_equal(result, df)
result = df.set_index(['A']).reset_index(level=levels[0],
drop=True)
tm.assert_frame_equal(result, df[['B', 'C', 'D']])
# Missing levels - for both MultiIndex and single-level Index:
for idx_lev in ['A', 'B'], ['A']:
with tm.assert_raises_regex(KeyError, 'Level E '):
df.set_index(idx_lev).reset_index(level=['A', 'E'])
with tm.assert_raises_regex(IndexError, 'Too many levels'):
df.set_index(idx_lev).reset_index(level=[0, 1, 2])
def test_reset_index_right_dtype(self):
time = np.arange(0.0, 10, np.sqrt(2) / 2)
s1 = Series((9.81 * time ** 2) / 2,
index=Index(time, name='time'),
name='speed')
df = DataFrame(s1)
resetted = s1.reset_index()
assert resetted['time'].dtype == np.float64
resetted = df.reset_index()
assert resetted['time'].dtype == np.float64
def test_reset_index_multiindex_col(self):
vals = np.random.randn(3, 3).astype(object)
idx = ['x', 'y', 'z']
full = np.hstack(([[x] for x in idx], vals))
df = DataFrame(vals, Index(idx, name='a'),
columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])
rs = df.reset_index()
xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],
['', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index(col_fill=None)
xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index(col_level=1, col_fill='blah')
xp = DataFrame(full, columns=[['blah', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
df = DataFrame(vals,
MultiIndex.from_arrays([[0, 1, 2], ['x', 'y', 'z']],
names=['d', 'a']),
columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])
rs = df.reset_index('a', )
xp = DataFrame(full, Index([0, 1, 2], name='d'),
columns=[['a', 'b', 'b', 'c'],
['', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index('a', col_fill=None)
xp = DataFrame(full, Index(lrange(3), name='d'),
columns=[['a', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index('a', col_fill='blah', col_level=1)
xp = DataFrame(full, Index(lrange(3), name='d'),
columns=[['blah', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
def test_reset_index_multiindex_nan(self):
# GH6322, testing reset_index on MultiIndexes
# when we have a nan or all nan
df = pd.DataFrame({'A': ['a', 'b', 'c'],
'B': [0, 1, np.nan],
'C': np.random.rand(3)})
rs = df.set_index(['A', 'B']).reset_index()
assert_frame_equal(rs, df)
df = pd.DataFrame({'A': [np.nan, 'b', 'c'],
'B': [0, 1, 2],
'C': np.random.rand(3)})
rs = df.set_index(['A', 'B']).reset_index()
assert_frame_equal(rs, df)
df = pd.DataFrame({'A': ['a', 'b', 'c'],
'B': [0, 1, 2],
'C': [np.nan, 1.1, 2.2]})
rs = df.set_index(['A', 'B']).reset_index()
assert_frame_equal(rs, df)
df = pd.DataFrame({'A': ['a', 'b', 'c'],
'B': [np.nan, np.nan, np.nan],
'C': np.random.rand(3)})
rs = df.set_index(['A', 'B']).reset_index()
assert_frame_equal(rs, df)
def test_reset_index_with_datetimeindex_cols(self):
# GH5818
#
df = pd.DataFrame([[1, 2], [3, 4]],
columns=pd.date_range('1/1/2013', '1/2/2013'),
index=['A', 'B'])
result = df.reset_index()
expected = pd.DataFrame([['A', 1, 2], ['B', 3, 4]],
columns=['index', datetime(2013, 1, 1),
datetime(2013, 1, 2)])
assert_frame_equal(result, expected)
def test_reset_index_range(self):
# GH 12071
df = pd.DataFrame([[0, 0], [1, 1]], columns=['A', 'B'],
index=RangeIndex(stop=2))
result = df.reset_index()
assert isinstance(result.index, RangeIndex)
expected = pd.DataFrame([[0, 0, 0], [1, 1, 1]],
columns=['index', 'A', 'B'],
index=RangeIndex(stop=2))
assert_frame_equal(result, expected)
def test_set_index_names(self):
df = pd.util.testing.makeDataFrame()
df.index.name = 'name'
assert df.set_index(df.index).index.names == ['name']
mi = MultiIndex.from_arrays(df[['A', 'B']].T.values, names=['A', 'B'])
mi2 = MultiIndex.from_arrays(df[['A', 'B', 'A', 'B']].T.values,
names=['A', 'B', 'A', 'B'])
df = df.set_index(['A', 'B'])
assert df.set_index(df.index).index.names == ['A', 'B']
# Check that set_index isn't converting a MultiIndex into an Index
assert isinstance(df.set_index(df.index).index, MultiIndex)
# Check actual equality
tm.assert_index_equal(df.set_index(df.index).index, mi)
# Check that [MultiIndex, MultiIndex] yields a MultiIndex rather
# than a pair of tuples
assert isinstance(df.set_index(
[df.index, df.index]).index, MultiIndex)
# Check equality
tm.assert_index_equal(df.set_index([df.index, df.index]).index, mi2)
def test_rename_objects(self):
renamed = self.mixed_frame.rename(columns=str.upper)
assert 'FOO' in renamed
assert 'foo' not in renamed
def test_assign_columns(self):
self.frame['hi'] = 'there'
frame = self.frame.copy()
frame.columns = ['foo', 'bar', 'baz', 'quux', 'foo2']
assert_series_equal(self.frame['C'], frame['baz'], check_names=False)
assert_series_equal(self.frame['hi'], frame['foo2'], check_names=False)
def test_set_index_preserve_categorical_dtype(self):
# GH13743, GH13854
df = DataFrame({'A': [1, 2, 1, 1, 2],
'B': [10, 16, 22, 28, 34],
'C1': pd.Categorical(list("abaab"),
categories=list("bac"),
ordered=False),
'C2': pd.Categorical(list("abaab"),
categories=list("bac"),
ordered=True)})
for cols in ['C1', 'C2', ['A', 'C1'], ['A', 'C2'], ['C1', 'C2']]:
result = df.set_index(cols).reset_index()
result = result.reindex(columns=df.columns)
tm.assert_frame_equal(result, df)
class TestIntervalIndex(object):
    """Tests for DataFrame interaction with interval-valued columns."""

    def test_setitem(self):
        # Assigning a pd.cut() result (Categorical of Intervals) through
        # different routes yields either categorical or object dtype.
        df = DataFrame({'A': range(10)})
        s = pd.cut(df.A, 5)
        assert isinstance(s.cat.categories, IntervalIndex)
        # B & D end up as Categoricals
        # the remainder are converted to in-line objects
        # containing an IntervalIndex.values
        df['B'] = s
        df['C'] = np.array(s)
        df['D'] = s.values
        df['E'] = np.array(s.values)
        assert is_categorical_dtype(df['B'])
        assert is_interval_dtype(df['B'].cat.categories)
        assert is_categorical_dtype(df['D'])
        assert is_interval_dtype(df['D'].cat.categories)
        assert is_object_dtype(df['C'])
        assert is_object_dtype(df['E'])
        # they compare equal as Index
        # when converted to numpy objects
        c = lambda x: Index(np.array(x))
        tm.assert_index_equal(c(df.B), c(df.B), check_names=False)
        tm.assert_index_equal(c(df.B), c(df.C), check_names=False)
        tm.assert_index_equal(c(df.B), c(df.D), check_names=False)
        # NOTE(review): the line below duplicates the previous comparison
        # against df.D — possibly intended to be c(df.E); confirm upstream.
        tm.assert_index_equal(c(df.B), c(df.D), check_names=False)
        # B & D are the same Series
        tm.assert_series_equal(df['B'], df['B'], check_names=False)
        tm.assert_series_equal(df['B'], df['D'], check_names=False)
        # C & E are the same Series
        tm.assert_series_equal(df['C'], df['C'], check_names=False)
        tm.assert_series_equal(df['C'], df['E'], check_names=False)

    def test_set_reset_index(self):
        # set_index/reset_index round-trip with an interval-categorical
        # column must not raise.
        df = DataFrame({'A': range(10)})
        s = pd.cut(df.A, 5)
        df['B'] = s
        df = df.set_index('B')
        df = df.reset_index()
| 38.664105 | 79 | 0.505607 |
b995ee8f7de69495e195da1fda1b5cf2f03e9719 | 367 | py | Python | examples/pybullet/examples/logMinitaur.py | frk2/bullet3 | 225d823e4dc3f952c6c39920c3f87390383e0602 | [
"Zlib"
] | 27 | 2018-05-21T14:28:10.000Z | 2021-12-31T03:12:35.000Z | examples/pybullet/examples/logMinitaur.py | frk2/bullet3 | 225d823e4dc3f952c6c39920c3f87390383e0602 | [
"Zlib"
] | 2 | 2018-02-02T21:26:09.000Z | 2018-02-06T19:05:24.000Z | examples/pybullet/examples/logMinitaur.py | frk2/bullet3 | 225d823e4dc3f952c6c39920c3f87390383e0602 | [
"Zlib"
] | 13 | 2019-11-08T12:48:44.000Z | 2022-01-04T04:13:33.000Z | import pybullet as p
# Connect to an already-running physics server over shared memory;
# a negative id means no server was found, so fall back to a local GUI.
cid = p.connect(p.SHARED_MEMORY)
if (cid < 0) :
    p.connect(p.GUI)
p.loadURDF("plane.urdf")
quadruped = p.loadURDF("quadruped/quadruped.urdf")
# Start Minitaur-format state logging for the quadruped body.
logId = p.startStateLogging(p.STATE_LOGGING_MINITAUR,"LOG00048.TXT",[quadruped])
# Step the simulation a few times so the log captures some samples.
p.stepSimulation()
p.stepSimulation()
p.stepSimulation()
p.stepSimulation()
p.stepSimulation()
p.stopStateLogging(logId)
| 22.9375 | 80 | 0.765668 |
ace1f91ec6aa303209ffa7e599924509e0141523 | 352 | py | Python | loopchain/blockchain/blocks/__init__.py | windies21/loopchain | 6e96c8a7e006747af04187155678f2fae59e1389 | [
"Apache-2.0"
] | 105 | 2018-04-03T05:29:08.000Z | 2022-01-28T17:33:20.000Z | loopchain/blockchain/blocks/__init__.py | laurenceyoon/loopchain | e87032779be4715c135c2c91d2757d9c63bf4e31 | [
"Apache-2.0"
] | 135 | 2018-09-04T07:11:02.000Z | 2021-12-15T06:25:47.000Z | loopchain/blockchain/blocks/__init__.py | laurenceyoon/loopchain | e87032779be4715c135c2c91d2757d9c63bf4e31 | [
"Apache-2.0"
] | 46 | 2018-05-07T09:12:07.000Z | 2022-02-23T09:58:37.000Z | from .block import Block, BlockHeader, BlockBody, _dict__str__, NextRepsChangeReason
from .block_builder import BlockBuilder
from .block_serializer import BlockSerializer
from .block_verifier import BlockVerifier
from .block_prover import BlockProver, BlockProverType
from .block_versioner import BlockVersioner
from . import v0_1a
from . import v0_3
| 35.2 | 84 | 0.855114 |
7ab9fae58bee1bde3c75465cf740415cf0a4b3ff | 429 | py | Python | stationery/migrations/0003_auto_20210710_0304.py | shaxpakistan/e-stationery | 00c7027f74e9f0e90247e44d9dbd86203cd9dbb4 | [
"CC0-1.0"
] | 1 | 2022-02-17T19:13:40.000Z | 2022-02-17T19:13:40.000Z | stationery/migrations/0003_auto_20210710_0304.py | shaxpakistan/e-stationery | 00c7027f74e9f0e90247e44d9dbd86203cd9dbb4 | [
"CC0-1.0"
] | null | null | null | stationery/migrations/0003_auto_20210710_0304.py | shaxpakistan/e-stationery | 00c7027f74e9f0e90247e44d9dbd86203cd9dbb4 | [
"CC0-1.0"
] | null | null | null | # Generated by Django 3.1.4 on 2021-07-10 00:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stationery', '0002_auto_20210710_0244'),
]
operations = [
migrations.AlterField(
model_name='order',
name='destination',
field=models.CharField(blank=True, default='Self taking', max_length=100),
),
]
| 22.578947 | 86 | 0.617716 |
cabfe53b6fa55291474163e9537429d2f873b72b | 3,880 | py | Python | src/Guidance_system/guidance_sys/calibrate.py | Joseph-tsai415/Msc-All-Terrain-Robot | 40973b8d1b088d37efc372f6313ee58d18792c02 | [
"MIT"
] | null | null | null | src/Guidance_system/guidance_sys/calibrate.py | Joseph-tsai415/Msc-All-Terrain-Robot | 40973b8d1b088d37efc372f6313ee58d18792c02 | [
"MIT"
] | null | null | null | src/Guidance_system/guidance_sys/calibrate.py | Joseph-tsai415/Msc-All-Terrain-Robot | 40973b8d1b088d37efc372f6313ee58d18792c02 | [
"MIT"
] | null | null | null | import numpy as np
import cv2, os
from cv2 import aruco
class calibrate():
"""
The class called Calibrate is initialised with constants appropriate
for the given target Calibration
"""
# if __name__ == '__main__':
def __init__(self):
#%matplotlib nbagg
self.aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
self.board = aruco.CharucoBoard_create(7, 5, 1, .8, self.aruco_dict)
#import image
dir_path=os.path.dirname(os.path.realpath(__file__))
datadir = os.path.join(dir_path,"./photo/")
images = np.array([datadir + f for f in os.listdir(datadir) if f.endswith(".png") ])
order = np.argsort([int(p.split(".")[-2].split("_")[-1]) for p in images])
images = images[order]
print(images)
#image calibration
allCorners,allIds,imsize=self.read_chessboards(images)
self.ret, self.mtx, self.dist, self.rvecs, self.tvecs = self.calibrate_camera(allCorners,allIds,imsize)
def get(self):
'''
Return the fix camera matrix
'''
return self.ret, self.mtx, self.dist, self.rvecs, self.tvecs
def read_chessboards(self,images):
"""
Charuco base pose estimation.
"""
print("POSE ESTIMATION STARTS:")
allCorners = []
allIds = []
decimator = 0
# SUB PIXEL CORNER DETECTION CRITERION
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.00001)
for im in images:
print("=> Processing image {0}".format(im))
frame = cv2.imread(im)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(gray, self.aruco_dict)
if len(corners)>0:
# SUB PIXEL DETECTION
for corner in corners:
cv2.cornerSubPix(gray, corner,
winSize = (3,3),
zeroZone = (-1,-1),
criteria = criteria)
res2 = cv2.aruco.interpolateCornersCharuco(corners,ids,gray,self.board)
if res2[1] is not None and res2[2] is not None and len(res2[1])>3 and decimator%1==0:
allCorners.append(res2[1])
allIds.append(res2[2])
decimator+=1
imsize = gray.shape
return allCorners,allIds,imsize
def calibrate_camera(self,allCorners,allIds,imsize):
"""
Calibrates the camera using the dected corners.
"""
print("CAMERA CALIBRATION")
cameraMatrixInit = np.array([[ 1000., 0., imsize[0]/2.],
[ 0., 1000., imsize[1]/2.],
[ 0., 0., 1.]])
distCoeffsInit = np.zeros((5,1))
flags = (cv2.CALIB_USE_INTRINSIC_GUESS + cv2.CALIB_RATIONAL_MODEL + cv2.CALIB_FIX_ASPECT_RATIO)
#flags = (cv2.CALIB_RATIONAL_MODEL)
(ret, camera_matrix, distortion_coefficients0,
rotation_vectors, translation_vectors,
stdDeviationsIntrinsics, stdDeviationsExtrinsics,
perViewErrors) = cv2.aruco.calibrateCameraCharucoExtended(
charucoCorners=allCorners,
charucoIds=allIds,
board=self.board,
imageSize=imsize,
cameraMatrix=cameraMatrixInit,
distCoeffs=distCoeffsInit,
flags=flags,
criteria=(cv2.TERM_CRITERIA_EPS & cv2.TERM_CRITERIA_COUNT, 10000, 1e-9))
return ret, camera_matrix, distortion_coefficients0, rotation_vectors, translation_vectors
if __name__ == '__main__':
    # Build the calibrator (the full ChArUco calibration runs in __init__)
    # and unpack the resulting camera parameters.
    calibrator = calibrate()
    ret, mtx, dist, rvecs, tvecs = calibrator.get()
3bcb25efa8fe3c1d236b2f388483ce5d633dce5b | 150 | py | Python | sales/templatetags/__init__.py | nuwainfo/treeio | f57bf9114d9774c11468a1b0e44614b04631beb1 | [
"MIT"
] | 242 | 2015-01-01T15:08:23.000Z | 2022-01-19T21:14:24.000Z | sales/templatetags/__init__.py | nuwainfo/treeio | f57bf9114d9774c11468a1b0e44614b04631beb1 | [
"MIT"
] | 52 | 2015-01-05T09:13:17.000Z | 2018-12-26T14:52:43.000Z | sales/templatetags/__init__.py | nuwainfo/treeio | f57bf9114d9774c11468a1b0e44614b04631beb1 | [
"MIT"
] | 99 | 2015-01-09T23:28:14.000Z | 2021-12-30T09:19:51.000Z | # encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Sales templatetags docstring
"""
| 16.666667 | 32 | 0.726667 |
41bfb992d5e593335db08357e402194351a7205a | 248 | py | Python | mello/manage.py | mmbln/mello | baf13571fd6b14130a586cca40ef9c1d739faa60 | [
"Apache-2.0"
] | null | null | null | mello/manage.py | mmbln/mello | baf13571fd6b14130a586cca40ef9c1d739faa60 | [
"Apache-2.0"
] | null | null | null | mello/manage.py | mmbln/mello | baf13571fd6b14130a586cca40ef9c1d739faa60 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings module unless the caller
    # already configured one in the environment.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mello.settings")
    # Imported after the env var is set so Django picks up the settings.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| 22.545455 | 69 | 0.770161 |
919f5440a9c5508b87ae5386637a4ce25c914acb | 15,610 | py | Python | pipe-cli/mount/pipe-fuse.py | ZMaratovna/cloud-pipeline | 542b8394f9fade8eb0ef5603568348c3f20a758d | [
"Apache-2.0"
] | 126 | 2019-03-22T19:40:38.000Z | 2022-02-16T13:01:44.000Z | pipe-cli/mount/pipe-fuse.py | ZMaratovna/cloud-pipeline | 542b8394f9fade8eb0ef5603568348c3f20a758d | [
"Apache-2.0"
] | 1,189 | 2019-03-25T10:39:27.000Z | 2022-03-31T12:50:33.000Z | pipe-cli/mount/pipe-fuse.py | ZMaratovna/cloud-pipeline | 542b8394f9fade8eb0ef5603568348c3f20a758d | [
"Apache-2.0"
] | 62 | 2019-03-22T22:09:49.000Z | 2022-03-08T12:05:56.000Z | # Copyright 2017-2020 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import errno
import logging
import os
import platform
import sys
import traceback
import future.utils
def is_windows():
    """Return True when running on a Windows host."""
    system_name = platform.system()
    return system_name == 'Windows'
# PyInstaller sets sys.frozen on bundled executables; in that case the
# libfuse binaries are unpacked next to the bundle in sys._MEIPASS.
is_frozen = getattr(sys, 'frozen', False)
if is_frozen:
    source_path = sys._MEIPASS
    libfuse_library = 'libfuse.so.frozen'
    dokanfuse_library = 'dokanfuse1.dll.frozen'
else:
    source_path = os.path.dirname(__file__)
    libfuse_library = 'libfuse.so.2.9.2'
    dokanfuse_library = 'dokanfuse1.dll.1.5.0.3000'
# Pick the Dokany DLL on Windows, libfuse .so otherwise.
libfuse_path = os.path.abspath(os.path.join(source_path, 'libfuse',
                                            dokanfuse_library if is_windows() else libfuse_library))
# fusepy reads FUSE_LIBRARY_PATH to locate the native library; only set it
# when the bundled copy actually exists, otherwise fall back to the host's.
if os.path.exists(libfuse_path):
    os.environ["FUSE_LIBRARY_PATH"] = libfuse_path
from pipefuse.fuseutils import MB, GB
from pipefuse.cache import CachingFileSystemClient, ListingCache, ThreadSafeListingCache
from pipefuse.buffread import BufferingReadAheadFileSystemClient
from pipefuse.buffwrite import BufferingWriteFileSystemClient
from pipefuse.trunc import CopyOnDownTruncateFileSystemClient, \
WriteNullsOnUpTruncateFileSystemClient, \
WriteLastNullOnUpTruncateFileSystemClient
from pipefuse.api import CloudPipelineClient, CloudType
from pipefuse.gcp import GoogleStorageLowLevelFileSystemClient
from pipefuse.webdav import CPWebDavClient
from pipefuse.s3 import S3StorageLowLevelClient
from pipefuse.storage import StorageHighLevelFileSystemClient
from pipefuse.pipefs import PipeFS
from pipefuse.record import RecordingFS, RecordingFileSystemClient
from pipefuse.path import PathExpandingStorageFileSystemClient
from pipefuse.fslock import get_lock
import ctypes
import fuse
from fuse import FUSE, fuse_operations, fuse_file_info, c_utimbuf
from cachetools import TTLCache
# Logging levels accepted by the -l/--logging-level CLI option.
_allowed_logging_level_names = ['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET']
# lfilter keeps only str-typed names (all entries are str here, so this is
# effectively a py2/py3-safe copy of the list).
_allowed_logging_levels = future.utils.lfilter(lambda name: isinstance(name, str), _allowed_logging_level_names)
_allowed_logging_levels_string = ', '.join(_allowed_logging_levels)
_default_logging_level = 'ERROR'
# INFO or DEBUG additionally enables FS-call recording (see __main__ below).
_debug_logging_level = 'DEBUG'
_info_logging_level = 'INFO'
def start(mountpoint, webdav, bucket,
          read_buffer_size, read_ahead_min_size, read_ahead_max_size, read_ahead_size_multiplier,
          write_buffer_size, trunc_buffer_size, chunk_size,
          cache_ttl, cache_size, default_mode,
          mount_options=None, threads=False, monitoring_delay=600, recording=False):
    """Builds the file-system client pipeline and mounts it via FUSE.

    The base client is either a WebDAV client (when `webdav` is given) or an
    S3/GS object-storage client resolved from `bucket`, and is then wrapped
    with optional recording, path-expanding, listing-cache, write/read
    buffering and truncation-support decorators. Runs FUSE in foreground
    mode, so this call blocks until the mount is torn down.

    Raises:
        RuntimeError: if API_TOKEN is missing, if API is missing in bucket
            mode, or if the storage type is unsupported.
    """
    if mount_options is None:
        mount_options = {}
    # Create the mount point directory if it does not exist yet.
    try:
        os.makedirs(mountpoint)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    # Connection settings come from the environment; several sizes may be
    # overridden by CP_PIPE_FUSE_* environment variables.
    api = os.environ.get('API', '')
    bearer = os.environ.get('API_TOKEN', '')
    chunk_size = int(os.environ.get('CP_PIPE_FUSE_CHUNK_SIZE', chunk_size))
    read_ahead_min_size = int(os.environ.get('CP_PIPE_FUSE_READ_AHEAD_MIN_SIZE', read_ahead_min_size))
    read_ahead_max_size = int(os.environ.get('CP_PIPE_FUSE_READ_AHEAD_MAX_SIZE', read_ahead_max_size))
    read_ahead_size_multiplier = int(os.environ.get('CP_PIPE_FUSE_READ_AHEAD_SIZE_MULTIPLIER',
                                                    read_ahead_size_multiplier))
    bucket_type = None
    root_path = None
    if not bearer:
        raise RuntimeError('Cloud Pipeline API_TOKEN should be specified.')
    if webdav:
        client = CPWebDavClient(webdav_url=webdav, bearer=bearer)
    else:
        if not api:
            raise RuntimeError('Cloud Pipeline API should be specified.')
        pipe = CloudPipelineClient(api=api, token=bearer)
        # `bucket` may include a sub-path: "<bucket_name>/<root_path...>".
        path_chunks = bucket.rstrip('/').split('/')
        bucket_name = path_chunks[0]
        root_path = '/'.join(path_chunks[1:])
        bucket_object = pipe.get_storage(bucket)
        bucket_type = bucket_object.type
        if bucket_type == CloudType.S3:
            client = S3StorageLowLevelClient(bucket_name, pipe=pipe, chunk_size=chunk_size, storage_path=bucket)
        elif bucket_type == CloudType.GS:
            client = GoogleStorageLowLevelFileSystemClient(bucket_name, pipe=pipe, chunk_size=chunk_size,
                                                           storage_path=bucket)
        else:
            raise RuntimeError('Cloud storage type %s is not supported.' % bucket_object.type)
        client = StorageHighLevelFileSystemClient(client)
    # Each wrapper below decorates the previous client (innermost first).
    if recording:
        client = RecordingFileSystemClient(client)
    if bucket_type in [CloudType.S3, CloudType.GS]:
        client = PathExpandingStorageFileSystemClient(client, root_path=root_path)
    if cache_ttl > 0 and cache_size > 0:
        cache_implementation = TTLCache(maxsize=cache_size, ttl=cache_ttl)
        cache = ListingCache(cache_implementation)
        if threads:
            cache = ThreadSafeListingCache(cache)
        client = CachingFileSystemClient(client, cache)
    else:
        logging.info('Caching is disabled.')
    if write_buffer_size > 0:
        client = BufferingWriteFileSystemClient(client, capacity=write_buffer_size)
    else:
        logging.info('Write buffering is disabled.')
    if read_buffer_size > 0:
        client = BufferingReadAheadFileSystemClient(client,
                                                    read_ahead_min_size=read_ahead_min_size,
                                                    read_ahead_max_size=read_ahead_max_size,
                                                    read_ahead_size_multiplier=read_ahead_size_multiplier,
                                                    capacity=read_buffer_size)
    else:
        logging.info('Read buffering is disabled.')
    # Truncation strategy differs per backend (WebDAV vs. S3 vs. GS).
    if trunc_buffer_size > 0:
        if webdav:
            client = CopyOnDownTruncateFileSystemClient(client, capacity=trunc_buffer_size)
            client = WriteLastNullOnUpTruncateFileSystemClient(client)
        elif bucket_type == CloudType.S3:
            client = WriteNullsOnUpTruncateFileSystemClient(client, capacity=trunc_buffer_size)
        elif bucket_type == CloudType.GS:
            client = CopyOnDownTruncateFileSystemClient(client, capacity=trunc_buffer_size)
            client = WriteNullsOnUpTruncateFileSystemClient(client, capacity=trunc_buffer_size)
    else:
        logging.info('Truncating support is disabled.')
    logging.info('File system clients pipeline: %s', client.stats())
    # default_mode is given as an octal string, e.g. "700".
    fs = PipeFS(client=client, lock=get_lock(threads, monitoring_delay=monitoring_delay), mode=int(default_mode, 8))
    if recording:
        fs = RecordingFS(fs)
    logging.info('Initializing file system.')
    enable_additional_operations()
    FUSE(fs, mountpoint, nothreads=not threads, foreground=True, ro=client.is_read_only(), **mount_options)
def enable_additional_operations():
    """Extends fusepy's ctypes bindings with extra FUSE/Dokany callbacks.

    Rebuilds the `fuse_operations` struct with the operations fusepy does
    not declare (poll/write_buf/read_buf/flock/fallocate for libfuse,
    attribute/time setters for Dokany) and patches matching Python
    dispatchers onto the FUSE class. The struct field order must match the
    native headers exactly — do not reorder.
    """
    # Opaque native types; only used behind POINTER(), so no fields needed.
    class fuse_pollhandle(ctypes.Structure):
        pass
    class fuse_bufvec(ctypes.Structure):
        pass
    # Only the operations required by libfuse are implemented.
    # Notice that the fields order is important.
    # https://github.com/libfuse/libfuse/blob/ad38195a88c80d73cb46507851ebb870f3bd588d/include/fuse.h#L88
    linux_fields = list(fuse_operations._fields_) + [
        ('poll', ctypes.CFUNCTYPE(
            ctypes.c_int, ctypes.c_char_p, ctypes.POINTER(fuse_file_info),
            ctypes.POINTER(fuse_pollhandle), ctypes.c_uint)),
        ('write_buf', ctypes.CFUNCTYPE(
            ctypes.c_int, ctypes.c_char_p, ctypes.POINTER(fuse_bufvec), ctypes.c_longlong,
            ctypes.POINTER(fuse_file_info))),
        ('read_buf', ctypes.CFUNCTYPE(
            ctypes.c_int, ctypes.c_char_p, ctypes.POINTER(fuse_bufvec),
            ctypes.c_size_t, ctypes.c_longlong, ctypes.POINTER(fuse_file_info))),
        ('flock', ctypes.CFUNCTYPE(
            ctypes.c_int, ctypes.c_char_p, ctypes.POINTER(fuse_file_info), ctypes.c_int)),
        ('fallocate', ctypes.CFUNCTYPE(
            ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_longlong, ctypes.c_longlong,
            ctypes.POINTER(fuse_file_info))),
    ]
    # Only the operations required by dokany are implemented.
    # Notice that the fields order is important.
    # https://github.com/dokan-dev/dokany/blob/6f8a3472dfbb36bd2340b3b59aa4a72e7d8b8795/dokan_fuse/include/fuse.h#L100
    win_fields = list(fuse_operations._fields_[:-5]) + [
        ('win_get_attributes', ctypes.CFUNCTYPE(
            ctypes.c_uint, ctypes.c_char_p)),
        ('win_set_attributes', ctypes.CFUNCTYPE(
            ctypes.c_int, ctypes.c_char_p, ctypes.c_uint)),
        ('win_set_times', ctypes.CFUNCTYPE(
            ctypes.c_int, ctypes.c_char_p, ctypes.POINTER(fuse_file_info),
            ctypes.POINTER(c_utimbuf), ctypes.POINTER(c_utimbuf), ctypes.POINTER(c_utimbuf)))
    ]
    class extended_fuse_operations(ctypes.Structure):
        _fields_ = win_fields if is_windows() else linux_fields
    # Replace the struct fusepy will instantiate for the operations table.
    fuse.fuse_operations = extended_fuse_operations
    # Python-side dispatchers mirroring fusepy's internal calling convention:
    # `self` is the FUSE instance, `self.operations` routes to PipeFS.
    def fallocate(self, path, mode, offset, length, fip):
        fh = fip.contents if self.raw_fi else fip.contents.fh
        return self.operations('fallocate', path.decode(self.encoding), mode, offset, length, fh)
    def win_get_attributes(self, path):
        return self.operations('win_get_attributes', path.decode(self.encoding))
    def win_set_attributes(self, path, attrs, fip):
        fh = fip.contents if self.raw_fi else fip.contents.fh
        return self.operations('win_set_attributes', path.decode(self.encoding), attrs, fh)
    def win_set_times(self, path, fip, creation_time, last_access_time, last_write_time):
        fh = fip.contents if self.raw_fi else fip.contents.fh
        return self.operations('win_set_times', path.decode(self.encoding),
                               creation_time, last_access_time, last_write_time, fh)
    for operation in [fallocate, win_get_attributes, win_set_attributes, win_set_times]:
        setattr(FUSE, operation.__name__, operation)
def parse_mount_options(options_string):
    """Parses a comma-separated FUSE mount options string into a dict.

    Each comma-separated token is either a bare flag (mapped to True) or a
    ``key=value`` pair. Only the FIRST '=' separates key from value, so
    values may themselves contain '=' characters — the previous
    ``split("=")`` silently truncated such values.

    Args:
        options_string: e.g. "ro,uid=1000" — may be None or empty.

    Returns:
        Dict mapping option names to their string values, or True for flags.
    """
    options = {}
    if not options_string:
        return options
    for option in options_string.split(","):
        option_string = option.strip()
        # Split on the first '=' only; no '=' at all means a boolean flag.
        key, sep, value = option_string.partition("=")
        options[key] = value if sep else True
    return options
if __name__ == '__main__':
    # Command-line interface: exactly one of --webdav / --bucket selects the
    # backing storage; the rest tunes buffering, caching and logging.
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--mountpoint", type=str, required=True, help="Mount folder")
    parser.add_argument("-w", "--webdav", type=str, required=False, help="Webdav link")
    parser.add_argument("-b", "--bucket", type=str, required=False, help="Bucket name")
    parser.add_argument("-rb", "--read-buffer-size", type=int, required=False, default=40 * MB,
                        help="Read buffer size for a single file")
    parser.add_argument("--read-ahead-min-size", type=int, required=False, default=1 * MB,
                        help="Min amount of bytes that will be read on each read ahead call. "
                             "Can be configured via CP_PIPE_FUSE_READ_AHEAD_MIN_SIZE environment variable.")
    parser.add_argument("--read-ahead-max-size", type=int, required=False, default=20 * MB,
                        help="Max amount of bytes that will be read on each read ahead call. "
                             "Can be configured via CP_PIPE_FUSE_READ_AHEAD_MAX_SIZE environment variable.")
    parser.add_argument("--read-ahead-size-multiplier", type=int, required=False, default=2,
                        help="Sequential read ahead size multiplier. "
                             "Can be configured via CP_PIPE_FUSE_READ_AHEAD_SIZE_MULTIPLIER environment variable.")
    parser.add_argument("-wb", "--write-buffer-size", type=int, required=False, default=512 * MB,
                        help="Write buffer size for a single file")
    parser.add_argument("-r", "--trunc-buffer-size", type=int, required=False, default=512 * MB,
                        help="Truncating buffer size for a single file")
    parser.add_argument("-c", "--chunk-size", type=int, required=False, default=10 * MB,
                        help="Multipart upload chunk size. Can be also specified via "
                             "CP_PIPE_FUSE_CHUNK_SIZE environment variable.")
    parser.add_argument("-t", "--cache-ttl", type=int, required=False, default=60,
                        help="Listing cache time to live, seconds")
    parser.add_argument("-s", "--cache-size", type=int, required=False, default=100,
                        help="Number of simultaneous listing caches")
    parser.add_argument("-m", "--mode", type=str, required=False, default="700",
                        help="Default mode for files")
    parser.add_argument("-o", "--options", type=str, required=False,
                        help="String with mount options supported by FUSE")
    parser.add_argument("-l", "--logging-level", type=str, required=False, default=_default_logging_level,
                        help="Logging level.")
    parser.add_argument("-th", "--threads", action='store_true', help="Enables multithreading.")
    parser.add_argument("-d", "--monitoring-delay", type=int, required=False, default=600,
                        help="Delay between path lock monitoring cycles.")
    args = parser.parse_args()
    # Argument validation — parser.error() exits the process.
    if not args.webdav and not args.bucket:
        parser.error('Either --webdav or --bucket parameter should be specified.')
    if args.bucket and (args.chunk_size < 5 * MB or args.chunk_size > 5 * GB):
        parser.error('Chunk size can vary from 5 MB to 5 GB due to AWS S3 multipart upload limitations.')
    if args.logging_level not in _allowed_logging_levels:
        parser.error('Only the following logging level are allowed: %s.' % _allowed_logging_levels_string)
    # Verbose logging implies recording of FS calls (see start()).
    recording = args.logging_level in [_info_logging_level, _debug_logging_level]
    logging.basicConfig(format='[%(levelname)s] %(asctime)s %(filename)s - %(message)s',
                        level=args.logging_level)
    logging.getLogger('botocore').setLevel(logging.ERROR)
    if is_frozen:
        logging.info('Frozen installation found. Bundled libfuse will be used.')
    else:
        logging.info('Packaged installation found. Either packaged or host libfuse will be used.')
    try:
        start(args.mountpoint, webdav=args.webdav, bucket=args.bucket,
              read_buffer_size=args.read_buffer_size,
              read_ahead_min_size=args.read_ahead_min_size, read_ahead_max_size=args.read_ahead_max_size,
              read_ahead_size_multiplier=args.read_ahead_size_multiplier,
              write_buffer_size=args.write_buffer_size, trunc_buffer_size=args.trunc_buffer_size,
              chunk_size=args.chunk_size,
              cache_ttl=args.cache_ttl, cache_size=args.cache_size,
              default_mode=args.mode, mount_options=parse_mount_options(args.options),
              threads=args.threads, monitoring_delay=args.monitoring_delay, recording=recording)
    except BaseException as e:
        # Catch BaseException so even KeyboardInterrupt/SystemExit are
        # logged before the non-zero exit.
        logging.error('Unhandled error: %s' % str(e))
        traceback.print_exc()
        sys.exit(1)
| 49.398734 | 118 | 0.688789 |
e94ff0c4137d63f5a99d8962bcde22add8a24066 | 931 | py | Python | streamlink_loop.py | WHTJEON/streamlink_loop | c3e6e3fdaa5feedde932a5417c806cc193432266 | [
"MIT"
] | null | null | null | streamlink_loop.py | WHTJEON/streamlink_loop | c3e6e3fdaa5feedde932a5417c806cc193432266 | [
"MIT"
] | null | null | null | streamlink_loop.py | WHTJEON/streamlink_loop | c3e6e3fdaa5feedde932a5417c806cc193432266 | [
"MIT"
] | null | null | null | import argparse
import os
import shlex
import shutil
# Command-line interface: -url (stream URL) and -cam (output name prefix)
# are mandatory; -max bounds the retry loop below (0 = retry forever).
# NOTE: parse_args() runs at import time, so importing this module requires
# the arguments to be present on the command line.
parser=argparse.ArgumentParser()
parser.add_argument('-url', required=True)
parser.add_argument('-cam', required=True)
parser.add_argument('-max', required=False,default=0)
args = parser.parse_args()
# Module-level configuration read by the functions below.
M3U8 = args.url  # stream (m3u8) URL handed to streamlink
CAM = args.cam  # prefix used for the recorded .ts file names
MAX_RETRIES = int(args.max)  # 0 means unlimited retries
def divider():
    """Print a horizontal rule one column narrower than the terminal."""
    width = int(shutil.get_terminal_size().columns) - 1
    print('-' * width)
def create_command(i):
    """Build the streamlink command line for recording attempt *i*.

    Stores the command in the module-level STREAMLINK_COMMAND (read by
    download_stream) and returns it as well.

    Args:
        i: 1-based attempt counter, embedded in the output file name.

    Returns:
        The full shell command string.
    """
    global STREAMLINK_COMMAND
    filename = "%s-%s.ts" % (CAM, i)
    # Quote both the output file name and the URL so spaces or shell
    # metacharacters in either cannot break (or inject into) the command
    # that download_stream() passes to os.system().
    STREAMLINK_COMMAND = "streamlink -o %s %s best" % (
        shlex.quote(filename), shlex.quote(M3U8))
    return STREAMLINK_COMMAND
def download_stream():
    # Run the most recently built command (set by create_command) in a
    # subshell, blocking until streamlink exits.
    # The Korean log line translates to: "Running command: %s".
    print("커맨드 실행중: %s"%STREAMLINK_COMMAND)
    os.system(STREAMLINK_COMMAND)
i = 1
while True:
    # Build the command for this attempt, then record until streamlink exits.
    create_command(i)
    divider()
    download_stream()
    print("Restarting..")
    i = i + 1
    # MAX_RETRIES == 0 means retry forever; otherwise stop once exceeded.
    if MAX_RETRIES != 0 and i > MAX_RETRIES:
        divider()
        print("Max Retries Limit Reached")
        break
5d9899a75decd07d4f480c56339b7bf4657d4d1f | 670 | py | Python | scripts/stf/filter.py | SFDO-Alliances/NPSP | 3711a3cf8e3124bc2d7e61644d6abecb4042004e | [
"BSD-3-Clause"
] | 413 | 2015-01-02T09:53:04.000Z | 2019-12-05T15:31:25.000Z | scripts/stf/filter.py | SFDO-Alliances/NPSP | 3711a3cf8e3124bc2d7e61644d6abecb4042004e | [
"BSD-3-Clause"
] | 2,471 | 2015-01-02T03:33:55.000Z | 2019-12-13T17:55:10.000Z | scripts/stf/filter.py | SFDO-Alliances/NPSP | 3711a3cf8e3124bc2d7e61644d6abecb4042004e | [
"BSD-3-Clause"
] | 296 | 2015-01-06T13:03:33.000Z | 2019-12-11T14:19:31.000Z | #!/usr/bin/env python
import json
from sys import argv, stdin, stdout, stderr
filter_filename = argv[1]
with open(filter_filename, 'r') as filter_file:
filters = set(json.load(filter_file))
lines_received = 0
lines_dropped = 0
lines_emitted = 0
for line in stdin:
lines_received += 1
parts = line.split("\t")
if parts[0].strip() in filters:
lines_dropped += 1
else:
lines_emitted += 1
stdout.write(line)
stderr.write("{0}: {1} lines received\n".format(argv[0], lines_received))
stderr.write("{0}: {1} lines dropped\n".format(argv[0], lines_dropped))
stderr.write("{0}: {1} lines emitted\n".format(argv[0], lines_emitted))
| 26.8 | 73 | 0.676119 |
bfbc73dc3f5a057851dae2aadae317a1339aff39 | 1,370 | py | Python | tests/test_union.py | saytosid/attrs-strict | ada0b355f119889ad80ee3860d15e06722cae6a6 | [
"Apache-2.0"
] | null | null | null | tests/test_union.py | saytosid/attrs-strict | ada0b355f119889ad80ee3860d15e06722cae6a6 | [
"Apache-2.0"
] | null | null | null | tests/test_union.py | saytosid/attrs-strict | ada0b355f119889ad80ee3860d15e06722cae6a6 | [
"Apache-2.0"
] | null | null | null | from typing import List, Union
import pytest
from attrs_strict import type_validator
try:
from unittest.mock import MagicMock
except ImportError:
from mock import Mock as MagicMock
@pytest.mark.parametrize(
    "element, type_, error_message",
    [
        (
            2.0,
            Union[int, str],
            "Value of foo 2.0 is not of type typing.Union[int, str]",
        ),
        (
            [1, 2, "p"],
            List[Union[None, int]],
            (
                "Value of foo p is not of type typing.Union[NoneType, int] "
                "in [1, 2, 'p']"
            ),
        ),
    ],
)
def test_union_when_type_is_not_specified_raises(element, type_, error_message):
    # Fake an attrs attribute carrying the annotated type under validation.
    attr = MagicMock()
    attr.name = "foo"
    attr.type = type_
    validate = type_validator()
    with pytest.raises(ValueError) as excinfo:
        validate(None, attr, element)
    # attrs-strict wraps the message in angle brackets in the exception repr.
    assert repr(excinfo.value) == "<{}>".format(error_message)
@pytest.mark.parametrize(
    "element, type_,",
    [
        (2.0, Union[int, float]),
        ([1, 2, None, 4, 5], List[Union[None, int]]),
        (None, Union[int, None]),
    ],
)
def test_union_not_raise_for_correct_values(element, type_):
    # A value matching one union member must validate silently.
    attr = MagicMock()
    attr.name = "foo"
    attr.type = type_
    type_validator()(None, attr, element)
39b243410f4f534b631d8419f414662a757f9645 | 407 | py | Python | 204_count-primes.py | yzhhome/leetcode_python | 0b1fe89b1ddd8a990066035f9bd596812fc89f59 | [
"MIT"
] | null | null | null | 204_count-primes.py | yzhhome/leetcode_python | 0b1fe89b1ddd8a990066035f9bd596812fc89f59 | [
"MIT"
] | null | null | null | 204_count-primes.py | yzhhome/leetcode_python | 0b1fe89b1ddd8a990066035f9bd596812fc89f59 | [
"MIT"
] | null | null | null | """
204. 计数质数(统计素数)
素数是只能被1和自身整除的数
https://leetcode-cn.com/problems/count-primes/
"""
def coutPrimes(n):
    """Count the primes strictly below n via the sieve of Eratosthenes."""
    if n <= 1:
        # No integers below 2, hence no primes to count.
        return 0
    flags = [1] * n
    for candidate in range(2, n):
        if flags[candidate]:
            # Cross out multiples starting at candidate**2 (smaller
            # multiples were already handled by smaller primes).
            for multiple in range(candidate * candidate, n, candidate):
                flags[multiple] = 0
    # flags[0] and flags[1] were never cleared, so subtract those two 1s.
    return sum(flags) - 2
if __name__ == '__main__':
    # Smoke test: primes below 10 are 2, 3, 5, 7 -> prints 4.
    print(coutPrimes(10))
57c01161f59d6787956dc21769abd7bc18c4446c | 9,424 | py | Python | test/functional/p2p_invalid_messages.py | mycplus/bitcoin | c54295c1a2068aab5ea99c0a40eb716e1194d5b6 | [
"MIT"
] | null | null | null | test/functional/p2p_invalid_messages.py | mycplus/bitcoin | c54295c1a2068aab5ea99c0a40eb716e1194d5b6 | [
"MIT"
] | null | null | null | test/functional/p2p_invalid_messages.py | mycplus/bitcoin | c54295c1a2068aab5ea99c0a40eb716e1194d5b6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid network messages."""
import asyncio
import struct
import sys
from test_framework import messages
from test_framework.mininode import (
NetworkThread,
P2PDataStore,
P2PInterface,
)
from test_framework.test_framework import BitcoinTestFramework
class msg_unrecognized:
    """A deliberately nonsensical P2P message type.

    Mirrors the shape of the real message classes in
    test_framework.messages so it can travel through the same machinery.
    """

    command = b'badmsg'

    def __init__(self, *, str_data):
        # Accept either str or bytes; normalize to bytes internally.
        self.str_data = str_data if isinstance(str_data, bytes) else str_data.encode()

    def serialize(self):
        return messages.ser_string(self.str_data)

    def __repr__(self):
        return "{}(data={})".format(self.command, self.str_data)
class InvalidMessagesTest(BitcoinTestFramework):
    """Functional test driving a regtest node with malformed P2P traffic."""
    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True
    def run_test(self):
        """
        . Test msg header
        0. Send a bunch of large (4MB) messages of an unrecognized type. Check to see
           that it isn't an effective DoS against the node.

        1. Send an oversized (4MB+) message and check that we're disconnected.

        2. Send a few messages with an incorrect data size in the header, ensure the
           messages are ignored.
        """
        # Header-level checks first: each helper opens and tears down its
        # own P2P connection against node 0.
        self.test_magic_bytes()
        self.test_checksum()
        self.test_size()
        self.test_command()
        self.test_large_inv()
        node = self.nodes[0]
        self.node = node
        node.add_p2p_connection(P2PDataStore())
        conn2 = node.add_p2p_connection(P2PDataStore())
        msg_limit = 4 * 1000 * 1000  # 4MB, per MAX_PROTOCOL_MESSAGE_LENGTH
        valid_data_limit = msg_limit - 5  # Account for the 4-byte length prefix
        #
        # 0.
        #
        # Send as large a message as is valid, ensure we aren't disconnected but
        # also can't exhaust resources.
        #
        msg_at_size = msg_unrecognized(str_data="b" * valid_data_limit)
        assert len(msg_at_size.serialize()) == msg_limit
        self.log.info("Sending a bunch of large, junk messages to test memory exhaustion. May take a bit...")
        # Run a bunch of times to test for memory exhaustion.
        for _ in range(80):
            node.p2p.send_message(msg_at_size)
        # Check that, even though the node is being hammered by nonsense from one
        # connection, it can still service other peers in a timely way.
        for _ in range(20):
            conn2.sync_with_ping(timeout=2)
        # Peer 1, despite serving up a bunch of nonsense, should still be connected.
        self.log.info("Waiting for node to drop junk messages.")
        node.p2p.sync_with_ping(timeout=400)
        assert node.p2p.is_connected
        #
        # 1.
        #
        # Send an oversized message, ensure we're disconnected.
        #
        # Under macOS this test is skipped due to an unexpected error code
        # returned from the closing socket which python/asyncio does not
        # yet know how to handle.
        #
        if sys.platform != 'darwin':
            msg_over_size = msg_unrecognized(str_data="b" * (valid_data_limit + 1))
            assert len(msg_over_size.serialize()) == (msg_limit + 1)
            # An unknown message type (or *any* message type) over
            # MAX_PROTOCOL_MESSAGE_LENGTH should result in a disconnect.
            node.p2p.send_message(msg_over_size)
            node.p2p.wait_for_disconnect(timeout=4)
            node.disconnect_p2ps()
            conn = node.add_p2p_connection(P2PDataStore())
            conn.wait_for_verack()
        else:
            self.log.info("Skipping test p2p_invalid_messages/1 (oversized message) under macOS")
        #
        # 2.
        #
        # Send messages with an incorrect data size in the header.
        #
        actual_size = 100
        msg = msg_unrecognized(str_data="b" * actual_size)
        # TODO: handle larger-than cases. I haven't been able to pin down what behavior to expect.
        for wrong_size in (2, 77, 78, 79):
            self.log.info("Sending a message with incorrect size of {}".format(wrong_size))
            # Unmodified message should submit okay.
            node.p2p.send_and_ping(msg)
            # A message lying about its data size results in a disconnect when the incorrect
            # data size is less than the actual size.
            #
            # TODO: why does behavior change at 78 bytes?
            #
            node.p2p.send_raw_message(self._tweak_msg_data_size(msg, wrong_size))
            # For some reason unknown to me, we sometimes have to push additional data to the
            # peer in order for it to realize a disconnect.
            try:
                node.p2p.send_message(messages.msg_ping(nonce=123123))
            except IOError:
                pass
            node.p2p.wait_for_disconnect(timeout=10)
            node.disconnect_p2ps()
            node.add_p2p_connection(P2PDataStore())
        # Node is still up.
        conn = node.add_p2p_connection(P2PDataStore())
    def test_magic_bytes(self):
        """Send a message with wrong network magic; expect a disconnect."""
        conn = self.nodes[0].add_p2p_connection(P2PDataStore())
        async def swap_magic_bytes():
            conn._on_data = lambda: None  # Need to ignore all incoming messages from now, since they come with "invalid" magic bytes
            conn.magic_bytes = b'\x00\x11\x22\x32'
        # Call .result() to block until the atomic swap is complete, otherwise
        # we might run into races later on
        asyncio.run_coroutine_threadsafe(swap_magic_bytes(), NetworkThread.network_event_loop).result()
        with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: INVALID MESSAGESTART ping']):
            conn.send_message(messages.msg_ping(nonce=0xff))
            conn.wait_for_disconnect(timeout=1)
        self.nodes[0].disconnect_p2ps()
    def test_checksum(self):
        """Corrupt the header checksum; the node should log and drop the msg."""
        conn = self.nodes[0].add_p2p_connection(P2PDataStore())
        with self.nodes[0].assert_debug_log(['CHECKSUM ERROR (badmsg, 2 bytes), expected 78df0a04 was ffffffff']):
            msg = conn.build_message(msg_unrecognized(str_data="d"))
            cut_len = (
                4 +  # magic
                12 +  # command
                4  # len
            )
            # modify checksum
            msg = msg[:cut_len] + b'\xff' * 4 + msg[cut_len + 4:]
            self.nodes[0].p2p.send_raw_message(msg)
            conn.sync_with_ping(timeout=1)
        self.nodes[0].disconnect_p2ps()
    def test_size(self):
        """Declare a payload size over MAX_SIZE; expect a disconnect."""
        conn = self.nodes[0].add_p2p_connection(P2PDataStore())
        with self.nodes[0].assert_debug_log(['']):
            msg = conn.build_message(msg_unrecognized(str_data="d"))
            cut_len = (
                4 +  # magic
                12  # command
            )
            # modify len to MAX_SIZE + 1
            msg = msg[:cut_len] + struct.pack("<I", 0x02000000 + 1) + msg[cut_len + 4:]
            self.nodes[0].p2p.send_raw_message(msg)
            conn.wait_for_disconnect(timeout=1)
        self.nodes[0].disconnect_p2ps()
    def test_command(self):
        """Mangle the command field; the node should report header errors."""
        conn = self.nodes[0].add_p2p_connection(P2PDataStore())
        with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: ERRORS IN HEADER']):
            msg = msg_unrecognized(str_data="d")
            msg.command = b'\xff' * 12
            msg = conn.build_message(msg)
            # Modify command
            msg = msg[:7] + b'\x00' + msg[7 + 1:]
            self.nodes[0].p2p.send_raw_message(msg)
            conn.sync_with_ping(timeout=1)
        self.nodes[0].disconnect_p2ps()
    def test_large_inv(self):
        """Oversized inv/getdata/headers messages earn misbehaving points."""
        conn = self.nodes[0].add_p2p_connection(P2PInterface())
        with self.nodes[0].assert_debug_log(['Misbehaving', 'peer=4 (0 -> 20): message inv size() = 50001']):
            msg = messages.msg_inv([messages.CInv(1, 1)] * 50001)
            conn.send_and_ping(msg)
        with self.nodes[0].assert_debug_log(['Misbehaving', 'peer=4 (20 -> 40): message getdata size() = 50001']):
            msg = messages.msg_getdata([messages.CInv(1, 1)] * 50001)
            conn.send_and_ping(msg)
        with self.nodes[0].assert_debug_log(['Misbehaving', 'peer=4 (40 -> 60): headers message size = 2001']):
            msg = messages.msg_headers([messages.CBlockHeader()] * 2001)
            conn.send_and_ping(msg)
        self.nodes[0].disconnect_p2ps()
    def _tweak_msg_data_size(self, message, wrong_size):
        """
        Return a raw message based on another message but with an incorrect data size in
        the message header.
        """
        raw_msg = self.node.p2p.build_message(message)
        bad_size_bytes = struct.pack("<I", wrong_size)
        num_header_bytes_before_size = 4 + 12
        # Replace the correct data size in the message with an incorrect one.
        raw_msg_with_wrong_size = (
            raw_msg[:num_header_bytes_before_size] +
            bad_size_bytes +
            raw_msg[(num_header_bytes_before_size + len(bad_size_bytes)):]
        )
        assert len(raw_msg) == len(raw_msg_with_wrong_size)
        return raw_msg_with_wrong_size
if __name__ == '__main__':
InvalidMessagesTest().main()
| 38.942149 | 133 | 0.624257 |
d9d61c7f207be432db90a42d62c55889e873a60c | 15,477 | py | Python | trax/layers/tracer.py | koz4k2/trax | 548f671fa3804cb86154ac504fb0c6c4269b42c7 | [
"Apache-2.0"
] | 2 | 2020-02-05T09:27:29.000Z | 2020-02-05T09:27:49.000Z | trax/layers/tracer.py | koz4k2/trax | 548f671fa3804cb86154ac504fb0c6c4269b42c7 | [
"Apache-2.0"
] | null | null | null | trax/layers/tracer.py | koz4k2/trax | 548f671fa3804cb86154ac504fb0c6c4269b42c7 | [
"Apache-2.0"
] | 1 | 2021-07-08T16:35:30.000Z | 2021-07-08T16:35:30.000Z | # coding=utf-8
# Copyright 2019 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple tracer to transform function specifications into Layers.
Instead of having to figure out complicated combinator expressions, one can
simply write symbolic applications of layer objects to variables in a simple
python syntax and have it traced and 'compiled' into a layer object built from
simple combinators.
The DSL supports:
- making input tuples of variables
- applying layer objects to single variables or variable tuples
- unpacking the result of layer object application
Further documentation is in the 'symbolic' docstring.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import inspect
import six
from trax.layers import base
from trax.layers import combinators as cb
# Trace Construction
# -----------------------------------------------------------------------------
# Representation for expressions lyr(*args) and val[idx]
# ApplyExpr: layer `lyr` applied to a tuple of argument expressions `args`.
# IndexExpr: element `idx` selected from a multi-output expression `val`.
ApplyExpr = collections.namedtuple('ApplyExpr', ['lyr', 'args'])
IndexExpr = collections.namedtuple('IndexExpr', ['idx', 'val'])
# Representation for equations dst = lyr(*src) and dst = src[idx]
ApplyEqn = collections.namedtuple('ApplyEqn', ['lyr', 'src', 'dst'])
IndexEqn = collections.namedtuple('IndexEqn', ['idx', 'src', 'dst'])
class Tracer(object):
  """Symbolic placeholder flowing through a layer trace.

  Wraps an expression-tree node (`expr`) plus, when the node is the output
  of a layer application, the number of outputs `n` so the result can be
  unpacked symbolically via iteration.
  """

  def __init__(self, expr, n=0):
    self.expr = expr
    self.n = n

  def __repr__(self):
    return str(self.expr)

  def __iter__(self):
    """Symbolically unpack an n-output value into n index sub-tracers."""
    if not self.n:
      raise ValueError('Tracer: only the output '
                       'of Layer applications can be unpacked.')
    for index in range(self.n):
      yield Tracer(IndexExpr(index, self.expr))
def apply_to_tracer(self, other):
  """Records base.Layer operating on Tracers or tuples of Tracers.

  Bound below as `base.Layer.__matmul__` / `__lshift__`, so `self` is the
  layer object being applied and `other` the traced operand(s).

  Args:
    self: the base.Layer instance being applied.
    other: a Tracer, or a tuple/list of Tracers, supplying the layer inputs.

  Returns:
    A Tracer wrapping an ApplyExpr node, with output arity `self.n_out`.

  Raises:
    ValueError: if `other` is not a Tracer (or tuple/list thereof), or the
      number of supplied inputs does not match `self.n_in`.
  """
  if isinstance(other, Tracer):
    args = (other.expr,)
  elif (isinstance(other, (tuple, list)) and
        all(isinstance(x, Tracer) for x in other)):
    # Generator form: no need to materialize an intermediate list for all().
    args = tuple(elm.expr for elm in other)
  else:
    raise ValueError('Layers can only apply to tracers and '
                     'tuples of tracers during a symbolic trace.')
  if len(args) != self.n_in:
    raise ValueError('Tracer: Layer takes %d inputs,'
                     ' got %d.' % (self.n_in, len(args)))
  return Tracer(ApplyExpr(self, args), self.n_out)
# Bind this application function to the matmul '@' operator of base.Layer:
# (monkey-patching lets traced code write `layer @ tracer`).
if six.PY3:
  base.Layer.__matmul__ = apply_to_tracer
# For Py2 compatibility, also bind this application function to the
# lshift '<<' operator of base.Layer:
base.Layer.__lshift__ = apply_to_tracer
# Traced expressions to SSA equation form
# -----------------------------------------------------------------------------
def traces_to_eqns(traces):
  r"""Combines multiple traces into a set of primitive equations.

  If we trace a function like the following:

    def fun(a, b, c):
      d = L1 @ (a, b)
      e, f = L2 @ (c, d)
      return d, e

  our two output traces hold expression trees rooted at `d` and `e`.
  We break these expressions up node by node into simple equations
  by assigning the same variable names to hash-identical subtrees:

    eqns: var1 = L1(a, b),  var2 = L2(c, var1),  var3 = var2[0]
    outputs: var1, var3

  Note: a subtree containing a layer 'L' is only identical to another
  subtree if it contains the _same_ python object 'L'.

  Args:
    traces: a set of Tracers holding expression traces.

  Returns:
    A set of deduplicated SSA style assignments from layer applications and
    tuple indexing.
  """
  traces = traces if isinstance(traces, (tuple, list)) else (traces,)
  symboltable = {}  # map from expression hashes to unique symbol names
  eqns = []  # list of trace-derived equations

  def getsymbol(expr):
    """Assign unique ids to expression subtrees."""
    # Unique input atoms are represented by simple strings.
    if isinstance(expr, str):
      return expr
    # Identical expression trees are identified by their hash; namedtuples
    # hash structurally, so shared subtrees get the same symbol.
    h = hash(expr)
    if h in symboltable:
      return symboltable[h]
    newsym = 'var{}'.format(len(symboltable))
    symboltable[h] = newsym
    return newsym

  # Collect output symbols, which are the roots of each traced expression tree.
  output_symbols = tuple(getsymbol(trace.expr) for trace in traces)

  # Recursively collect equations from nodes of the traced expression trees
  # rooted in each output variable trace.
  def node_to_eqn(expr):
    """Transform expression graph nodes into simple equations."""
    newsym = getsymbol(expr)
    if isinstance(expr, IndexExpr):
      eqns.append(IndexEqn(expr.idx, node_to_eqn(expr.val), newsym))
    elif isinstance(expr, ApplyExpr):
      eqns.append(
          ApplyEqn(expr.lyr, tuple(map(node_to_eqn, expr.args)), (newsym,)))
    return newsym

  for trace in traces:
    node_to_eqn(trace.expr)

  # Remove duplicate equations, taking care to otherwise
  # preserve original ordering for deterministic results.
  # TODO(levskaya): replace with normal dict once Py2 is EOL.
  eqns = list(collections.OrderedDict.fromkeys(tuple(eqns)))
  return eqns, output_symbols
# Symbolic Simplifications
# -----------------------------------------------------------------------------
def merge_output_tuples(eqns):
  """Combine all indexing eqns and rewrite apply eqns to use output tuples.

  Args:
    eqns: primitive equations representing separate output tuple members:
      var2 = var4[1]
      var1 = var4[0]
      var4 = layerA(var5, var6)

  Returns:
    simplified set of applications mapping inputs tuples to output tuples:
      var1, var2 = layerA(var5, var6)
  """
  # Gather all seen outputs of tuple variables, associate each
  # with the symbol for their source tuple and their tuple index.
  idx_eqns = [e for e in eqns if isinstance(e, IndexEqn)]
  output_tuples = dict([(eqn.src, {}) for eqn in idx_eqns])
  for eqn in idx_eqns:
    output_tuples[eqn.src][eqn.idx] = eqn.dst
  # Rewrite layer applications in terms of the collected output tuples.
  apply_eqns = [e for e in eqns if isinstance(e, ApplyEqn)]
  for i, eqn in enumerate(apply_eqns):
    # each application eqn only has a single output symbol at this stage,
    # if it's a symbol corresponding to a tuple, replace it by the set
    # of observed tuple outputs.
    if eqn.dst[0] in output_tuples:
      mapped_outputs = output_tuples[eqn.dst[0]]
      # A layer can have at most n_out indexed outputs.
      assert len(mapped_outputs) <= eqn.lyr.n_out
      # Placeholder names '_0', '_1', ... stand in for outputs the traced
      # function never indexed.
      out_vars = ['_%d'%j for j in range(eqn.lyr.n_out)]
      for idx, var in mapped_outputs.items():
        out_vars[idx] = var
      apply_eqns[i] = ApplyEqn(eqn.lyr, eqn.src, tuple(out_vars))
  return apply_eqns
def toposort(graph, start):
  """Standard topological sort of graph from start nodes.

  Vertices are represented by integer ids.

  Args:
    graph: graph represented by dictionary keyed by vertex id,
      each value being a list of the connected vertex ids.
    start: list of starting vertices' ids for initializing
      topological sort.  Not modified.

  Returns:
    list of vertex ids giving a topological sort of vertices.
  """
  seen, stack, order = set(), [], []
  # Work on a copy: the previous implementation popped from `start` in
  # place, emptying the caller's list as a side effect.
  q = list(start)
  while q:
    v = q.pop()
    if v not in seen:
      seen.add(v)
      q.extend(graph[v])
      # Flush finished vertices: while `v` is not a child of the vertex on
      # top of the stack, that vertex's subtree is fully processed.
      while stack and v not in graph[stack[-1]]:
        order.append(stack.pop())
      stack.append(v)
  return stack + order[::-1]
def evaluation_order_sort(eqns, outputs):
  """Sort eqns into evaluation order by topological sort.

  Args:
    eqns: list of ApplyEqns derived from dataflow traces.
    outputs: list of strings representing output symbols.

  Returns:
    list of ApplyEqns sorted into an evaluation order respecting
    dependencies among variables.
  """
  # Equation i depends on equation j whenever j produces a symbol that
  # i consumes.
  deps = {}
  for i, consumer in enumerate(eqns):
    needed = set(consumer.src)
    deps[i] = [j for j, producer in enumerate(eqns)
               if needed.intersection(producer.dst)]
  # Starting vertices are the equations emitting requested output symbols.
  wanted = set(outputs)
  sinks = [i for i, eqn in enumerate(eqns) if wanted.intersection(eqn.dst)]
  # A topological sort from the outputs, reversed, is an evaluation order.
  ordering = toposort(deps, sinks)
  return [eqns[i] for i in reversed(ordering)]
# Layer object creation
# -----------------------------------------------------------------------------
def recombine(eqns, inputs, outputs):
  """Implement derived equations via layer-applications and combinators.

  Args:
    eqns: list of ApplyEqns derived from dataflow traces, already in
      evaluation order.
    inputs: list of strings representing input symbols
    outputs: list of strings representing output symbols

  Returns:
    Trax layer object that implements the given dataflow on provided layers.
  """
  stack = tuple(inputs)  # models the data stack
  layers = []  # output trax layers
  # Keep track of what variables are still needed after each
  # layer application so we can discard unnecessary variables
  # from the data stack.  keepsets[i] is the live-set just before eqn i,
  # built by a reverse sweep over the equations.
  keepsets = [set(outputs)]
  for e in reversed(eqns):
    keepsets.append(keepsets[-1].union(e.src))
  keepsets = list(reversed(keepsets[:-1]))
  # For each layer application, rearrange the data stack to supply
  # its inputs, copying arguments needed later on.
  for eqn, keep in zip(eqns, keepsets):
    # Variables still live after this application, in stack order.
    remainder = tuple(s for s in stack if s in keep)
    # only insert data-routing layer if needed:
    if stack != eqn.src + remainder:
      select_indices = [stack.index(var) for var in eqn.src + remainder]
      layers.append(cb.Select(select_indices, len(stack)))
    # stack now equals eqn.src + remainder
    layers.append(eqn.lyr)
    stack = eqn.dst + remainder
  # Finally, if needed, select out the final outputs from the data stack.
  if stack != tuple(outputs):
    layers.append(cb.Select([stack.index(var) for var in outputs], len(stack)))
  return cb.Serial(*layers)
def split_signature_parameters(fn):
  """Extract a function's positional and keyword arguments, ignoring varargs.

  Args:
    fn: a function

  Returns:
    A tuple of: a list of no-default positional arguments
    and a dict of the kwargs with provided defaults.
  """
  if six.PY3:
    # Public alias of the "no default provided" sentinel (previously the
    # protected inspect._empty was used).  Compare with `is`, since a
    # default value overriding __eq__ (e.g. a numpy array) would make
    # `==` comparisons misbehave.
    empty = inspect.Parameter.empty
    positional_kinds = {inspect.Parameter.POSITIONAL_ONLY,
                        inspect.Parameter.POSITIONAL_OR_KEYWORD}
    keyword_kinds = {inspect.Parameter.KEYWORD_ONLY,
                     inspect.Parameter.POSITIONAL_OR_KEYWORD}
    positionals, kwargs = [], {}
    for pname, pvar in inspect.signature(fn).parameters.items():
      if pvar.default is empty and pvar.kind in positional_kinds:
        positionals.append(pname)
      elif pvar.default is not empty and pvar.kind in keyword_kinds:
        kwargs[pname] = pvar.default
    return positionals, kwargs
  else:
    # Python 2 fallback: getargspec has no parameter-kind information, so
    # split the argument list by the number of trailing defaults.
    argspec = inspect.getargspec(fn)
    n_defaults = len(argspec.defaults) if argspec.defaults else 0
    n_args = len(argspec.args) - n_defaults
    positionals = argspec.args[:n_args]
    kwargs = {}
    if argspec.defaults:
      kwargs = dict(zip(argspec.args[n_args:], argspec.defaults))
    return positionals, kwargs
# The exported, user-facing API call.
# -----------------------------------------------------------------------------
def symbolic(fn):
  """Decorator to trace and combine layers using natural python notation.

  Instead of having to figure out complicated combinator expressions, one can
  simply write symbolic applications of layer objects to variables in a simple
  python syntax and have it traced and 'compiled' into low-level combinators.

  This decorator takes a simple function-based DSL description of layer
  combinations and produces a layer construction function that can optionally
  take keyword arguments to override configuration variables.

  The DSL supports:
   - applying layer objects to single variables or variable tuples
   - unpacking the result of layer object application
   - layer objects can also be created inside and used

    @tl.symbolic
    def new_trax_layer(a, b, c, config_var=True):
      d, e = layer_objectA @ (a, b)
      if config_var:
        f = layer_objectB @ c
      else:
        other_layer = some_other_layer_constructor()
        f = other_layer @ c
      g = tl.Serial(layer_objectC, layer_objectD) @ (d, e, f, g)
      return g, f, a

  NOTE: the functions provided can have two kinds of arguments:
   - positional: these name variables that will flow into the layer
   - keyword arguments: these are -configuration- variables that will
     not be traced, but can be given as kwargs to the layer constructor
     function that this decorator produces.

  The above creates a trax layer constructor that takes a single keyword
  argument `config_var` producing a trax layer that takes three array
  arguments and returns three arrays, e.g. we can call it like:

    layer = new_trax_layer()  # config_var = True
  or:
    tl.Serial(tl.Dense, new_trax_layer(config_var=False))

  Note: for python2 compatibility, the '<<' operator can be used in
  place of '@' as:  d, e = layer_objectA << (a, b)

  Args:
    fn: any python function following the above tracing conventions for
      describing dataflow between trax layers.

  Returns:
    Trax layer object implementing the dataflow between the trax layers
    used in the provided function.
  """
  fn_args, fn_kwargs = split_signature_parameters(fn)
  n_args = len(fn_args)
  if n_args == 0:
    raise ValueError('Must have named positional arguments to trace.')

  def traced_layer_constructor(*args, **kwargs):
    """Constructs trax layer."""
    # Check and handle arguments.
    if args:
      raise ValueError('Layer constructor takes no positional arguments.')
    extra_kwargs = set(kwargs).difference(set(fn_kwargs))
    if extra_kwargs:
      raise ValueError('Unknown layer constructor parameters: '
                       '%s' % extra_kwargs)
    # Merge overrides into a fresh dict.  The previous implementation did
    # `fn_kwargs.update(kwargs)`, mutating the closed-over defaults so that
    # kwargs from one constructor call leaked into all subsequent calls of
    # the same decorated function.
    call_kwargs = dict(fn_kwargs, **kwargs)
    traced_fn = functools.partial(fn, **call_kwargs)
    # Trace through positional arguments.
    tracers = [Tracer('in{}'.format(i)) for i in range(n_args)]
    returned_tracers = traced_fn(*tracers)
    # Transform traces back into ordered, simplified equations.
    inputs = tuple('in{}'.format(i) for i in range(n_args))
    eqns, outputs = traces_to_eqns(returned_tracers)
    eqns = merge_output_tuples(eqns)
    eqns = evaluation_order_sort(eqns, outputs)
    # Compose the traced layer DAG with combinators.
    return recombine(eqns, inputs, outputs)

  return traced_layer_constructor
| 37.115108 | 110 | 0.6845 |
a11f1857d6a7a4044dcf266ed561f3c1f39ef179 | 2,867 | py | Python | open_spiel/python/examples/treeviz_example.py | alexminnaar/open_spiel | c17a390f8a007ccc309f76cb0cfa29f06dc5d2c9 | [
"Apache-2.0"
] | 2 | 2020-09-05T07:17:08.000Z | 2021-05-02T21:10:28.000Z | open_spiel/python/examples/treeviz_example.py | alexminnaar/open_spiel | c17a390f8a007ccc309f76cb0cfa29f06dc5d2c9 | [
"Apache-2.0"
] | 1 | 2020-03-11T14:56:18.000Z | 2020-03-12T15:42:07.000Z | open_spiel/python/examples/treeviz_example.py | alexminnaar/open_spiel | c17a390f8a007ccc309f76cb0cfa29f06dc5d2c9 | [
"Apache-2.0"
] | 1 | 2020-06-02T17:52:48.000Z | 2020-06-02T17:52:48.000Z | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Game tree visualization example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import pyspiel
from open_spiel.python.visualizations import treeviz
FLAGS = flags.FLAGS

# Command-line configuration for game selection, output file and rendering.
flags.DEFINE_string("game", "kuhn_poker", "Name of the game")
flags.DEFINE_string("out", "/tmp/gametree.png", "Name of output file, e.g., "
                    "[*.png|*.pdf].")
flags.DEFINE_enum("prog", "dot", ["dot", "neato", "circo"], "Graphviz layout.")
# Help-string typos fixed: "Wether" -> "Whether".
flags.DEFINE_boolean("group_infosets", False, "Whether to group infosets.")
flags.DEFINE_boolean("group_terminal", False,
                     "Whether to group terminal nodes.")
flags.DEFINE_boolean("verbose", False, "Whether to print verbose output.")
def _zero_sum_node_decorator(state):
  """Node decorator that labels terminals with the first player's return.

  In a two-player zero-sum game the second player's return is redundant,
  so terminal nodes show only player 0's integer payoff.
  """
  # Start from the library defaults, then override the terminal label.
  node_attrs = treeviz.default_node_decorator(state)
  if state.is_terminal():
    first_player_return = int(state.returns()[0])
    node_attrs["label"] = str(first_player_return)
  return node_attrs
def main(argv):
  """Loads FLAGS.game, renders its game tree and writes it to FLAGS.out."""
  del argv  # Unused.
  game = pyspiel.load_game(FLAGS.game)
  game_type = game.get_type()

  if game_type.dynamics == pyspiel.GameType.Dynamics.SIMULTANEOUS:
    # logging.warn is a deprecated alias of logging.warning.
    logging.warning(
        "%s is not turn-based. Trying to reload game as turn-based.",
        FLAGS.game)
    game = pyspiel.load_game_as_turn_based(FLAGS.game)
    game_type = game.get_type()

  if game_type.dynamics != pyspiel.GameType.Dynamics.SEQUENTIAL:
    raise ValueError("Game must be sequential, not {}".format(
        game_type.dynamics))

  if (game_type.utility == pyspiel.GameType.Utility.ZERO_SUM and
      game.num_players() == 2):
    logging.info("Game is zero-sum: only showing first-player's returns.")
    gametree = treeviz.GameTree(
        game,
        node_decorator=_zero_sum_node_decorator,
        group_infosets=FLAGS.group_infosets,
        group_terminal=FLAGS.group_terminal)
  else:
    gametree = treeviz.GameTree(game)  # use default decorators

  if FLAGS.verbose:
    logging.info("Game tree:\n%s", gametree.to_string())

  gametree.draw(FLAGS.out, prog=FLAGS.prog)
  logging.info("Game tree saved to file: %s", FLAGS.out)


if __name__ == "__main__":
  app.run(main)
| 34.963415 | 80 | 0.726892 |
90459fe48378aa42348b5960d679d3f3a64fabd3 | 4,128 | bzl | Python | lib/write_source_files.bzl | alexeagle/bazel-lib | 02be4c5f47b476e686943f53ddd47a672a51a265 | [
"Apache-2.0"
] | 16 | 2021-11-08T15:03:49.000Z | 2022-03-23T19:38:32.000Z | lib/write_source_files.bzl | alexeagle/bazel-lib | 02be4c5f47b476e686943f53ddd47a672a51a265 | [
"Apache-2.0"
] | 29 | 2021-11-15T16:27:25.000Z | 2022-03-31T04:03:16.000Z | lib/write_source_files.bzl | alexeagle/bazel-lib | 02be4c5f47b476e686943f53ddd47a672a51a265 | [
"Apache-2.0"
] | 3 | 2021-12-09T00:27:42.000Z | 2022-03-30T20:14:00.000Z | "Public API for write_source_files"
load(
"//lib/private:write_source_file.bzl",
_write_source_file = "write_source_file",
)
def write_source_files(
        name,
        files = {},
        additional_update_targets = [],
        suggested_update_target = None,
        diff_test = True,
        **kwargs):
    """Write to one or more files or folders in the source tree, with up-to-date tests.

    For every entry in `files` this macro creates a runnable target that copies
    the generated content over its checked-in counterpart, plus (unless
    `diff_test` is disabled) a test that fails with update instructions when
    the source copy is missing or stale:

    ```starlark
    write_source_files(
        name = "write_foobar",
        files = {"foobar.json": "//some/generated:file"},
    )
    ```

    Update the sources with `bazel run //:write_foobar`.

    Targets can be chained into a tree via `additional_update_targets` so a
    single umbrella target updates everything, and `suggested_update_target`
    makes out-of-date test failures point users at that umbrella target.

    Args:
        name: Name of the executable target that creates or updates the source file
        files: A dict where the keys are source files or folders to write to and the values are labels pointing to the desired content.
            Sources must be within the same bazel package as the target.
        additional_update_targets: (Optional) List of other write_source_file or other executable updater targets to call in the same run
        suggested_update_target: (Optional) Label of the write_source_file target to suggest running when files are out of date
        diff_test: (Optional) Generate a test target to check that the source file(s) exist and are up to date with the generated files(s).
        **kwargs: Other common named parameters such as `tags` or `visibility`
    """
    is_single = len(files) == 1
    update_target_names = []

    for idx, pair in enumerate(files.items()):
        out_file, in_file = pair

        # A single entry takes the macro's own name; multiple entries get
        # indexed child targets driven by an umbrella runner below.
        target_name = name if is_single else "%s_%d" % (name, idx)
        update_target_names.append(target_name)

        # Runnable target writing the generated file back into the source tree.
        _write_source_file(
            name = target_name,
            in_file = in_file,
            out_file = out_file,
            additional_update_targets = additional_update_targets if is_single else [],
            suggested_update_target = suggested_update_target,
            diff_test = diff_test,
            **kwargs
        )

    if not is_single:
        # Umbrella runner fanning out to every per-file target; the per-file
        # targets already carry the diff tests, so none is generated here.
        _write_source_file(
            name = name,
            additional_update_targets = update_target_names + additional_update_targets,
            suggested_update_target = suggested_update_target,
            diff_test = False,
            **kwargs
        )
| 33.290323 | 187 | 0.642684 |
601d193b80deddaf0d9aa115a7b51ace0bec1425 | 2,202 | py | Python | one_fm/accommodation/report/bed_space_detailed_report/bed_space_detailed_report.py | f-9t9it/One-FM | aa0ea31eff453a6788dcb78251928559de818dff | [
"MIT"
] | null | null | null | one_fm/accommodation/report/bed_space_detailed_report/bed_space_detailed_report.py | f-9t9it/One-FM | aa0ea31eff453a6788dcb78251928559de818dff | [
"MIT"
] | null | null | null | one_fm/accommodation/report/bed_space_detailed_report/bed_space_detailed_report.py | f-9t9it/One-FM | aa0ea31eff453a6788dcb78251928559de818dff | [
"MIT"
] | 1 | 2021-08-14T22:49:47.000Z | 2021-08-14T22:49:47.000Z | # Copyright (c) 2013, omar jaber and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
	"""Report entry point: return (columns, rows) for the given filters."""
	return get_columns(), get_data(filters)
def get_columns():
	"""Return report column definitions as ``label:fieldtype:width`` strings."""
	column_specs = [
		("Code/Rerf", "Link/Accommodation", 100),
		("Accommodation Name", "Data", 180),
		("Gender", "Data", 70),
		("Rooms", "Data", 70),
		("Type A", "Data", 70),
		("Type B", "Data", 70),
		("Type C", "Data", 70),
		("Type D", "Data", 70),
		("Type E", "Data", 70),
		("Total Bed", "Data", 100),
		("Occupied Bed", "Data", 120),
		("Vacant Bed", "Data", 100),
	]
	# Labels go through frappe translation; type and width are appended as-is.
	return ["%s:%s:%s" % (_(label), fieldtype, width)
		for label, fieldtype, width in column_specs]
def get_data(filters):
	"""Build one report row per accommodation per gender.

	Args:
		filters: dict of extra Bed filters from the report UI (may be None).
			A copy is taken, so the caller's dict is never mutated — the
			previous implementation wrote accommodation/gender/disabled/status
			keys straight into it, leaking state back to the caller.

	Returns:
		List of rows matching the columns from get_columns().
	"""
	data = []
	acc_list = frappe.db.sql("""select * from `tabAccommodation`""", as_dict=1)
	for acc in acc_list:
		for gender in ["Male", "Female"]:
			# Base bed filters for this accommodation/gender (copy, don't mutate).
			bed_filters = dict(filters or {})
			bed_filters.update(
				{'accommodation': acc.name, 'gender': gender, 'disabled': 0})
			total_no_of_bed_space = frappe.db.count('Bed', bed_filters)
			totall_no_of_rooms = frappe.db.count('Accommodation Space',
				{'accommodation': acc.name, 'bed_space_available': 1, 'gender': gender})
			# One count per bed-space type; the type counts deliberately ignore
			# the extra UI filters, matching the original behaviour.
			type_counts = [
				frappe.db.count('Bed', {'accommodation': acc.name, 'gender': gender,
					'disabled': 0, 'bed_space_type': bed_type})
				for bed_type in ('A', 'B', 'C', 'D', 'E')
			]
			occupied_bed = frappe.db.count('Bed', dict(bed_filters, status='Occupied'))
			vaccant_bed = frappe.db.count('Bed', dict(bed_filters, status='Vacant'))
			row = [
				acc.name,
				acc.accommodation,
				gender,
				totall_no_of_rooms,
			] + type_counts + [
				total_no_of_bed_space,
				occupied_bed,
				vaccant_bed,
			]
			data.append(row)
	return data
| 33.363636 | 119 | 0.640327 |
b89f25466e7fa44826548c5c5484f08ec7c059f9 | 791 | py | Python | eventio/urls.py | gittiap/Eventio | 050b68e762f53e4812c8acee4f73ff7e937c319e | [
"MIT"
] | null | null | null | eventio/urls.py | gittiap/Eventio | 050b68e762f53e4812c8acee4f73ff7e937c319e | [
"MIT"
] | null | null | null | eventio/urls.py | gittiap/Eventio | 050b68e762f53e4812c8acee4f73ff7e937c319e | [
"MIT"
] | null | null | null | from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth.views import login, logout
from accounts.views import register_user
from events.views import index, event_detail, event_add, event_edit, event_delete
# URL routing table: admin site, event list/CRUD views, and auth views.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Event list and detail pages.
    url(r'^$', index, name='home'),
    url(r'^event/(?P<pk>\d+)/$', event_detail, name='event'),
    # Event create/delete/update.
    url(r'^event/add/$', event_add, name='event_add'),
    url(r'^event/(?P<pk>\d+)/delete/$', event_delete, name='event_delete'),
    url(r'^event/(?P<pk>\d+)/edit/$', event_edit, name='event_edit'),
    # Authentication (built-in login/logout plus custom registration).
    url(r'^login/$', login,
        {'template_name': 'accounts/login.html'}, name='login'),
    url(r'^logout/$', logout, name='logout'),
    url(r'^sign-up/$', register_user, name='signup'),
]
| 39.55 | 81 | 0.65866 |
3b22949e61c7298b38bdd7b5b5d80787cbe517e3 | 2,874 | py | Python | unification/more.py | james1293/unification | 99037f265df2e5af495611261ec5cacb707c1815 | [
"BSD-3-Clause"
] | 1 | 2020-05-05T18:44:22.000Z | 2020-05-05T18:44:22.000Z | unification/more.py | james1293/unification | 99037f265df2e5af495611261ec5cacb707c1815 | [
"BSD-3-Clause"
] | 3 | 2021-03-19T02:52:37.000Z | 2021-09-22T18:58:23.000Z | unification/more.py | james1293/unification | 99037f265df2e5af495611261ec5cacb707c1815 | [
"BSD-3-Clause"
] | null | null | null | from functools import partial
from .core import unify, reify
from .variable import var
from .dispatch import dispatch
def unifiable(cls):
    """ Register standard unify and reify operations on class

    This uses the type and __dict__ or __slots__ attributes to define the
    nature of the term

    See Also:
    >>> class A(object):
    ...     def __init__(self, a, b):
    ...         self.a = a
    ...         self.b = b
    >>> unifiable(A)
    <class 'unification.more.A'>
    >>> x = var('x')
    >>> a = A(1, 2)
    >>> b = A(1, x)
    >>> unify(a, b, {})
    {~x: 2}
    """
    # Register handlers on the package's multiple-dispatch functions:
    # (cls, cls, dict) for unification and (cls, dict) for reification.
    _unify.add((cls, cls, dict), unify_object)
    _reify.add((cls, dict), reify_object)

    return cls
#########
# Reify #
#########
def reify_object(o, s):
    """ Reify a Python object with a substitution

    >>> class Foo(object):
    ...     def __init__(self, a, b):
    ...         self.a = a
    ...         self.b = b
    ...     def __str__(self):
    ...         return "Foo(%s, %s)"%(str(self.a), str(self.b))
    >>> x = var('x')
    >>> f = Foo(1, x)
    >>> print(f)
    Foo(1, ~x)
    >>> print(reify_object(f, {x: 2}))
    Foo(1, 2)
    """
    # Slotted objects keep their state in __slots__, everything else in
    # __dict__; pick the matching helper and delegate.
    handler = (_reify_object_slots if hasattr(o, '__slots__')
               else _reify_object_dict)
    return handler(o, s)
def _reify_object_dict(o, s):
    """Reify a __dict__-backed object; return `o` itself when unchanged."""
    reified = reify(o.__dict__, s)
    if reified == o.__dict__:
        return o
    # Build a fresh instance without running __init__, then copy the state.
    fresh = object.__new__(type(o))
    fresh.__dict__.update(reified)
    return fresh
def _reify_object_slots(o, s):
    """Reify a __slots__-backed object; return `o` itself when unchanged."""
    old_values = [getattr(o, slot) for slot in o.__slots__]
    new_values = reify(old_values, s)
    if new_values == old_values:
        return o
    # Build a fresh instance without running __init__, then set each slot.
    fresh = object.__new__(type(o))
    for slot, value in zip(o.__slots__, new_values):
        setattr(fresh, slot, value)
    return fresh
@dispatch(slice, dict)
def _reify(o, s):
    """ Reify a Python ``slice`` object """
    # Reify start/stop/step through the substitution and rebuild the slice.
    return slice(*reify((o.start, o.stop, o.step), s))
#########
# Unify #
#########
def unify_object(u, v, s):
    """ Unify two Python objects

    Unifies their type and ``__dict__`` attributes

    >>> class Foo(object):
    ...     def __init__(self, a, b):
    ...         self.a = a
    ...         self.b = b
    ...     def __str__(self):
    ...         return "Foo(%s, %s)"%(str(self.a), str(self.b))
    >>> x = var('x')
    >>> f = Foo(1, x)
    >>> g = Foo(1, 2)
    >>> unify_object(f, g, {})
    {~x: 2}
    """
    # Objects of different concrete types never unify.
    if type(u) != type(v):
        return False
    if hasattr(u, '__slots__'):
        # Slotted objects: unify the attribute values slot by slot.
        return unify([getattr(u, slot) for slot in u.__slots__],
                     [getattr(v, slot) for slot in v.__slots__],
                     s)
    else:
        return unify(u.__dict__, v.__dict__, s)
@dispatch(slice, slice, dict)
def _unify(u, v, s):
    """ Unify a Python ``slice`` object """
    # Unify componentwise as (start, stop, step) triples.
    return unify((u.start, u.stop, u.step), (v.start, v.stop, v.step), s)
| 22.809524 | 73 | 0.522617 |
dd6eb5257c4c750d2e469194aab0d03b473cbcca | 14,735 | py | Python | model/resnet_v2.py | FangShancheng/conv-ensemble-str | 86029cb059a904ad3a6182e2f0855f0263e34d0b | [
"Apache-2.0"
] | 51 | 2018-04-02T13:10:03.000Z | 2021-11-22T13:24:21.000Z | model/resnet_v2.py | snny000/conv-ensemble-str | 86029cb059a904ad3a6182e2f0855f0263e34d0b | [
"Apache-2.0"
] | 5 | 2018-11-15T01:28:57.000Z | 2020-02-07T07:31:43.000Z | model/resnet_v2.py | snny000/conv-ensemble-str | 86029cb059a904ad3a6182e2f0855f0263e34d0b | [
"Apache-2.0"
] | 5 | 2018-10-29T06:59:52.000Z | 2020-06-23T07:02:02.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the preactivation form of Residual Networks.
Residual networks (ResNets) were originally proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
The full preactivation 'v2' ResNet variant implemented in this module was
introduced by:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The key difference of the full preactivation 'v2' variant compared to the
'v1' variant in [1] is the use of batch normalization before every weight layer.
Typical use:
from tensorflow.contrib.slim.python.slim.nets import
resnet_v2
ResNet-101 for image classification into 1000 classes:
# inputs has shape [batch, 224, 224, 3]
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
net, end_points = resnet_v2.resnet_v2_101(inputs, 1000, is_training=False)
ResNet-101 for semantic segmentation into 21 classes:
# inputs has shape [batch, 513, 513, 3]
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
net, end_points = resnet_v2.resnet_v2_101(inputs,
21,
is_training=False,
global_pool=False,
output_stride=16)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
resnet_arg_scope = resnet_utils.resnet_arg_scope
@add_arg_scope
def bottleneck(inputs,
depth,
depth_bottleneck,
stride,
rate=1,
outputs_collections=None,
scope=None):
"""Bottleneck residual unit variant with BN before convolutions.
This is the full preactivation residual unit variant proposed in [2]. See
Fig. 1(b) of [2] for its definition. Note that we use here the bottleneck
variant which has an extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
depth_bottleneck: The depth of the bottleneck layers.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the units output compared to its input.
rate: An integer, rate for atrous convolution.
outputs_collections: Collection to add the ResNet unit output.
scope: Optional variable_scope.
Returns:
The ResNet unit's output.
"""
with variable_scope.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
depth_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
preact = layers.batch_norm(
inputs, activation_fn=nn_ops.relu, scope='preact')
if depth == depth_in:
shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
else:
shortcut = layers_lib.conv2d(
preact,
depth, [1, 1],
stride=stride,
normalizer_fn=None,
activation_fn=None,
scope='shortcut')
residual = layers_lib.conv2d(
preact, depth_bottleneck, [1, 1], stride=1, scope='conv1')
residual = resnet_utils.conv2d_same(
residual, depth_bottleneck, 3, stride, rate=rate, scope='conv2')
residual = layers_lib.conv2d(
residual,
depth, [1, 1],
stride=1,
normalizer_fn=None,
activation_fn=None,
scope='conv3')
output = shortcut + residual
return utils.collect_named_outputs(outputs_collections, sc.name, output)
def resnet_v2(inputs,
              blocks,
              num_classes=None,
              is_training=None,
              global_pool=True,
              output_stride=None,
              include_root_block=True,
              reuse=None,
              scope=None):
  """Generator for v2 (preactivation) ResNet models.
  This function generates a family of ResNet v2 models. See the resnet_v2_*()
  methods for specific model instantiations, obtained by selecting different
  block instantiations that produce ResNets of various depths.
  Training for image classification on Imagenet is usually done with [224, 224]
  inputs, resulting in [7, 7] feature maps at the output of the last ResNet
  block for the ResNets defined in [1] that have nominal stride equal to 32.
  However, for dense prediction tasks we advise that one uses inputs with
  spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
  this case the feature maps at the ResNet output will have spatial shape
  [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
  and corners exactly aligned with the input image corners, which greatly
  facilitates alignment of the features to the image. Using as input [225, 225]
  images results in [8, 8] feature maps at the output of the last ResNet block.
  For dense prediction tasks, the ResNet needs to run in fully-convolutional
  (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
  have nominal stride equal to 32 and a good choice in FCN mode is to use
  output_stride=16 in order to increase the density of the computed features at
  small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
  Args:
    inputs: A tensor of size [batch, height_in, width_in, channels].
    blocks: A list of length equal to the number of ResNet blocks. Each element
      is a resnet_utils.Block object describing the units in the block.
    num_classes: Number of predicted classes for classification tasks. If None
      we return the features before the logit layer.
    is_training: whether is training or not. If None, the value inherited from
      the resnet_arg_scope is used. Specifying value None is deprecated.
    global_pool: If True, we perform global average pooling before computing the
      logits. Set to True for image classification, False for dense prediction.
    output_stride: If None, then the output will be computed at the nominal
      network stride. If output_stride is not None, it specifies the requested
      ratio of input to output spatial resolution.
    include_root_block: If True, include the initial convolution followed by
      max-pooling, if False excludes it. If excluded, `inputs` should be the
      results of an activation-less convolution.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.
  Returns:
    net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
      If global_pool is False, then height_out and width_out are reduced by a
      factor of output_stride compared to the respective height_in and width_in,
      else both height_out and width_out equal one. If num_classes is None, then
      net is the output of the last ResNet block, potentially after global
      average pooling. If num_classes is not None, net contains the pre-softmax
      activations.
    end_points: A dictionary from components of the network to the corresponding
      activation.
  Raises:
    ValueError: If the target output_stride is not valid.
  """
  with variable_scope.variable_scope(
      scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Route every conv / residual unit / stacked-block output into the
    # end-points collection so it can be recovered as a dict below.
    with arg_scope(
        [layers_lib.conv2d, bottleneck, resnet_utils.stack_blocks_dense],
        outputs_collections=end_points_collection):
      if is_training is not None:
        bn_scope = arg_scope([layers.batch_norm], is_training=is_training)
      else:
        # No-op scope: is_training is then inherited from the outer
        # resnet_arg_scope (deprecated usage, see docstring).
        bn_scope = arg_scope([])
      with bn_scope:
        net = inputs
        if include_root_block:
          if output_stride is not None:
            if output_stride % 4 != 0:
              raise ValueError('The output_stride needs to be a multiple of 4.')
            # The root block (conv1 stride 2 + pool1 stride 2) already
            # downsamples by 4, so the residual blocks only need to provide
            # output_stride / 4. Use floor division so the value stays an
            # int under Python 3 ("/" would produce a float); the modulo
            # check above guarantees the division is exact.
            output_stride //= 4
          # We do not include batch normalization or activation functions in
          # conv1 because the first ResNet unit will perform these. Cf.
          # Appendix of [2].
          with arg_scope(
              [layers_lib.conv2d], activation_fn=None, normalizer_fn=None):
            net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
          net = layers.max_pool2d(net, [3, 3], stride=2, scope='pool1')
        net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
        # This is needed because the pre-activation variant does not have batch
        # normalization or activation functions in the residual unit output. See
        # Appendix of [2].
        # NOTE(review): upstream tf-slim applies a ReLU here
        # (activation_fn=nn_ops.relu); this variant normalizes only --
        # confirm omitting the final activation is intentional.
        net = layers.batch_norm(
            net, activation_fn=None, scope='postnorm')
        if global_pool:
          # Global average pooling.
          net = math_ops.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
        if num_classes is not None:
          # Final linear 1x1 conv producing the pre-softmax class logits.
          net = layers_lib.conv2d(
              net,
              num_classes, [1, 1],
              activation_fn=None,
              normalizer_fn=None,
              scope='logits')
        # Convert end_points_collection into a dictionary of end_points.
        end_points = utils.convert_collection_to_dict(end_points_collection)
        if num_classes is not None:
          end_points['predictions'] = layers.softmax(net, scope='predictions')
        return net, end_points
# Nominal input resolution the stride-32 networks were designed around.
resnet_v2.default_image_size = 224
def resnet_v2_block(scope, base_depth, num_units, stride):
  """Builds the description of one resnet_v2 bottleneck block.
  Args:
    scope: The scope of the block.
    base_depth: The depth of the bottleneck layer for each unit.
    num_units: The number of units in the block.
    stride: The stride of the block, implemented as a stride in the last
      unit; every preceding unit uses stride 1.
  Returns:
    A resnet_utils.Block describing a resnet_v2 bottleneck block.
  """
  regular_unit = {
      'depth': base_depth * 4,
      'depth_bottleneck': base_depth,
      'stride': 1
  }
  # Only the final unit carries the block's stride.
  final_unit = dict(regular_unit, stride=stride)
  unit_args = [regular_unit] * (num_units - 1) + [final_unit]
  return resnet_utils.Block(scope, bottleneck, unit_args)
def resnet_v2_50(inputs,
                 num_classes=None,
                 is_training=None,
                 global_pool=True,
                 output_stride=None,
                 reuse=None,
                 scope='resnet_v2_50'):
  """50-layer preactivation ResNet (model of [1]).
  See resnet_v2() for the argument and return-value descriptions.
  """
  # (base_depth, num_units, stride) for block1..block4 of ResNet-50.
  specs = [(64, 3, 2), (128, 4, 2), (256, 6, 2), (512, 3, 1)]
  blocks = [
      resnet_v2_block('block%d' % (idx + 1), base_depth=d, num_units=n,
                      stride=s)
      for idx, (d, n, s) in enumerate(specs)
  ]
  return resnet_v2(
      inputs,
      blocks,
      num_classes,
      is_training,
      global_pool,
      output_stride,
      include_root_block=True,
      reuse=reuse,
      scope=scope)
def resnet_v2_101(inputs,
                  num_classes=None,
                  global_pool=True,
                  is_training=None,
                  output_stride=None,
                  reuse=None,
                  scope='resnet_v2_101'):
  """101-layer preactivation ResNet (model of [1]).
  See resnet_v2() for the argument and return-value descriptions.
  NOTE: unlike the other resnet_v2_* wrappers, this signature declares
  global_pool before is_training; the order is preserved for compatibility
  with positional callers.
  """
  # (base_depth, num_units, stride) for block1..block4 of ResNet-101.
  specs = [(64, 3, 2), (128, 4, 2), (256, 23, 2), (512, 3, 1)]
  blocks = [
      resnet_v2_block('block%d' % (idx + 1), base_depth=d, num_units=n,
                      stride=s)
      for idx, (d, n, s) in enumerate(specs)
  ]
  return resnet_v2(
      inputs,
      blocks,
      num_classes,
      is_training,
      global_pool,
      output_stride,
      include_root_block=True,
      reuse=reuse,
      scope=scope)
def resnet_v2_152(inputs,
                  num_classes=None,
                  is_training=None,
                  global_pool=True,
                  output_stride=None,
                  reuse=None,
                  scope='resnet_v2_152'):
  """152-layer preactivation ResNet (model of [1]).
  See resnet_v2() for the argument and return-value descriptions.
  """
  # Per-block bottleneck depths, unit counts and strides for ResNet-152.
  depths = (64, 128, 256, 512)
  units = (3, 8, 36, 3)
  strides = (2, 2, 2, 1)
  blocks = []
  for idx, (d, n, s) in enumerate(zip(depths, units, strides)):
    blocks.append(
        resnet_v2_block('block%d' % (idx + 1), base_depth=d, num_units=n,
                        stride=s))
  return resnet_v2(
      inputs,
      blocks,
      num_classes,
      is_training,
      global_pool,
      output_stride,
      include_root_block=True,
      reuse=reuse,
      scope=scope)
def resnet_v2_200(inputs,
                  num_classes=None,
                  is_training=None,
                  global_pool=True,
                  output_stride=None,
                  reuse=None,
                  scope='resnet_v2_200'):
  """200-layer preactivation ResNet (model of [2]).
  See resnet_v2() for the argument and return-value descriptions.
  """
  blocks = [
      resnet_v2_block('block1', stride=2, num_units=3, base_depth=64),
      resnet_v2_block('block2', stride=2, num_units=24, base_depth=128),
      resnet_v2_block('block3', stride=2, num_units=36, base_depth=256),
      resnet_v2_block('block4', stride=1, num_units=3, base_depth=512),
  ]
  return resnet_v2(
      inputs,
      blocks,
      num_classes,
      is_training,
      global_pool,
      output_stride,
      include_root_block=True,
      reuse=reuse,
      scope=scope)
| 39.716981 | 80 | 0.669766 |
49177360a21809264a1a088f1f92017d27812e0a | 2,092 | py | Python | workspace/src/rqt_common_plugins/rqt_shell/src/rqt_shell/shell_widget.py | migarstka/barc | deacfd974f251693d74b273d58d22e9fead2354f | [
"MIT"
] | 1 | 2019-01-10T22:07:07.000Z | 2019-01-10T22:07:07.000Z | workspace/src/rqt_common_plugins/rqt_shell/src/rqt_shell/shell_widget.py | migarstka/barc | deacfd974f251693d74b273d58d22e9fead2354f | [
"MIT"
] | null | null | null | workspace/src/rqt_common_plugins/rqt_shell/src/rqt_shell/shell_widget.py | migarstka/barc | deacfd974f251693d74b273d58d22e9fead2354f | [
"MIT"
] | null | null | null | # Software License Agreement (BSD License)
#
# Copyright (c) 2012, Dorian Scholz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import rospkg
from python_qt_binding import loadUi
from python_qt_binding.QtGui import QWidget
import shell_text_edit
class ShellWidget(QWidget):
    """Main rqt_shell widget, built from the shell_widget.ui designer file."""

    def __init__(self, parent=None):
        """Load the .ui layout and register the custom ShellTextEdit class.

        :param parent: optional parent QWidget
        """
        super(ShellWidget, self).__init__(parent=parent)
        rospack = rospkg.RosPack()
        ui_file = os.path.join(
            rospack.get_path('rqt_shell'), 'resource', 'shell_widget.ui')
        custom_classes = {'ShellTextEdit': shell_text_edit.ShellTextEdit}
        loadUi(ui_file, self, custom_classes)
        self.setObjectName('ShellWidget')
d7cd8b96b5be5ee526d2445ee07271aa48237071 | 10,403 | py | Python | balff_createDataArray_sim.py | LBJ-Wade/BALFF | 2e53b562c347ad3875e4ebb49338c15e116b5f7b | [
"MIT"
] | 4 | 2016-04-29T17:46:36.000Z | 2019-01-07T16:52:03.000Z | balff_createDataArray_sim.py | LBJ-Wade/BALFF | 2e53b562c347ad3875e4ebb49338c15e116b5f7b | [
"MIT"
] | null | null | null | balff_createDataArray_sim.py | LBJ-Wade/BALFF | 2e53b562c347ad3875e4ebb49338c15e116b5f7b | [
"MIT"
] | 1 | 2016-04-29T17:46:48.000Z | 2016-04-29T17:46:48.000Z | #!/usr/bin/env python2.7
#+
#----------------------------
# NAME
#----------------------------
# balff_createDataArray_sim.py
#----------------------------
# PURPOSE/DESCRIPTION
#----------------------------
# Creating a simulated data array needed as input when creating a balff_mpd class object.
#
#----------------------------
# INPUTS:
#----------------------------
# probdist : Probability distribution to draw data from. Choices are:
# 'gamma' (NOTE: there is no cutoff on this distribution, i.e. integrals converge)
# 'schechter'
# 'normal'
# Ntotal : Number of objects to create data for. For probdist=Schechter sample goes down to
# Lmin = 1e-4 x 1e44 erg/s (corresponding to M=-10)
# Llim : Luminoisity limit to apply to objects resulting from detection
# probability cut (0.1e44 erg/s -> mAB ~ 29.1)
# kval : Shape parameter k of distribution (k>0) to simulate data for
# k = alpha(schecheter) + 1
# Lstar : Scale parameter Lstar of distribution (Lstar>0) units [1e44 erg/s] to simulate data for
#----------------------------
# OPTIONAL INPUTS:
#----------------------------
# --nochoice : set this keyword to ignore the choice to write file to disk (i.e. no interaction required)
# --verbose : Toggle verbosity
# --help : Printing help menu
#----------------------------
# EXAMPLES/USAGE
#----------------------------
# samples with full depth
# bash> ./balff_createDataArray_sim.py 'schechter' 10000 0.01 -0.9 0.5 --verbose
#
#----------------------------
# MODULES
#----------------------------
import argparse
import sys
import pdb
import balff_createDataArray as bcd
import numpy as np
#-------------------------------------------------------------------------------------------------------------
# Managing arguments with argparse (see http://docs.python.org/howto/argparse.html)
# Build the command-line interface: positional arguments configure the
# distribution to simulate; flags control interactivity and verbosity.
parser = argparse.ArgumentParser()
# ---- required arguments ---- :
parser.add_argument("probdist", type=str, help="Probability distribution to draw data from")
parser.add_argument("Ntotal", type=int, help="Total number of objects to draw (from Schechter down to L 1e-4 x 1e44 erg/s)")
parser.add_argument("Llim", type=float, help="luminosity limit to select objects down to.")
parser.add_argument("kval", type=float, help="Shape parameter k of distribution (k = alpha+1 > -1)")
parser.add_argument("Lstar", type=float, help="Scale parameters Lstar of distribution (Lstar > 0) units [1e44 erg/s]")
# ---- optional arguments ----
parser.add_argument("--nochoice", action="store_true", help="Set to ignore choice of saving output to disk")
parser.add_argument("-v", "--verbose", action="store_true", help="Print verbose comments")
args = parser.parse_args()
#-------------------------------------------------------------------------------------------------------------
def simulate_schechter_distribution(alpha, L_star, L_min, N):
    """Draw N luminosities from a Schechter distribution via rejection sampling.

    Proposals are drawn from a gamma distribution with shape alpha + 2 and
    scale L_star and accepted with probability L_min / L, which yields the
    Schechter form above the lower cutoff L_min. Based on the algorithm in
    http://www.math.leidenuniv.nl/~gill/teaching/astro/stanSchechter.pdf and
    adapted from https://gist.github.com/joezuntz/5056136.
    Intended for Schechter slopes with -1 < alpha + 1 < 0.

    Returns a numpy array of N draws, all strictly greater than L_min.
    """
    accepted_batches = []
    n_accepted = 0
    while n_accepted < N:
        # Propose from a gamma with shape alpha + 2 (i.e. k + 1).
        proposals = np.random.gamma(scale=L_star, shape=alpha + 2, size=N)
        # Discard proposals at or below the convergence cutoff.
        above_cut = proposals[proposals > L_min]
        # Rejection step: keep each L with probability L_min / L.
        uniforms = np.random.uniform(size=above_cut.size)
        kept = above_cut[uniforms < L_min / above_cut]
        accepted_batches.append(kept)
        n_accepted += kept.size
    # Concatenate the batches and truncate to exactly N samples.
    return np.concatenate(accepted_batches)[:N]
#-------------------------------------------------------------------------------------------------------------
if args.verbose:
    print ' '
    print ':: '+sys.argv[0]+' :: -- START OF PROGRAM -- '
    print ' '
#-------------------------------------------------------------------------------------------------------------
# Drawing full sample of objects
# Draw Ntotal luminosities (units of 1e44 erg/s) from the requested parent
# distribution. For 'schechter' the slope passed on is alpha = kval - 1 and
# a lower cutoff L_min is enforced so the sampler converges.
if args.verbose: print ' - Start drawing full sample from distribution'
if args.probdist == 'gamma':
    Ldraw_total = np.random.gamma(args.kval,args.Lstar,args.Ntotal)
elif args.probdist == 'schechter':
    L_min = 1.0e-4 # Lower limit on L to enforce convergence;
    Ldraw_total = simulate_schechter_distribution(args.kval-1.0,args.Lstar,L_min,args.Ntotal)
elif args.probdist == 'normal':
    Ldraw_total = np.random.normal(args.Lstar,args.kval,args.Ntotal)
else:
    sys.exit(args.probdist+' is not a valid probability distribution to draw from --> ABORTING')
#-------------------------------------------------------------------------------------------------------------
# add measurement errors
# Perturb each luminosity with a Gaussian J-band magnitude error of 0.2 mag,
# propagated to luminosity via dL = L * ln(10)/2.5 * dm.
if args.verbose: print ' - Adding measurement errors to drawn sample'
Msun = [5.61,5.48,4.83,4.42,4.08,3.64,3.32,3.28] # (rest-frame) abs Mag_sun in U,B,V,R,I,J,H,K (www.ucolick.org/~cnaw/sun.html)
Lsun = 3.839e-11 # 1e44 erg/
measurementerr = 0.2 # approximate 1sigma measurement error in J
Jmagerr = np.random.normal(0.0,measurementerr,args.Ntotal) # errors to add
Lerr = Ldraw_total*np.log(10)/2.5*Jmagerr # Error in luminosity: see notes from LT130408
Ldraw_toterr = Ldraw_total+Lerr # adding measurement error
Lerrabs = np.abs(Lerr)
#-------------------------------------------------------------------------------------------------------------
# Select objects above detection limit
# Keep only objects whose perturbed luminosity exceeds the survey limit Llim.
if args.verbose: print ' - Applying detection threshold to sample'
MJlim = -2.5*np.log10(args.Llim/Lsun)+Msun[1]+ 47.14 # apparent mag (47.14 term incl k-correction & reddening)
Llimcheck = 10**( (Msun[1]+47.14-MJlim)/2.5 ) * Lsun # round-trip check of MJlim (not used further below)
Llimerr = args.Llim * np.log(10)/2.5*0.2 # uncertainty on detection limit
Ldraw = Ldraw_toterr[Ldraw_toterr > args.Llim]
Lerrabs = Lerrabs[Ldraw_toterr > args.Llim]
Nobj = len(Ldraw) # Counting number of objects above detection limit
#-------------------------------------------------------------------------------------------------------------
# Ask interactively before writing to disk unless --nochoice was given.
if args.nochoice:
    ncstring = '_nochoice'
else: # ask if the results should be saved
    # NOTE(review): 'input' shadows the Python builtin; harmless here but
    # worth renaming if this script is extended.
    input = raw_input(" - Selected a final sample of Nobj = "+str(Nobj)+
                      " \n Should I write that sample to disk? (y/n): ")
    if (input == 'y') or (input == 'yes'):
        print " Okay, good, then I'll continue\n"
    elif (input == 'n') or (input == 'no'):
        sys.exit(" Not satisfied?... okay then I'll abort\n")
    else:
        sys.exit(' "'+input+'" is not a valid answer --> Aborting \n')
#-------------------------------------------------------------------------------------------------------------
# define string with output name
# Encode the simulation parameters in the output file basename.
outbase = './balff_data/dataarraySim_pdist'+args.probdist+\
          '_Ntot'+str(args.Ntotal)+\
          '_k'+str(args.kval).replace('.','p')+\
          '_Lstar'+str(args.Lstar).replace('.','p')+\
          '_Llim'+str(args.Llim).replace('.','p')+\
          '_Nobj'+str(Nobj)
if args.nochoice: outbase = outbase+ncstring
outputfile = outbase+'.fits'
#-------------------------------------------------------------------------------------------------------------
# Diagnostic histogram of the parent, perturbed and selected samples.
plotsamples = 1
if args.nochoice: plotsamples = 0 # ignore plotting when nochoice is set
if plotsamples == 1:
    plotname = outbase+'.pdf'
    import pylab as plt
    Fsize = 10
    plt.rc('text', usetex=True) # enabling LaTex rendering of text
    plt.rc('font', family='serif',size=Fsize) # setting text font
    plt.rc('xtick', labelsize=Fsize)
    plt.rc('ytick', labelsize=Fsize)
    hist = plt.hist(np.log10(Ldraw_total),bins=100,color='0.3',
                    label='Total sample drawn from dist: k='+str(args.kval)+', L*/[1e44erg/s]='+str(args.Lstar))
    plt.hist(np.log10(Ldraw_toterr),bins=hist[1],color='b',alpha=0.5,
             label='Total sample added measurement errors')
    plt.hist(np.log10(Ldraw),bins=hist[1],color='r',alpha=0.5,
             label='Observed sample down to Llim='+str(args.Llim)+' (Mlim$\sim$'+str("%.2f" % MJlim)+')')
    plt.xlabel('log(L/[1e44erg/s])')
    plt.ylabel('\#')
    leg = plt.legend(fancybox=True, loc='upper right',numpoints=1)
    leg.get_frame().set_alpha(0.6)
    if args.verbose: print ' - Writing sample plot to ',plotname
    plt.savefig(plotname)
#-------------------------------------------------------------------------------------------------------------
# Defining/creating data lists
# Assign each selected object a running name and a survey field label
# (objects are split evenly over Nfields fields; currently a single field).
if args.verbose: print ' - Creating dictionary with simulated data '
Nfields = 1
fieldsize = Nobj/(Nfields+0.0) # '+0.0' forces float division (Python 2)
number = 0 # resetting numbering
OBJname = []
field = []
for ii in range(Nobj):
    if ii/fieldsize == round(ii/fieldsize):
        number = number+1
        if args.verbose: print ' - Writing data for field number ',number
    OBJname.append('Obj'+format(ii+1, "05d")+'_FIELD'+str(number))
    field.append('FIELD'+str(number))
L = Ldraw
dL = Lerrabs
Llim = [args.Llim for N in range(Nobj)]
dLlim = [Llimerr for N in range(Nobj)]
#-------------------------------------------------------------------------------------------------------------
# writing output to fits table
bcd.write_fitsfile(OBJname,field,L,dL,Llim,dLlim,1,outputname=outputfile,verbose=True)
#-------------------------------------------------------------------------------------------------------------
if args.verbose:
    print ' '
    print ':: '+sys.argv[0]+' :: -- END OF PROGRAM -- '
    print ' '
#-------------------------------------------------------------------------------------------------------------
| 52.276382 | 138 | 0.518408 |
c8d8e372ac13799c3679038fd25724630ec7bc44 | 515 | py | Python | learn_elasticsearch/tests/__init__.py | MacHu-GWU/learn_elasticsearch-project | c655aab80417b9c053e42921c6506600210e1e87 | [
"MIT"
] | null | null | null | learn_elasticsearch/tests/__init__.py | MacHu-GWU/learn_elasticsearch-project | c655aab80417b9c053e42921c6506600210e1e87 | [
"MIT"
] | null | null | null | learn_elasticsearch/tests/__init__.py | MacHu-GWU/learn_elasticsearch-project | c655aab80417b9c053e42921c6506600210e1e87 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from opensearchpy import OpenSearch
from ..os_domain import (
boto_ses,
es_sanhe_dev,
)
def create_index(es: OpenSearch, index: str, body: dict = None):
    # Create the index with the optional settings/mappings body; HTTP 400
    # (e.g. resource_already_exists) is ignored, so the call is idempotent.
    es.indices.create(index=index, ignore=400, body=body)
def delete_index(es: OpenSearch, index: str):
    # Delete the index if it exists; 400/404 responses are ignored, so the
    # call is idempotent.
    es.indices.delete(index=index, ignore=[400, 404])
def reset_index(es: OpenSearch, index: str, body: dict):
    # Drop the index (if present) and recreate it with the given body.
    delete_index(es, index)
    create_index(es, index, body)
| 22.391304 | 64 | 0.687379 |
4e8bacc27ad22aa20ab7bcb0c5a4e9d070e33888 | 6,317 | py | Python | lzhw_cli/lzhw_cli.py | MNoorFawi/lzhw | 97beb8ad13e3fdf09358ce3d28084fa03389283a | [
"MIT"
] | 4 | 2020-05-29T02:40:04.000Z | 2021-04-08T17:35:58.000Z | lzhw_cli/lzhw_cli.py | MNoorFawi/lzhw | 97beb8ad13e3fdf09358ce3d28084fa03389283a | [
"MIT"
] | 1 | 2020-05-29T02:34:51.000Z | 2020-05-29T06:24:11.000Z | lzhw_cli/lzhw_cli.py | MNoorFawi/lzhw | 97beb8ad13e3fdf09358ce3d28084fa03389283a | [
"MIT"
] | 4 | 2020-05-29T02:32:14.000Z | 2022-01-14T15:55:02.000Z | #!/usr/bin/env python
import lzhw
import pandas as pd
import argparse
import os
from subprocess import call
from time import time
import multiprocessing
def main():
    """CLI entry point for lzhw: compress or decompress tabular/flat files.

    Parses sys.argv (see the argparse setup below). Compression accepts an
    Excel workbook (first converted to CSV via a temporary VBScript run with
    cscript.exe, i.e. Windows only), a CSV file, or any plain-text file.
    Decompression restores a saved lzhw archive to an Excel, CSV or
    plain-text file depending on the output file name.
    """
    ## This script and the solution to convert xlsx into csv was thanks to the answer found here:
    ## https://stackoverflow.com/questions/28766133/faster-way-to-read-excel-files-to-pandas-dataframe
    ## and here: https://stackoverflow.com/questions/1858195/convert-xls-to-csv-on-command-line
    # VBScript source, written to disk on demand, that exports one worksheet
    # of an Excel workbook to CSV through the locally installed Excel.
    vbscript = """if WScript.Arguments.Count < 3 Then
    WScript.Echo "Please specify the source and the destination files. Usage: ExcelToCsv <xls/xlsx source file> <csv destination file> <worksheet number (starts at 1)>"
    Wscript.Quit
End If
csv_format = 6
Set objFSO = CreateObject("Scripting.FileSystemObject")
src_file = objFSO.GetAbsolutePathName(Wscript.Arguments.Item(0))
dest_file = objFSO.GetAbsolutePathName(WScript.Arguments.Item(1))
worksheet_number = CInt(WScript.Arguments.Item(2))
Dim oExcel
Set oExcel = CreateObject("Excel.Application")
Dim oBook
Set oBook = oExcel.Workbooks.Open(src_file)
oBook.Worksheets(worksheet_number).Activate
oBook.SaveAs dest_file, csv_format
oBook.Close False
oExcel.Quit
"""
    def is_number(s):
        """Return True if *s* parses as a float (numeric column index)."""
        try:
            float(s)
            return True
        except ValueError:
            return False
    def csv_reader(file, cols, col_arg, nh_arg):
        """Read a CSV with pandas honoring column selection and header flags.

        cols is a comma-separated string of column names or 1-based indices
        (only used when col_arg is truthy); nh_arg=True means the file has
        no header row. Column labels are coerced to str for lzhw.
        """
        if nh_arg:
            h = None
        else:
            h = 0
        if col_arg:
            cols_used = cols.split(",")
            if is_number(cols_used[0]):
                # 1-based CLI indices -> 0-based pandas indices.
                cols_used = [int(i) - 1 for i in cols_used]
        else:
            cols_used = None
        data = pd.read_csv(file, header=h, usecols=cols_used)
        data.columns = list(map(str, data.columns))
        return data
    parser = argparse.ArgumentParser(
        description="LZHW is a tabular data compression tool. It is used to compress excel, csv and any flat file. Version: 0.0.10")
    parser.add_argument("-d", "--decompress", help="decompress input into output",
                        action="store_true", default=False)
    parser.add_argument("-f", "--input", help="input file to be (de)compressed",
                        type=str, required=True)
    parser.add_argument("-o", "--output", help="output where to save result",
                        type=str, required=True)
    parser.add_argument("-c", "--columns", nargs="+",
                        help="select specific columns by names or indices (1-based) to compress or decompress",
                        type=str,
                        required=False)
    parser.add_argument("-r", "--rows",
                        help="select specific rows to decompress (1-based)", type=str,
                        required=False)
    parser.add_argument("-nh", "--no-header", help="skip header / data to be compressed has no header",
                        action="store_true", default=False)
    parser.add_argument("-p", "--parallel", help="compress or decompress in parallel",
                        action="store_true", default=False)
    parser.add_argument("-j", "--jobs", help="Number of CPUs to use if parallel (default all but 2)",
                        type=str, required=False, default="-3")
    args = vars(parser.parse_args())
    file = args["input"]
    output = args["output"]
    para = args["parallel"]
    n_jobs = args["jobs"]
    if args["columns"]:
        cols = args["columns"][0]
    else:
        cols = "all"
    if args["rows"]:
        n_rows = int(args["rows"])
    else:
        n_rows = 0
    if args["decompress"]:
        start = time()
        if cols != "all":
            cols = cols.split(",")
            if is_number(cols[0]):
                cols = [int(i) - 1 for i in cols]
        if para:
            decompressed = lzhw.decompress_df_from_file(file, cols, n_rows,
                                                        parallel = para, n_jobs = int(n_jobs))
        else:
            decompressed = lzhw.decompress_df_from_file(file, cols, n_rows)
        # Blank out missing values so they round-trip as empty cells.
        decompressed.fillna("", inplace=True)
        decompressed = decompressed.replace("nan", "", regex=True)
        # Output format inferred from the output file name.
        # NOTE(review): these are substring tests, so e.g. "my_xls_data.txt"
        # would take the Excel branch -- confirm that is acceptable.
        if "xls" in output:
            options = {}
            options["strings_to_formulas"] = False
            options["strings_to_urls"] = False
            writer = pd.ExcelWriter(output, engine="xlsxwriter", options=options)
            # NOTE(review): the sheet name is the output path minus ".xls";
            # Excel caps sheet names at 31 chars -- verify for long paths.
            decompressed.to_excel(writer, output.split(".xls")[0], index=False)
            writer.save()
        # BUGFIX: this branch was previously a separate `if`, so an Excel
        # output (no "csv" in its name) fell through to the `else` and the
        # just-written workbook was truncated and overwritten with plain
        # text. `elif` makes the three output formats mutually exclusive.
        elif "csv" in output:
            decompressed.to_csv(output, index=False)
        else:
            with open(output, "w") as o:
                decompressed.to_string(o, index=False)
        print("Finalizing Decompression ...")
        print(f"Creating {output} file ...")
        print("time taken: ", (time() - start) / 60, " minutes")
        print("Decompressed Successfully")
    else:
        start = time()
        if "xls" in file:
            print("Reading files, Can take 1 minute or something ...",
                  "\nRunning CScript.exe to convert xls file to csv for better performance", "\n")
            # Materialize the VBScript, convert sheet 1 to a temporary CSV,
            # then clean both temporary files up again.
            with open("excel_to_csv.vbs", "w") as f:
                f.write(vbscript)
            csv_file = file.split(".xls")[0] + "1" + ".csv"
            call(["cscript.exe", "excel_to_csv.vbs", file, csv_file, "1"])
            os.remove("excel_to_csv.vbs")
            data = csv_reader(csv_file, cols, args["columns"], args["no_header"])
            os.remove(csv_file)
        elif "csv" in file:
            print("Reading files ...")
            data = csv_reader(file, cols, args["columns"], args["no_header"])
        else:
            # Any other file is treated as plain text and compressed whole.
            with open(file, "r") as i:
                data = i.read()
        if para:
            comp_df = lzhw.CompressedDF(data, parallel = para, n_jobs = int(n_jobs))
        else:
            comp_df = lzhw.CompressedDF(data)
        print("Finalizing Compression ...")
        comp_df.save_to_file(output)
        print(f"Creating {output} file ...")
        print("time taken: ", (time() - start) / 60, " minutes")
        print("Compressed Successfully")
if __name__ == "__main__":
    # freeze_support() is required for frozen Windows executables
    # (PyInstaller and similar) that use multiprocessing.
    # NOTE(review): main() is launched in a child Process rather than called
    # directly -- presumably to match the spawn-based entry pattern of
    # frozen builds; confirm before changing.
    multiprocessing.freeze_support()
    multiprocessing.Process(target=main).start()
| 36.514451 | 172 | 0.579389 |
77588b9bc104ced16f40073cff8b32bb7c5d97b0 | 2,009 | bzl | Python | tools/objc.bzl | erka/tink | 78b908124efbb34acc4de3ae11b41112cb41adb2 | [
"Apache-2.0"
] | 1 | 2019-11-05T10:58:12.000Z | 2019-11-05T10:58:12.000Z | tools/objc.bzl | bollwarm/tink | e699fe174212ba5a1d43d4681a251245decdaeff | [
"Apache-2.0"
] | 1 | 2020-11-11T16:45:00.000Z | 2020-11-11T16:45:00.000Z | tools/objc.bzl | bollwarm/tink | e699fe174212ba5a1d43d4681a251245decdaeff | [
"Apache-2.0"
] | 2 | 2020-02-03T13:52:11.000Z | 2021-06-20T00:58:09.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compiles protobuf for ObjC.
This tool uses https://github.com/pubref/rules_protobuf.
"""
# The actual rule which does the filtering.
def _do_filter_impl(ctx):
    # Keep only the files from `srcs` whose path ends with the configured
    # suffix (e.g. ".pbobjc.h" or ".pbobjc.m").
    # NOTE(review): returns a legacy struct provider; recent Bazel versions
    # expect a list of providers (DefaultInfo) -- confirm against the Bazel
    # version this builds with.
    return struct(
        files = depset([f for f in ctx.files.srcs if f.path.endswith(ctx.attr.suffix)]),
    )
# Rule exposing, as its files, the subset of `srcs` that match `suffix`.
_do_filter = rule(
    attrs = {
        "srcs": attr.label_list(
            mandatory = True,
            allow_files = True,
        ),
        # File-name suffix used to select outputs, e.g. ".pbobjc.h".
        "suffix": attr.string(
            mandatory = True,
        ),
    },
    implementation = _do_filter_impl,
)
# A convenient macro to wrap the custom rule and objc_library.
def tink_objc_proto_library(name, srcs, **kwargs):
    """
    Compiles ObjC proto libraries in srcs into a single library.
    Splits the generated .pbobjc.h / .pbobjc.m files out of `srcs` with the
    private _do_filter rule and wraps them in one objc_library target.
    Args:
      name: the name of the output library
      srcs: the list of ObjC proto libraries, which are generated using
          objc_proto_compile in rules_protobuf.
      **kwargs: extra attributes forwarded to the underlying objc_library.
    """
    # Generated headers (*.pbobjc.h).
    _do_filter(
        name = "%s_hdrs" % name,
        visibility = ["//visibility:private"],
        srcs = srcs,
        suffix = ".pbobjc.h",
    )
    # Generated implementation files (*.pbobjc.m).
    _do_filter(
        name = "%s_srcs" % name,
        visibility = ["//visibility:private"],
        srcs = srcs,
        suffix = ".pbobjc.m",
    )
    native.objc_library(
        name = name,
        srcs = [":%s_srcs" % name],
        hdrs = [":%s_hdrs" % name],
        # protoc-generated ObjC uses manual reference counting.
        copts = ["-fno-objc-arc"],
        deps = ["@com_google_protobuf//:objectivec"],
        **kwargs
    )
| 29.115942 | 88 | 0.623196 |
dd54e4e55acc7f7a400c2cfb309f035ce1bfc330 | 2,564 | py | Python | zs/consts.py | lqvito/zs | c80e06a6905879b76a473512261e44d23bea7e99 | [
"MIT"
] | 14 | 2020-05-08T07:27:21.000Z | 2021-08-04T09:12:17.000Z | zs/consts.py | wrhsd1/zs | 5176616427411b61f9252567445e7ed47fc35b2c | [
"MIT"
] | 1 | 2021-12-27T05:10:37.000Z | 2021-12-28T00:38:33.000Z | zs/consts.py | wrhsd1/zs | 5176616427411b61f9252567445e7ed47fc35b2c | [
"MIT"
] | 3 | 2021-07-20T12:57:39.000Z | 2021-12-26T12:47:45.000Z | import os
CONFIG_DIR = os.path.join(os.environ.get('HOME'), '.zs', 'config')
DATA_DIR = os.path.join(os.environ.get('HOME'), '.zs', 'data')
README_TEMPLATE = """{name}
=======
Support Python{version} or later
## Install
```shell
python setup.py install
```
## Develop
Create virtualenv and install dependencies:
```shell
make venv && make deps
```
Unit testing
```shell
make test
```
"""
SETUP_FILE_TEMPLATE = """#!/usr/bin/env python
# coding: utf-8
from setuptools import setup, find_packages
VERSION = '0.1.0'
REQS = []
setup(
name='{name}',
version=VERSION,
description='',
license='MIT',
packages=find_packages(),
install_requires=REQS,
include_package_data=True,
zip_safe=False,
)
"""
MAKEFILE_TEMPLATE = """lint: clean
\tflake8 {name} --format=pylint || true
test: lint
\tpy.test -vvv --cov {name} --cov-report term-missing --cov-report xml:cobertura.xml --junitxml=testresult.xml tests
clean:
\t- find . -iname "*__pycache__" | xargs rm -rf
\t- find . -iname "*.pyc" | xargs rm -rf
\t- rm cobertura.xml -f
\t- rm testresult.xml -f
\t- rm .coverage -f
\t- rm .pytest_cache/ -rf
venv:
\t- virtualenv --python=$(shell which python{version}) --prompt '<venv:{name}>' venv
lock-requirements:
\t- pip install pip-tools
\t- pip-compile --output-file requirements.txt requirements.in
deps:
\t- pip install -U pip setuptools
\t- pip install -r requirements.txt
""" # noqa
SETUP_CFG = """[flake8]
max-line-length = 100
ignore = E201,E202
[pep8]
max-line-length = 100
ignore = E201,E202
"""
TEST_FILE_TEMPLATE = """import os
import sys
import pytest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from {name} import *
def test_init():
assert True == True
"""
IGNORE_FILE_TEMPLATE = """# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Django stuff:
*.log
local_settings.py
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# IPython Notebook
.ipynb_checkpoints
# pyenv
.python-version
# dotenv
.env
# virtualenv
env/
venv/
ENV/
# Spyder project settings
.spyderproject
"""
| 14.994152 | 116 | 0.682527 |
4663708007179f69825032e17932957d4da39a10 | 11,743 | py | Python | intersight/model/macpool_id_block_response.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 5 | 2021-12-16T15:13:32.000Z | 2022-03-29T16:09:54.000Z | intersight/model/macpool_id_block_response.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 4 | 2022-01-25T19:05:51.000Z | 2022-03-29T20:18:37.000Z | intersight/model/macpool_id_block_response.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 2 | 2020-07-07T15:01:08.000Z | 2022-01-31T04:27:35.000Z | """
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    """Import sibling model modules on first use and publish them via globals().

    Generator convention: these imports are deferred (presumably to break
    circular imports between generated model modules — see the comment in
    ``_composed_schemas``), then injected into this module's namespace so the
    class-level ``openapi_types``/``discriminator`` lookups can resolve them.
    """
    from intersight.model.macpool_id_block_list import MacpoolIdBlockList
    from intersight.model.mo_aggregate_transform import MoAggregateTransform
    from intersight.model.mo_document_count import MoDocumentCount
    from intersight.model.mo_tag_key_summary import MoTagKeySummary
    from intersight.model.mo_tag_summary import MoTagSummary
    globals()['MacpoolIdBlockList'] = MacpoolIdBlockList
    globals()['MoAggregateTransform'] = MoAggregateTransform
    globals()['MoDocumentCount'] = MoDocumentCount
    globals()['MoTagKeySummary'] = MoTagKeySummary
    globals()['MoTagSummary'] = MoTagSummary
class MacpoolIdBlockResponse(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # No enum restrictions or field validations for this schema.
    allowed_values = {
    }
    validations = {
    }
    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'object_type': (str,), # noqa: E501
            'count': (int,), # noqa: E501
            'results': ([MoTagKeySummary], none_type,), # noqa: E501
        }
    @cached_property
    def discriminator():
        lazy_import()
        # Maps the 'ObjectType' payload value to the concrete model class
        # used to deserialize the oneOf response body.
        val = {
            'macpool.IdBlock.List': MacpoolIdBlockList,
            'mo.AggregateTransform': MoAggregateTransform,
            'mo.DocumentCount': MoDocumentCount,
            'mo.TagSummary': MoTagSummary,
        }
        if not val:
            return None
        return {'object_type': val}
    # Python attribute name -> JSON key in the wire format.
    attribute_map = {
        'object_type': 'ObjectType', # noqa: E501
        'count': 'Count', # noqa: E501
        'results': 'Results', # noqa: E501
    }
    # Internal bookkeeping attributes that are set directly on the instance
    # (bypassing the model's managed-attribute machinery).
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
        '_composed_instances',
        '_var_name_to_model_instances',
        '_additional_properties_model_instances',
    ])
    @convert_js_args_to_python_args
    def __init__(self, object_type, *args, **kwargs): # noqa: E501
        """MacpoolIdBlockResponse - a model defined in OpenAPI
        Args:
            object_type (str): A discriminator value to disambiguate the schema of a HTTP GET response body.
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
            count (int): The total number of 'macpool.IdBlock' resources matching the request, accross all pages. The 'Count' attribute is included when the HTTP GET request includes the '$inlinecount' parameter.. [optional] # noqa: E501
            results ([MoTagKeySummary], none_type): [optional] # noqa: E501
        """
        # Pop framework-control kwargs before the remaining kwargs are treated
        # as model properties.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        required_args = {
            'object_type': object_type,
        }
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        # Resolve which composed (oneOf) schema instances this payload maps to.
        composed_info = validate_get_composed_info(
            constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]
        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in kwargs.items():
            if var_name in unused_args and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    not self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
    @cached_property
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error beause the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        lazy_import()
        return {
            'anyOf': [
            ],
            'allOf': [
            ],
            'oneOf': [
                MacpoolIdBlockList,
                MoAggregateTransform,
                MoDocumentCount,
                MoTagSummary,
            ],
        }
| 46.972 | 1,678 | 0.63689 |
d24cb8ed0055acb27eeb8f2a83ac13bb3f4f98d7 | 1,482 | py | Python | algorithms/sets/longest_common_subsequence/lcs.py | maurobaraldi/python-algorithms | 94fe5e46737d6ea2f7ff62b5b61b7b9cca3fbd14 | [
"MIT"
] | 2 | 2018-11-05T17:12:40.000Z | 2019-09-05T17:10:01.000Z | algorithms/sets/longest_common_subsequence/lcs.py | maurobaraldi/python-algorithms | 94fe5e46737d6ea2f7ff62b5b61b7b9cca3fbd14 | [
"MIT"
] | null | null | null | algorithms/sets/longest_common_subsequence/lcs.py | maurobaraldi/python-algorithms | 94fe5e46737d6ea2f7ff62b5b61b7b9cca3fbd14 | [
"MIT"
] | 1 | 2019-01-02T19:07:51.000Z | 2019-01-02T19:07:51.000Z | #!/usr/bin/env python
def lcs(listX, listY):
    """Return a longest common subsequence of two sequences (dynamic programming).

    Builds the classic ``(len(listX)+1) x (len(listY)+1)`` length table in
    O(len(listX) * len(listY)) time, then backtracks through it to
    reconstruct one LCS.

    Parameters
    ----------
    listX, listY : sequence of str (typically a str)
        Elements must support ``==`` comparison and ``+`` concatenation.

    Returns
    -------
    str
        One longest common subsequence; ``""`` if either input is empty.
    """
    if not listX or not listY:
        return ""
    # matrix[i][j] = LCS length of listX[:i] and listY[:j];
    # row 0 and column 0 stay 0 (empty prefix has empty LCS).
    matrix = [[0] * (len(listY) + 1) for _ in range(len(listX) + 1)]
    for i, x in enumerate(listX):
        for j, y in enumerate(listY):
            if x == y:
                matrix[i + 1][j + 1] = matrix[i][j] + 1
            else:
                matrix[i + 1][j + 1] = max(matrix[i + 1][j], matrix[i][j + 1])
    # Walk back from the bottom-right corner, emitting matched elements.
    result = ""
    x, y = len(listX), len(listY)
    while x != 0 and y != 0:
        if matrix[x][y] == matrix[x - 1][y]:
            x -= 1
        elif matrix[x][y] == matrix[x][y - 1]:
            y -= 1
        else:
            # Length increased coming from the diagonal => elements match.
            assert listX[x - 1] == listY[y - 1]
            result = listX[x - 1] + result
            x -= 1
            y -= 1
    return result
def lcs_recursive(listX, listY):
    """Return a longest common subsequence of two sequences (recursive form).

    Behaves exactly like the naive head/tail recursion (on ties, the branch
    that drops an element of ``listY`` first wins, matching the original
    ``max(..., key=len)`` first-argument preference), but memoises on index
    pairs so the running time is O(len(listX) * len(listY)) instead of
    exponential, and no intermediate slices are allocated.

    Parameters
    ----------
    listX, listY : sequence of str (typically a str)
        Elements must support ``==`` comparison and ``+`` concatenation.

    Returns
    -------
    str
        One longest common subsequence; ``""`` if either input is empty.
    """
    memo = {}

    def _lcs(i, j):
        # LCS of listX[i:] and listY[j:].
        if i == len(listX) or j == len(listY):
            return ""
        key = (i, j)
        if key not in memo:
            if listX[i] == listY[j]:
                memo[key] = listX[i] + _lcs(i + 1, j + 1)
            else:
                drop_y = _lcs(i, j + 1)
                drop_x = _lcs(i + 1, j)
                # >= keeps the original tie-breaking (first argument wins).
                memo[key] = drop_y if len(drop_y) >= len(drop_x) else drop_x
        return memo[key]

    return _lcs(0, 0)
04ecb3ffad997b607823b8a10a5f879ddc47b4a0 | 82 | py | Python | lm/pytorch-lm/rnn/__init__.py | DeepLearnXMU/ATR | 6f0e4009003eff5db82c9cb537f010720a9bed5c | [
"BSD-3-Clause"
] | 9 | 2020-07-20T15:58:44.000Z | 2022-02-04T16:22:03.000Z | lm/pytorch-lm/rnn/__init__.py | DeepLearnXMU/ATR | 6f0e4009003eff5db82c9cb537f010720a9bed5c | [
"BSD-3-Clause"
] | null | null | null | lm/pytorch-lm/rnn/__init__.py | DeepLearnXMU/ATR | 6f0e4009003eff5db82c9cb537f010720a9bed5c | [
"BSD-3-Clause"
] | 1 | 2018-12-10T12:31:15.000Z | 2018-12-10T12:31:15.000Z | # coding: utf-8
from .lstm import LSTM
from .gru import GRU
from .atr import ATR
| 13.666667 | 22 | 0.731707 |
9c764fd6672244a6cef693600913885ab1f0282e | 29 | py | Python | app/tasks/docker/command.py | Clivern/Kraven | 5d8d2de26e170d853d7d5f2b1f2d453ab07e4401 | [
"Apache-2.0"
] | 3 | 2018-07-22T22:36:09.000Z | 2019-05-31T10:29:54.000Z | app/tasks/docker/command.py | Clivern/Kraven | 5d8d2de26e170d853d7d5f2b1f2d453ab07e4401 | [
"Apache-2.0"
] | 41 | 2018-07-22T22:07:52.000Z | 2018-11-14T11:07:48.000Z | app/tasks/docker/command.py | Clivern/Kraven | 5d8d2de26e170d853d7d5f2b1f2d453ab07e4401 | [
"Apache-2.0"
] | 1 | 2020-04-24T12:55:27.000Z | 2020-04-24T12:55:27.000Z | """
Docker Command Tasks
"""
| 7.25 | 20 | 0.62069 |
390c33debdb556a365f4e9a01312286d7a8a8ec7 | 2,075 | py | Python | app/settings.py | sampathweb/ml-api-sample-app | 53a7275bc00393bbd9df14fd3d3c432a5b485b21 | [
"MIT"
] | 3 | 2017-11-13T08:44:23.000Z | 2020-04-30T10:55:44.000Z | app/settings.py | sampathweb/ml-api-sample-app | 53a7275bc00393bbd9df14fd3d3c432a5b485b21 | [
"MIT"
] | null | null | null | app/settings.py | sampathweb/ml-api-sample-app | 53a7275bc00393bbd9df14fd3d3c432a5b485b21 | [
"MIT"
] | null | null | null | """
Application configuration settings
"""
import os
from tornado.options import define
# Tornado command-line options (read via tornado.options.options elsewhere).
define("debug", default=True, help="Debug settings")
define("port", default=9000, help="Port to run the server on")
# Directories resolved relative to this file: models/ and logs/ live one
# level above the package directory.
_CUR_DIR = os.path.dirname(os.path.realpath(__file__))
MODEL_DIR = os.path.join(_CUR_DIR, "..", "models")
LOG_DIR = os.path.join(_CUR_DIR, "..", "logs")
# Upper bound on worker threads for the application's thread pool.
MAX_THREAD_POOL = 10
# logging.config.dictConfig-style configuration: console gets a simple
# formatter; app.log / access.log get JSON lines, rotated every 2 days,
# keeping 5 backups.
LOG_SETTINGS = {
    'version': 1,
    "root": {
        "level": "WARNING",
        "handlers": ["console"],
    },
    "formatters": {
        "json": {
            "()": "app.utils.log_formatters.JSONFormatter",
        },
        "simple": {
            "format": "%(levelname)s %(message)s",
        }
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'level': 'DEBUG',
            'formatter': "simple"
        },
        'applog': {
            "class": "logging.handlers.TimedRotatingFileHandler",
            'level': "INFO",
            'formatter': "json",
            'filename': os.path.join(LOG_DIR, "app.log"),
            "when": "D",
            "interval": 2,
            "backupCount": 5
        },
        'accesslog': {
            "class": "logging.handlers.TimedRotatingFileHandler",
            'level': "INFO",
            'formatter': "json",
            'filename': os.path.join(LOG_DIR, "access.log"),
            "when": "D",
            "interval": 2,
            "backupCount": 5
        },
    },
    "loggers": {
        "tornado.access": {
            "handlers": ["console", "accesslog"],
            "level": "INFO",
            "propagate": False,
        },
        "tornado.application": {
            "handlers": ["console", "applog"],
            "level": "INFO",
            "propagate": False,
        },
        "tornado.general": {
            "handlers": ["console", "applog"],
            "level": "INFO",
            "propagate": False,
        },
        "app": {
            "handlers": ["console", "applog"],
            "level": "INFO",
            "propagate": False,
        },
    }
}
ddb3c179e11f017457e1e80df72eaafba51b2cd6 | 19,084 | py | Python | plot_.py | SoftwareDevEngResearch/CAML | 3318887a6b47f8ff99d9ebdff75e5d38f112a859 | [
"MIT"
] | null | null | null | plot_.py | SoftwareDevEngResearch/CAML | 3318887a6b47f8ff99d9ebdff75e5d38f112a859 | [
"MIT"
] | null | null | null | plot_.py | SoftwareDevEngResearch/CAML | 3318887a6b47f8ff99d9ebdff75e5d38f112a859 | [
"MIT"
] | 1 | 2020-04-22T21:07:41.000Z | 2020-04-22T21:07:41.000Z | """ This module includes plotting functions. """
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
from matplotlib import cm
# NOTE(review): this runs at import time and only affects pyplot's implicit
# current figure; the padding presumably does not carry over to figures the
# functions below create later — confirm this call is intentional.
plt.tight_layout(h_pad = 3, w_pad=3)
####################################
def set_dist(df, target, set_, path):
    """This function takes in a dataframe and plots the distribution
    of the target column as a histogram. The figure is saved to the path.

    Parameters
    ----------
    df : pandas dataframe
        dataframe of set to plot distribution
    target : string
        name of target column in dataframes
    set_ : string
        name of split. "train" or "test". Used for file naming
    path : string
        path where png file will be stored
    """
    # NOTE(review): np.float is a deprecated alias (removed in NumPy 1.24);
    # plain float behaves identically here — confirm the NumPy version pin.
    y = df[str(target)].values.astype(np.float)
    plt.hist([y])
    plt.ylabel('frequency')
    plt.xlabel(f"{target}")
    plt.grid(axis = 'y')
    plt.savefig(f"{path}/{set_}_dist.png", bbox_inches='tight', dpi=1200)
    plt.clf()
def overall_dist(df_train, df_test, target, path):
    """This function takes in the train and test dataframes and plots both
    target distributions stacked in a histogram. The figure is saved to the path.

    Parameters
    ----------
    df_train : pandas dataframe
        dataframe of train set
    df_test : pandas dataframe
        dataframe of test set
    target : string
        name of target column in dataframes
    path : string
        path where png file will be stored
    """
    target_train = df_train[str(target)].values.astype(np.float)
    target_test = df_test[str(target)].values.astype(np.float)
    labels = ['train', 'test']
    plt.hist([target_train, target_test],
             label=labels, stacked=True) # add auto bin number
    plt.ylabel('frequency')
    plt.xlabel(f"{target}")
    plt.legend(loc='upper right')
    plt.grid(axis = 'y')
    plt.savefig(f"{path}/overall_dist.png", bbox_inches='tight', dpi=1200)
    plt.clf()
def kfold_dist(kf, target, X_train, y_train, path):
    """Plot the target distribution of each cross-validation fold as a
    stacked histogram. The figure is saved to the path.

    Parameters
    ----------
    kf : sklearn KFold-like splitter object
        e.g. KFold or StratifiedKFold
    target : string
        name of target column in dataframes (axis label only)
    X_train : 2D series / array-like
        feature data of train set
    y_train : 1D series / array-like
        target data of train set (must support integer-array indexing)
    path : string
        path where png file will be stored
    """
    folds = []
    try:
        # Stratified splitters require both X and y in .split().
        for _, test_index in kf.split(X_train, y_train):
            folds.append(y_train[test_index])
    except (TypeError, ValueError):
        # Plain splitters take only X (or the splitter rejected y, e.g.
        # a stratified splitter given a continuous target). The original
        # bare `except:` also hid unrelated errors such as KeyboardInterrupt.
        folds = []  # discard any folds collected before the failure
        for _, test_index in kf.split(X_train):
            folds.append(y_train[test_index])
    labels = [f"split {i + 1}" for i in range(len(folds))]
    plt.hist(folds,
             label=labels,
             stacked=True)
    plt.ylabel('frequency')
    plt.xlabel(f"{target}")
    plt.legend(loc='upper right')
    plt.grid(axis = 'y')
    plt.savefig(f"{path}/kfold_dist.png", bbox_inches='tight', dpi=1200)
    plt.clf()
def spectra_3D(df, target, path):
    """This function takes in a dataframe and plots the spectra in a 3D
    surface plot. The figure is saved to the path. [in construction]

    Parameters
    ----------
    df : pandas dataframe
        dataframe of spectra; columns are wavenumbers plus the target column
    target : string
        name of target column in dataframes (dropped before plotting)
    path : string
        path where png file will be stored
    """
    # To add: sort by target value, color gradient for target value or make target = y_
    # save higher quality image, fix fontsize / use tight layout, make interactive/moving
    # add train, test, split options to plot
    # add fuel names maybe? or remove numbers from y axis
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    features = list(df.columns)
    features.remove(target)
    features = np.asarray(features)
    # x axis: wavenumbers; y axis: row index (one per fuel/example).
    x_ = features.copy().astype(float)
    y_ = np.arange(len(df)).astype(float)
    X_,Y_ = np.meshgrid(x_,y_)
    Z = df.drop(str(target), axis=1).values.astype(np.float)
    ax.plot_surface(X_, Y_, Z, rstride=1, cstride=1000, shade=True, lw=.1, alpha=0.4)
    ax.set_zlabel("Intensity")
    ax.set_xlabel("Wavenumber")
    ax.set_ylabel("Fuel")
    # ax.view_init(20,-120)
    plt.savefig(f"{path}/3D_spectra.png", dpi=1200)
    plt.clf()
def spectra_2D(df, target, path, label):
    """This function takes in a spectra dataframe and plots the
    staggered spectra on a 2D plot. The figure is saved to the path.

    Parameters
    ----------
    df : pandas dataframe
        dataframe of set; columns are wavenumbers plus the target column
    target : string
        name of target column in dataframes (dropped before plotting)
    path : string
        path where png file will be stored
    label : string
        name of set. "train" or "test" for file naming.
    """
    X = df.drop(str(target), axis=1).values.astype(np.float)
    # y = df[str(target)].copy().values.astype(np.float)
    features = list(df.columns)
    features.remove(target)
    features = np.asarray(features).astype(np.float)
    for i, example in enumerate(X):
        # Vertical offset of 0.2 per row keeps the traces from overlapping.
        intensity = X[i] + 0.2*i
        plt.plot(features, intensity, alpha = 0.5, linewidth=1, c='k')
    plt.ylabel("Intensity")
    plt.yticks([])
    plt.xlabel("Wavenumber")
    plt.savefig(f"{path}/spectra_2D_{label}.png", bbox_inches='tight', dpi=1200)
    plt.clf()
#####################################
def abs_error_hist(abs_errors, model_name, transform_name, target, path, dataset):
    """This function takes in the absolute errors from model predictions
    and plots their distribution as a histogram. The figure is saved to the path.

    Parameters
    ----------
    abs_errors : list
        absolute errors
    model_name : string
        name of algorithm in yaml input file
    transform_name : string
        name of transform specified in transform_names in input file
    target : string
        name of target column in dataframes
    path : string
        path where png file will be stored
    dataset : string
        name of set. "train" or "test" for file naming.
    """
    plt.hist(abs_errors)
    plt.xlabel("Absolute error")
    plt.ylabel('Frequency')
    plt.savefig(f"{path}/{target}_{model_name}_{transform_name}_{dataset}_error_hist.png", bbox_inches='tight', dpi=1200)
    plt.clf()
def parity_plot(y, predictions, model_name, transform_name, target, path, dataset):
    ''' This function takes in the true and predicted values and plots a scatter
    plot alongside a y=x line for one dataset - train or test.

    Parameters
    ------------
    y : array-like
        the true values
    predictions : array-like
        the predicted values from evaluating the trained model
    model_name : string
        name of algorithm in yaml input file
    transform_name : string
        name of transform specified in transform_names in input file
    target : string
        name of target column in dataframes
    path : string
        path where png file will be stored
    dataset : string
        name of set. "train" or "test" for file naming.
    '''
    abs_errors = np.abs(y - predictions)
    # Shared axis limits so the y=x reference line spans all points.
    min_ = min([min(y), min(predictions)])
    max_ = max([max(y), max(predictions)])
    # Red circles for train, blue stars for test (matches train_test_parity_plot).
    if dataset == "Train":
        color='r'
        marker="o"
    else: # dataset =="Test":
        color='b'
        marker="*"
    label = f"{dataset}, Average absolute error: {np.mean(abs_errors): .2f}"
    plt.scatter(y, predictions, facecolors='none', edgecolors=color, marker=marker, s= 80, label=label)
    plt.plot([min_, max_],[min_, max_])
    plt.grid()
    plt.xlabel('True')
    plt.ylabel('Predicted')
    plt.legend()
    plt.savefig(f"{path}/{target}_{model_name}_{transform_name}_{dataset}_parity.png", bbox_inches='tight', dpi=1200)
    plt.clf()
def train_test_parity_plot(y_test, y_test_pred, y_train, y_train_pred, model_name, transform_name, target, path):
    ''' This function takes in the true and predicted values of both sets and
    plots them on one scatter plot alongside a y=x line.

    Parameters
    ------------
    y_test : array-like
        the true test values
    y_test_pred : array-like
        the predicted test values
    y_train : array-like
        the true train values
    y_train_pred : array-like
        the predicted train values
    model_name : string
        name of algorithm in yaml input file
    transform_name : string
        name of transform specified in transform_names in input file
    target : string
        name of target column in dataframes
    path : string
        path where png file will be stored
    '''
    test_errors = np.abs(y_test - y_test_pred)
    train_errors = np.abs(y_train - y_train_pred)
    # Shared axis limits across both sets so the y=x line spans all points.
    min_ = min([min(y_test), min(y_test_pred), min(y_train), min(y_train_pred)])
    max_ = max([max(y_test), max(y_test_pred), max(y_train), max(y_train_pred)])
    plt.scatter(y_train, y_train_pred, facecolors='none', edgecolors='r', marker='o', s= 80, label=f'Train, average absolute error: {np.mean(train_errors): .2f}')
    plt.scatter(y_test, y_test_pred, facecolors='none', edgecolors='b', marker='*', s= 80, label=f'Test, average absolute error: {np.mean(test_errors): .2f}')
    plt.plot([min_, max_],[min_, max_])
    plt.grid()
    plt.legend(loc="lower right")
    # range_ = max_ - min_
    # plt.text(min_+ 0.05*range_, max_ - 0.05*range_, f"Average absolute error in test set: {np.mean(test_errors): .2f}")
    # plt.text(min_+ 0.05*range_, max_ - 0.1*range_, f"Average absolute error in train set: {np.mean(train_errors): .2f}")
    plt.xlabel('True')
    plt.ylabel('Predicted')
    plt.savefig(f"{path}/{target}_{model_name}_{transform_name}_train_test_parity.png", bbox_inches='tight', dpi=1200)
    plt.clf()
def bar_performances_by_algorithm(train_performances, test_performances, models, target, transform, path):
    ''' This function plots the average absolute error of train and test sets for each
    algorithm for a single transform, as grouped bars.

    Parameters
    ---------
    train_performances : array-like
        average absolute error in train set, one value per model
    test_performances : array-like
        average absolute error in test set, one value per model
    models : list
        list of model names as strings
    target : string
        name of target column in dataframes
    transform : string
        name of transform that is paired for each model. For file naming
    path : string
        path where png file will be stored
    '''
    # set width of bar
    barWidth = 0.25
    # Set position of bar on X axis (train bars, then test bars shifted right)
    r1 = np.arange(len(train_performances))
    r2 = [x + barWidth for x in r1]
    # Make the plot
    plt.bar(r1, train_performances, color='slateblue', width=barWidth, edgecolor='white', label='Train')
    plt.bar(r2, test_performances, color='forestgreen', width=barWidth, edgecolor='white', label='Test')
    # Add xticks on the middle of the group bars
    plt.xlabel('Model', fontweight='bold')
    plt.xticks([r + barWidth/2 for r in range(len(train_performances))], models)
    plt.tick_params(
        axis='x',
        bottom=False)
    # Create legend & Show graphic
    plt.legend()
    plt.ylabel('Average absolute error')
    plt.grid(axis = 'y')
    plt.savefig(f"{path}/{target}_{transform}_performances_barplot.png", bbox_inches='tight', dpi=1200)
    plt.clf()
def box_performances_by_algorithm(train_errors, test_errors, models, target, transform, path):
    ''' This function plots the distribution of absolute error as boxplots of train and test sets for each
    algorithm for a single transform.

    Parameters
    ---------
    train_errors : array-like
        individual errors in each train set, one collection per model
    test_errors : array-like
        individual errors in each test set, one collection per model
    models : list
        list of model names as strings
    target : string
        name of target column in dataframes
    transform : string
        name of transform that is paired for each model. For file naming
    path : string
        path where png file will be stored
    '''
    def set_box_color(bp, color):
        # Recolor all artists of one boxplot group so train/test are distinguishable.
        plt.setp(bp['boxes'], color=color)
        plt.setp(bp['whiskers'], color=color)
        plt.setp(bp['caps'], color=color)
        plt.setp(bp['medians'], color=color)
    plt.figure()
    # print(train_errors)
    # Pairs of boxes: train at x*2 - 0.4, test at x*2 + 0.4; sym='' hides outliers.
    train = plt.boxplot(train_errors, positions=np.array(range(len(train_errors)))*2.0-0.4, sym='', widths=0.5)
    test = plt.boxplot(test_errors, positions=np.array(range(len(test_errors)))*2.0+0.4, sym='', widths=0.5)
    set_box_color(train, '#D7191C')
    set_box_color(test, '#2C7BB6')
    # draw temporary red and blue lines and use them to create a legend
    plt.plot([], c='#D7191C', label='Train')
    plt.plot([], c='#2C7BB6', label='Test')
    plt.legend()
    plt.ylabel('Absolute error')
    plt.xticks(range(0, len(models) * 2, 2), models)
    plt.xlim(-2, len(models)*2)
    plt.tight_layout()
    plt.grid(axis = 'y', alpha = 0.3)
    plt.savefig(f"{path}/{target}_{transform}_performances_boxplot.png", bbox_inches='tight', dpi=1200)
    plt.clf()
def bar_performances_by_transform(train_performances, test_performances, model_name, transform_names, target, path):
    ''' This function plots the average absolute error of train and test sets for each
    transform for a single model/algorithm, as grouped bars.

    Parameters
    ---------
    train_performances : array-like
        average absolute error in train set, one value per transform
    test_performances : array-like
        average absolute error in test set, one value per transform
    model_name : string
        name of model that is paired for each transform
    transform_names : list
        list of transform names as strings
    target : string
        name of target column in dataframes
    path : string
        path where png file will be stored'''
    # Prettify tick labels: underscores become spaces.
    transform_names = [name.replace("_"," ") for name in transform_names]
    # set width of bar
    barWidth = 0.25
    # Set position of bar on X axis (train bars, then test bars shifted right)
    r1 = np.arange(len(train_performances))
    r2 = [x + barWidth for x in r1]
    # Make the plot
    plt.bar(r1, train_performances, color='slateblue', width=barWidth, edgecolor='white', label='Train')
    plt.bar(r2, test_performances, color='forestgreen', width=barWidth, edgecolor='white', label='Test')
    # Add xticks on the middle of the group bars
    plt.xlabel('Transform', fontweight='bold')
    plt.xticks([r + barWidth/2 for r in range(len(train_performances))], transform_names)
    plt.tick_params(
        axis='x',
        bottom=False)
    # Create legend & Show graphic
    plt.legend()
    plt.ylabel('Average absolute error')
    plt.grid(axis = 'y')
    plt.savefig(f"{path}/{target}_{model_name}_performances_barplot.png", bbox_inches='tight', dpi=1200)
    plt.clf()
def box_performances_by_transform(train_errors, test_errors, model_name, transform_names, target, path):
    ''' This function plots the distribution of absolute error as boxplots of train and test sets for each
    transformation for a single model/algorithm.

    Parameters
    ---------
    train_errors : array-like
        individual errors in each train set, one collection per transform
    test_errors : array-like
        individual errors in each test set, one collection per transform
    model_name : string
        name of model that is paired for each transform
    transform_names : list
        list of transform names as strings
    target : string
        name of target column in dataframes
    path : string
        path where png file will be stored
    '''
    # Prettify tick labels: underscores become spaces.
    transform_names = [name.replace("_"," ") for name in transform_names]
    def set_box_color(bp, color):
        # Recolor all artists of one boxplot group so train/test are distinguishable.
        plt.setp(bp['boxes'], color=color)
        plt.setp(bp['whiskers'], color=color)
        plt.setp(bp['caps'], color=color)
        plt.setp(bp['medians'], color=color)
    plt.figure()
    # Pairs of boxes: train at x*2 - 0.4, test at x*2 + 0.4; sym='' hides outliers.
    train = plt.boxplot(train_errors, positions=np.array(range(len(train_errors)))*2.0-0.4, sym='', widths=0.5)
    test = plt.boxplot(test_errors, positions=np.array(range(len(test_errors)))*2.0+0.4, sym='', widths=0.5)
    set_box_color(train, '#D7191C')
    set_box_color(test, '#2C7BB6')
    # draw temporary red and blue lines and use them to create a legend
    plt.plot([], c='#D7191C', label='Train')
    plt.plot([], c='#2C7BB6', label='Test')
    plt.legend()
    plt.ylabel('Absolute error')
    plt.xticks(range(0, len(transform_names) * 2, 2), transform_names)
    plt.xlim(-2, len(transform_names)*2)
    plt.tight_layout()
    plt.grid(axis = 'y', alpha = 0.3)
    plt.savefig(f"{path}/{target}_{model_name}_performances_boxplot.png", bbox_inches='tight', dpi=1200)
    plt.clf()
def PC_spectra(df, target, path, label):
    """This function takes in a spectra dataframe that was transformed with PCA
    and plots the staggered PCA-spectra on a 2D plot. The figure is saved to the path.

    Parameters
    ----------
    df : pandas dataframe
        dataframe of set; columns are principal components plus the target column
    target : string
        name of target column in dataframes (dropped before plotting)
    path : string
        path where png file will be stored
    label : string
        name of set. "train" or "test" for file naming.
    """
    # spacing for PC's (vertical offset between stacked traces)
    if label == "train":
        space = 10
    else:
        space = 5
    X = df.drop(str(target), axis=1).values.astype(np.float)
    features = list(df.columns)
    features.remove(target)
    features = np.asarray(features)
    for i, example in enumerate(X):
        intensity = X[i] + space*i
        plt.plot(features, intensity, alpha = 0.5, linewidth=1, c='k')
    plt.yticks([])
    plt.xticks([])
    plt.xlabel("Principal component")
    plt.savefig(f"{path}/PCA_spectra_2D_{label}.png", bbox_inches='tight', dpi=1200)
    plt.clf()
def all_spectra(df, target, path):
    """Plot every spectrum in *df* individually and save the images into a folder.

    Parameters
    ----------
    df : pandas dataframe
        dataframe of spectra set
    target : string
        name of target column in dataframes
    path : string
        path where folder of all spectra plots will be stored
    """
    # make folder for individual spectra; tolerate reruns instead of raising
    # FileExistsError like the plain os.mkdir did
    spectra_path = f"{path}/spectra_plots"
    os.makedirs(spectra_path, exist_ok=True)
    # NOTE: `np.float` was removed in NumPy 1.24; use the builtin `float`.
    X = df.drop(str(target), axis=1).values.astype(float)
    examples = df.index.values.tolist()
    features = list(df.columns)
    features.remove(target)
    features = np.asarray(features).astype(float)
    for i, spectra in enumerate(X):
        plt.plot(features, spectra)
        plt.xlabel("Wavenumber")
        plt.ylabel("Intensity")
        plt.title(f"{examples[i]}")
        plt.savefig(f"{spectra_path}/{examples[i]}_spectra.png", bbox_inches='tight', dpi=1200)
        plt.clf()
# | 35.080882 | 162 | 0.628694 |
104cb952cb937e152550d05cba68121eae1ad671 | 151 | py | Python | examples/example_steps/steps.py | ityutin/df-and-order | fd25e7a23234bfdea6e84b238974e5eccae21b8a | [
"MIT"
] | 1 | 2020-05-18T19:20:06.000Z | 2020-05-18T19:20:06.000Z | examples/example_steps/steps.py | ityutin/df-and-order | fd25e7a23234bfdea6e84b238974e5eccae21b8a | [
"MIT"
] | null | null | null | examples/example_steps/steps.py | ityutin/df-and-order | fd25e7a23234bfdea6e84b238974e5eccae21b8a | [
"MIT"
] | 1 | 2021-01-24T21:05:50.000Z | 2021-01-24T21:05:50.000Z | from df_and_order.df_transform_step import DfTransformStep
class DummyTransformStep(DfTransformStep):
    """Example transform step that returns the dataframe unchanged (identity transform)."""
    def transform(self, df):
        """Return *df* as-is; exists to demonstrate the DfTransformStep interface."""
        return df
| 21.571429 | 58 | 0.781457 |
ab93e27d74a14ff90c917a414b49c3eaf60a5eb6 | 1,324 | py | Python | events/templatetags/weekday.py | wwangwe/Team-213-A-Back-End | 0dcb22ec5ff28284f51bec34a7103c6939b81c51 | [
"MIT"
] | 2 | 2019-12-11T14:39:05.000Z | 2020-06-23T10:28:30.000Z | events/templatetags/weekday.py | wwangwe/Team-213-A-Back-End | 0dcb22ec5ff28284f51bec34a7103c6939b81c51 | [
"MIT"
] | null | null | null | events/templatetags/weekday.py | wwangwe/Team-213-A-Back-End | 0dcb22ec5ff28284f51bec34a7103c6939b81c51 | [
"MIT"
] | 2 | 2018-12-25T00:34:11.000Z | 2020-06-23T10:28:45.000Z | from __future__ import unicode_literals
from datetime import date
from django.template import Library, TemplateSyntaxError
from django.utils.dates import WEEKDAYS_ABBR, WEEKDAYS
register = Library()
@register.simple_tag
def weekday(year_or_num, month=None, day=None, full=False):
    """Simple tag - resolve a weekday name.

    Call it either with a full date or with a bare weekday number
    (0 is Monday):

        {% weekday 2014 3 3 %}            -> Mon
        {% weekday 2014 3 3 full=True %}  -> Monday
        {% weekday 0 full=True %}         -> Monday

    Abbreviated names are returned unless full=True is passed.
    """
    # Exactly one of month/day being truthy means a malformed 2-argument call.
    if bool(month) != bool(day):
        raise TemplateSyntaxError("weekday accepts 1 or 3 arguments plus optional 'full' argument")
    try:
        if year_or_num and month and day:
            day_index = date(int(year_or_num), int(month), int(day)).weekday()
        else:
            day_index = year_or_num
        names = WEEKDAYS if full else WEEKDAYS_ABBR
        return names[day_index]
    except Exception:
        # Django template-tag convention: fail silently on bad input.
        return
@register.filter
def weekday_css_class(weekday_num, calendar):
    """Filter - return the CSS class that *calendar* assigns to weekday number *weekday_num*."""
    return calendar.cssclasses[weekday_num]
| 24.981132 | 101 | 0.673716 |
2919edd6bc3e384897ebf63a960521fce0bd2298 | 4,867 | py | Python | scripts/matched_vs_unmatched_er_testing_sims.py | neurodata/bilateral-connectome | 2335bd444040ff647a4cd3304bddf7f533e490a7 | [
"MIT"
] | 2 | 2021-09-24T20:21:18.000Z | 2022-02-08T18:31:29.000Z | scripts/matched_vs_unmatched_er_testing_sims.py | neurodata/bilateral-connectome | 2335bd444040ff647a4cd3304bddf7f533e490a7 | [
"MIT"
] | 9 | 2021-09-29T17:23:41.000Z | 2022-03-16T20:22:04.000Z | scripts/matched_vs_unmatched_er_testing_sims.py | neurodata/bilateral-connectome | 2335bd444040ff647a4cd3304bddf7f533e490a7 | [
"MIT"
] | 2 | 2021-11-16T16:17:53.000Z | 2022-03-26T01:25:10.000Z | #%%
import datetime
from re import sub
import time
import matplotlib.path
import matplotlib.pyplot as plt
import matplotlib.transforms
import numpy as np
import pandas as pd
import seaborn as sns
from giskard.plot import merge_axes, soft_axis_off
from graspologic.simulations import er_np
from matplotlib.collections import LineCollection
from pkg.data import load_network_palette, load_node_palette, load_unmatched
from pkg.io import FIG_PATH
from pkg.io import glue as default_glue
from pkg.io import savefig
from pkg.plot import SmartSVG, networkplot_simple, set_theme
from pkg.plot.er import plot_density
from pkg.stats import erdos_renyi_test
from pkg.utils import sample_toy_networks
from svgutils.compose import Figure, Panel, Text
from pkg.plot import draw_hypothesis_box
DISPLAY_FIGS = True  # when False, gluefig closes each figure after saving it
FILENAME = "er_unmatched_test"  # folder/namespace for saved figures and glued variables
def gluefig(name, fig, **kwargs):
    """Save *fig* under this script's figure folder and glue it for the notebook build."""
    savefig(name, foldername=FILENAME, **kwargs)
    glue(name, fig, figure=True)
    if not DISPLAY_FIGS:
        plt.close()  # suppress inline display when figures are not wanted
def glue(name, var, **kwargs):
    """Register *var* under *name*, namespaced by this script's FILENAME."""
    default_glue(name, var, FILENAME, **kwargs)
t0 = time.time()  # wall-clock start, for reporting total runtime
set_theme(font_scale=1.25)
network_palette, NETWORK_KEY = load_network_palette()
node_palette, NODE_KEY = load_node_palette()
#%%
from graspologic.simulations import er_np
from pkg.perturb import shuffle_edges
# Toy example: sample a directed ER graph, then make a perturbed copy with
# 40% of its edges shuffled.
p = 0.1
n = 20
A1 = er_np(n, p, directed=True)
p_shuffle = 0.4
n_edges = np.count_nonzero(A1)
n_shuffle = int(n_edges * p_shuffle)
A2 = shuffle_edges(A1, effect_size=n_shuffle)
#%%
# #%%
# from graspologic.simulations import er_np
# from pkg.perturb import remove_edges
# from pkg.stats import compute_density, erdos_renyi_test, erdos_renyi_test_paired
# from tqdm import tqdm
# p1 = 0.1
# n = 20
# n_sims_alt = 100
# n_sims_null = 1000
# p_equalize = 0.5
# ps = np.linspace(0.1, 0.3, 10)
# rows = []
# for p2 in ps:
# if p2 == p1:
# n_sims = n_sims_null
# else:
# n_sims = n_sims_alt
# for sim in tqdm(range(n_sims)):
# A1 = er_np(n, p1, directed=True)
# A2 = er_np(n, p2, directed=True)
# n_set = A1.size * p_equalize
# choice_edge_indices = np.random.choice(A1.size, size=n_set, replace=False)
# row_inds, col_inds = np.unravel_index(choice_edge_indices, A1.shape)
# A2[]
# # density_before = compute_density(A2)
# # flat_edges1 = np.nonzero(A1.ravel())[0]
# # flat_edges2 = np.nonzero(A2.ravel())[0]
# # edges1_not2 = np.setdiff1d(flat_edges1, flat_edges2)
# # n_edges = np.count_nonzero(A1)
# # n_set = int(n_edges * p_equalize)
# # A2 = remove_edges(A2, effect_size=n_set)
# #
# # A2[row_inds, col_inds] = 1
# # density_after = compute_density(A2)
# stat, pvalue, misc = erdos_renyi_test(A1, A2)
# rows.append(
# {
# "stat": stat,
# "pvalue": pvalue,
# "sim": sim,
# "p_equalize": p_equalize,
# "p1": p1,
# "p2": p2,
# "method": "Fisher's",
# }
# )
# stat, pvalue, misc = erdos_renyi_test_paired(A1, A2)
# rows.append(
# {
# "stat": stat,
# "pvalue": pvalue,
# "sim": sim,
# "p_equalize": p_equalize,
# "p1": p1,
# "p2": p2,
# "method": "McNemar's",
# }
# )
# results = pd.DataFrame(rows)
# #%%
# results["detected"] = 0
# results.at[results[results["pvalue"] < 0.05].index, "detected"] = 1
# #%%
# squashed_results = results.groupby(["p1", "p2", "method"]).mean().reset_index()
# fig, ax = plt.subplots(1, 1, figsize=(8, 6))
# sns.lineplot(data=squashed_results, y="detected", x="p2", hue="method", ax=ax)
# ax.set(ylabel=r"Power (@ $\alpha$ = 0.05)", xlabel="Effect size")
# ax.get_legend().set_title("Test")
# #%%
# fig, axs = plt.subplots(1, 2, figsize=(10, 5), constrained_layout=True)
# from giskard.plot import subuniformity_plot
# subuniformity_plot(
# results[(results["method"] == "Fisher's") & (results["p1"] == results["p2"])][
# "pvalue"
# ],
# ax=axs[0],
# )
# subuniformity_plot(
# results[(results["method"] == "McNemar's") & (results["p1"] == results["p2"])][
# "pvalue"
# ],
# ax=axs[1],
# )
# # %%
# from scipy.stats import binom
# from statsmodels.stats.contingency_tables import mcnemar
# from pkg.stats import binom_2samp_paired
# p = 0.1
# n = 1000
# n_same = 10
# pvalues = []
# for i in range(n_sims):
# samples1 = binom(1, p).rvs(size=100)
# samples2 = binom(1, p).rvs(size=100)
# samples2[:n_same] = samples1[:n_same]
# stat, pvalue, misc = binom_2samp_paired(samples1, samples2)
# pvalues.append(pvalue)
# pvalues = np.array(pvalues)
# subuniformity_plot(pvalues)
| 26.741758 | 85 | 0.621122 |
529059eaad08e58c824b93979442fb568d10f5f1 | 3,142 | py | Python | examples/flask/app/views.py | Slojob/Chore-Roster | ea4e43ce5d8f0f9663c6574308d6c712dc52e9ad | [
"Apache-2.0"
] | 11 | 2018-01-24T19:54:58.000Z | 2022-01-16T06:39:03.000Z | examples/flask/app/views.py | Slojob/Chore-Roster | ea4e43ce5d8f0f9663c6574308d6c712dc52e9ad | [
"Apache-2.0"
] | null | null | null | examples/flask/app/views.py | Slojob/Chore-Roster | ea4e43ce5d8f0f9663c6574308d6c712dc52e9ad | [
"Apache-2.0"
] | 7 | 2018-01-24T20:21:33.000Z | 2021-11-26T13:08:24.000Z | from facebook import get_user_from_cookie, GraphAPI
from flask import g, render_template, redirect, request, session, url_for
from app import app, db
from models import User
# Facebook app details
FB_APP_ID = ''
FB_APP_NAME = ''
FB_APP_SECRET = ''
@app.route('/')
def index():
    """Render the main page for logged-in users, or the login page otherwise."""
    # If a user was set in the get_current_user function before the request,
    # the user is logged in.
    if g.user:
        return render_template('index.html', app_id=FB_APP_ID,
                               app_name=FB_APP_NAME, user=g.user)
    # Otherwise, a user is not logged in.
    return render_template('login.html', app_id=FB_APP_ID, name=FB_APP_NAME)
@app.route('/logout')
def logout():
    """Remove the current user from the session and return to the index page.

    Only the application session is cleared here; logging the user out of
    Facebook itself is handled client-side by the JavaScript SDK.
    """
    if 'user' in session:
        del session['user']
    return redirect(url_for('index'))
@app.before_request
def get_current_user():
    """Set g.user to the currently logged in user.
    Called before each request, get_current_user sets the global g.user
    variable to the currently logged in user. A currently logged in user is
    determined by seeing if it exists in Flask's session dictionary.
    If it is the first time the user is logging into this application it will
    create the user and insert it into the database. If the user is not logged
    in, None will be set to g.user.
    """
    # Set the user in the session dictionary as a global g.user and bail out
    # of this function early.
    # NOTE(review): a cached session user skips the DB/token refresh below —
    # presumably intentional; confirm how token staleness is handled.
    if session.get('user'):
        g.user = session.get('user')
        return
    # Attempt to get the short term access token for the current user.
    result = get_user_from_cookie(cookies=request.cookies, app_id=FB_APP_ID,
                                  app_secret=FB_APP_SECRET)
    # If there is no result, we assume the user is not logged in.
    if result:
        # Check to see if this user is already in our database.
        user = User.query.filter(User.id == result['uid']).first()
        if not user:
            # Not an existing user so get info
            graph = GraphAPI(result['access_token'])
            profile = graph.get_object('me')
            # some profiles have no public link; store an empty string instead
            if 'link' not in profile:
                profile['link'] = ""
            # Create the user and insert it into the database
            user = User(id=str(profile['id']), name=profile['name'],
                        profile_url=profile['link'],
                        access_token=result['access_token'])
            db.session.add(user)
        elif user.access_token != result['access_token']:
            # If an existing user, update the access token
            user.access_token = result['access_token']
        # Add the user to the current session
        session['user'] = dict(name=user.name, profile_url=user.profile_url,
                               id=user.id, access_token=user.access_token)
    # Commit changes to the database and set the user as a global g.user
    db.session.commit()
    g.user = session.get('user', None)
| 36.114943 | 79 | 0.643221 |
5a71afe721a7bef67c90cce31533e1b5c02a9609 | 11,188 | py | Python | cinder/tests/test_block_device.py | CloudVPS/cinder | 9097b9407b6ce16c7b5678682284a0ad0fcc652d | [
"Apache-2.0"
] | null | null | null | cinder/tests/test_block_device.py | CloudVPS/cinder | 9097b9407b6ce16c7b5678682284a0ad0fcc652d | [
"Apache-2.0"
] | null | null | null | cinder/tests/test_block_device.py | CloudVPS/cinder | 9097b9407b6ce16c7b5678682284a0ad0fcc652d | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2013 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import mox
from cinder import context
from cinder.db.sqlalchemy import api
import cinder.exception
from cinder.image import image_utils
import cinder.test
from cinder.volume.driver import ISCSIDriver
from cinder.volume.drivers.block_device import BlockDeviceDriver
from cinder.volume import utils as volutils
class TestBlockDeviceDriver(cinder.test.TestCase):
    """mox-based unit tests for BlockDeviceDriver, the local block-device volume backend.

    Each test stubs out the driver's collaborators, records the expected call
    sequence, then verifies behavior via mox record/replay.
    """
    def setUp(self):
        super(TestBlockDeviceDriver, self).setUp()
        # MockAnything stands in for the oslo config object the driver reads.
        self.configuration = mox.MockAnything()
        self.configuration.available_devices = ['/dev/loop1', '/dev/loop2']
        self.configuration.host = 'localhost'
        self.configuration.iscsi_port = 3260
        self.drv = BlockDeviceDriver(configuration=self.configuration)
    def test_initialize_connection(self):
        # Same-host connector gets a direct local device path.
        TEST_VOLUME1 = {'host': 'localhost1',
                        'provider_location': '1 2 3 /dev/loop1',
                        }
        TEST_CONNECTOR = {'host': 'localhost1'}
        self.mox.StubOutWithMock(self.drv, 'local_path')
        self.drv.local_path(TEST_VOLUME1).AndReturn('/dev/loop1')
        self.mox.ReplayAll()
        data = self.drv.initialize_connection(TEST_VOLUME1, TEST_CONNECTOR)
        self.assertEqual(data, {
            'driver_volume_type': 'local',
            'data': {'device_path': '/dev/loop1'}
        })
    def test_initialize_connection_different_hosts(self):
        # Cross-host connector falls back to the iSCSI parent implementation.
        TEST_CONNECTOR = {'host': 'localhost1'}
        TEST_VOLUME2 = {'host': 'localhost2',
                        'provider_location': '1 2 3 /dev/loop2',
                        }
        self.mox.StubOutWithMock(ISCSIDriver, 'initialize_connection')
        ISCSIDriver.initialize_connection(TEST_VOLUME2,
                                          TEST_CONNECTOR).AndReturn('data')
        self.mox.ReplayAll()
        data = self.drv.initialize_connection(TEST_VOLUME2, TEST_CONNECTOR)
        self.assertEqual(data, 'data')
    def test_delete_not_volume_provider_location(self):
        TEST_VOLUME2 = {'provider_location': None}
        self.mox.StubOutWithMock(self.drv, 'local_path')
        self.drv.local_path(TEST_VOLUME2).AndReturn(None)
        self.mox.StubOutWithMock(self.drv, 'clear_volume')
        self.mox.ReplayAll()
        self.drv.delete_volume(TEST_VOLUME2)
    def test_delete_volume_path_exist(self):
        TEST_VOLUME1 = {'provider_location': '1 2 3 /dev/loop1'}
        self.mox.StubOutWithMock(self.drv, 'local_path')
        path = self.drv.local_path(TEST_VOLUME1).AndReturn('/dev/loop1')
        self.mox.StubOutWithMock(os.path, 'exists')
        os.path.exists(path).AndReturn(True)
        self.mox.StubOutWithMock(self.drv, 'clear_volume')
        self.drv.clear_volume(TEST_VOLUME1)
        self.mox.ReplayAll()
        self.drv.delete_volume(TEST_VOLUME1)
    def test_delete_path_is_not_in_list_of_available_devices(self):
        TEST_VOLUME2 = {'provider_location': '1 2 3 /dev/loop0'}
        self.mox.StubOutWithMock(self.drv, 'local_path')
        self.drv.local_path(TEST_VOLUME2).AndReturn('/dev/loop0')
        self.mox.StubOutWithMock(self.drv, 'clear_volume')
        self.mox.ReplayAll()
        self.drv.delete_volume(TEST_VOLUME2)
    def test_create_volume(self):
        TEST_VOLUME = {'size': 1,
                       'name': 'vol1'}
        self.mox.StubOutWithMock(self.drv, 'find_appropriate_size_device')
        self.drv.find_appropriate_size_device(TEST_VOLUME['size']) \
            .AndReturn('dev_path')
        self.mox.ReplayAll()
        result = self.drv.create_volume(TEST_VOLUME)
        self.assertEqual(result, {
            'provider_location': 'None:3260,None None '
                                 'None dev_path'})
    def test_update_volume_stats(self):
        self.mox.StubOutWithMock(self.drv, '_devices_sizes')
        self.drv._devices_sizes().AndReturn({'/dev/loop1': 1024,
                                             '/dev/loop2': 1024})
        self.mox.StubOutWithMock(self.drv, '_get_used_devices')
        self.drv._get_used_devices().AndReturn(set())
        self.mox.StubOutWithMock(self.configuration, 'safe_get')
        self.configuration.safe_get('volume_backend_name'). \
            AndReturn('BlockDeviceDriver')
        self.mox.ReplayAll()
        self.drv._update_volume_stats()
        self.assertEqual(self.drv._stats,
                         {'total_capacity_gb': 2,
                          'free_capacity_gb': 2,
                          'reserved_percentage':
                          self.configuration.reserved_percentage,
                          'QoS_support': False,
                          'vendor_name': "Open Source",
                          'driver_version': self.drv.VERSION,
                          'storage_protocol': 'unknown',
                          'volume_backend_name': 'BlockDeviceDriver',
                          })
    def test_create_cloned_volume(self):
        TEST_SRC = {'id': '1',
                    'size': 1,
                    'provider_location': '1 2 3 /dev/loop1'}
        TEST_VOLUME = {}
        self.mox.StubOutWithMock(self.drv, 'find_appropriate_size_device')
        dev = self.drv.find_appropriate_size_device(TEST_SRC['size']).\
            AndReturn('/dev/loop2')
        self.mox.StubOutWithMock(volutils, 'copy_volume')
        self.mox.StubOutWithMock(self.drv, 'local_path')
        self.mox.StubOutWithMock(self.drv, '_get_device_size')
        self.drv.local_path(TEST_SRC).AndReturn('/dev/loop1')
        self.drv._get_device_size('/dev/loop2').AndReturn(1)
        # 2048 here appears to be the copy size derived from the 1 (GB?)
        # device size — TODO confirm the unit conversion in the driver.
        volutils.copy_volume('/dev/loop1', dev, 2048,
                             execute=self.drv._execute)
        self.mox.ReplayAll()
        self.assertEqual(self.drv.create_cloned_volume(TEST_VOLUME, TEST_SRC),
                         {'provider_location': 'None:3260,'
                                               'None None None /dev/loop2'})
    def test_copy_image_to_volume(self):
        TEST_VOLUME = {'provider_location': '1 2 3 /dev/loop1', 'size': 1}
        TEST_IMAGE_SERVICE = "image_service"
        TEST_IMAGE_ID = "image_id"
        self.mox.StubOutWithMock(image_utils, 'fetch_to_raw')
        self.mox.StubOutWithMock(self.drv, 'local_path')
        self.drv.local_path(TEST_VOLUME).AndReturn('/dev/loop1')
        image_utils.fetch_to_raw(context, TEST_IMAGE_SERVICE,
                                 TEST_IMAGE_ID, '/dev/loop1', size=1)
        self.mox.ReplayAll()
        self.drv.copy_image_to_volume(context, TEST_VOLUME, TEST_IMAGE_SERVICE,
                                      TEST_IMAGE_ID)
    def test_copy_volume_to_image(self):
        TEST_VOLUME = {'provider_location': '1 2 3 /dev/loop1'}
        TEST_IMAGE_SERVICE = "image_service"
        TEST_IMAGE_META = "image_meta"
        self.mox.StubOutWithMock(image_utils, 'upload_volume')
        self.mox.StubOutWithMock(self.drv, 'local_path')
        self.drv.local_path(TEST_VOLUME).AndReturn('/dev/loop1')
        image_utils.upload_volume(context, TEST_IMAGE_SERVICE,
                                  TEST_IMAGE_META, '/dev/loop1')
        self.mox.ReplayAll()
        self.drv.copy_volume_to_image(context, TEST_VOLUME, TEST_IMAGE_SERVICE,
                                      TEST_IMAGE_META)
    def test_get_used_devices(self):
        TEST_VOLUME1 = {'host': 'localhost',
                        'provider_location': '1 2 3 /dev/loop1'}
        TEST_VOLUME2 = {'host': 'localhost',
                        'provider_location': '1 2 3 /dev/loop2'}
        self.mox.StubOutWithMock(api, 'volume_get_all_by_host')
        self.mox.StubOutWithMock(context, 'get_admin_context')
        context.get_admin_context()
        api.volume_get_all_by_host(None,
                                   self.configuration.host) \
            .AndReturn([TEST_VOLUME1, TEST_VOLUME2])
        self.mox.StubOutWithMock(self.drv, 'local_path')
        path1 = self.drv.local_path(TEST_VOLUME1).AndReturn('/dev/loop1')
        path2 = self.drv.local_path(TEST_VOLUME2).AndReturn('/dev/loop2')
        self.mox.ReplayAll()
        self.assertEqual(self.drv._get_used_devices(), set([path1, path2]))
    def test_get_device_size(self):
        dev_path = '/dev/loop1'
        self.mox.StubOutWithMock(self.drv, '_execute')
        out = '2048'
        self.drv._execute('blockdev', '--getsz', dev_path,
                          run_as_root=True).AndReturn((out, None))
        self.mox.ReplayAll()
        self.assertEqual(self.drv._get_device_size(dev_path), 1)
    def test_devices_sizes(self):
        self.mox.StubOutWithMock(self.drv, '_get_device_size')
        for dev in self.configuration.available_devices:
            self.drv._get_device_size(dev).AndReturn(1)
        self.mox.ReplayAll()
        self.assertEqual(self.drv._devices_sizes(),
                         {'/dev/loop1': 1, '/dev/loop2': 1})
    def test_find_appropriate_size_device_no_free_disks(self):
        size = 1
        self.mox.StubOutWithMock(self.drv, '_devices_sizes')
        self.drv._devices_sizes().AndReturn({'/dev/loop1': 1024,
                                             '/dev/loop2': 1024})
        self.mox.StubOutWithMock(self.drv, '_get_used_devices')
        self.drv._get_used_devices().AndReturn(set(['/dev/loop1',
                                                    '/dev/loop2']))
        self.mox.ReplayAll()
        self.assertRaises(cinder.exception.CinderException,
                          self.drv.find_appropriate_size_device, size)
    def test_find_appropriate_size_device_not_big_enough_disk(self):
        size = 2
        self.mox.StubOutWithMock(self.drv, '_devices_sizes')
        self.drv._devices_sizes().AndReturn({'/dev/loop1': 1024,
                                             '/dev/loop2': 1024})
        self.mox.StubOutWithMock(self.drv, '_get_used_devices')
        self.drv._get_used_devices().AndReturn(set(['/dev/loop1']))
        self.mox.ReplayAll()
        self.assertRaises(cinder.exception.CinderException,
                          self.drv.find_appropriate_size_device, size)
    def test_find_appropriate_size_device(self):
        size = 1
        self.mox.StubOutWithMock(self.drv, '_devices_sizes')
        self.drv._devices_sizes().AndReturn({'/dev/loop1': 2048,
                                             '/dev/loop2': 1024})
        self.mox.StubOutWithMock(self.drv, '_get_used_devices')
        self.drv._get_used_devices().AndReturn(set())
        self.mox.ReplayAll()
        self.assertEqual(self.drv.find_appropriate_size_device(size),
                         '/dev/loop2')
| 46.616667 | 79 | 0.618699 |
3d4ed023c56fcc25cbe8b0db9cf1c5216cf8f8ab | 1,285 | py | Python | code/modify.py | FlyingFordAnglia/Scrabble | 014bc1b6dc405835cd6f9638f4f298cfc8225f91 | [
"MIT"
] | 2 | 2020-01-18T18:36:25.000Z | 2020-05-28T07:28:16.000Z | code/modify.py | Stochastic13/Scrabble | 47887c7780c81a66b8b4c6f82ca2c50d645b075c | [
"MIT"
] | null | null | null | code/modify.py | Stochastic13/Scrabble | 47887c7780c81a66b8b4c6f82ca2c50d645b075c | [
"MIT"
] | 2 | 2019-02-12T12:46:03.000Z | 2019-02-14T12:35:29.000Z | # board format: The board is an array of 15x15 shape, with the elements being numbers,
# translated by the dictionary letternumberkey into strings whenever required.
import string
# letternumberkey maps characters to board integers; numberletterkey is the inverse (integers to characters)
l1 = dict(zip(string.ascii_uppercase, list(range(0, 26, 1))))  # 'A'-'Z' -> 0-25
l2 = dict(zip(string.ascii_lowercase, list(range(26, 52, 1))))  # 'a'-'z' -> 26-51 (blank-tile letters)
letternumberkey = {**l1, **l2}
letternumberkey[' '] = 52  # 52 encodes an empty square
# inverse mapping: board integer -> character
l3 = dict(zip(list(range(0, 26, 1)), string.ascii_uppercase))
l4 = dict(zip(list(range(26, 52, 1)), string.ascii_lowercase))
numberletterkey = {**l3, **l4}
numberletterkey[52] = ' '
# arguments: letters is a list of single-character strings (tiles) to place on the board; they are
#            translated to board integers via letternumberkey. positions is a list of lists, each sublist
#            containing the row and column of the corresponding letter on the board array. player is the
#            player making the move; the placed tiles are removed from player.rack.
def move(letters, positions, player, mainboard):
    """Place *letters* at *positions* on a copy of *mainboard* and return it.

    Each placed tile is removed from player.rack; a lowercase letter marks a
    blank tile, so a ' ' (blank) is removed from the rack instead.
    """
    new_board = mainboard.copy()
    for letter, (row, col) in zip(letters, positions):
        new_board[row, col] = letternumberkey[letter]
        tile = letter if letter == letter.upper() else ' '
        player.rack.remove(tile)
    return new_board
| 45.892857 | 111 | 0.712062 |
8cf5a3ebbcf82c70f7dde4f2ed7c61024950e70a | 13,427 | py | Python | tests/test_client_functional.py | ajdavis/aiohttp | d5138978f3e82aa82a2f003b00d38112c58a40c1 | [
"Apache-2.0"
] | null | null | null | tests/test_client_functional.py | ajdavis/aiohttp | d5138978f3e82aa82a2f003b00d38112c58a40c1 | [
"Apache-2.0"
] | null | null | null | tests/test_client_functional.py | ajdavis/aiohttp | d5138978f3e82aa82a2f003b00d38112c58a40c1 | [
"Apache-2.0"
] | null | null | null | """HTTP client functional tests against aiohttp.web server"""
import aiohttp
import asyncio
import io
import os
import os.path
import pytest
import ssl
from unittest import mock
from aiohttp import hdrs, web
from aiohttp.errors import FingerprintMismatch
@pytest.fixture
def ssl_ctx():
    # Build a server-side SSL context from the self-signed sample cert shipped with the tests.
    here = os.path.dirname(__file__)
    ssl_ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    ssl_ctx.load_cert_chain(
        os.path.join(here, 'sample.crt'),
        os.path.join(here, 'sample.key'))
    return ssl_ctx
@pytest.mark.run_loop
def test_keepalive_two_requests_success(create_app_and_client):
    # Two sequential GETs over keep-alive should leave exactly one pooled connection.
    @asyncio.coroutine
    def handler(request):
        body = yield from request.read()
        assert b'' == body
        return web.Response(body=b'OK')
    app, client = yield from create_app_and_client()
    app.router.add_route('GET', '/', handler)
    resp1 = yield from client.get('/')
    yield from resp1.read()
    resp2 = yield from client.get('/')
    yield from resp2.read()
    assert 1 == len(client._session.connector._conns)
@pytest.mark.run_loop
def test_keepalive_response_released(create_app_and_client):
    # release() (without reading) must also return the connection to the pool.
    @asyncio.coroutine
    def handler(request):
        body = yield from request.read()
        assert b'' == body
        return web.Response(body=b'OK')
    app, client = yield from create_app_and_client()
    app.router.add_route('GET', '/', handler)
    resp1 = yield from client.get('/')
    yield from resp1.release()
    resp2 = yield from client.get('/')
    yield from resp2.release()
    assert 1 == len(client._session.connector._conns)
@pytest.mark.run_loop
def test_keepalive_server_force_close_connection(create_app_and_client):
    # A server-side force_close() must keep the connection out of the client pool.
    @asyncio.coroutine
    def handler(request):
        body = yield from request.read()
        assert b'' == body
        response = web.Response(body=b'OK')
        response.force_close()
        return response
    app, client = yield from create_app_and_client()
    app.router.add_route('GET', '/', handler)
    resp1 = yield from client.get('/')
    resp1.close()
    resp2 = yield from client.get('/')
    resp2.close()
    assert 0 == len(client._session.connector._conns)
@pytest.mark.run_loop
def test_HTTP_304(create_app_and_client):
    # A 304 Not Modified response carries no payload.
    @asyncio.coroutine
    def handler(request):
        body = yield from request.read()
        assert b'' == body
        return web.Response(status=304)
    app, client = yield from create_app_and_client()
    app.router.add_route('GET', '/', handler)
    resp = yield from client.get('/')
    assert resp.status == 304
    content = yield from resp.read()
    assert content == b''
@pytest.mark.run_loop
def test_HTTP_304_WITH_BODY(create_app_and_client):
    # Even when the handler sets a body, a 304 must reach the client empty.
    @asyncio.coroutine
    def handler(request):
        body = yield from request.read()
        assert b'' == body
        return web.Response(body=b'test', status=304)
    app, client = yield from create_app_and_client()
    app.router.add_route('GET', '/', handler)
    resp = yield from client.get('/')
    assert resp.status == 304
    content = yield from resp.read()
    assert content == b''
@pytest.mark.run_loop
def test_auto_header_user_agent(create_app_and_client):
    """The client must add its default aiohttp User-Agent header automatically."""
    @asyncio.coroutine
    def handler(request):
        assert 'aiohttp' in request.headers['user-agent']
        return web.Response()
    app, client = yield from create_app_and_client()
    app.router.add_route('GET', '/', handler)
    resp = yield from client.get('/')
    try:
        # BUG FIX: was `assert 200, resp.status` — an always-true assert of the
        # constant 200 with resp.status as the message. Compare like the
        # sibling tests do.
        assert 200 == resp.status
    finally:
        yield from resp.release()
@pytest.mark.run_loop
def test_skip_auto_headers_user_agent(create_app_and_client):
    # skip_auto_headers on a single request suppresses the User-Agent header.
    @asyncio.coroutine
    def handler(request):
        assert hdrs.USER_AGENT not in request.headers
        return web.Response()
    app, client = yield from create_app_and_client()
    app.router.add_route('GET', '/', handler)
    resp = yield from client.get('/',
                                 skip_auto_headers=['user-agent'])
    try:
        assert 200 == resp.status
    finally:
        yield from resp.release()
@pytest.mark.run_loop
def test_skip_default_auto_headers_user_agent(create_app_and_client):
    # skip_auto_headers configured on the session applies to every request.
    @asyncio.coroutine
    def handler(request):
        assert hdrs.USER_AGENT not in request.headers
        return web.Response()
    app, client = yield from create_app_and_client(client_params=dict(
        skip_auto_headers=['user-agent']))
    app.router.add_route('GET', '/', handler)
    resp = yield from client.get('/')
    try:
        assert 200 == resp.status
    finally:
        yield from resp.release()
@pytest.mark.run_loop
def test_skip_auto_headers_content_type(create_app_and_client):
    # skip_auto_headers can also suppress the automatic Content-Type header.
    @asyncio.coroutine
    def handler(request):
        assert hdrs.CONTENT_TYPE not in request.headers
        return web.Response()
    app, client = yield from create_app_and_client()
    app.router.add_route('GET', '/', handler)
    resp = yield from client.get('/',
                                 skip_auto_headers=['content-type'])
    try:
        assert 200 == resp.status
    finally:
        yield from resp.release()
@pytest.mark.run_loop
def test_post_data_bytesio(create_app_and_client):
    # Posting a BytesIO sends its bytes with the correct Content-Length.
    data = b'some buffer'
    @asyncio.coroutine
    def handler(request):
        assert len(data) == request.content_length
        val = yield from request.read()
        assert data == val
        return web.Response()
    app, client = yield from create_app_and_client()
    app.router.add_route('POST', '/', handler)
    resp = yield from client.post('/', data=io.BytesIO(data))
    try:
        assert 200 == resp.status
    finally:
        yield from resp.release()
@pytest.mark.run_loop
def test_post_data_with_bytesio_file(create_app_and_client):
    # A BytesIO passed inside a dict is uploaded as a multipart file field.
    data = b'some buffer'
    @asyncio.coroutine
    def handler(request):
        post_data = yield from request.post()
        assert ['file'] == list(post_data.keys())
        assert data == post_data['file'].file.read()
        return web.Response()
    app, client = yield from create_app_and_client()
    app.router.add_route('POST', '/', handler)
    resp = yield from client.post('/', data={'file': io.BytesIO(data)})
    try:
        assert 200 == resp.status
    finally:
        yield from resp.release()
@pytest.mark.run_loop
def test_client_ssl(create_app_and_client, loop, ssl_ctx):
    # HTTPS round trip against the test server with certificate verification disabled.
    connector = aiohttp.TCPConnector(verify_ssl=False, loop=loop)
    @asyncio.coroutine
    def handler(request):
        return web.HTTPOk(text='Test message')
    app, client = yield from create_app_and_client(
        server_params=dict(ssl_ctx=ssl_ctx),
        client_params=dict(connector=connector))
    app.router.add_route('GET', '/', handler)
    resp = yield from client.get('/')
    try:
        assert 200 == resp.status
        txt = yield from resp.text()
        assert txt == 'Test message'
    finally:
        yield from resp.release()
@pytest.mark.parametrize('fingerprint', [
    b'\xa2\x06G\xad\xaa\xf5\xd8\\J\x99^by;\x06=',
    b's\x93\xfd:\xed\x08\x1do\xa9\xaeq9\x1a\xe3\xc5\x7f\x89\xe7l\xf9',
    b'0\x9a\xc9D\x83\xdc\x91\'\x88\x91\x11\xa1d\x97\xfd\xcb~7U\x14D@L'
    b'\x11\xab\x99\xa8\xae\xb7\x14\xee\x8b'],
    ids=['md5', 'sha1', 'sha256'])
@pytest.mark.run_loop
def test_tcp_connector_fingerprint_ok(create_app_and_client,
                                      loop, ssl_ctx, fingerprint):
    # Connection succeeds when the pinned fingerprint matches the sample cert
    # (parametrized over md5/sha1/sha256 digests of it).
    @asyncio.coroutine
    def handler(request):
        return web.HTTPOk(text='Test message')
    connector = aiohttp.TCPConnector(loop=loop, verify_ssl=False,
                                     fingerprint=fingerprint)
    app, client = yield from create_app_and_client(
        server_params=dict(ssl_ctx=ssl_ctx),
        client_params=dict(connector=connector))
    app.router.add_route('GET', '/', handler)
    resp = yield from client.get('/')
    assert resp.status == 200
    resp.close()
@pytest.mark.parametrize('fingerprint', [
    b'\xa2\x06G\xad\xaa\xf5\xd8\\J\x99^by;\x06=',
    b's\x93\xfd:\xed\x08\x1do\xa9\xaeq9\x1a\xe3\xc5\x7f\x89\xe7l\xf9',
    b'0\x9a\xc9D\x83\xdc\x91\'\x88\x91\x11\xa1d\x97\xfd\xcb~7U\x14D@L'
    b'\x11\xab\x99\xa8\xae\xb7\x14\xee\x8b'],
    ids=['md5', 'sha1', 'sha256'])
@pytest.mark.run_loop
def test_tcp_connector_fingerprint_fail(create_app_and_client,
                                        loop, ssl_ctx, fingerprint):
    # A mismatched pinned fingerprint must raise FingerprintMismatch that
    # reports both the expected and the actual digest.
    @asyncio.coroutine
    def handler(request):
        return web.HTTPOk(text='Test message')
    bad_fingerprint = b'\x00' * len(fingerprint)
    connector = aiohttp.TCPConnector(loop=loop, verify_ssl=False,
                                     fingerprint=bad_fingerprint)
    app, client = yield from create_app_and_client(
        server_params=dict(ssl_ctx=ssl_ctx),
        client_params=dict(connector=connector))
    app.router.add_route('GET', '/', handler)
    with pytest.raises(FingerprintMismatch) as cm:
        yield from client.get('/')
    exc = cm.value
    assert exc.expected == bad_fingerprint
    assert exc.got == fingerprint
@pytest.mark.run_loop
def test_format_task_get(create_server, loop):
    # repr() of a pending client-request task must render without errors.
    @asyncio.coroutine
    def handler(request):
        return web.Response(body=b'OK')
    app, url = yield from create_server()
    app.router.add_route('GET', '/', handler)
    client = aiohttp.ClientSession(loop=loop)
    task = loop.create_task(client.get(url))
    assert "{}".format(task)[:18] == "<Task pending coro"
    resp = yield from task
    resp.close()
    client.close()
@pytest.mark.run_loop
def test_str_params(create_app_and_client):
    # A string `params` value is passed through verbatim (no re-quoting of '+').
    @asyncio.coroutine
    def handler(request):
        assert 'q=t+est' in request.query_string
        return web.Response()
    app, client = yield from create_app_and_client()
    app.router.add_route('GET', '/', handler)
    resp = yield from client.get('/', params='q=t+est')
    try:
        assert 200 == resp.status
    finally:
        yield from resp.release()
@pytest.mark.run_loop
def test_history(create_app_and_client):
    # resp.history records the intermediate redirect responses, in order.
    @asyncio.coroutine
    def handler_redirect(request):
        return web.Response(status=301, headers={'Location': '/ok'})
    @asyncio.coroutine
    def handler_ok(request):
        return web.Response(status=200)
    app, client = yield from create_app_and_client()
    app.router.add_route('GET', '/ok', handler_ok)
    app.router.add_route('GET', '/redirect', handler_redirect)
    resp = yield from client.get('/ok')
    try:
        assert len(resp.history) == 0
        assert resp.status == 200
    finally:
        yield from resp.release()
    resp_redirect = yield from client.get('/redirect')
    try:
        assert len(resp_redirect.history) == 1
        assert resp_redirect.history[0].status == 301
        assert resp_redirect.status == 200
    finally:
        yield from resp_redirect.release()
@pytest.mark.run_loop
def test_keepalive_closed_by_server(create_app_and_client):
    # When the server forces the connection closed after every reply,
    # the client connector must not keep any pooled connections.
    @asyncio.coroutine
    def closing_handler(request):
        payload = yield from request.read()
        assert payload == b''
        reply = web.Response(body=b'OK')
        reply.force_close()
        return reply

    app, client = yield from create_app_and_client()
    app.router.add_route('GET', '/', closing_handler)

    first = yield from client.get('/')
    first_body = yield from first.read()
    assert first_body == b'OK'

    second = yield from client.get('/')
    second_body = yield from second.read()
    assert second_body == b'OK'

    # Nothing should remain in the keep-alive pool.
    assert len(client._session.connector._conns) == 0
@pytest.mark.run_loop
def test_wait_for(create_app_and_client, loop):
    # A client request wrapped in asyncio.wait_for must complete
    # normally well within the generous 10 s timeout.
    @asyncio.coroutine
    def ok_handler(request):
        return web.Response(body=b'OK')

    app, client = yield from create_app_and_client()
    app.router.add_route('GET', '/', ok_handler)
    response = yield from asyncio.wait_for(client.get('/'), 10, loop=loop)
    assert response.status == 200
    body = yield from response.text()
    assert body == 'OK'
@pytest.mark.run_loop
def test_raw_headers(create_app_and_client, loop):
    # raw_headers must expose the unparsed header pairs as upper-cased
    # byte strings, in wire order.
    @asyncio.coroutine
    def empty_handler(request):
        return web.Response()

    app, client = yield from create_app_and_client()
    app.router.add_route('GET', '/', empty_handler)
    response = yield from client.get('/')
    assert response.status == 200
    expected = ((b'CONTENT-LENGTH', b'0'),
                (b'DATE', mock.ANY),
                (b'SERVER', mock.ANY))
    assert response.raw_headers == expected
    response.close()
@pytest.mark.run_loop
def test_http_request_with_version(create_app_and_client, loop, warning):
    # Passing an explicit HTTP version is deprecated but must still
    # work and emit a DeprecationWarning.
    @asyncio.coroutine
    def empty_handler(request):
        return web.Response()

    app, client = yield from create_app_and_client()
    app.router.add_route('GET', '/', empty_handler)
    with warning(DeprecationWarning):
        response = yield from client.get('/', version=aiohttp.HttpVersion11)
        assert response.status == 200
        response.close()
@pytest.mark.run_loop
def test_204_with_gzipped_content_encoding(create_app_and_client):
    # A 204 No Content reply that still advertises a gzip
    # Content-Encoding header must not break the client.
    @asyncio.coroutine
    def no_content_handler(request):
        stream = web.StreamResponse(status=204)
        stream.content_length = 0
        stream.content_type = 'application/json'
        # resp.enable_compression(web.ContentCoding.gzip)
        stream.headers['Content-Encoding'] = 'gzip'
        yield from stream.prepare(request)
        return stream

    app, client = yield from create_app_and_client()
    app.router.add_route('DELETE', '/', no_content_handler)
    response = yield from client.delete('/')
    assert response.status == 204
    yield from response.release()
| 29.57489 | 73 | 0.656364 |
720a84062cdf795715dec7cbf3733d23e602d12d | 9,753 | py | Python | spikesorters/kilosort3/kilosort3.py | m-col/spikesorters | 8ea241cd98419d5e2c9b3ee7da26e770dee720b2 | [
"MIT"
] | null | null | null | spikesorters/kilosort3/kilosort3.py | m-col/spikesorters | 8ea241cd98419d5e2c9b3ee7da26e770dee720b2 | [
"MIT"
] | null | null | null | spikesorters/kilosort3/kilosort3.py | m-col/spikesorters | 8ea241cd98419d5e2c9b3ee7da26e770dee720b2 | [
"MIT"
] | null | null | null | from pathlib import Path
import os
import sys
import numpy as np
from typing import Union
import shutil
import json
import spikeextractors as se
from ..basesorter import BaseSorter
from ..utils.shellscript import ShellScript
from ..sorter_tools import get_git_commit, recover_recording
def check_if_installed(kilosort3_path: Union[str, None]):
    """Return True if a Kilosort3 checkout exists at ``kilosort3_path``.

    The check is the presence of the ``main_kilosort3.m`` entry script
    inside the directory.  ``None`` (env var not set) yields False.

    Parameters
    ----------
    kilosort3_path : str or None
        Path to the cloned Kilosort repository; may be wrapped in
        double quotes (e.g. when copied from a shell-quoted env var).

    Returns
    -------
    bool
        True when the installation looks valid, False otherwise.

    Raises
    ------
    AssertionError
        If ``kilosort3_path`` is neither None nor a string.
    """
    if kilosort3_path is None:
        return False
    assert isinstance(kilosort3_path, str)
    # Strip surrounding double quotes that may come from a quoted env var.
    if kilosort3_path.startswith('"'):
        kilosort3_path = kilosort3_path[1:-1]
    kilosort3_path = str(Path(kilosort3_path).absolute())
    # The main entry script identifies a usable install; return the
    # boolean directly instead of an if/else returning True/False.
    return (Path(kilosort3_path) / 'main_kilosort3.m').is_file()
class Kilosort3Sorter(BaseSorter):
    """SpikeInterface wrapper around the MATLAB-based Kilosort3 sorter.

    The wrapper dumps the recording to a flat int16 binary file, renders
    the ``kilosort3_master/config/channelmap.m`` MATLAB scripts from the
    templates shipped next to this module, then launches MATLAB in batch
    mode to run the sort and reads the resulting Kilosort output folder.
    """
    sorter_name: str = 'kilosort3'
    # Path to the cloned Kilosort repository, read from the environment
    # at import time; can be overridden via set_kilosort3_path().
    kilosort3_path: Union[str, None] = os.getenv('KILOSORT3_PATH', None)
    requires_locations = False
    # Defaults substituted into the MATLAB config template (see
    # _params_description below for the meaning of each key).
    _default_params = {
        'detect_threshold': 6,
        'projection_threshold': [9, 9],
        'preclust_threshold': 8,
        'car': True,
        'minFR': 0.2,
        'minfr_goodchannels': 0.2,
        'nblocks': 5,
        'sig': 20,
        'freq_min': 300,
        'sigmaMask': 30,
        'nPCs': 3,
        'ntbuff': 64,
        'nfilt_factor': 4,
        'NT': None,
        'keep_good_only': False,
        'chunk_mb': 500,
    }
    # Human-readable description of each parameter above.
    _params_description = {
        'detect_threshold': "Threshold for spike detection",
        'projection_threshold': "Threshold on projections",
        'preclust_threshold': "Threshold crossings for pre-clustering (in PCA projection space)",
        'car': "Enable or disable common reference",
        'minFR': "Minimum spike rate (Hz), if a cluster falls below this for too long it gets removed",
        'minfr_goodchannels': "Minimum firing rate on a 'good' channel",
        'nblocks': "blocks for registration. 0 turns it off, 1 does rigid registration. Replaces 'datashift' option.",
        'sig': "spatial smoothness constant for registration",
        'freq_min': "High-pass filter cutoff frequency",
        'sigmaMask': "Spatial constant in um for computing residual variance of spike",
        'nPCs': "Number of PCA dimensions",
        'ntbuff': "Samples of symmetrical buffer for whitening and spike detection",
        'nfilt_factor': "Max number of clusters per good channel (even temporary ones) 4",
        'NT': "Batch size (if None it is automatically computed)",
        'keep_good_only': "If True only 'good' units are returned",
        'chunk_mb': "Chunk size in Mb for saving to binary format (default 500Mb)",
    }
    sorter_description = """Kilosort3 is a GPU-accelerated and efficient template-matching spike sorter. On top of its
    predecessor Kilosort, it implements a drift-correction strategy. Kilosort3 improves on Kilosort2 primarily in the
    type of drift correction we use. Where Kilosort2 modified templates as a function of time/drift (a drift tracking
    approach), Kilosort3 corrects the raw data directly via a sub-pixel registration process (a drift correction
    approach). Kilosort3 has not been as broadly tested as Kilosort2, but is expected to work out of the box on
    Neuropixels 1.0 and 2.0 probes, as well as other probes with vertical pitch <=40um. For other recording methods,
    like tetrodes or single-channel recordings, you should test empirically if v3 or v2.0 works better for you (use
    the "releases" on the github page to download older versions).
    For more information see https://github.com/MouseLand/Kilosort"""
    installation_mesg = """\nTo use Kilosort3 run:\n
        >>> git clone https://github.com/MouseLand/Kilosort
    and provide the installation path by setting the KILOSORT3_PATH
    environment variables or using Kilosort3Sorter.set_kilosort3_path().\n\n
    More information on Kilosort3 at:
        https://github.com/MouseLand/Kilosort
    """
    def __init__(self, **kargs):
        BaseSorter.__init__(self, **kargs)
    @classmethod
    def is_installed(cls):
        """Return True if a Kilosort3 checkout is found at the class path."""
        return check_if_installed(cls.kilosort3_path)
    @staticmethod
    def get_sorter_version():
        """Return the git commit of the Kilosort3 checkout, or 'unknown'."""
        commit = get_git_commit(os.getenv('KILOSORT3_PATH', None))
        if commit is None:
            return 'unknown'
        else:
            return 'git-' + commit
    @staticmethod
    def set_kilosort3_path(kilosort3_path: str):
        """Set the Kilosort3 install path on the class and, best-effort,
        export it as KILOSORT3_PATH for subprocess calls."""
        kilosort3_path = str(Path(kilosort3_path).absolute())
        Kilosort3Sorter.kilosort3_path = kilosort3_path
        try:
            print("Setting KILOSORT3_PATH environment variable for subprocess calls to:", kilosort3_path)
            os.environ["KILOSORT3_PATH"] = kilosort3_path
        except Exception as e:
            print("Could not set KILOSORT3_PATH environment variable:", e)
    def _setup_recording(self, recording, output_folder):
        """Write the recording to binary and render the MATLAB scripts
        (master/config/channelmap) into ``output_folder``."""
        source_dir = Path(Path(__file__).parent)
        p = self.params
        if not self.is_installed():
            raise Exception(Kilosort3Sorter.installation_mesg)
        # prepare electrode positions for this group (only one group, the split is done in basesorter)
        groups = [1] * recording.get_num_channels()
        positions = np.array(recording.get_channel_locations())
        if positions.shape[1] != 2:
            raise RuntimeError("3D 'location' are not supported. Set 2D locations instead")
        # save binary file (flat int16, as expected by Kilosort)
        input_file_path = output_folder / 'recording.dat'
        recording.write_to_binary_dat_format(input_file_path, dtype='int16', chunk_mb=p["chunk_mb"],
                                             verbose=self.verbose)
        # MATLAB config expects an integer flag for common average reference
        if p['car']:
            use_car = 1
        else:
            use_car = 0
        # read the template txt files shipped next to this module
        with (source_dir / 'kilosort3_master.m').open('r') as f:
            kilosort3_master_txt = f.read()
        with (source_dir / 'kilosort3_config.m').open('r') as f:
            kilosort3_config_txt = f.read()
        with (source_dir / 'kilosort3_channelmap.m').open('r') as f:
            kilosort3_channelmap_txt = f.read()
        # make substitutions in txt files
        kilosort3_master_txt = kilosort3_master_txt.format(
            kilosort3_path=str(
                Path(Kilosort3Sorter.kilosort3_path).absolute()),
            output_folder=str(output_folder),
            channel_path=str(
                (output_folder / 'kilosort3_channelmap.m').absolute()),
            config_path=str((output_folder / 'kilosort3_config.m').absolute()),
        )
        # batch size: auto-compute from the buffer, or round a user value
        # down to a multiple of 32 as required by Kilosort
        if p['NT'] is None:
            p['NT'] = 64 * 1024 + p['ntbuff']
        else:
            p['NT'] = p['NT'] // 32 * 32  # make sure is multiple of 32
        kilosort3_config_txt = kilosort3_config_txt.format(
            nchan=recording.get_num_channels(),
            sample_rate=recording.get_sampling_frequency(),
            dat_file=str((output_folder / 'recording.dat').absolute()),
            nblocks=p['nblocks'],
            sig=p['sig'],
            projection_threshold=p['projection_threshold'],
            preclust_threshold=p['preclust_threshold'],
            minfr_goodchannels=p['minfr_goodchannels'],
            minFR=p['minFR'],
            freq_min=p['freq_min'],
            sigmaMask=p['sigmaMask'],
            detect_threshold=p['detect_threshold'],
            use_car=use_car,
            nPCs=int(p['nPCs']),
            ntbuff=int(p['ntbuff']),
            nfilt_factor=int(p['nfilt_factor']),
            NT=int(p['NT'])
        )
        kilosort3_channelmap_txt = kilosort3_channelmap_txt.format(
            nchan=recording.get_num_channels(),
            sample_rate=recording.get_sampling_frequency(),
            xcoords=[p[0] for p in positions],
            ycoords=[p[1] for p in positions],
            kcoords=groups
        )
        # write the rendered scripts into the output folder
        for fname, txt in zip(['kilosort3_master.m', 'kilosort3_config.m',
                               'kilosort3_channelmap.m'],
                              [kilosort3_master_txt, kilosort3_config_txt,
                               kilosort3_channelmap_txt]):
            with (output_folder / fname).open('w') as f:
                f.write(txt)
        # NPY writer helpers needed by the MATLAB scripts
        shutil.copy(str(source_dir.parent / 'utils' / 'writeNPY.m'), str(output_folder))
        shutil.copy(str(source_dir.parent / 'utils' / 'constructNPYheader.m'), str(output_folder))
    def _run(self, recording, output_folder):
        """Launch MATLAB in batch mode to execute kilosort3_master.m."""
        recording = recover_recording(recording)
        # NOTE: 'win' in sys.platform also matches 'darwin', hence the
        # explicit darwin exclusion for the Windows branch.
        if 'win' in sys.platform and sys.platform != 'darwin':
            shell_cmd = '''
            {disk_move}
            cd {tmpdir}
            matlab -nosplash -wait -log -r kilosort3_master
            '''.format(disk_move=str(output_folder)[:2], tmpdir=output_folder)
        else:
            shell_cmd = '''
            #!/bin/bash
            cd "{tmpdir}"
            matlab -nosplash -nodisplay -log -r kilosort3_master
            '''.format(tmpdir=output_folder)
        shell_script = ShellScript(shell_cmd, script_path=output_folder / f'run_{self.sorter_name}',
                                   log_path=output_folder / f'{self.sorter_name}.log', verbose=self.verbose)
        shell_script.start()
        retcode = shell_script.wait()
        if retcode != 0:
            raise Exception('kilosort3 returned a non-zero exit code')
    @staticmethod
    def get_result_from_folder(output_folder):
        """Load the Kilosort3 output folder as a SortingExtractor,
        honoring the ``keep_good_only`` parameter saved at run time."""
        output_folder = Path(output_folder)
        with (output_folder / 'spikeinterface_params.json').open('r') as f:
            sorter_params = json.load(f)['sorter_params']
        sorting = se.KiloSortSortingExtractor(folder_path=output_folder, keep_good_only=sorter_params['keep_good_only'])
        return sorting
| 41.858369 | 120 | 0.629447 |
eeac684bc07f8291ebd5d024f743a86cd1753601 | 8,285 | py | Python | 02_classification/final_train.py | johannesschweig/master-thesis | 3b31ca0ede030ca75e63b337dc81517a80b88d8a | [
"MIT"
] | null | null | null | 02_classification/final_train.py | johannesschweig/master-thesis | 3b31ca0ede030ca75e63b337dc81517a80b88d8a | [
"MIT"
] | null | null | null | 02_classification/final_train.py | johannesschweig/master-thesis | 3b31ca0ede030ca75e63b337dc81517a80b88d8a | [
"MIT"
] | null | null | null | import numpy as np
import pickle
import time
import warnings
from sklearn import metrics, neighbors
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC, LinearSVC
# Suppress the sklearn F-score warning that fires when some class gets no
# predicted samples; it would otherwise flood the console during training.
warnings.filterwarnings("ignore", message="F-score is ill-defined and being set to 0.0 in labels with no predicted samples.")
# Classifier to train: one of "knn", "svm", "lsvm", "mlp", "logr".
classifier = "logr"
# Load the pre-split train/test sets (semicolon-separated, header skipped).
train = np.genfromtxt("in/train.txt", delimiter=";", skip_header=1)
test = np.genfromtxt("in/test.txt", delimiter=";", skip_header=1)
# Split features from labels: the last column holds the class label.
n, col = train.shape
col = col - 1
X_train = train[:,0:col]
y_train = train[:,col]
X_test = test[:,0:col]
y_test = test[:,col]
# Standardize features; the scaler is fit on the training data only so
# no information leaks from the test set.
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# Print start time
start = time.time()
print("Start", classifier, time.strftime("%d.%m.%y %H:%M:%S"))
# Instantiate the selected model with its tuned hyperparameters
# (values chosen from a prior grid search; see result log below).
if(classifier=="knn"):
    k = 4
    mdl = neighbors.KNeighborsClassifier(k, weights="uniform")
elif(classifier=="svm"):
    c = 32768
    g = 2
    mdl = SVC(C=c, gamma=g, kernel="rbf", cache_size=1000)
elif(classifier=="lsvm"):
    c = 8
    mdl = LinearSVC(C=c)
elif(classifier=="mlp"):
    h = 64
    mdl = MLPClassifier(solver="adam", hidden_layer_sizes=(h,), random_state=123)
elif(classifier=="logr"):
    c = 32
    mdl = LogisticRegression(C=c)
mdl.fit(X_train, y_train)
# predict labels on both splits for the classification reports below
y_train_pred = mdl.predict(X_train)
y_test_pred = mdl.predict(X_test)
# Pickle the fitted model; the filename encodes the hyperparameters.
if(classifier=="knn"):
    with open("out/knn_k"+str(k)+".pkl", 'wb') as f:
        pickle.dump(mdl, f)
    print("Model saved as out/knn_k"+str(k)+".pkl")
elif(classifier=="svm"):
    with open("out/svm_c"+str(c)+"_g"+str(g)+".pkl", 'wb') as f:
        pickle.dump(mdl, f)
    print("Model saved as out/svm_c"+str(c)+"_g"+str(g)+".pkl")
elif(classifier=="lsvm"):
    with open("out/lsvm_c"+str(c)+".pkl", 'wb') as f:
        pickle.dump(mdl, f)
    print("Model saved as out/lsvm_c"+str(c)+".pkl")
elif(classifier=="mlp"):
    with open("out/mlp_h"+str(h)+".pkl", 'wb') as f:
        pickle.dump(mdl, f)
    print("Model saved as out/mlp_h"+str(h)+".pkl")
elif(classifier=="logr"):
    with open("out/logr_c"+str(c)+".pkl", 'wb') as f:
        pickle.dump(mdl, f)
    print("Model saved as out/logr_c"+str(c)+".pkl")
# Per-class precision/recall/F1 on both splits.
print("TRAIN")
print(classification_report(y_train, y_train_pred))
print("TEST")
print(classification_report(y_test, y_test_pred))
# Report elapsed wall-clock time as h:mm:ss plus the training-set size.
m, s = divmod(time.time() - start, 60)
h, m = divmod(m, 60)
print("Done", classifier, time.strftime("%d.%m.%y %H:%M:%S"), "duration: ", "%d:%02d:%02d" % (h, m, s), "train-samples: ", n)
# KNN, 7s, 1core, 300k samples
# TRAIN
# precision recall f1-score support
# -4.0 1.00 1.00 1.00 22198
# -3.0 0.99 1.00 0.99 21641
# -1.0 0.97 0.98 0.98 20701
# 0.0 1.00 0.99 1.00 181839
# 1.0 1.00 1.00 1.00 23957
# 2.0 1.00 1.00 1.00 23405
# 5.0 1.00 1.00 1.00 16002
# avg / total 1.00 1.00 1.00 309743
# TEST
# precision recall f1-score support
# -4.0 1.00 1.00 1.00 5674
# -3.0 0.99 0.99 0.99 5282
# -1.0 0.95 0.97 0.96 5182
# 0.0 1.00 0.99 0.99 45397
# 1.0 1.00 1.00 1.00 5893
# 2.0 1.00 1.00 1.00 6002
# 5.0 1.00 1.00 1.00 4006
# avg / total 0.99 0.99 0.99 77436
# SVM, 5min, 1core, 300k samples
# TRAIN
# precision recall f1-score support
# -4.0 1.00 1.00 1.00 22198
# -3.0 0.99 0.99 0.99 21641
# -1.0 0.98 0.98 0.98 20701
# 0.0 1.00 1.00 1.00 181839
# 1.0 1.00 1.00 1.00 23957
# 2.0 1.00 1.00 1.00 23405
# 5.0 1.00 1.00 1.00 16002
# avg / total 1.00 1.00 1.00 309743
# TEST
# precision recall f1-score support
# -4.0 1.00 1.00 1.00 5674
# -3.0 0.99 1.00 0.99 5282
# -1.0 0.98 0.98 0.98 5182
# 0.0 1.00 1.00 1.00 45397
# 1.0 1.00 1.00 1.00 5893
# 2.0 1.00 1.00 1.00 6002
# 5.0 1.00 1.00 1.00 4006
# avg / total 1.00 1.00 1.00 77436
#LSVM, 7min, 1core, 300k samples
# TRAIN
# precision recall f1-score support
# -4.0 0.00 0.00 0.00 22198
# -3.0 0.00 0.00 0.00 21641
# -1.0 0.00 0.00 0.00 20701
# 0.0 0.63 1.00 0.77 181839
# 1.0 0.00 0.00 0.00 23957
# 2.0 0.00 0.00 0.00 23405
# 5.0 0.45 0.62 0.53 16002
# avg / total 0.39 0.62 0.48 309743
# TEST
# precision recall f1-score support
# -4.0 0.00 0.00 0.00 5674
# -3.0 0.00 0.00 0.00 5282
# -1.0 0.00 0.00 0.00 5182
# 0.0 0.63 1.00 0.77 45397
# 1.0 0.00 0.00 0.00 5893
# 2.0 0.00 0.00 0.00 6002
# 5.0 0.45 0.64 0.53 4006
# avg / total 0.39 0.62 0.48 77436
# LOGR, 5s, 1core, 300k samples
# TRAIN
# precision recall f1-score support
# -4.0 0.90 0.11 0.19 22198
# -3.0 0.00 0.00 0.00 21641
# -1.0 0.00 0.00 0.00 20701
# 0.0 0.63 1.00 0.77 181839
# 1.0 0.00 0.00 0.00 23957
# 2.0 0.00 0.00 0.00 23405
# 5.0 0.53 0.52 0.52 16002
# avg / total 0.46 0.62 0.49 309743
# TEST
# precision recall f1-score support
# -4.0 0.91 0.10 0.19 5674
# -3.0 0.00 0.00 0.00 5282
# -1.0 0.00 0.00 0.00 5182
# 0.0 0.63 1.00 0.77 45397
# 1.0 0.00 0.00 0.00 5893
# 2.0 0.00 0.00 0.00 6002
# 5.0 0.52 0.53 0.52 4006
# avg / total 0.46 0.62 0.49 77436
# MLP, 2min, 1core, 300k samples
# TRAIN
# precision recall f1-score support
# -4.0 1.00 0.99 0.99 22198
# -3.0 0.97 0.97 0.97 21641
# -1.0 0.96 0.90 0.93 20701
# 0.0 0.99 1.00 0.99 181839
# 1.0 0.99 1.00 1.00 23957
# 2.0 1.00 0.99 0.99 23405
# 5.0 0.99 1.00 0.99 16002
# avg / total 0.99 0.99 0.99 309743
# TEST
# precision recall f1-score support
# -4.0 1.00 1.00 1.00 5674
# -3.0 0.98 0.97 0.97 5282
# -1.0 0.97 0.89 0.93 5182
# 0.0 0.99 1.00 0.99 45397
# 1.0 0.99 1.00 1.00 5893
# 2.0 1.00 0.99 1.00 6002
# 5.0 0.99 1.00 0.99 4006
# avg / total 0.99 0.99 0.99 77436
| 33.678862 | 126 | 0.449246 |
9598947538269f9bb9e3afc420d757e44b10273e | 1,528 | py | Python | peach-blog/app/__init__.py | lt94/peach-blog | 49a861bcfb8d7ab036153091c48718e165b3ec72 | [
"MIT"
] | 57 | 2018-12-04T08:49:00.000Z | 2021-01-15T14:41:48.000Z | peach-blog/app/__init__.py | lt94/peach-blog | 49a861bcfb8d7ab036153091c48718e165b3ec72 | [
"MIT"
] | 32 | 2018-12-04T08:55:11.000Z | 2021-01-08T12:03:55.000Z | peach-blog/app/__init__.py | lt94/peach-blog | 49a861bcfb8d7ab036153091c48718e165b3ec72 | [
"MIT"
] | 7 | 2018-12-04T09:09:56.000Z | 2020-06-04T17:27:39.000Z | from flask import Flask
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_migrate import Migrate
from flask_misaka import Misaka
from flask_mail import Mail
from config import config
from utils import Tools
# Flask extension singletons, created unbound; each is attached to the
# application instance inside create_app() below.
bootstrap = Bootstrap()
db = SQLAlchemy()
mail = Mail()
migrate = Migrate()
misaka = Misaka()
tools = Tools()
login_manager = LoginManager()
# Invalidate the session if the client's identity appears to change.
login_manager.session_protection = 'strong'
# Endpoint anonymous users are redirected to when login is required.
login_manager.login_view = 'auth.login'
login_manager.login_message = '请登录后访问该页面'
# Imported here (after the extensions exist) to avoid circular imports.
from .commands.hexo import Hexo
hexo = Hexo()
from admin import PeachAdmin
admin = PeachAdmin(name="Management", template_mode="bootstrap3")
def create_app(config_name):
    """Application factory: build and configure a Flask app.

    Loads the configuration named ``config_name`` from the ``config``
    mapping, binds all module-level extension singletons to the new app,
    registers the CLI commands and the main/auth/api blueprints.

    :param config_name: key into the ``config`` mapping (e.g. dev/prod).
    :return: the configured :class:`~flask.Flask` application.
    """
    app = Flask(__name__, static_folder='static', static_url_path='')
    # Apply the selected configuration before initializing extensions.
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    # Bind the extension singletons to this application instance.
    bootstrap.init_app(app)
    misaka.init_app(app)
    db.init_app(app)
    mail.init_app(app)
    migrate.init_app(app, db)
    hexo.init_app(app, db)
    admin.init_app(app, db)
    tools.init_app(app)
    login_manager.init_app(app)
    # Register the hexo command group on the Flask CLI.
    from .commands.hexo.cli import hexo_cli
    app.cli.add_command(hexo_cli)
    # Blueprints: site pages, authentication (under /auth), API v1.
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix='/auth')
    from .api_v1_0 import api as api_blueprint
    app.register_blueprint(api_blueprint, url_prefix='/api/v1')
    return app
| 25.466667 | 69 | 0.76178 |
97de03e9252237c5648ae74ad9456d0e7658d614 | 3,491 | py | Python | libcloud/compute/drivers/hpcloud.py | elastacloud/libcloud | f3792b2dca835c548bdbce0da2eb71bfc9463b72 | [
"Apache-2.0"
] | 1 | 2015-06-24T15:09:07.000Z | 2015-06-24T15:09:07.000Z | libcloud/compute/drivers/hpcloud.py | elastacloud/libcloud | f3792b2dca835c548bdbce0da2eb71bfc9463b72 | [
"Apache-2.0"
] | 1 | 2021-09-11T14:30:32.000Z | 2021-09-11T14:30:32.000Z | libcloud/compute/drivers/hpcloud.py | elastacloud/libcloud | f3792b2dca835c548bdbce0da2eb71bfc9463b72 | [
"Apache-2.0"
] | 2 | 2016-12-19T02:27:46.000Z | 2019-07-29T02:53:54.000Z | # contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
HP Public cloud driver which is esentially just a small wrapper around
OpenStack driver.
"""
from libcloud.compute.types import Provider, LibcloudError
from libcloud.compute.drivers.openstack import OpenStack_1_1_Connection
from libcloud.compute.drivers.openstack import OpenStack_1_1_NodeDriver
__all__ = [
    'HPCloudNodeDriver'
]

# Service-catalog lookup arguments for each supported HP Cloud region;
# used by HPCloudConnection.get_endpoint() to resolve the compute URL.
ENDPOINT_ARGS_MAP = {
    'region-a.geo-1': {
        'service_type': 'compute',
        'name': 'Compute',
        'region': 'region-a.geo-1'
    },
    'region-b.geo-1': {
        'service_type': 'compute',
        'name': 'Compute',
        'region': 'region-b.geo-1'
    },
}

# Keystone v2.0 token endpoint; the %s placeholder is the region name.
AUTH_URL_TEMPLATE = 'https://%s.identity.hpcloudsvc.com:35357/v2.0/tokens'
class HPCloudConnection(OpenStack_1_1_Connection):
    """OpenStack connection for HP Public Cloud using v2.0 password auth."""
    _auth_version = '2.0_password'

    def __init__(self, *args, **kwargs):
        # Pop our driver-specific kwargs before delegating upstream.
        self.region = kwargs.pop('region', None)
        self.get_endpoint_args = kwargs.pop('get_endpoint_args', None)
        super(HPCloudConnection, self).__init__(*args, **kwargs)

    def get_endpoint(self):
        """Resolve the compute endpoint's public URL from the catalog."""
        if not self.get_endpoint_args:
            raise LibcloudError(
                'HPCloudConnection must have get_endpoint_args set')
        if '2.0_password' not in self._auth_version:
            raise LibcloudError(
                'Auth version "%s" not supported' % (self._auth_version))
        endpoint = self.service_catalog.get_endpoint(**self.get_endpoint_args)
        url = endpoint.get('publicURL', None)
        if not url:
            raise LibcloudError('Could not find specified endpoint')
        return url
class HPCloudNodeDriver(OpenStack_1_1_NodeDriver):
    """Node driver for HP Public Cloud (Helion), a thin OpenStack wrapper."""
    name = 'HP Public Cloud (Helion)'
    website = 'http://www.hpcloud.com/'
    connectionCls = HPCloudConnection
    type = Provider.HPCLOUD

    def __init__(self, key, secret, tenant_name, secure=True,
                 host=None, port=None, region='region-b.geo-1', **kwargs):
        """
        Note: tenant_name argument is required for HP cloud.
        """
        self.tenant_name = tenant_name
        super(HPCloudNodeDriver, self).__init__(key=key, secret=secret,
                                                secure=secure, host=host,
                                                port=port,
                                                region=region,
                                                **kwargs)

    def _ex_connection_class_kwargs(self):
        # Extend the base OpenStack kwargs with the region-specific
        # endpoint lookup, auth URL and tenant.
        conn_kwargs = self.openstack_connection_kwargs()
        conn_kwargs.update({
            'region': self.region,
            'get_endpoint_args': ENDPOINT_ARGS_MAP[self.region],
            'ex_force_auth_url': AUTH_URL_TEMPLATE % (self.region),
            'ex_tenant_name': self.tenant_name,
        })
        return conn_kwargs
| 34.91 | 76 | 0.648525 |
a4278bae2f768666979bca43e6d24d9b65426071 | 1,299 | py | Python | azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/verification_ip_flow_result.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2022-03-30T22:39:15.000Z | 2022-03-30T22:39:15.000Z | azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/verification_ip_flow_result.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/verification_ip_flow_result.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2017-01-20T18:25:46.000Z | 2017-05-12T21:31:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VerificationIPFlowResult(Model):
"""Results of IP flow verification on the target resource.
:param access: Indicates whether the traffic is allowed or denied.
Possible values include: 'Allow', 'Deny'
:type access: str or ~azure.mgmt.network.v2017_08_01.models.Access
:param rule_name: Name of the rule. If input is not matched against any
security rule, it is not displayed.
:type rule_name: str
"""
_attribute_map = {
'access': {'key': 'access', 'type': 'str'},
'rule_name': {'key': 'ruleName', 'type': 'str'},
}
def __init__(self, **kwargs):
super(VerificationIPFlowResult, self).__init__(**kwargs)
self.access = kwargs.get('access', None)
self.rule_name = kwargs.get('rule_name', None)
| 37.114286 | 76 | 0.61047 |
3660a2466dd9b78c816ce4d2356edafb2b692198 | 37 | py | Python | sys_add_path/toy_project_python3/src/folder_0/file_0.py | tedhuang96/python_tricks | 1579176144b8ca32bf2eb5ba0c5291237d8b8935 | [
"MIT"
] | null | null | null | sys_add_path/toy_project_python3/src/folder_0/file_0.py | tedhuang96/python_tricks | 1579176144b8ca32bf2eb5ba0c5291237d8b8935 | [
"MIT"
] | null | null | null | sys_add_path/toy_project_python3/src/folder_0/file_0.py | tedhuang96/python_tricks | 1579176144b8ca32bf2eb5ba0c5291237d8b8935 | [
"MIT"
] | null | null | null | def func_sum(a, b):
return a+b
| 7.4 | 19 | 0.567568 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.