Dataset schema (one row per source file; ⌀ marks a nullable column):
hexsha: string, length 40
size: int64, 4 to 1.02M
ext: string, 8 classes
lang: string, 1 class
max_stars_repo_path: string, length 4 to 209
max_stars_repo_name: string, length 5 to 121
max_stars_repo_head_hexsha: string, length 40
max_stars_repo_licenses: list, length 1 to 10
max_stars_count: int64, 1 to 191k ⌀
max_stars_repo_stars_event_min_datetime: string, length 24 ⌀
max_stars_repo_stars_event_max_datetime: string, length 24 ⌀
max_issues_repo_path: string, length 4 to 209
max_issues_repo_name: string, length 5 to 121
max_issues_repo_head_hexsha: string, length 40
max_issues_repo_licenses: list, length 1 to 10
max_issues_count: int64, 1 to 67k ⌀
max_issues_repo_issues_event_min_datetime: string, length 24 ⌀
max_issues_repo_issues_event_max_datetime: string, length 24 ⌀
max_forks_repo_path: string, length 4 to 209
max_forks_repo_name: string, length 5 to 121
max_forks_repo_head_hexsha: string, length 40
max_forks_repo_licenses: list, length 1 to 10
max_forks_count: int64, 1 to 105k ⌀
max_forks_repo_forks_event_min_datetime: string, length 24 ⌀
max_forks_repo_forks_event_max_datetime: string, length 24 ⌀
content: string, length 4 to 1.02M
avg_line_length: float64, 1.07 to 66.1k
max_line_length: int64, 4 to 266k
alphanum_fraction: float64, 0.01 to 1
hexsha: 2a07b7ade1a2a41f2a562fa8fa5cc67a39804a53 | size: 152 | ext: py | lang: Python
path: api/urls.py | repo: RahulML2505/My-Django-App | head_hexsha: 57c93b1cbfd95e298d33bb2b2b632ee65533113d | licenses: ["MIT"] (same for the stars, issues and forks records)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
from django.urls import path
from . import views
urlpatterns = [
path('member', views.memberApi),
path('member/<int:id>', views.memberApi),
]
avg_line_length: 16.888889 | max_line_length: 45 | alphanum_fraction: 0.677632
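Both routes above point at a single view, views.memberApi, which is not part of this record. A minimal hypothetical sketch of such a function-based view follows; the names, response shape and behaviour are assumptions, not the repository's actual code:

from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt

@csrf_exempt
def memberApi(request, id=None):
    # Hypothetical handler for both routes:
    #   GET /member        -> list all members
    #   GET /member/<id>   -> return one member by primary key
    if request.method == 'GET':
        payload = {'id': id} if id is not None else {'members': []}
        return JsonResponse(payload, safe=False)
    return JsonResponse({'detail': 'method not allowed'}, status=405)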
hexsha: 4c348de52059a9ce5cbd5b68460bc8b5ef706dcd | size: 5,573 | ext: py | lang: Python
path: checkers/board.py | repo: danielkaichis/CheckersMinimax | head_hexsha: 0d54d745c0f5cb57131bf8774312d5bcd584134b | licenses: ["MIT"] (same for the stars, issues and forks records)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
import pygame
from .constants import BLACK, ROWS, RED, SQUARE_SIZE, COLS, WHITE, RED_STRING, WHITE_STRING
from .piece import Piece
class Board:
def __init__(self):
self.board = []
self.red_left = self.white_left = 12
self.red_kings = self.white_kings = 0
self.create_board()
def draw_squares(self, win):
win.fill(BLACK)
for row in range(ROWS):
for col in range(row % 2, ROWS, 2):
pygame.draw.rect(win, RED, (row * SQUARE_SIZE, col * SQUARE_SIZE, SQUARE_SIZE, SQUARE_SIZE))
def move(self, piece, row, col):
self.board[piece.row][piece.col], self.board[row][col] = self.board[row][col], self.board[piece.row][piece.col]
piece.move(row, col)
if row == ROWS - 1 or row == 0:
piece.make_king()
if piece.colour == WHITE:
self.white_kings += 1
else:
self.red_kings += 1
def get_piece(self, row, col):
return self.board[row][col]
def create_board(self):
for row in range(ROWS):
self.board.append([])
for col in range(COLS):
if col % 2 == ((row + 1) % 2):
if row <= 2:
self.board[row].append(Piece(row, col, WHITE))
elif row >= 5:
self.board[row].append(Piece(row, col, RED))
else:
self.board[row].append(0)
else:
self.board[row].append(0)
def draw(self, win):
self.draw_squares(win)
for row in range(ROWS):
for col in range(COLS):
piece = self.board[row][col]
if piece:
piece.draw(win)
def get_valid_moves(self, piece):
moves = {}
left = piece.col - 1
right = piece.col + 1
row = piece.row
if piece.colour == RED or piece.king:
moves.update(self._check_left(row - 1, max(row - 3, -1), -1, piece.colour, left))
moves.update(self._check_right(row - 1, max(row - 3, -1), -1, piece.colour, right))
if piece.colour == WHITE or piece.king:
moves.update(self._check_left(row + 1, min(row + 3, ROWS), 1, piece.colour, left))
moves.update(self._check_right(row + 1, min(row + 3, ROWS), 1, piece.colour, right))
return moves
def _check_left(self, start, stop, interval, colour, left, skipped=[]):
moves = {}
last = []
for r in range(start, stop, interval):
if left < 0:
break
current = self.board[r][left]
if current == 0:
if skipped and not last:
break
elif skipped:
moves[(r, left)] = last + skipped
else:
moves[(r, left)] = last
if last:
if interval == -1:
row = max(r - 3, 0)
else:
row = min(r + 3, ROWS)
moves.update(self._check_left(r + interval, row, interval, colour, left - 1, skipped=last))
moves.update(self._check_right(r + interval, row, interval, colour, left + 1, skipped=last))
break
elif current.colour == colour:
break
else:
last = [current]
left -= 1
return moves
def _check_right(self, start, stop, interval, colour, right, skipped=[]):
moves = {}
last = []
for r in range(start, stop, interval):
if right >= COLS:
break
current = self.board[r][right]
if current == 0:
if skipped and not last:
break
elif skipped:
moves[(r, right)] = last + skipped
else:
moves[(r, right)] = last
if last:
if interval == -1:
row = max(r - 3, 0)
else:
row = min(r + 3, ROWS)
moves.update(self._check_left(r + interval, row, interval, colour, right - 1, skipped=last))
moves.update(self._check_right(r + interval, row, interval, colour, right + 1, skipped=last))
break
elif current.colour == colour:
break
else:
last = [current]
right += 1
return moves
def remove(self, pieces):
for piece in pieces:
self.board[piece.row][piece.col] = 0
if piece != 0:
if piece.colour == RED:
self.red_left -= 1
else:
self.white_left -= 1
def winner(self):
if self.red_left <= 0:
return WHITE_STRING
elif self.white_left <= 0:
return RED_STRING
return None
def evaluate(self):
return self.white_left - self.red_left + (self.white_kings * 0.5 - self.red_kings * 0.5)
def get_all_pieces(self, colour):
pieces = []
for row in self.board:
for piece in row:
if piece != 0 and piece.colour == colour:
pieces.append(piece)
return pieces
avg_line_length: 32.782353 | max_line_length: 119 | alphanum_fraction: 0.464382
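A short assumed usage sketch of the Board class above. The pygame window and the constants module are taken as given by the surrounding checkers package, and square (5, 0) holds a red piece in the starting layout produced by create_board():

import pygame
from checkers.board import Board
from checkers.constants import ROWS, COLS, SQUARE_SIZE

pygame.init()
win = pygame.display.set_mode((COLS * SQUARE_SIZE, ROWS * SQUARE_SIZE))

board = Board()
board.draw(win)            # fills the window and draws every piece
pygame.display.update()

piece = board.get_piece(5, 0)           # a red piece on the bottom rows
moves = board.get_valid_moves(piece)    # {(row, col): [captured pieces], ...}
print(moves)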
hexsha: 31f649fd8cb99f624d351cb262af0b737b97ada7 | size: 1,369 | ext: py | lang: Python
path: setup.py | repo: garanews/jbxapi | head_hexsha: 195d4e44762c081214fdb691ba61310dbc962dc8 | licenses: ["MIT"] (same for the stars, issues and forks records)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
import re
import os
from setuptools import setup
def get_version():
""" Extract the version number from the code. """
here = os.path.abspath(os.path.dirname(__file__))
jbxapi_file = os.path.join(here, "jbxapi.py")
with open(jbxapi_file) as f:
content = f.read()
match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", content, re.M)
if not match:
raise RuntimeError("Unable to find version string.")
return match.group(1)
setup(name='jbxapi',
version=get_version(),
description='API for Joe Sandbox',
url='https://github.com/joesecurity/joesandboxcloudapi',
author='Joe Security LLC',
license='MIT',
py_modules=['jbxapi'],
install_requires=[
'requests>=2.18.4,<3',
],
entry_points={
'console_scripts': [
'jbxapi=jbxapi:main'
],
},
zip_safe=False,
keywords="security sandbox joe",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Security',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
])
avg_line_length: 26.843137 | max_line_length: 79 | alphanum_fraction: 0.569028
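As a quick illustration of what get_version() expects to find in jbxapi.py: a module-level __version__ assignment at the start of a line. The version string below is a placeholder, not the real package version:

import re

content = '__version__ = "3.0.0"\n'
match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", content, re.M)
print(match.group(1))  # -> 3.0.0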
hexsha: 3d011c180f31275b6fe5460be6f0124c43a052b2 | size: 1,168 | ext: py | lang: Python
path: PortScanner/AdvScanner.py | repo: saadhaxxan/Python-For-Ethical-Hacking | head_hexsha: 87ef18b2c2876bf1711442a5f00ddb7d2dacfd43 | licenses: ["MIT"] (same for the stars, issues and forks records)
max_stars_count: 26, events 2020-09-16T18:26:00.000Z to 2022-02-09T15:18:34.000Z | max_issues_count: null | max_forks_count: 3, events 2020-11-27T20:30:22.000Z to 2022-02-16T05:57:16.000Z
content:
#!/usr/bin/python
from socket import *
import socket
from termcolor import colored
from threading import *
print(colored("[*] Enter Host IP Address or Website Name:","green"))
host = input()
print(colored("[*] Enter number of ports to scan:","green"))
num = int(input())
def PScanner(port):
# AF_INET means we connect to IPv4 addresses
# SOCK_STREAM means we connect using the TCP protocol, not UDP
try:
soc = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
soc.connect((host,port))
print(colored("[+] %d/tcp is Open" %(port),"blue"))
except:
print(colored("[!!] %d/tcp is Closed" %(port),"red"))
finally:
soc.close()
def ResolveScan(tHost,tPorts):
try:
targetIP = gethostbyname(tHost)
except:
print(colored("Unknown Host"),"red")
try:
targetname = gethostbyaddr(targetIP)
print(colored("[+] Scan results for: "+ targetname[0],"blue"))
except:
print(colored("[+] Scan Results for: "+ targetIP,"blue"))
setdefaulttimeout(1)
for port in range(1,num):
PScanner(port)
ResolveScan(host,num)
avg_line_length: 25.955556 | max_line_length: 77 | alphanum_fraction: 0.629281
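The script imports threading but then scans the ports sequentially. A minimal sketch of how PScanner could be parallelised with Thread (an assumption, not part of the original file):

from threading import Thread

# One thread per port; host and num come from the prompts above.
threads = [Thread(target=PScanner, args=(port,)) for port in range(1, num)]
for t in threads:
    t.start()
for t in threads:
    t.join()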
hexsha: 751619e1b09b688ac2db2dcb55dbbd87c919594a | size: 6,165 | ext: py | lang: Python | path: utils/interactive_plotting.py
max_stars_repo: TUM-LMF/ijgi18 (f6e5aed5c59af084a91428fdd285a17fcf6344f4), licenses ["MIT"], stars: 68, events 2018-03-23T01:32:39.000Z to 2021-07-29T14:00:02.000Z
max_issues_repo: natumeyuzuru/MTLCC (8abe62dfc759e2e8034ee372bf22dce510e15a59), licenses ["MIT"], issues: 2, events 2020-03-02T16:19:45.000Z to 2020-05-21T02:08:29.000Z
max_forks_repo: natumeyuzuru/MTLCC (8abe62dfc759e2e8034ee372bf22dce510e15a59), licenses ["MIT"], forks: 29, events 2018-07-26T07:31:53.000Z to 2021-08-21T17:41:02.000Z
content:
from __future__ import print_function
import os
import matplotlib.pyplot as plt
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
import numpy as np
from PIL import Image  # used by write()
from IPython.display import display  # used by show_rgb() and show_gray()
def show_rgb(x, name=""):
if len(x.shape)==5: # BTHWD
max_b, max_t,_,_,max_d = x.shape
elif len(x.shape)==4: # BHWD
max_b,_,_,max_d = x.shape
def norm(band):
return (band - band.min()) / (band - band.min()).max()
def _show_map_BTHWD(t,d,b):
plt.title("{name} RGB map {rd}-{gn}-{bl}, b={b}, t={t}".format(name=name,b=b,t=t,rd=d-1,gn=d,bl=d+1))
plt.imshow(np.stack((norm(x[b,t,:,:,d-1]),norm(x[b,t,:,:,d]),norm(x[b,t,:,:,d+1])),axis=-1))
def _show_map_BHWD(d,b):
plt.title("{name} RGB map {rd}-{gn}-{bl}, b={b}".format(name=name,b=b,rd=d-1,gn=d,bl=d+1))
plt.imshow(np.stack((norm(x[b,:,:,d-1]),norm(x[b,:,:,d]),norm(x[b,:,:,d+1])),axis=-1))
# both
b_slider = widgets.IntSlider(description='batch',min=0,max=max_b-1,step=1,value=max_b/2)
d_slider = widgets.IntSlider(description='band',min=1,max=max_d-2,step=1,value=max_d/2)
if len(x.shape)==5: # BTHWD
t_slider = widgets.IntSlider(description='time',min=0,max=max_t-1,step=1,value=max_t/2)
w = interactive(_show_map_BTHWD, t=t_slider, d=d_slider, b=b_slider)
elif len(x.shape)==4: # BHWD
w = interactive(_show_map_BHWD, d=d_slider, b=b_slider)
w.layout.height = '400px'
display(w)
def show_gray(x, name="",vmin=None, vmax=None):
if len(x.shape)==5: # BTHWD
max_b, max_t,_,_,max_d = x.shape
elif len(x.shape)==4: # BHWD
max_b,_,_,max_d = x.shape
elif len(x.shape)==3: # BHW
max_b,_,_ = x.shape
def _show(x,title):
plt.title(title)
plt.imshow(x,vmax=vmax, vmin=vmin);
plt.colorbar()
def _show_map_BTHWD(t,d,b):
_show(x[b,t,:,:,d],"{name} feature map b={b}, t={t}, d={d}".format(name=name,b=b,t=t,d=d))
def _show_map_BHWD(d,b):
_show(x[b,:,:,d],"{name} feature map b={b}, d={d}".format(name=name,b=b,d=d))
def _show_map_BHW(b):
_show(x[b,:,:],"{name} feature map b={b}".format(name=name,b=b))
# all
b_slider = widgets.IntSlider(description='batch',min=0,max=max_b-1,step=1,value=max_b/2)
if len(x.shape)==5: # BTHWD
d_slider = widgets.IntSlider(description='band',min=0,max=max_d-1,step=1,value=max_d/2)
t_slider = widgets.IntSlider(description='time',min=0,max=max_t-1,step=1,value=max_t/2)
w = interactive(_show_map_BTHWD, t=t_slider, d=d_slider, b=b_slider)
elif len(x.shape)==4: # BHWD
d_slider = widgets.IntSlider(description='band',min=0,max=max_d-1,step=1,value=max_d/2)
w = interactive(_show_map_BHWD, d=d_slider, b=b_slider)
elif len(x.shape)==3: # BHW
w = interactive(_show_map_BHW, b=b_slider)
w.layout.height = '400px'
display(w)
def show(x,name="",mode="RGB"):
if mode=="RGB":
show_rgb(x,name)
elif mode=="gray":
show_gray(x,name)
def norm_ptp(arr):
return (arr-arr.min()) / (arr-arr.min()).max()
def norm_std(arr,stddev=1):
arr -= arr.mean(axis=0).mean(axis=0)
arr /= stddev*arr.std(axis=0).std(axis=0) # [-1,1]
arr = (arr/2) + 0.5 # [0,1]
arr = np.clip(arr*255,0,255) # [0,255]
return arr.astype("uint8")
def norm_rgb(arr):
# taken from QGIS mean +- 2 stddev over cloudfree image
vmin = np.array([-0.0433,-0.0054,-0.0237])
vmax = np.array([0.1756,0.1483,0.1057])
arr-=vmin
arr/=(vmax-vmin)
return np.clip((arr*255),0,255).astype("uint8")
def write(arr,outfile):
#norm_img = norm(arr)
img = Image.fromarray(arr)
img.save(outfile)
def dump3(array,name,outfolder,cmap="inferno",norm=norm_ptp):
filenpath="{outfolder}/sample{s}/{name}/{d}.png"
cmap = plt.get_cmap(cmap)
# normalize over the entire array
#array = norm(array)
samples,h,w,depth = array.shape
for s in range(samples):
for d in range(depth):
outfilepath = filenpath.format(outfolder=outfolder,s=s,name=name,d=d)
if not os.path.exists(os.path.dirname(outfilepath)):
os.makedirs(os.path.dirname(outfilepath))
arr = array[s,:,:,d]
arr = cmap(arr)
write((arr*255).astype('uint8'),outfilepath)
def dump(array,name,outfolder,cmap="inferno",norm=norm_ptp):
filenpath="{outfolder}/sample{s}/time{t}/{d}_{name}.png"
cmap = plt.get_cmap(cmap)
# normalize over the entire array
#array = norm(array)
samples,times,h,w,depth = array.shape
for s in range(samples):
for t in range(times):
for d in range(depth):
outfilepath = filenpath.format(outfolder=outfolder,s=s,t=t,name=name,d=d)
if not os.path.exists(os.path.dirname(outfilepath)):
os.makedirs(os.path.dirname(outfilepath))
arr = array[s,t,:,:,d]
arr = cmap(arr)
write((arr*255).astype('uint8'),outfilepath)
def dump_rgb(array,name,outfolder,stddev):
filenpath="{outfolder}/sample{s}/time{t}_{name}.png"
samples,times,h,w,depth = array.shape
for s in range(samples):
for t in range(times):
outfilepath = filenpath.format(outfolder=outfolder,s=s,t=t,name=name)
if not os.path.exists(os.path.dirname(outfilepath)):
os.makedirs(os.path.dirname(outfilepath))
arr = array[s,t,:,:,0:3]
arr = norm_std(arr,stddev=stddev)
write(arr,outfilepath)
def dump_class(array,name,outfolder,cmap="Accent"):
filenpath="{outfolder}/sample{s}/{name}.png"
samples,h,w = array.shape
array = array.astype(float) / 26
cmap = plt.get_cmap(cmap)
for s in range(samples):
outfilepath = filenpath.format(outfolder=outfolder,s=s,name=name)
arr = (cmap(array[s])*255).astype("uint8")
write(arr,outfilepath)
avg_line_length: 32.792553 | max_line_length: 109 | alphanum_fraction: 0.586699
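An assumed notebook usage of the show()/show_gray() helpers above, with a random BTHWD tensor whose shape is chosen arbitrarily for illustration:

import numpy as np

x = np.random.rand(4, 10, 24, 24, 6)       # batch, time, height, width, bands
show(x, name="random", mode="RGB")         # sliders over batch, time and band
show_gray(x[:, 0], name="first timestep")  # exercises the BHWD code path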
hexsha: 514571375cedc9738249d0632f9eda3d051b2ffb | size: 11,662 | ext: py | lang: Python | path: docs/conf.py
max_stars_repo: soheil191/translate.py (b136ec92dfe225aba06d96b7009318e3707ee465), licenses ["Apache-2.0"], stars: null, events: null
max_issues_repo: soheil191/translate.py (b136ec92dfe225aba06d96b7009318e3707ee465), licenses ["Apache-2.0"], issues: 1, events 2021-02-24T06:42:22.000Z to 2021-02-24T06:42:22.000Z
max_forks_repo: isabella232/python-translate (6fb2effa6903cae5584f51a74d1399f12697db1f), licenses ["Apache-2.0"], forks: null, events: null
content:
# -*- coding: utf-8 -*-
#
# google-cloud-translate documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# For plugins that can not read conf.py.
# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
sys.path.insert(0, os.path.abspath("."))
__version__ = ""
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"recommonmark",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_options = {"members": True}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"google-cloud-translate"
copyright = u"2019, Google"
author = u"Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
"_build",
"samples/AUTHORING_GUIDE.md",
"samples/CONTRIBUTING.md",
"samples/snippets/README.rst",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for google-cloud-translate",
"github_user": "googleapis",
"github_repo": "python-translate",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-translate-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"google-cloud-translate.tex",
u"google-cloud-translate Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"google-cloud-translate",
u"google-cloud-translate Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"google-cloud-translate",
u"google-cloud-translate Documentation",
author,
"google-cloud-translate",
"google-cloud-translate Library",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("http://python.readthedocs.org/en/latest/", None),
"google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
"grpc": ("https://grpc.io/grpc/python/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
avg_line_length: 31.863388 | max_line_length: 88 | alphanum_fraction: 0.703653
hexsha: 492d50a1df2a3d6ba5d0fba4fae6bd58552cb06e | size: 1,815 | ext: py | lang: Python | path: src/main.py
max_stars_repo: lukas2511/bbb-streaming (3ce86576e1921236d329a8002d7aedfa9528f36d), licenses ["MIT"], stars: 60, events 2021-03-06T10:50:27.000Z to 2022-03-19T06:26:52.000Z
max_issues_repo: aguerson/bbb-streaming (3ce86576e1921236d329a8002d7aedfa9528f36d), licenses ["MIT"], issues: 8, events 2021-03-11T13:09:16.000Z to 2021-08-05T07:49:23.000Z
max_forks_repo: aguerson/bbb-streaming (3ce86576e1921236d329a8002d7aedfa9528f36d), licenses ["MIT"], forks: 15, events 2021-03-11T03:17:40.000Z to 2022-03-30T10:07:15.000Z
content:
#!/usr/bin/env python3
import argparse
from lib import run
import logging
logging.basicConfig()
log = logging.getLogger('bbb-streamer')
def main():
argp = argparse.ArgumentParser(allow_abbrev=False)
argp.add_argument("--debug", help="Print debug log", action='store_true')
argp.add_argument("--background", help="Background image, either direct file path or via http/https URL")
jnurlgroup = argp.add_argument_group('URL', 'Join using fully prepared API join URL')
jnurlgroup.add_argument("--join-url", help="Fully prepared API join URL, e.g. https://bbb.example.org/bigbluebutton/api/join?...")
glgroup = argp.add_argument_group('Greenlight', 'Join using Greenlight Frontend')
glgroup.add_argument("--greenlight-url", help="Greenlight URL, e.g. https://bbb.example.org/gl/my-cool-room")
glgroup.add_argument("--greenlight-name", help="Name for stream user", default="stream")
glgroup.add_argument("--greenlight-password", help="Greenlight password for protected rooms")
argp.add_argument("--rtmp-url", help="Output RTMP URL, e.g. rtmp://example.org/app/stream?auth=key", required=True)
args = argp.parse_args()
if sum([0 if x is None else 1 for x in [args.join_url, args.greenlight_url]]) != 1:
argp.error("Exactly one of --join-url/--greenlight-url is required")
if args.debug:
log.setLevel(logging.DEBUG)
if args.join_url:
log.info("Joining using prepared API join URL")
join_url = args.join_url
elif args.greenlight_url:
log.info("Joining using Greenlight frontend")
join_url = run.greenlight_join(args.greenlight_url, args.greenlight_name, args.greenlight_password)
run.start(join_url=join_url, rtmp_url=args.rtmp_url, background=args.background)
if __name__ == '__main__':
main()
avg_line_length: 40.333333 | max_line_length: 134 | alphanum_fraction: 0.71405
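A hedged example of driving main() above programmatically with a prepared join URL; the URLs are placeholders modelled on the argparse help strings, not working endpoints:

import sys

sys.argv = [
    "bbb-streamer",
    "--join-url", "https://bbb.example.org/bigbluebutton/api/join?meetingID=demo",
    "--rtmp-url", "rtmp://example.org/app/stream?auth=key",
]
main()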
hexsha: 3deb5b0f57e4b46fa49ae20c52eed49acc76483a | size: 3,108 | ext: py | lang: Python
path: satyrus/sat/types/problem.py | repo: lucasvg/Satyrus3-FinalProject-EspTopsOTM | head_hexsha: 024785752abdc46e3463d8c94df7c3da873c354d | licenses: ["MIT"] (same for the stars, issues and forks records)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
from collections import deque
## Local
from ...satlib import arange
from .expr import Expr
from .main import Var, Number
from .symbols import CONS_INT, CONS_OPT
from .symbols.tokens import T_FORALL, T_EXISTS, T_EXISTS_ONE, T_AND, T_OR
class Loop(object):
""" :: LOOP ::
==========
"""
def __init__(self, var: Var, loop_type: str, start: Number, stop: Number, step: Number, conds: list=None):
self.var = var
self.type = str(loop_type)
self.start = start
self.stop = stop
self.step = step
self.conds = conds
def cond_func(self, compiler):
"""
"""
if self.conds is None:
return True
conds = [compiler.eval_expr(cond, calc=True) for cond in self.conds]
return all(type(cond) is Number and (cond != Number('0')) for cond in conds)
def indices(self, compiler):
I = []
start = compiler.eval_expr(self.start, calc=True)
stop = compiler.eval_expr(self.stop, calc=True)
step = compiler.eval_expr(self.step, calc=True)
for i in arange(start, stop, step):
i = Number(i)
compiler.memset(self.var, i)
if self.cond_func(compiler):
I.append(i)
else:
continue
return I
class Constraint(object):
""" :: CONSTRAINT ::
================
"""
HEAD_TABLE = {
T_FORALL: T_AND,
T_EXISTS: T_OR,
T_EXISTS_ONE: None,
}
def __init__(self, name: Var, cons_type: Var, level: int):
"""
"""
self.name = str(name)
self.type = str(cons_type)
self.level = int(level)
self.loop_stack = deque([])
self.expr = None
def add_loop(self, var: Var, loop_type: Var, start: Number, stop: Number, step: Number, conds: list):
""" ADD_LOOP
========
"""
self.loop_stack.append(Loop(var, loop_type, start, stop, step, conds))
def set_expr(self, expr: Expr):
""" SET_EXPR
========
Sets the expr of this constraint in the C.N.F.
"""
self.expr = Expr.cnf(expr)
def get_expr(self, compiler):
""" GET_EXPR
========
"""
return self._get_expr(compiler)
def _get_expr(self, compiler):
"""
"""
if not self.loop_stack:
return self.expr
## Retrieves the outermost loop from the stack
loop = self.loop_stack.popleft()
## Expression
head = self.HEAD_TABLE[loop.type]
tail = []
## Push compiler memory scope
compiler.push()
for i in loop.indices(compiler):
compiler.memset(loop.var, i)
expr = compiler.eval_expr(self._get_expr(compiler))
tail.append(expr)
else:
self.loop_stack.appendleft(loop)
compiler.pop()
return Expr(head, *tail)
avg_line_length: 26.793103 | max_line_length: 111 | alphanum_fraction: 0.517053
hexsha: fc77c82f88c02f562c2c58c18b3a7e97843f7536 | size: 5,241 | ext: py | lang: Python
path: AutomatedTesting/Gem/PythonTests/Prefab/TestSuite_Main.py | repo: LB-KatarzynaDylska/o3de | head_hexsha: d8d273697ea8e1beeb698f62b84904a192b0ab76 | licenses: ["Apache-2.0", "MIT"] (same for the stars, issues and forks records)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
# This suite consists of all test cases that are passing and have been verified.
import pytest
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../automatedtesting_shared')
from base import TestAutomationBase
@pytest.mark.SUITE_main
@pytest.mark.parametrize("launcher_platform", ['windows_editor'])
@pytest.mark.parametrize("project", ["AutomatedTesting"])
class TestAutomation(TestAutomationBase):
def _run_prefab_test(self, request, workspace, editor, test_module, batch_mode=True, autotest_mode=True):
self._run_test(request, workspace, editor, test_module,
batch_mode=batch_mode,
autotest_mode=autotest_mode)
def test_OpenLevel_ContainingTwoEntities(self, request, workspace, editor, launcher_platform):
from Prefab.tests.open_level import OpenLevel_ContainingTwoEntities as test_module
self._run_prefab_test(request, workspace, editor, test_module)
def test_CreatePrefab_WithSingleEntity(self, request, workspace, editor, launcher_platform):
from Prefab.tests.create_prefab import CreatePrefab_WithSingleEntity as test_module
self._run_prefab_test(request, workspace, editor, test_module)
def test_InstantiatePrefab_ContainingASingleEntity(self, request, workspace, editor, launcher_platform):
from Prefab.tests.instantiate_prefab import InstantiatePrefab_ContainingASingleEntity as test_module
self._run_prefab_test(request, workspace, editor, test_module)
def test_InstantiatePrefab_FromCreatedPrefabWithSingleEntity(self, request, workspace, editor, launcher_platform):
from Prefab.tests.instantiate_prefab import InstantiatePrefab_FromCreatedPrefabWithSingleEntity as test_module
self._run_prefab_test(request, workspace, editor, test_module)
def test_DeletePrefab_ContainingASingleEntity(self, request, workspace, editor, launcher_platform):
from Prefab.tests.delete_prefab import DeletePrefab_ContainingASingleEntity as test_module
self._run_prefab_test(request, workspace, editor, test_module)
def test_ReparentPrefab_UnderPrefabAndEntityHierarchies(self, request, workspace, editor, launcher_platform):
from Prefab.tests.reparent_prefab import ReparentPrefab_UnderPrefabAndEntityHierarchies as test_module
self._run_prefab_test(request, workspace, editor, test_module, autotest_mode=False)
def test_DetachPrefab_UnderAnotherPrefab(self, request, workspace, editor, launcher_platform):
from Prefab.tests.detach_prefab import DetachPrefab_UnderAnotherPrefab as test_module
self._run_prefab_test(request, workspace, editor, test_module, autotest_mode=False)
def test_DuplicatePrefab_ContainingASingleEntity(self, request, workspace, editor, launcher_platform):
from Prefab.tests.duplicate_prefab import DuplicatePrefab_ContainingASingleEntity as test_module
self._run_prefab_test(request, workspace, editor, test_module)
def test_CreatePrefab_UnderAnEntity(self, request, workspace, editor, launcher_platform):
from Prefab.tests.create_prefab import CreatePrefab_UnderAnEntity as test_module
self._run_prefab_test(request, workspace, editor, test_module, autotest_mode=False)
def test_CreatePrefab_UnderAnotherPrefab(self, request, workspace, editor, launcher_platform):
from Prefab.tests.create_prefab import CreatePrefab_UnderAnotherPrefab as test_module
self._run_prefab_test(request, workspace, editor, test_module, autotest_mode=False)
def test_CreatePrefab_UnderChildEntityOfAnotherPrefab(self, request, workspace, editor, launcher_platform):
from Prefab.tests.create_prefab import CreatePrefab_UnderChildEntityOfAnotherPrefab as test_module
self._run_prefab_test(request, workspace, editor, test_module, autotest_mode=False)
def test_CreatePrefab_WithNestedEntities(self, request, workspace, editor, launcher_platform):
from Prefab.tests.create_prefab import CreatePrefab_WithNestedEntities as test_module
self._run_prefab_test(request, workspace, editor, test_module, autotest_mode=False)
def test_CreatePrefab_WithNestedEntitiesAndNestedPrefabs(self, request, workspace, editor, launcher_platform):
from Prefab.tests.create_prefab import CreatePrefab_WithNestedEntitiesAndNestedPrefabs as test_module
self._run_prefab_test(request, workspace, editor, test_module, autotest_mode=False)
def test_DeleteEntity_UnderAnotherPrefab(self, request, workspace, editor, launcher_platform):
from Prefab.tests.delete_entity import DeleteEntity_UnderAnotherPrefab as test_module
self._run_prefab_test(request, workspace, editor, test_module, autotest_mode=False)
def test_DeleteEntity_UnderLevelPrefab(self, request, workspace, editor, launcher_platform):
from Prefab.tests.delete_entity import DeleteEntity_UnderLevelPrefab as test_module
self._run_prefab_test(request, workspace, editor, test_module, autotest_mode=False)
avg_line_length: 59.556818 | max_line_length: 118 | alphanum_fraction: 0.803854
hexsha: 1aee38404db49305005ca712f5ae49f2550d4cf3 | size: 4,353 | ext: py | lang: Python
path: example_problems/tutorial/eggs/services/get_tables.py | repo: DottaPaperella/TALight | head_hexsha: 580322c3121c9acde9827f996fd4e39e31d93a6f | licenses: ["MIT"] (same for the stars, issues and forks records)
max_stars_count: 4, events 2021-06-27T13:27:24.000Z to 2022-03-24T10:46:28.000Z | max_issues_count: 1, events 2021-01-23T06:50:31.000Z to 2021-03-17T15:35:18.000Z | max_forks_count: 5, events 2021-04-01T15:21:57.000Z to 2022-01-29T15:07:38.000Z
content:
#!/usr/bin/env python3
from sys import stderr, exit
def convert2number(s):
try:
risp = int(s)
return risp
except (TypeError, ValueError):
pass
try:
risp = float(s)
return risp
except (TypeError, ValueError):
return None
def get_one_numeric_table(sep=None, should_be_int=False, should_be_nat=False, row_names_start_from=0, col_names_start_from=0, checks=[]):
""" When sep=None, the fields are separated by sequences of white characters. When sep="," then a .csv format is assumed, but you can specify use other separator characters or string. A "#" starts a comment for the rest of the line.
Examples:
>[tk.strip() for tk in "wfwqf, wqfwqfq, wfwfq".split(None)]
returns ['wfwqf,', 'wqfwqfq,', 'wfwfq']
> [tk.strip() for tk in "wfwqf, wqfwqfq, wfwfq".split(",")]
returns ['wfwqf', 'wqfwqfq', 'wfwfq']
"""
print("#? waiting for a rectangular table of numbers (a matrix). Insert a closing line '#end' after the last row of the table. Any other line beggining with the '#' character is ignored. You can use the 'TA_send_txt_file.py' util here to send us the lines of a file. Just plug in the util at the 'rtal connect' command like you do with any other bot and let the util feed in the file.")
def get_line():
raw_line = input().strip()
if raw_line[0] != "#":
return [tk.strip() for tk in raw_line.split("#")[0].split(sep)], None
key = raw_line[1:].strip().split()[0].upper()
if key.upper() == "END" or key.upper() == "NEXT":
return None, key
return None, "GEN_COMMENT"
first_line, cmd = get_line()
while first_line == None:
first_line, cmd = get_line()
last_col = len(first_line) -1
table_submitted = [ list(map(convert2number, first_line)) ]
if any(_== None for _ in table_submitted[-1]):
print(f"# Error (in the table format): All entries in your table should be numbers. Just check row {len(table_submitted)-1+row_names_start_from} in your file for a first occurrence of a type mismatch.")
exit(1)
def one_by_one_check():
for col, val in zip(range(len(table_submitted[-1])), table_submitted[-1]):
if should_be_int or should_be_nat:
if type(val) != int:
print(f"# Error (in the table format): the entry ({len(table_submitted)-1+row_names_start_from},{col+col_names_start_from}) in your table should be an integer number. However, the value {val} is a non integer float with decimal part.")
exit(1)
if should_be_nat:
if val<0:
print(f"# Error (in the table format): the entry ({len(table_submitted)-1+row_names_start_from},{col+row_names_start_from}) in your table should be a natural (i.e., non-negative) number. However, you entered the {val}<0 for that entry.")
exit(1)
for check in checks:
check(row_index_name=len(table_submitted)-1+row_names_start_from, col_index_name=col+col_names_start_from, entry_val=val)
one_by_one_check()
next_line, cmd = get_line()
while cmd == None or cmd.upper() != "END":
if cmd != None and cmd.upper() == "NEXT":
print("# Warning: I have asked for one single table! I will assume this line was a comment and proceed reading and loading the previous table line by line.")
elif next_line != None:
if len(next_line) != last_col+1:
print(f"# Error (in the table format): The row {len(table_submitted)+row_names_start_from} (rows are counted starting from {row_names_start_from}) of your table contains {len(next_line)} elements whereas all previous rows contain {last_col+1} elements.")
exit(1)
table_submitted.append(list(map(convert2number, next_line)))
if any(_== None for _ in table_submitted[-1]):
print(f"# Error (in the table format): All entries in your table should be numbers. Just check row {len(table_submitted)-1+row_names_start_from} in your file for a first occurrence of a type mismatch.")
exit(1)
one_by_one_check()
next_line, cmd = get_line()
print("# FILE GOT")
return table_submitted
avg_line_length: 56.532468 | max_line_length: 390 | alphanum_fraction: 0.637721
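A small illustration of the input protocol the service above describes: whitespace-separated rows of numbers, '#' comments ignored, and a '#end' line closing the table (the values are made up):

example_input = """\
# a 2x3 table of naturals
1 2 3
4 5 6
#end
"""
# Fed through stdin, get_one_numeric_table(should_be_nat=True) would accept
# this input and return [[1, 2, 3], [4, 5, 6]].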
hexsha: 0d41fdc3abb34a6668696f810b09df3d1f133f81 | size: 2,931 | ext: py | lang: Python
path: pynetworking/features/ats_vlan_config_interface_lexer.py | repo: alliedtelesis/py-networking | head_hexsha: 6c5d4bdafabfb4feef235a02344432e1f0336e48 | licenses: ["Apache-2.0"] (same for the stars, issues and forks records)
max_stars_count: 4, events 2015-04-24T20:36:56.000Z to 2021-05-03T20:21:54.000Z | max_issues_count: 1, events 2019-07-14T07:07:21.000Z to 2019-07-14T07:07:21.000Z | max_forks_count: 3, events 2015-04-24T20:37:04.000Z to 2017-03-02T15:14:46.000Z
content:
# -*- coding: utf-8 -*-
import re
import ply.lex as lex
class VlanInterfaceConfigLexer(object):
states = (
('ifport', 'exclusive'),
('ifportrange', 'exclusive'),
)
tokens = (
'IF_PORT',
'IF_PORT_RANGE',
'IF_VLAN',
'switchport_mode',
'switchport_access',
'switchport_trunk_native',
'switchport_trunk_allowed',
'END',
)
def t_if_end(self, t):
r'!.*'
t.lexer.begin('INITIAL')
def t_INITIAL_IF_PORT_RANGE(self, t):
r'interface\s+range\s+ethernet\s+[^\n]+\n'
t.value = re.split('\s+', t.value, maxsplit=3)[3]
t.lexer.push_state('ifportrange')
t.lexer.id = t.value
def t_INITIAL_IF_PORT(self, t):
r'interface\s+ethernet\s+\d\/[eg]\d+\n'
t.value = re.split('\s+', t.value)[2]
t.lexer.push_state('ifport')
t.lexer.id = t.value
def t_ifport_ifportrange_switchport_mode(self, t):
r'switchport\s+mode\s+(access|trunk)'
v = re.split('\s+', t.value, maxsplit=2)
t.value = (t.lexer.id, v[2])
return t
def t_ifport_ifportrange_switchport_access(self, t):
r'switchport\s+access\s+vlan\s+\d+'
v = re.split('\s+', t.value)
t.value = (t.lexer.id, v[3])
return t
def t_ifport_ifportrange_switchport_trunk_native(self, t):
r'switchport\s+trunk\s+native\s+vlan\s+\d+'
v = re.split('\s+', t.value)
t.value = (t.lexer.id, v[4])
return t
def t_ifport_ifportrange_switchport_trunk_allowed(self, t):
r'switchport\s+trunk\s+allowed\s+vlan\s+add\s+\d+'
v = re.split('\s+', t.value)
t.value = (t.lexer.id, v[5])
return t
def t_ifport_ifportrange_end(self, t):
r'exit'
t.lexer.pop_state()
def t_ANY_newline(self, t):
r'\n+'
pass
t_ANY_ignore = ' \t'
def t_ifport_ifportrange_SKIP(self, t):
r'[a-z].*\n'
pass
def t_INITIAL_SKIP(self, t):
r'[a-z].*'
pass
def t_ANY_error(self, t): # pragma: no cover
print "Illegal character '%s'" % t.value[0]
t.lexer.skip(1)
def __init__(self):
self.lexer = lex.lex(object=self, debug=0)
def run(self, data):
self.lexer.input(data)
result = {}
for tok in self.lexer:
t = tok.type.replace('_', ' ')
if tok.value[0] in result.keys():
if t in result[tok.value[0]]:
if isinstance(result[tok.value[0]][t], unicode):
result[tok.value[0]][t] = [result[tok.value[0]][t], tok.value[1]]
else:
result[tok.value[0]][t].append(tok.value[1])
else:
result[tok.value[0]][t] = tok.value[1]
else:
result[tok.value[0]] = {t: tok.value[1]}
return result
avg_line_length: 28.182692 | max_line_length: 89 | alphanum_fraction: 0.528489
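An assumed usage of VlanInterfaceConfigLexer on a small ATS-style configuration fragment; the config text is illustrative, not captured from a real device, and the module itself targets Python 2 (print statement, unicode):

config = """\
interface ethernet 1/e1
switchport mode access
switchport access vlan 10
exit
!
"""
lexer = VlanInterfaceConfigLexer()
print(lexer.run(config))
# -> {'1/e1': {'switchport mode': 'access', 'switchport access': '10'}}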
hexsha: db9d4fb4678ecbb690deb214fe46e62301b6d84a | size: 890 | ext: py | lang: Python | path: release/scripts/templates/driver_functions.py
max_stars_repo: wycivil08/blendocv (f6cce83e1f149fef39afa8043aade9c64378f33e), licenses ["Unlicense"], stars: 30, events 2015-01-29T14:06:05.000Z to 2022-01-10T07:47:29.000Z
max_issues_repo: ttagu99/blendocv (f6cce83e1f149fef39afa8043aade9c64378f33e), licenses ["Unlicense"], issues: 1, events 2017-02-20T20:57:48.000Z to 2018-12-19T23:44:38.000Z
max_forks_repo: ttagu99/blendocv (f6cce83e1f149fef39afa8043aade9c64378f33e), licenses ["Unlicense"], forks: 15, events 2015-04-23T02:38:36.000Z to 2021-03-01T20:09:39.000Z
content:
# This script defines functions to be used directly in drivers expressions to
# extend the builtin set of python functions.
#
# This can be executed manually or set to 'Register' to
# initialize the functions on file load.
# two sample functions
def invert(f):
""" Simple function call:
invert(val)
"""
return 1.0 - f
uuid_store = {}
def slow_value(value, fac, uuid):
""" Delay the value by a factor, use a unique string to allow
use in multiple drivers without conflict:
slow_value(val, 0.5, "my_value")
"""
value_prev = uuid_store.get(uuid, value)
uuid_store[uuid] = value_new = (value_prev * fac) + (value * (1.0 - fac))
return value_new
import bpy
# Add variable defined in this script into the drivers namespace.
bpy.app.driver_namespace["invert"] = invert
bpy.app.driver_namespace["slow_value"] = slow_value
avg_line_length: 25.428571 | max_line_length: 77 | alphanum_fraction: 0.68427
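The smoothing helper above can be exercised on its own as well; a short assumed demo of slow_value() outside Blender, where the uuid string "demo" is arbitrary:

for v in [0.0, 1.0, 1.0, 1.0]:
    # Each call blends the previous stored value with the new one.
    print(slow_value(v, 0.5, "demo"))   # 0.0, 0.5, 0.75, 0.875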
hexsha: b2ba4b07e83ee203d0b4282ba0772985cfb69142 | size: 28 | ext: py | lang: Python
path: mbserializer/tests/__init__.py | repo: gomafutofu/mbserializer | head_hexsha: 013f287520fa593d5f8162ce31097f9c1bf34622 | licenses: ["MIT"] (same for the stars, issues and forks records)
max_stars_count: 1, events 2015-09-08T05:56:23.000Z to 2015-09-08T05:56:23.000Z | max_issues_count: null | max_forks_count: null
content:
__author__ = 'Junki Ishida'
avg_line_length: 14 | max_line_length: 27 | alphanum_fraction: 0.75
hexsha: 46dede09821db8d973ea630be006581c99400533 | size: 987 | ext: py | lang: Python
path: sp_products/suggested_keywords.py | repo: wufangjie/adapi | head_hexsha: 0015cfef1b85f2c64be828c3ce3122469763fa83 | licenses: ["MIT"] (same for the stars, issues and forks records)
max_stars_count: 5, events 2021-01-07T07:11:39.000Z to 2021-10-30T09:57:01.000Z | max_issues_count: 1, events 2020-08-10T06:49:11.000Z to 2020-08-10T06:49:57.000Z | max_forks_count: 4, events 2021-02-03T12:38:37.000Z to 2021-10-30T09:57:08.000Z
content:
from ..adapi import Client
class SuggestKeywords(Client):
def get_suggest_keywords_by_ad_group_id(self, ad_group_id):
self.method = "get"
self.uri_path = "/v2/sp/adGroups/{}/suggested/keywords".format(ad_group_id)
return self.execute()
def get_suggest_keywords_extended_by_ad_group_id(self, ad_group_id):
self.method = "get"
self.uri_path = "/v2/sp/adGroups/{}//suggested/keywords/extended".format(ad_group_id)
return self.execute()
def get_suggest_keywords_by_asin(self, asin):
self.method = "get"
self.uri_path = "/v2/sp/asin/{}/suggested/keywords".format(asin)
return self.execute()
def get_suggest_keywords_by_asins(self, asins, max_num_suggestions=None):
self.method = "get"
self.uri_path = "/v2/sp/asin/suggested/keywords"
self.data = {
"asins": asins,
"maxNumSuggestions": max_num_suggestions
}
return self.execute()
avg_line_length: 29.029412 | max_line_length: 93 | alphanum_fraction: 0.655522
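A hedged sketch of how the client above might be called. The constructor arguments of the base Client class are not shown in this file, so its configuration is assumed, and the ASIN is a placeholder:

client = SuggestKeywords()   # credential/profile setup assumed to happen in Client
print(client.get_suggest_keywords_by_asin("B000000000"))
print(client.get_suggest_keywords_by_asins(["B000000000"], max_num_suggestions=10))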
hexsha: 64cd3683b67a8bb67ad65d03b75b21e34000b7f6 | size: 5,238 | ext: py | lang: Python
path: models.py | repo: twuilliam/open-search | head_hexsha: 5f74e3de5552a185e5d13d706bb3a9322606e704 | licenses: ["MIT"] (same for the stars, issues and forks records)
max_stars_count: 10, events 2020-07-29T13:06:20.000Z to 2022-03-29T14:50:28.000Z | max_issues_count: null | max_forks_count: 4, events 2020-10-05T02:18:04.000Z to 2022-03-29T07:26:30.000Z
content:
import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
from torchvision import models
from utils import cosine_similarity
class VGG16(nn.Module):
def __init__(self, pretrained=True):
super(VGG16, self).__init__()
model = models.vgg16(pretrained=pretrained)
self.features = model.features
layers = list(model.classifier.children())[:-1]
self.classifier = nn.Sequential(*layers)
def forward(self, x):
# from 224x224 to 4096
x = self.features(x)
x = self.classifier(x.view(x.size(0), -1))
return x
class VGG19(nn.Module):
def __init__(self, pretrained=True):
super(VGG19, self).__init__()
model = models.vgg19(pretrained=pretrained)
self.features = model.features
layers = list(model.classifier.children())[:-1]
self.classifier = nn.Sequential(*layers)
def forward(self, x):
# from 224x224 to 4096
x = self.features(x)
x = self.classifier(x.view(x.size(0), -1))
return x
class ResNet50(nn.Module):
def __init__(self, pretrained=True):
super(ResNet50, self).__init__()
model = models.resnet50(pretrained=pretrained)
layers = list(model.children())[:-1]
self.model = nn.Sequential(*layers)
def forward(self, x):
# from 224x224 to 2048
x = self.model(x)
return x.view(x.size(0), -1)
def logits(self, x):
return self.last_layer(x)
class SEResNet50(nn.Module):
def __init__(self, pretrained=True):
super(SEResNet50, self).__init__()
import pretrainedmodels
if pretrained:
model = pretrainedmodels.se_resnet50()
else:
model = pretrainedmodels.se_resnet50(pretrained=None)
layers = list(model.children())[:-1]
self.model = nn.Sequential(*layers)
def forward(self, x):
# from 224x224 to 2048
x = self.model(x)
return x.view(x.size(0), -1)
class LinearProjection(nn.Module):
'''Linear projection'''
def __init__(self, n_in, n_out):
super(LinearProjection, self).__init__()
self.fc_embed = nn.Linear(n_in, n_out, bias=True)
self.bn1d = nn.BatchNorm1d(n_out)
self._init_params()
def forward(self, x):
x = self.fc_embed(x)
x = self.bn1d(x)
return x
def _init_params(self):
nn.init.xavier_normal(self.fc_embed.weight)
nn.init.constant(self.fc_embed.bias, 0)
nn.init.constant(self.bn1d.weight, 1)
nn.init.constant(self.bn1d.bias, 0)
class ConvNet(nn.Module):
def __init__(self, backbone, embedding):
super(ConvNet, self).__init__()
self.backbone = backbone
self.embedding = embedding
def forward(self, x):
x = self.backbone(x)
x = self.embedding(x)
return x
class ProxyNet(nn.Module):
"""ProxyNet"""
def __init__(self, n_classes, dim,
proxies=None, L2=False):
super(ProxyNet, self).__init__()
self.n_classes = n_classes
self.dim = dim
self.proxies = nn.Embedding(n_classes, dim,
scale_grad_by_freq=False)
if proxies is None:
self.proxies.weight = nn.Parameter(
torch.randn(self.n_classes, self.dim),
requires_grad=True)
else:
self.proxies.weight = nn.Parameter(proxies, requires_grad=False)
if L2:
self.normalize_proxies()
def normalize_proxies(self):
norm = self.proxies.weight.data.norm(p=2, dim=1)[:, None]
self.proxies.weight.data = self.proxies.weight.data / norm
def forward(self, y_true):
proxies_y_true = self.proxies(Variable(y_true))
return proxies_y_true
class ProxyLoss(nn.Module):
def __init__(self, temperature=1.):
super(ProxyLoss, self).__init__()
self.temperature = temperature
def forward(self, x, y, proxies):
"""Proxy loss
Arguments:
x (Tensor): batch of features
y (LongTensor): corresponding instance
"""
loss = self.softmax_embedding_loss(x, y, proxies)
preds = self.predict(x, proxies)
acc = (y == preds).type(torch.FloatTensor).mean()
return loss.mean(), acc
def softmax_embedding_loss(self, x, y, proxies):
idx = torch.from_numpy(np.arange(len(x), dtype=np.int64)).cuda()
diff_iZ = cosine_similarity(x, proxies)
numerator_ip = torch.exp(diff_iZ[idx, y] / self.temperature)
denominator_ip = torch.exp(diff_iZ / self.temperature).sum(1) + 1e-8
return - torch.log(numerator_ip / denominator_ip)
def classify(self, x, proxies):
idx = torch.from_numpy(np.arange(len(x), dtype=np.int64)).cuda()
diff_iZ = cosine_similarity(x, proxies)
numerator_ip = torch.exp(diff_iZ[idx, :] / self.temperature)
denominator_ip = torch.exp(diff_iZ / self.temperature).sum(1) + 1e-8
probs = numerator_ip / denominator_ip[:, None]
return probs
def predict(self, x, proxies):
probs = self.classify(x, proxies)
return probs.max(1)[1].data
avg_line_length: 29.761364 | max_line_length: 76 | alphanum_fraction: 0.612447
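A minimal assumed usage of ProxyNet and ProxyLoss above with random tensors; a CUDA device is required because ProxyLoss builds its index tensor with .cuda(), the sizes are arbitrary, and the module targets an older torch API (Variable, .data):

import torch

n_classes, dim, batch = 10, 64, 8
proxynet = ProxyNet(n_classes, dim).cuda()
criterion = ProxyLoss(temperature=0.1)

x = torch.randn(batch, dim).cuda()                # batch of embeddings
y = torch.randint(0, n_classes, (batch,)).cuda()  # class labels
loss, acc = criterion(x, y, proxynet.proxies.weight)
loss.backward()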
hexsha: e2f7627a494c774560dd7df0e635044b102909f1 | size: 1,741 | ext: py | lang: Python | path: mars/worker/prochelper.py
max_stars_repo: pingrunhuang/mars (ae920c374e9844d7426d0cc09c0d97059dc5341c), licenses ["Apache-2.0"], stars: 1, events 2019-09-22T16:00:48.000Z to 2019-09-22T16:00:48.000Z
max_issues_repo: turboFei/mars (cde691285d921add5460944764c7278e7ddec8ff), licenses ["Apache-2.0"], issues: null, events: null
max_forks_repo: turboFei/mars (cde691285d921add5460944764c7278e7ddec8ff), licenses ["Apache-2.0"], forks: null, events: null
content:
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from .utils import WorkerActor
logger = logging.getLogger(__name__)
class ProcessHelperActor(WorkerActor):
"""
Actor handling utils on every process
"""
def __init__(self):
super(ProcessHelperActor, self).__init__()
self._dispatch_ref = None
self._daemon_ref = None
def post_create(self):
from .dispatcher import DispatchActor
from .daemon import WorkerDaemonActor
super(ProcessHelperActor, self).post_create()
self._dispatch_ref = self.promise_ref(DispatchActor.default_name())
self._dispatch_ref.register_free_slot(self.uid, 'process_helper')
self._daemon_ref = self.ctx.actor_ref(WorkerDaemonActor.default_name())
if self.ctx.has_actor(self._daemon_ref):
self._daemon_ref.register_process(self.ref(), os.getpid(), _tell=True)
else:
self._daemon_ref = None
def free_mkl_buffers(self):
"""
Free MKL buffer
"""
from ..lib.mkl_interface import mkl_free_buffers
if mkl_free_buffers is None:
return
mkl_free_buffers()
avg_line_length: 32.240741 | max_line_length: 82 | alphanum_fraction: 0.699598
hexsha: 34e043a44c07582eb2be3b2e63d9ffe81dde4f20 | size: 10,658 | ext: py | lang: Python
path: quantstats/utils.py | repo: gabrieljenik/quantstats | head_hexsha: a76c1e3f5cfab91305c91f4deea132413222c3e7 | licenses: ["Apache-2.0"] (same for the stars, issues and forks records)
max_stars_count: 2, events 2021-08-01T15:38:34.000Z to 2021-10-01T13:20:29.000Z | max_issues_count: null | max_forks_count: 2, events 2021-07-11T12:55:31.000Z to 2021-08-31T06:57:05.000Z
content:
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# QuantStats: Portfolio analytics for quants
# https://github.com/ranaroussi/quantstats
#
# Copyright 2019 Ran Aroussi
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io as _io
import datetime as _dt
import pandas as _pd
import numpy as _np
import yfinance as _yf
from . import stats as _stats
def _mtd(df):
return df[df.index >= _dt.datetime.now(
).strftime('%Y-%m-01')]
def _qtd(df):
date = _dt.datetime.now()
for q in [1, 4, 7, 10]:
if date.month <= q:
return df[df.index >= _dt.datetime(
date.year, q, 1).strftime('%Y-%m-01')]
return df[df.index >= date.strftime('%Y-%m-01')]
def _ytd(df):
return df[df.index >= _dt.datetime.now(
).strftime('%Y-01-01')]
def _pandas_date(df, dates):
if not isinstance(dates, list):
dates = [dates]
return df[df.index.isin(dates)]
def _pandas_current_month(df):
n = _dt.datetime.now()
daterange = _pd.date_range(_dt.date(n.year, n.month, 1), n)
return df[df.index.isin(daterange)]
def multi_shift(df, shift=3):
""" get last N rows relative to another row in pandas """
if isinstance(df, _pd.Series):
df = _pd.DataFrame(df)
dfs = [df.shift(i) for i in _np.arange(shift)]
for ix, dfi in enumerate(dfs[1:]):
dfs[ix + 1].columns = [str(col) for col in dfi.columns + str(ix + 1)]
    return _pd.concat(dfs, axis=1, sort=True)
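# Illustrative sketch (hypothetical DataFrame; not part of the original module):
# multi_shift adds one shifted copy of each column per step, suffixed with the
# shift index. For a single 'close' column and shift=3:
#   df = _pd.DataFrame({'close': [1., 2., 3., 4.]})
#   multi_shift(df, shift=3)
#   # -> columns 'close', 'close1', 'close2', where 'close1' is 'close' shifted by one row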
def to_returns(prices, rf=0.):
""" Calculates the simple arithmetic returns of a price series """
return _prepare_returns(prices, rf)
def to_prices(returns, base=1e5):
""" Converts returns series to price data """
returns = returns.copy().fillna(0).replace(
[_np.inf, -_np.inf], float('NaN'))
return base + base * _stats.compsum(returns)
def log_returns(returns, rf=0., nperiods=None):
""" shorthand for to_log_returns """
return to_log_returns(returns, rf, nperiods)
def to_log_returns(returns, rf=0., nperiods=None):
""" Converts returns series to log returns """
returns = _prepare_returns(returns, rf, nperiods)
try:
return _np.log(returns+1).replace([_np.inf, -_np.inf], float('NaN'))
except Exception:
return 0.
def exponential_stdev(returns, window=30, is_halflife=False):
""" Returns series representing exponential volatility of returns """
returns = _prepare_returns(returns)
halflife = window if is_halflife else None
return returns.ewm(com=None, span=window,
halflife=halflife, min_periods=window).std()
def rebase(prices, base=100.):
"""
    Rebase all series to a given initial base.
This makes comparing/plotting different series together easier.
Args:
* prices: Expects a price series/dataframe
* base (number): starting value for all series.
"""
return prices.dropna() / prices.dropna().iloc[0] * base
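# Illustrative sketch (hypothetical price series; not part of the original module):
#   prices = _pd.Series([50., 55., 60.])
#   rebase(prices, base=100.)
#   # -> 100.0, 110.0, 120.0  (every series starts from the same base value)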
def group_returns(returns, groupby, compounded=False):
""" summarize returns
group_returns(df, df.index.year)
group_returns(df, [df.index.year, df.index.month])
"""
if compounded:
return returns.groupby(groupby).apply(_stats.comp)
return returns.groupby(groupby).sum()
def aggregate_returns(returns, period=None, compounded=True):
""" Aggregates returns based on date periods """
if period is None or 'day' in period:
return returns
index = returns.index
if 'month' in period:
return group_returns(returns, index.month, compounded=compounded)
if 'quarter' in period:
return group_returns(returns, index.quarter, compounded=compounded)
if period == "A" or any(x in period for x in ['year', 'eoy', 'yoy']):
return group_returns(returns, index.year, compounded=compounded)
if 'week' in period:
return group_returns(returns, index.week, compounded=compounded)
if 'eow' in period or period == "W":
return group_returns(returns, [index.year, index.week],
compounded=compounded)
if 'eom' in period or period == "M":
return group_returns(returns, [index.year, index.month],
compounded=compounded)
if 'eoq' in period or period == "Q":
return group_returns(returns, [index.year, index.quarter],
compounded=compounded)
if not isinstance(period, str):
return group_returns(returns, period, compounded)
return returns
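# Illustrative sketch (hypothetical daily returns; not part of the original module):
#   r = _pd.Series(0.01, index=_pd.date_range('2020-01-01', periods=90))
#   aggregate_returns(r, 'month')   # groups by calendar month only (index.month)
#   aggregate_returns(r, 'eom')     # groups by (year, month) pairs
#   aggregate_returns(r, 'eoy')     # one compounded figure per year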
def to_excess_returns(returns, rf, nperiods=None):
"""
Calculates excess returns by subtracting
risk-free returns from total returns
Args:
* returns (Series, DataFrame): Returns
* rf (float, Series, DataFrame): Risk-Free rate(s)
* nperiods (int): Optional. If provided, will convert rf to different
frequency using deannualize
Returns:
* excess_returns (Series, DataFrame): Returns - rf
"""
if isinstance(rf, int):
rf = float(rf)
if not isinstance(rf, float):
rf = rf[rf.index.isin(returns.index)]
if nperiods is not None:
# deannualize
rf = _np.power(1 + rf, 1. / nperiods) - 1.
return returns - rf
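# Worked example of the deannualization step (illustrative, not from the source):
# with an annual risk-free rate rf = 0.05 and nperiods = 252 trading days,
#   (1 + 0.05) ** (1 / 252) - 1  ~ 0.000194
# i.e. roughly 1.9 basis points is subtracted from each daily return.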
def _prepare_prices(data, base=1.):
""" Converts return data into prices + cleanup """
data = data.copy()
if isinstance(data, _pd.DataFrame):
for col in data.columns:
if data[col].dropna().min() <= 0 or data[col].dropna().max() < 1:
data[col] = to_prices(data[col], base)
# is it returns?
# elif data.min() < 0 and data.max() < 1:
elif data.min() < 0 or data.max() < 1:
data = to_prices(data, base)
if isinstance(data, (_pd.DataFrame, _pd.Series)):
data = data.fillna(0).replace(
[_np.inf, -_np.inf], float('NaN'))
return data
def _prepare_returns(data, rf=0., nperiods=None):
""" Converts price data into returns + cleanup """
data = data.copy()
if isinstance(data, _pd.DataFrame):
for col in data.columns:
if data[col].dropna().min() >= 0 or data[col].dropna().max() > 1:
data[col] = data[col].pct_change()
elif data.min() >= 0 and data.max() > 1:
data = data.pct_change()
# cleanup data
data = data.replace([_np.inf, -_np.inf], float('NaN'))
if isinstance(data, (_pd.DataFrame, _pd.Series)):
data = data.fillna(0).replace(
[_np.inf, -_np.inf], float('NaN'))
if rf > 0:
return to_excess_returns(data, rf, nperiods)
return data
def download_returns(ticker, period="max"):
if isinstance(period, _pd.DatetimeIndex):
p = {"start": period[0]}
else:
p = {"period": period}
return _yf.Ticker(ticker).history(**p)['Close'].pct_change()
def _prepare_benchmark(benchmark=None, period="max", rf=0.):
"""
fetch benchmark if ticker is provided, and pass through
_prepare_returns()
    period can be a period string or an (expected) _pd.DatetimeIndex range
"""
if benchmark is None:
return None
if isinstance(benchmark, str):
benchmark = download_returns(benchmark)
elif isinstance(benchmark, _pd.DataFrame):
benchmark = benchmark[benchmark.columns[0]].copy()
if isinstance(period, _pd.DatetimeIndex):
benchmark = benchmark[benchmark.index.isin(period)]
return _prepare_returns(benchmark.dropna(), rf=rf)
def _round_to_closest(val, res, decimals=None):
""" round to closest resolution """
if decimals is None and "." in str(res):
decimals = len(str(res).split('.')[1])
return round(round(val / res) * res, decimals)
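# Illustrative sketch (not part of the original module):
#   _round_to_closest(7.3, 0.25)     # -> 7.25  (nearest multiple of 0.25)
#   _round_to_closest(3.1416, 0.05)  # -> 3.15  (decimals inferred from '0.05')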
def _file_stream():
""" Returns a file stream """
return _io.BytesIO()
def _in_notebook(matplotlib_inline=False):
""" Identify enviroment (notebook, terminal, etc) """
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
# Jupyter notebook or qtconsole
if matplotlib_inline:
get_ipython().magic("matplotlib inline")
return True
if shell == 'TerminalInteractiveShell':
# Terminal running IPython
return False
# Other type (?)
return False
except NameError:
# Probably standard Python interpreter
return False
def _count_consecutive(data):
""" Counts consecutive data (like cumsum() with reset on zeroes) """
def _count(data):
return data * (data.groupby(
(data != data.shift(1)).cumsum()).cumcount() + 1)
if isinstance(data, _pd.DataFrame):
for col in data.columns:
data[col] = _count(data[col])
return data
return _count(data)
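# Illustrative sketch (not part of the original module):
#   s = _pd.Series([1, 1, 0, 1, 1, 1])
#   _count_consecutive(s)
#   # -> 1, 2, 0, 1, 2, 3  (running streak length; zero entries stay zero)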
def _score_str(val):
""" Returns + sign for positive values (used in plots) """
return ("" if "-" in val else "+") + str(val)
def make_portfolio(returns, start_balance=1e5,
mode="comp", round_to=None):
""" Calculates compounded value of portfolio """
returns = _prepare_returns(returns)
if mode.lower() in ["cumsum", "sum"]:
p1 = start_balance + start_balance * returns.cumsum()
elif mode.lower() in ["compsum", "comp"]:
p1 = to_prices(returns, start_balance)
else:
# fixed amount every day
comp_rev = (start_balance + start_balance *
returns.shift(1)).fillna(start_balance) * returns
p1 = start_balance + comp_rev.cumsum()
# add day before with starting balance
p0 = _pd.Series(data=start_balance,
index=p1.index + _pd.Timedelta(days=-1))[:1]
portfolio = _pd.concat([p0, p1])
if isinstance(returns, _pd.DataFrame):
portfolio.loc[:1, :] = start_balance
portfolio.drop(columns=[0], inplace=True)
if round_to:
portfolio = _np.round(portfolio, round_to)
return portfolio
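# Illustrative sketch (hypothetical returns; not part of the original module):
#   r = _pd.Series([0.01, -0.02, 0.03],
#                  index=_pd.date_range('2020-01-02', periods=3))
#   make_portfolio(r, start_balance=1e5)
#   # -> a price-like series starting at 100000 one day before the first return,
#   #    then compounded forward (mode='comp' by default)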
def _flatten_dataframe(df, set_index=None):
""" Dirty method for flattening multi-index dataframe """
s_buf = _io.StringIO()
df.to_csv(s_buf)
s_buf.seek(0)
df = _pd.read_csv(s_buf)
if set_index is not None:
df.set_index(set_index, inplace=True)
return df
| 30.192635
| 77
| 0.633702
|
3894ed3b72f9993a722248d15a2554286b2a5012
| 1,566
|
py
|
Python
|
tests/models/rl/unit/test_a2c.py
|
lavoiems/lightning-bolts
|
208e92ba3dcdbc029afd37e09ec9461fbcf3f293
|
[
"Apache-2.0"
] | 822
|
2020-04-21T03:30:43.000Z
|
2021-03-07T06:41:31.000Z
|
tests/models/rl/unit/test_a2c.py
|
lavoiems/lightning-bolts
|
208e92ba3dcdbc029afd37e09ec9461fbcf3f293
|
[
"Apache-2.0"
] | 538
|
2020-04-18T01:07:58.000Z
|
2021-03-09T13:48:50.000Z
|
tests/models/rl/unit/test_a2c.py
|
lavoiems/lightning-bolts
|
208e92ba3dcdbc029afd37e09ec9461fbcf3f293
|
[
"Apache-2.0"
] | 162
|
2020-04-17T15:44:54.000Z
|
2021-03-09T14:04:02.000Z
|
import argparse
import torch
from torch import Tensor
from pl_bolts.models.rl.advantage_actor_critic_model import AdvantageActorCritic
def test_a2c_loss():
"""Test the reinforce loss function."""
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser = AdvantageActorCritic.add_model_specific_args(parent_parser)
args_list = [
"--env",
"CartPole-v0",
"--batch_size",
"32",
]
hparams = parent_parser.parse_args(args_list)
model = AdvantageActorCritic(**vars(hparams))
batch_states = torch.rand(32, 4)
batch_actions = torch.rand(32).long()
batch_qvals = torch.rand(32)
loss = model.loss(batch_states, batch_actions, batch_qvals)
assert isinstance(loss, Tensor)
def test_a2c_train_batch():
"""Tests that a single batch generates correctly."""
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser = AdvantageActorCritic.add_model_specific_args(parent_parser)
args_list = [
"--env",
"CartPole-v0",
"--batch_size",
"32",
]
hparams = parent_parser.parse_args(args_list)
model = AdvantageActorCritic(**vars(hparams))
model.n_steps = 4
model.hparams.batch_size = 1
xp_dataloader = model.train_dataloader()
batch = next(iter(xp_dataloader))
assert len(batch) == 3
assert len(batch[0]) == model.hparams.batch_size
assert isinstance(batch, list)
assert isinstance(batch[0], Tensor)
assert isinstance(batch[1], Tensor)
assert isinstance(batch[2], Tensor)
| 27.964286
| 80
| 0.692209
|
e05e9a95170a5d9cdc87f3c3adb0bb27ee4c2f65
| 763
|
py
|
Python
|
boost/tools/build/v2/test/core_option_l.py
|
randolphwong/mcsema
|
eb5b376736e7f57ff0a61f7e4e5a436bbb874720
|
[
"BSD-3-Clause"
] | 11
|
2016-04-12T16:29:29.000Z
|
2021-06-28T11:01:57.000Z
|
boost/tools/build/v2/test/core_option_l.py
|
randolphwong/mcsema
|
eb5b376736e7f57ff0a61f7e4e5a436bbb874720
|
[
"BSD-3-Clause"
] | 3
|
2018-10-31T19:35:14.000Z
|
2019-06-04T17:11:27.000Z
|
boost/tools/build/v2/test/core_option_l.py
|
randolphwong/mcsema
|
eb5b376736e7f57ff0a61f7e4e5a436bbb874720
|
[
"BSD-3-Clause"
] | 9
|
2015-09-09T02:38:32.000Z
|
2021-01-30T00:24:24.000Z
|
#!/usr/bin/python
# Copyright 2007 Rene Rivera.
# Copyright 2011 Steven Watanabe
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
t = BoostBuild.Tester(pass_toolset=0)
t.write("sleep.bat","""@setlocal
@echo off
@REM timeout /T %1 /NOBREAK >nul
ping 127.0.0.1 -n 2 -w 1000 >nul
ping 127.0.0.1 -n %1 -w 1000 >nul
@endlocal
@exit /B 0
""")
t.write("file.jam", """
if $(NT)
{
SLEEP = @call sleep.bat ;
}
else
{
SLEEP = sleep ;
}
actions .a. {
echo 001
$(SLEEP) 4
echo 002
}
.a. sleeper ;
DEPENDS all : sleeper ;
""")
t.run_build_system("-ffile.jam -d1 -l2", status=1)
t.expect_output_line("2 second time limit exceeded")
t.cleanup()
| 15.895833
| 82
| 0.672346
|
239b8fd625aae72787ba7e8703d3178d696eee65
| 21,928
|
py
|
Python
|
applications/incompressible_fluid_application/python_scripts/monolithic_solver_lagrangian_compressible_two_fluids_splited.py
|
AndreaVoltan/MyKratos7.0
|
e977752722e8ef1b606f25618c4bf8fd04c434cc
|
[
"BSD-4-Clause"
] | 2
|
2020-04-30T19:13:08.000Z
|
2021-04-14T19:40:47.000Z
|
applications/incompressible_fluid_application/python_scripts/monolithic_solver_lagrangian_compressible_two_fluids_splited.py
|
Jacklwln/Kratos
|
12ffe332622d7e8ea3e4a10bc061beb9d8e6e8de
|
[
"BSD-4-Clause"
] | 1
|
2020-04-30T19:19:09.000Z
|
2020-05-02T14:22:36.000Z
|
applications/incompressible_fluid_application/python_scripts/monolithic_solver_lagrangian_compressible_two_fluids_splited.py
|
Jacklwln/Kratos
|
12ffe332622d7e8ea3e4a10bc061beb9d8e6e8de
|
[
"BSD-4-Clause"
] | 1
|
2020-06-12T08:51:24.000Z
|
2020-06-12T08:51:24.000Z
|
from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# importing the Kratos Library
from KratosMultiphysics import *
from KratosMultiphysics.IncompressibleFluidApplication import *
from KratosMultiphysics.PFEMApplication import *
from KratosMultiphysics.MeshingApplication import *
from KratosMultiphysics.ExternalSolversApplication import *
CheckForPreviousImport()
def AddVariables(model_part):
model_part.AddNodalSolutionStepVariable(VELOCITY)
model_part.AddNodalSolutionStepVariable(ACCELERATION)
model_part.AddNodalSolutionStepVariable(MESH_VELOCITY)
model_part.AddNodalSolutionStepVariable(PRESSURE)
model_part.AddNodalSolutionStepVariable(AIR_PRESSURE)
model_part.AddNodalSolutionStepVariable(WATER_PRESSURE)
model_part.AddNodalSolutionStepVariable(AIR_PRESSURE_DT)
model_part.AddNodalSolutionStepVariable(WATER_PRESSURE_DT)
model_part.AddNodalSolutionStepVariable(IS_FLUID)
model_part.AddNodalSolutionStepVariable(IS_WATER)
model_part.AddNodalSolutionStepVariable(IS_VISITED)
model_part.AddNodalSolutionStepVariable(IS_POROUS)
model_part.AddNodalSolutionStepVariable(IS_STRUCTURE)
model_part.AddNodalSolutionStepVariable(IS_FREE_SURFACE)
model_part.AddNodalSolutionStepVariable(IS_INTERFACE)
model_part.AddNodalSolutionStepVariable(IS_BOUNDARY)
model_part.AddNodalSolutionStepVariable(ERASE_FLAG)
model_part.AddNodalSolutionStepVariable(DISPLACEMENT)
model_part.AddNodalSolutionStepVariable(VISCOSITY)
model_part.AddNodalSolutionStepVariable(VISCOSITY_AIR)
model_part.AddNodalSolutionStepVariable(VISCOSITY_WATER)
model_part.AddNodalSolutionStepVariable(DENSITY)
model_part.AddNodalSolutionStepVariable(DENSITY_AIR)
model_part.AddNodalSolutionStepVariable(DENSITY_WATER)
model_part.AddNodalSolutionStepVariable(AIR_SOUND_VELOCITY)
model_part.AddNodalSolutionStepVariable(WATER_SOUND_VELOCITY)
model_part.AddNodalSolutionStepVariable(SOUND_VELOCITY)
model_part.AddNodalSolutionStepVariable(BODY_FORCE)
model_part.AddNodalSolutionStepVariable(NODAL_AREA)
model_part.AddNodalSolutionStepVariable(NODAL_H)
model_part.AddNodalSolutionStepVariable(ADVPROJ)
model_part.AddNodalSolutionStepVariable(DIVPROJ)
model_part.AddNodalSolutionStepVariable(THAWONE)
model_part.AddNodalSolutionStepVariable(THAWTWO)
model_part.AddNodalSolutionStepVariable(REACTION)
model_part.AddNodalSolutionStepVariable(REACTION_WATER_PRESSURE)
model_part.AddNodalSolutionStepVariable(EXTERNAL_PRESSURE)
model_part.AddNodalSolutionStepVariable(ARRHENIUS)
model_part.AddNodalSolutionStepVariable(DISTANCE)
model_part.AddNodalSolutionStepVariable(AUX_INDEX)
print("variables for monolithic solver lagrangian compressible solution added correctly")
def AddDofs(model_part):
for node in model_part.Nodes:
# adding dofs
node.AddDof(VELOCITY_X, REACTION_X)
node.AddDof(VELOCITY_Y, REACTION_Y)
node.AddDof(VELOCITY_Z, REACTION_Z)
node.AddDof(WATER_PRESSURE, REACTION_WATER_PRESSURE)
node.AddDof(AIR_PRESSURE, REACTION_AIR_PRESSURE)
print("dofs for the monolithic solver lagrangian compressible added correctly")
class MonolithicSolver:
#
def __init__(self, model_part, domain_size, box_corner1, box_corner2):
self.model_part = model_part
self.alpha = -0.1
self.move_mesh_strategy = 2
self.time_scheme = ResidualBasedPredictorCorrectorVelocityBossakSchemeCompressible(
self.alpha, self.move_mesh_strategy)
# definition of the solvers
# self.linear_solver = SkylineLUFactorizationSolver()
# self.linear_solver =SuperLUSolver()
pPrecond = DiagonalPreconditioner()
# pPrecond = ILU0Preconditioner()
self.linear_solver = BICGSTABSolver(1e-6, 5000, pPrecond)
# definition of the convergence criteria
# self.conv_criteria = UPCriteria(1e-7,1e-9,1e-7,1e-9)
self.conv_criteria = UPCriteria(1e-5, 1e-6, 1e-5, 1e-6)
self.max_iter = 2
self.SetDivided = ElemBasedBCUtilities(model_part)
self.ChooseElement = ChooseElementProcess(model_part, 2)
# default settings
self.echo_level = 1
self.CalculateReactionFlag = False
self.ReformDofSetAtEachStep = True
self.CalculateNormDxFlag = True
self.MoveMeshFlag = True
self.remeshing_flag = True
# MESH CHANGES
self.PfemUtils = PfemUtils()
self.MeshMover = MoveMeshProcess(self.model_part)
self.node_erase_process = NodeEraseProcess(model_part)
# self.Mesher = TriGenPFEMModeler()
# self.Mesher = MSuitePFEMModeler()
self.Mesher = TriGenPFEMSegment()
self.neigh_finder = FindNodalNeighboursProcess(model_part, 9, 18)
self.elem_neighbor_finder = FindElementalNeighboursProcess(
model_part, 2, 10)
self.alpha_shape = 10000.0
self.h_factor = 0.5
# assign IS_FLUID to all nodes
# for node in self.model_part.Nodes:
# node.SetSolutionStepValue(IS_FLUID,0,1.0)
# detecting free_surface to all nodes
for node in self.model_part.Nodes:
if (node.GetSolutionStepValue(IS_BOUNDARY) == 1 and node.GetSolutionStepValue(IS_STRUCTURE) != 1):
node.SetSolutionStepValue(IS_FREE_SURFACE, 0, 1.0)
# U NEED IT FOR ALPHA-shape
(self.neigh_finder).Execute()
self.Hfinder = FindNodalHProcess(model_part)
(self.Hfinder).Execute()
# runtime box
self.box_corner1 = box_corner1
self.box_corner2 = box_corner2
#
def Initialize(self, output_time_increment):
# creating the solution strategy
self.solver = NewtonRaphsonStrategy(
self.model_part,
self.time_scheme,
self.linear_solver,
self.conv_criteria,
self.max_iter,
self.CalculateReactionFlag,
self.ReformDofSetAtEachStep,
self.MoveMeshFlag)
(self.solver).SetEchoLevel(self.echo_level)
# time increment for output
self.output_time_increment = output_time_increment
self.next_output_time = self.output_time_increment
# self.CalculateDistanceAndDiviedSet(2);
# (self.neigh_finder).Execute();
# FIND NEIGHBOUR ELEMENTS AND COLORing
# (self.elem_neighbor_finder).ClearNeighbours()
# (self.elem_neighbor_finder).Execute()
# (self.PfemUtils).ColourAirWaterElement(self.model_part,2)
#
def Solve(self, time, gid_io):
# (self.neigh_finder).Execute();
# (self.solver).Solve()
# print"After solve before clear"
# (self.solver).Clear()
# print"After clear"
# (self.PfemUtils).MarkOuterNodes(self.box_corner1,self.box_corner2,(self.model_part).Nodes );
# (self.PfemUtils).MarkExcessivelyCloseNodes((self.model_part).Nodes, .05)
# (self.node_erase_process).Execute();
# self.Remesh()
# self.OutputStep(time,gid_io)
self.CalculateDistanceAndDiviedSet(2)
# self.AssignH()
# self.ImplosionDistToH()
# (FindElementalNeighboursProcess(self.model_part, 2, 10)).Execute()
(self.solver).Predict()
print("AFTER PREDICT")
self.Remesh()
print("AFTER REMESH")
self.DistToH()
(self.solver).Solve()
print("AFTER SOLVE")
(self.PfemUtils).MoveNodes(self.model_part)
print("AFTER Move")
(self.solver).Clear()
self.OutputStep(time, gid_io)
#
def EstimateDeltaTime(self, min_dt, max_dt):
print("Estimating delta time")
calc_dt = (
self.PfemUtils).EstimateDeltaTime(
min_dt,
max_dt,
self.model_part)
print("calculated dt")
return calc_dt
# def EstimateDeltaTime(self,min_dt,max_dt):
# print "Estimating delta time"
# return (self.UlfUtils).EstimateDeltaTime(max_dt,domain_size)
#
def SetEchoLevel(self, level):
(self.solver).SetEchoLevel(level)
#
# def Remesh(self):
#
# if (self.remeshing_flag==True):
# print "BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
# (self.Mesher).ReGenerateMesh("ASGSCompressible2D", "Monolithic2DNeumann",self.model_part,self.node_erase_process,True, True, self.alpha_shape, self.h_factor)
# (self.Mesher).ReGenerateMesh("ASGSCompressible2D", "Monolithic2DNeumann",self.model_part,self.node_erase_process,True, False, self.alpha_shape, self.h_factor)
# print "AAAAAAAAAAFFFFFFFFFFFFFTTTTTTTTTTTTTERRRRRRRRRRRRRR"
# calculating fluid neighbours before applying boundary conditions
# (self.neigh_finder).Execute();
#
def Remesh(self):
if (self.remeshing_flag):
(self.PfemUtils).MoveLonelyNodes(self.model_part)
#(self.MeshMover).Execute();
print(self.box_corner1)
(self.PfemUtils).MarkOuterNodes(
self.box_corner1, self.box_corner2, (self.model_part).Nodes)
(self.PfemUtils).MarkNodesTouchingWall(self.model_part, 2, .05)
(self.PfemUtils).MarkExcessivelyCloseNodes(
(self.model_part).Nodes, 0.5)
(self.PfemUtils).MarkNodesTouchingInterface(self.model_part, 2, .1)
# FIND NEIGHBOUR ELEMENTS AND COLORing
(self.elem_neighbor_finder).ClearNeighbours()
(self.elem_neighbor_finder).Execute()
(self.PfemUtils).ColourAirWaterElement(self.model_part, 2)
#
# (self.PfemUtils).InterfaceDetecting(self.model_part,2, .9)
# (self.PfemUtils).ChangeWallWaterFlag(self.model_part,2)
# (self.PfemUtils).ChangeInterfaceWaterFlag(self.model_part,2)
# for node in (self.model_part).Nodes:
# if(node.GetSolutionStepValue(IS_INTERFACE) == 1.0):
# print node.GetValue(ERASE_FLAG)
#(self.node_erase_process).Execute(); to be able to compute neighbors earase process is done inside the mesher
(self.neigh_finder).ClearNeighbours()
(self.neigh_finder).Execute()
# ((self.model_part).Elements).clear();
# ((self.model_part).Conditions).clear();
(self.Mesher).ReGenerateMesh("ASGSCompressible2D", "Monolithic2DNeumann",
self.model_part, self.node_erase_process, True, True, self.alpha_shape, self.h_factor)
# (self.Mesher).ReGenerateMesh("ASGSCOMPPRDC2D", "Monolithic2DNeumann",self.model_part,self.node_erase_process,True, False, self.alpha_shape, self.h_factor)
(self.elem_neighbor_finder).ClearNeighbours()
(self.elem_neighbor_finder).Execute()
# (self.neigh_finder).Execute();
(self.PfemUtils).ColourAirWaterElement(self.model_part, 2)
(self.PfemUtils).InterfaceDetecting(self.model_part, 2, .9)
(self.ChooseElement).Execute()
# calculating fluid neighbours before applying boundary conditions
(self.neigh_finder).ClearNeighbours()
(self.neigh_finder).Execute()
(self.PfemUtils).ApplyBoundaryConditions(self.model_part, 2)
(self.PfemUtils).IdentifyFluidNodes(self.model_part)
# (self.PfemUtils).ApplyMinimalPressureConditions(self.model_part);
# (self.PfemUtils).InterfaceDetecting(self.model_part,2, .9)
# (self.PfemUtils).ChangeWallWaterFlag(self.model_part,2)
# (self.PfemUtils).ChangeInterfaceWaterFlag(self.model_part,2)
# (self.PfemUtils).ColourAirWaterElement(self.model_part,2)
# for node in self.model_part.Nodes:
# node.SetSolutionStepValue(IS_FREE_SURFACE,0,0.0)
#
# for node in self.model_part.Nodes:
# if (node.GetSolutionStepValue(IS_BOUNDARY)==1 and node.GetSolutionStepValue(IS_STRUCTURE)!=1):
# node.SetSolutionStepValue(IS_FREE_SURFACE,0,1.0)
#
def FindNeighbours(self):
(self.neigh_finder).Execute()
#
def OutputStep(self, time, gid_io):
if(time >= self.next_output_time):
self.next_output_time = self.next_output_time + \
self.output_time_increment
# writing mesh
gid_io.InitializeMesh(time)
gid_io.WriteNodeMesh((self.model_part).GetMesh())
gid_io.WriteMesh((self.model_part).GetMesh())
gid_io.FinalizeMesh()
gid_io.InitializeResults(time, (self.model_part).GetMesh())
gid_io.WriteNodalResults(
PRESSURE,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
EXTERNAL_PRESSURE,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
IS_FREE_SURFACE,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
IS_BOUNDARY,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
IS_STRUCTURE,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
IS_INTERFACE,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
VELOCITY,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
MESH_VELOCITY,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
DENSITY,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
AIR_PRESSURE,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
WATER_PRESSURE,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
DENSITY_AIR,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
DENSITY_WATER,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
AIR_SOUND_VELOCITY,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
WATER_SOUND_VELOCITY,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
IS_FLUID,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
IS_WATER,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
NODAL_H,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
DISTANCE,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
DISPLACEMENT,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
IS_VISITED,
(self.model_part).Nodes,
time,
0)
gid_io.WriteNodalResults(
AUX_INDEX,
(self.model_part).Nodes,
time,
0)
gid_io.PrintOnGaussPoints(IS_WATER_ELEMENT, self.model_part, time)
gid_io.Flush()
gid_io.FinalizeResults()
#
def CalculateDistanceAndDiviedSet(self, domain_size):
(self.neigh_finder).Execute()
distance_tools = ElemBasedDistanceUtilities(self.model_part)
distance_calculator = BodyDistanceCalculationUtils()
        # assign IS_VISITED1 to elem with DISTANCE>=0 and change DISTANCE to positive for external ones
# Assign Zero distance to interface nodes
for node in (self.model_part).Nodes:
if(node.GetSolutionStepValue(IS_INTERFACE) == 1.0):
node.SetSolutionStepValue(DISTANCE, 0, 0.0)
distance_tools.MarkExternalAndMixedNodes()
distance_tools.ChangeSignToDistance()
# calculate distances towards the interior of the domain
if(domain_size == 2):
distance_calculator.CalculateDistances2D(
(self.model_part).Elements,
DISTANCE,
True)
else:
distance_calculator.CalculateDistances3D(
(self.model_part).Elements,
DISTANCE,
True)
# change sign
distance_tools.ChangeSignToDistance()
# mark as visited all of the nodes inside the fluid domain
distance_tools.MarkInternalAndMixedNodes()
print(((self.model_part).Elements).Size())
# calculate distances towards the outside
if(domain_size == 2):
distance_calculator.CalculateDistances2D(
(self.model_part).Elements,
DISTANCE,
True)
else:
distance_calculator.CalculateDistances3D(
(self.model_part).Elements,
DISTANCE,
True)
# Decide IS_WATER flag due to DISTANCE
# for node in (self.model_part).Nodes:
# if(node.GetSolutionStepValue(DISTANCE)<= 0.0):
# node.SetSolutionStepValue(IS_WATER,0,0.0)
# else:
# node.SetSolutionStepValue(IS_WATER,0,1.0)
# if(node.GetSolutionStepValue(DISTANCE)== 0.0):
# print"This node has distance zero, is_interface is assigned"
# node.SetSolutionStepValue(IS_INTERFACE,0,1.0)
# node.SetSolutionStepValue(IS_VISITED,0,1.0)
# save as distance of the old time step
distance_tools.SaveScalarVariableToOldStep(DISTANCE)
print("finished RecalculateDistanceFunction")
# (self.SetDivided).SetDividedElem_2D()
print(">>>>>ELEMENTS ARE DIVIDED<<<<<<<<<<<<")
#
def DistToH(self):
possible_h = self.CalculateRadius()
print(possible_h)
min_H = possible_h * 3.14 / 200
# min_H = .0007#0.001
sec_min_H = 10 * min_H # .004
max_H = .02
ref_dist = 4 * min_H
sec_ref_dist = 20 * min_H
third_ref_dist = 200 * min_H
slope = (sec_min_H - min_H) / (sec_ref_dist - ref_dist)
second_slope = (max_H - sec_min_H) / (third_ref_dist - sec_ref_dist)
        # search for min and max of H
# for node in (self.model_part).Nodes:
# node_H = node.GetSolutionStepValue(NODAL_H,0)
# if(node_H<self.min_H):
# self.min_H = node_H
# else:
# if(node_H > self.max_H):
# self.max_H = node_H
# H = H + dist * dist
# print ">>>>>DISt TO H ASSIGNMENT<<<<<<<<<<<<"
for node in (self.model_part).Nodes:
current_dist = node.GetSolutionStepValue(DISTANCE, 0)
if(abs(current_dist) <= ref_dist):
node_H = min_H # + slope*abs(current_dist)
node.SetSolutionStepValue(NODAL_H, 0, node_H)
if(ref_dist < abs(current_dist) and abs(current_dist) <= sec_ref_dist):
node_H = min_H + slope * (abs(current_dist) - ref_dist)
node.SetSolutionStepValue(NODAL_H, 0, node_H)
if(sec_ref_dist < abs(current_dist) and abs(current_dist) <= third_ref_dist):
node_H = sec_min_H + second_slope * \
(abs(current_dist) - sec_ref_dist)
node.SetSolutionStepValue(NODAL_H, 0, node_H)
if(abs(current_dist) > third_ref_dist):
node_H = max_H
node.SetSolutionStepValue(NODAL_H, 0, node_H)
# assign new value
# node.SetSolutionStepValue(NODAL_H,0,node_H)
# NearboundaryH
(self.PfemUtils).AssignNearBoundaryH(self.model_part, 5.0)
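        # Illustrative numbers (assuming CalculateRadius() returned ~0.1):
        # min_H ~ 0.0016 within ref_dist of the interface, growing linearly to
        # sec_min_H ~ 0.016 at sec_ref_dist and then towards max_H = 0.02 far
        # away, i.e. the mesh is refined near the air/water interface and
        # coarsened away from it.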
#
def CalculateRadius(self):
max_radi = 0.0
for node in (self.model_part).Nodes:
if node.GetSolutionStepValue(IS_INTERFACE) == 1.0:
X_ref = node.X
Y_ref = node.Y
for node in (self.model_part).Nodes:
if node.GetSolutionStepValue(IS_INTERFACE) == 1.0:
radi = pow(node.X - X_ref, 2) + pow(node.Y - Y_ref, 2)
if(radi > max_radi):
max_radi = radi
max_radi = pow(max_radi, 0.5)
return max_radi
#
def AssignH(self):
for node in (self.model_part).Nodes:
if(node.GetSolutionStepValue(IS_INTERFACE) == 1.0):
node.SetSolutionStepValue(NODAL_H, 0, .03)
else:
node.SetSolutionStepValue(NODAL_H, 0, .1)
print(">>>>>HHHHHH ASSIGNMENT<<<<<<<<<<<<")
#
#
def ImplosionDistToH(self):
min_H = .0005
max_H = .05
ref_dist = .0025
tol = .001
slope = (max_H - min_H) / ref_dist
        # search for min and max of H
# for node in (self.model_part).Nodes:
# node_H = node.GetSolutionStepValue(NODAL_H,0)
# if(node_H<self.min_H):
# self.min_H = node_H
# else:
# if(node_H > self.max_H):
# self.max_H = node_H
# H = H + dist * dist
print(">>>>>DISt TO H ASSIGNMENT<<<<<<<<<<<<")
for node in (self.model_part).Nodes:
current_dist = node.GetSolutionStepValue(DISTANCE, 0)
if(current_dist > tol):
if(abs(current_dist) <= ref_dist):
node_H = min_H + slope * abs(current_dist)
else:
node_H = max_H
if(current_dist < -tol):
node_H = min_H
# assign new value
node.SetSolutionStepValue(NODAL_H, 0, node_H)
print(">>>>>DISt TO H ASSIGNMENT<<<<<<<<<<<<")
#
| 36.304636
| 160
| 0.631202
|
4e4eafe9018b3a5ed5dc51b08d419da24cc73636
| 765
|
py
|
Python
|
odps/mars_extension/dataframe/__init__.py
|
hekaisheng/aliyun-odps-python-sdk
|
a08f5a9f006487dd3443ebe000f363e9cbee6a80
|
[
"Apache-2.0"
] | null | null | null |
odps/mars_extension/dataframe/__init__.py
|
hekaisheng/aliyun-odps-python-sdk
|
a08f5a9f006487dd3443ebe000f363e9cbee6a80
|
[
"Apache-2.0"
] | null | null | null |
odps/mars_extension/dataframe/__init__.py
|
hekaisheng/aliyun-odps-python-sdk
|
a08f5a9f006487dd3443ebe000f363e9cbee6a80
|
[
"Apache-2.0"
] | 1
|
2017-06-27T08:18:29.000Z
|
2017-06-27T08:18:29.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .datasource import read_odps_table, DataFrameReadTable
from .datastore import write_odps_table, DataFrameWriteTable
| 38.25
| 74
| 0.76732
|
f4ad0af4262b460e5b1d7b516e11a19812a03318
| 754
|
py
|
Python
|
Tutorial_Kivy_Codemy/codemy_KivyMd_32_ButtonBar.py
|
LivioAlvarenga/Tutoriais_Kivy_KivyMD
|
b6225578e764eaf0312afafbb2f76dc06f92342d
|
[
"MIT"
] | null | null | null |
Tutorial_Kivy_Codemy/codemy_KivyMd_32_ButtonBar.py
|
LivioAlvarenga/Tutoriais_Kivy_KivyMD
|
b6225578e764eaf0312afafbb2f76dc06f92342d
|
[
"MIT"
] | null | null | null |
Tutorial_Kivy_Codemy/codemy_KivyMd_32_ButtonBar.py
|
LivioAlvarenga/Tutoriais_Kivy_KivyMD
|
b6225578e764eaf0312afafbb2f76dc06f92342d
|
[
"MIT"
] | null | null | null |
# https://www.youtube.com/watch?v=G-Rp41BzGxg&list=PLCC34OHNcOtpz7PJQ7Tv7hqFBP_xDDjqg&index=44
from kivymd.app import MDApp
from kivy.lang import Builder
class Codemy_Tutorial_App(MDApp):
def build(self):
self.theme_cls.theme_style = 'Dark'
self.theme_cls.primary_palette = 'BlueGray'
return Builder.load_file('codemy_KivyMd_32_ButtonBar.kv')
def presser(self):
self.root.ids.my_label.text = 'Botão toolbar pressionado!'
self.root.ids.top_toolbar.title = 'Botão toolbar pressionado!'
def presser1(self):
self.root.ids.my_label.text = 'Botão menu pressionado!'
self.root.ids.top_toolbar.title = 'Botão menu pressionado!'
if __name__ == '__main__':
Codemy_Tutorial_App().run()
| 31.416667
| 94
| 0.713528
|
35b09c9674eb99f74242c8fd482bf9436add9c2c
| 91
|
py
|
Python
|
cogdl/__init__.py
|
li-ziang/cogdl
|
60022d3334e3abae2d2a505e6e049a26acf10f39
|
[
"MIT"
] | 1
|
2020-06-17T08:47:41.000Z
|
2020-06-17T08:47:41.000Z
|
cogdl/__init__.py
|
li-ziang/cogdl
|
60022d3334e3abae2d2a505e6e049a26acf10f39
|
[
"MIT"
] | null | null | null |
cogdl/__init__.py
|
li-ziang/cogdl
|
60022d3334e3abae2d2a505e6e049a26acf10f39
|
[
"MIT"
] | 1
|
2020-05-19T11:45:45.000Z
|
2020-05-19T11:45:45.000Z
|
__version__ = "0.5.2"
from .experiments import experiment
from .pipelines import pipeline
| 18.2
| 35
| 0.791209
|
6523a7871aca73b34087adb1e132e0b9292ddd67
| 18,318
|
py
|
Python
|
utils/reconstruct.py
|
hengwei-chan/3D_SBDD
|
eda6d51aaf01ef25581a46920a25161678fab76d
|
[
"MIT"
] | 67
|
2021-12-02T05:53:44.000Z
|
2022-03-31T07:21:26.000Z
|
utils/reconstruct.py
|
hengwei-chan/3D_SBDD
|
eda6d51aaf01ef25581a46920a25161678fab76d
|
[
"MIT"
] | 13
|
2021-12-05T14:23:46.000Z
|
2022-03-25T21:07:20.000Z
|
utils/reconstruct.py
|
hengwei-chan/3D_SBDD
|
eda6d51aaf01ef25581a46920a25161678fab76d
|
[
"MIT"
] | 16
|
2022-01-11T11:48:24.000Z
|
2022-03-27T19:20:58.000Z
|
"""
https://github.com/mattragoza/liGAN/blob/master/fitting.py
License: GNU General Public License v2.0
https://github.com/mattragoza/liGAN/blob/master/LICENSE
"""
import numpy as np
from rdkit.Chem import AllChem as Chem
from rdkit import Geometry
from openbabel import openbabel as ob
from openbabel import pybel
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from .protein_ligand import ATOM_FAMILIES_ID
class MolReconsError(Exception):
pass
def reachable_r(a,b, seenbonds):
'''Recursive helper.'''
for nbr in ob.OBAtomAtomIter(a):
bond = a.GetBond(nbr).GetIdx()
if bond not in seenbonds:
seenbonds.add(bond)
if nbr == b:
return True
elif reachable_r(nbr,b,seenbonds):
return True
return False
def reachable(a,b):
'''Return true if atom b is reachable from a without using the bond between them.'''
if a.GetExplicitDegree() == 1 or b.GetExplicitDegree() == 1:
return False #this is the _only_ bond for one atom
#otherwise do recursive traversal
seenbonds = set([a.GetBond(b).GetIdx()])
return reachable_r(a,b,seenbonds)
def forms_small_angle(a,b,cutoff=45):
'''Return true if bond between a and b is part of a small angle
with a neighbor of a only.'''
for nbr in ob.OBAtomAtomIter(a):
if nbr != b:
degrees = b.GetAngle(a,nbr)
if degrees < cutoff:
return True
return False
def make_obmol(xyz, atomic_numbers):
mol = ob.OBMol()
mol.BeginModify()
atoms = []
for xyz,t in zip(xyz, atomic_numbers):
x,y,z = xyz
# ch = struct.channels[t]
atom = mol.NewAtom()
atom.SetAtomicNum(t)
atom.SetVector(x,y,z)
atoms.append(atom)
return mol, atoms
def connect_the_dots(mol, atoms, indicators, maxbond=4):
'''Custom implementation of ConnectTheDots. This is similar to
OpenBabel's version, but is more willing to make long bonds
(up to maxbond long) to keep the molecule connected. It also
attempts to respect atom type information from struct.
atoms and struct need to correspond in their order
Assumes no hydrogens or existing bonds.
'''
pt = Chem.GetPeriodicTable()
if len(atoms) == 0:
return
mol.BeginModify()
#just going to to do n^2 comparisons, can worry about efficiency later
coords = np.array([(a.GetX(),a.GetY(),a.GetZ()) for a in atoms])
dists = squareform(pdist(coords))
# types = [struct.channels[t].name for t in struct.c]
for (i,a) in enumerate(atoms):
for (j,b) in enumerate(atoms):
if a == b:
break
if dists[i,j] < 0.01: #reduce from 0.4
continue #don't bond too close atoms
if dists[i,j] < maxbond:
flag = 0
if indicators[i][ATOM_FAMILIES_ID['Aromatic']] and indicators[j][ATOM_FAMILIES_ID['Aromatic']]:
# print('Aromatic', ATOM_FAMILIES_ID['Aromatic'], indicators[i])
flag = ob.OB_AROMATIC_BOND
# if 'Aromatic' in types[i] and 'Aromatic' in types[j]:
# flag = ob.OB_AROMATIC_BOND
mol.AddBond(a.GetIdx(),b.GetIdx(),1,flag)
atom_maxb = {}
for (i,a) in enumerate(atoms):
#set max valance to the smallest max allowed by openbabel or rdkit
#since we want the molecule to be valid for both (rdkit is usually lower)
maxb = ob.GetMaxBonds(a.GetAtomicNum())
maxb = min(maxb,pt.GetDefaultValence(a.GetAtomicNum()))
if a.GetAtomicNum() == 16: # sulfone check
if count_nbrs_of_elem(a, 8) >= 2:
maxb = 6
# if indicators[i][ATOM_FAMILIES_ID['Donor']]:
# maxb -= 1 #leave room for hydrogen
# if 'Donor' in types[i]:
# maxb -= 1 #leave room for hydrogen
atom_maxb[a.GetIdx()] = maxb
#remove any impossible bonds between halogens
for bond in ob.OBMolBondIter(mol):
a1 = bond.GetBeginAtom()
a2 = bond.GetEndAtom()
if atom_maxb[a1.GetIdx()] == 1 and atom_maxb[a2.GetIdx()] == 1:
mol.DeleteBond(bond)
def get_bond_info(biter):
'''Return bonds sorted by their distortion'''
bonds = [b for b in biter]
binfo = []
for bond in bonds:
bdist = bond.GetLength()
#compute how far away from optimal we are
a1 = bond.GetBeginAtom()
a2 = bond.GetEndAtom()
ideal = ob.GetCovalentRad(a1.GetAtomicNum()) + ob.GetCovalentRad(a2.GetAtomicNum())
stretch = bdist-ideal
binfo.append((stretch,bdist,bond))
binfo.sort(reverse=True, key=lambda t: t[:2]) #most stretched bonds first
return binfo
#prioritize removing hypervalency causing bonds, do more valent
#constrained atoms first since their bonds introduce the most problems
#with reachability (e.g. oxygen)
# hypers = sorted([(atom_maxb[a.GetIdx()],a.GetExplicitValence() - atom_maxb[a.GetIdx()], a) for a in atoms],key=lambda aa: (aa[0],-aa[1]))
# for mb,diff,a in hypers:
# if a.GetExplicitValence() <= atom_maxb[a.GetIdx()]:
# continue
# binfo = get_bond_info(ob.OBAtomBondIter(a))
# for stretch,bdist,bond in binfo:
# #can we remove this bond without disconnecting the molecule?
# a1 = bond.GetBeginAtom()
# a2 = bond.GetEndAtom()
# #get right valence
# if a1.GetExplicitValence() > atom_maxb[a1.GetIdx()] or \
# a2.GetExplicitValence() > atom_maxb[a2.GetIdx()]:
# #don't fragment the molecule
# if not reachable(a1,a2):
# continue
# mol.DeleteBond(bond)
# if a.GetExplicitValence() <= atom_maxb[a.GetIdx()]:
# break #let nbr atoms choose what bonds to throw out
binfo = get_bond_info(ob.OBMolBondIter(mol))
#now eliminate geometrically poor bonds
for stretch,bdist,bond in binfo:
#can we remove this bond without disconnecting the molecule?
a1 = bond.GetBeginAtom()
a2 = bond.GetEndAtom()
#as long as we aren't disconnecting, let's remove things
#that are excessively far away (0.45 from ConnectTheDots)
#get bonds to be less than max allowed
#also remove tight angles, because that is what ConnectTheDots does
if stretch > 0.45 or forms_small_angle(a1,a2) or forms_small_angle(a2,a1):
#don't fragment the molecule
if not reachable(a1,a2):
continue
mol.DeleteBond(bond)
mol.EndModify()
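# Illustrative sketch (hypothetical coordinates and feature flags; not part of
# the original module). `indicators` is one list of ATOM_FAMILIES_ID booleans
# per atom, as produced in reconstruct_from_generated below:
#   xyz = [(0.0, 0.0, 0.0), (1.4, 0.0, 0.0)]
#   atomic_numbers = [6, 8]
#   indicators = [[False] * len(ATOM_FAMILIES_ID) for _ in atomic_numbers]
#   mol, atoms = make_obmol(xyz, atomic_numbers)
#   connect_the_dots(mol, atoms, indicators, maxbond=4)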
def convert_ob_mol_to_rd_mol(ob_mol,struct=None):
'''Convert OBMol to RDKit mol, fixing up issues'''
ob_mol.DeleteHydrogens()
n_atoms = ob_mol.NumAtoms()
rd_mol = Chem.RWMol()
rd_conf = Chem.Conformer(n_atoms)
for ob_atom in ob.OBMolAtomIter(ob_mol):
rd_atom = Chem.Atom(ob_atom.GetAtomicNum())
#TODO copy format charge
if ob_atom.IsAromatic() and ob_atom.IsInRing() and ob_atom.MemberOfRingSize() <= 6:
#don't commit to being aromatic unless rdkit will be okay with the ring status
#(this can happen if the atoms aren't fit well enough)
rd_atom.SetIsAromatic(True)
i = rd_mol.AddAtom(rd_atom)
ob_coords = ob_atom.GetVector()
x = ob_coords.GetX()
y = ob_coords.GetY()
z = ob_coords.GetZ()
rd_coords = Geometry.Point3D(x, y, z)
rd_conf.SetAtomPosition(i, rd_coords)
rd_mol.AddConformer(rd_conf)
for ob_bond in ob.OBMolBondIter(ob_mol):
i = ob_bond.GetBeginAtomIdx()-1
j = ob_bond.GetEndAtomIdx()-1
bond_order = ob_bond.GetBondOrder()
if bond_order == 1:
rd_mol.AddBond(i, j, Chem.BondType.SINGLE)
elif bond_order == 2:
rd_mol.AddBond(i, j, Chem.BondType.DOUBLE)
elif bond_order == 3:
rd_mol.AddBond(i, j, Chem.BondType.TRIPLE)
else:
raise Exception('unknown bond order {}'.format(bond_order))
if ob_bond.IsAromatic():
            bond = rd_mol.GetBondBetweenAtoms(i, j)
bond.SetIsAromatic(True)
rd_mol = Chem.RemoveHs(rd_mol, sanitize=False)
pt = Chem.GetPeriodicTable()
#if double/triple bonds are connected to hypervalent atoms, decrement the order
positions = rd_mol.GetConformer().GetPositions()
nonsingles = []
for bond in rd_mol.GetBonds():
if bond.GetBondType() == Chem.BondType.DOUBLE or bond.GetBondType() == Chem.BondType.TRIPLE:
i = bond.GetBeginAtomIdx()
j = bond.GetEndAtomIdx()
dist = np.linalg.norm(positions[i]-positions[j])
nonsingles.append((dist,bond))
nonsingles.sort(reverse=True, key=lambda t: t[0])
for (d,bond) in nonsingles:
a1 = bond.GetBeginAtom()
a2 = bond.GetEndAtom()
if calc_valence(a1) > pt.GetDefaultValence(a1.GetAtomicNum()) or \
calc_valence(a2) > pt.GetDefaultValence(a2.GetAtomicNum()):
btype = Chem.BondType.SINGLE
if bond.GetBondType() == Chem.BondType.TRIPLE:
btype = Chem.BondType.DOUBLE
bond.SetBondType(btype)
for atom in rd_mol.GetAtoms():
#set nitrogens with 4 neighbors to have a charge
if atom.GetAtomicNum() == 7 and atom.GetDegree() == 4:
atom.SetFormalCharge(1)
rd_mol = Chem.AddHs(rd_mol,addCoords=True)
positions = rd_mol.GetConformer().GetPositions()
center = np.mean(positions[np.all(np.isfinite(positions),axis=1)],axis=0)
for atom in rd_mol.GetAtoms():
i = atom.GetIdx()
pos = positions[i]
if not np.all(np.isfinite(pos)):
#hydrogens on C fragment get set to nan (shouldn't, but they do)
rd_mol.GetConformer().SetAtomPosition(i,center)
try:
Chem.SanitizeMol(rd_mol,Chem.SANITIZE_ALL^Chem.SANITIZE_KEKULIZE)
except:
raise MolReconsError()
# try:
# Chem.SanitizeMol(rd_mol,Chem.SANITIZE_ALL^Chem.SANITIZE_KEKULIZE)
# except: # mtr22 - don't assume mols will pass this
# pass
# # dkoes - but we want to make failures as rare as possible and should debug them
# m = pybel.Molecule(ob_mol)
# i = np.random.randint(1000000)
# outname = 'bad%d.sdf'%i
# print("WRITING",outname)
# m.write('sdf',outname,overwrite=True)
# pickle.dump(struct,open('bad%d.pkl'%i,'wb'))
#but at some point stop trying to enforce our aromaticity -
#openbabel and rdkit have different aromaticity models so they
#won't always agree. Remove any aromatic bonds to non-aromatic atoms
for bond in rd_mol.GetBonds():
a1 = bond.GetBeginAtom()
a2 = bond.GetEndAtom()
if bond.GetIsAromatic():
if not a1.GetIsAromatic() or not a2.GetIsAromatic():
bond.SetIsAromatic(False)
elif a1.GetIsAromatic() and a2.GetIsAromatic():
bond.SetIsAromatic(True)
return rd_mol
def calc_valence(rdatom):
'''Can call GetExplicitValence before sanitize, but need to
know this to fix up the molecule to prevent sanitization failures'''
cnt = 0.0
for bond in rdatom.GetBonds():
cnt += bond.GetBondTypeAsDouble()
return cnt
def count_nbrs_of_elem(atom, atomic_num):
'''
Count the number of neighbors atoms
of atom with the given atomic_num.
'''
count = 0
for nbr in ob.OBAtomAtomIter(atom):
if nbr.GetAtomicNum() == atomic_num:
count += 1
return count
def fixup(atoms, mol, indicators):
'''Set atom properties to match channel. Keep doing this
to beat openbabel over the head with what we want to happen.'''
mol.SetAromaticPerceived(True) #avoid perception
for i, atom in enumerate(atoms):
# ch = struct.channels[t]
ind = indicators[i]
if ind[ATOM_FAMILIES_ID['Aromatic']]:
atom.SetAromatic(True)
atom.SetHyb(2)
# if ind[ATOM_FAMILIES_ID['Donor']]:
# if atom.GetExplicitDegree() == atom.GetHvyDegree():
# if atom.GetHvyDegree() == 1 and atom.GetAtomicNum() == 7:
# atom.SetImplicitHCount(2)
# else:
# atom.SetImplicitHCount(1)
# elif ind[ATOM_FAMILIES_ID['Acceptor']]: # NOT AcceptorDonor because of else
# atom.SetImplicitHCount(0)
if (atom.GetAtomicNum() in (7, 8)) and atom.IsInRing(): # Nitrogen, Oxygen
            #this is a little iffy, omitting until there is more evidence it is a net positive
#we don't have aromatic types for nitrogen, but if it
#is in a ring with aromatic carbon mark it aromatic as well
acnt = 0
for nbr in ob.OBAtomAtomIter(atom):
if nbr.IsAromatic():
acnt += 1
if acnt > 1:
atom.SetAromatic(True)
def raw_obmol_from_generated(data):
xyz = data.ligand_context_pos.clone().cpu().tolist()
atomic_nums = data.ligand_context_element.clone().cpu().tolist()
# indicators = data.ligand_context_feature_full[:, -len(ATOM_FAMILIES_ID):].clone().cpu().bool().tolist()
mol, atoms = make_obmol(xyz, atomic_nums)
return mol, atoms
UPGRADE_BOND_ORDER = {Chem.BondType.SINGLE:Chem.BondType.DOUBLE, Chem.BondType.DOUBLE:Chem.BondType.TRIPLE}
def postprocess_rd_mol_1(rdmol):
rdmol = Chem.RemoveHs(rdmol)
# Construct bond nbh list
nbh_list = {}
for bond in rdmol.GetBonds():
begin, end = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()
if begin not in nbh_list: nbh_list[begin] = [end]
else: nbh_list[begin].append(end)
if end not in nbh_list: nbh_list[end] = [begin]
else: nbh_list[end].append(begin)
# Fix missing bond-order
for atom in rdmol.GetAtoms():
idx = atom.GetIdx()
num_radical = atom.GetNumRadicalElectrons()
if num_radical > 0:
for j in nbh_list[idx]:
if j <= idx: continue
nb_atom = rdmol.GetAtomWithIdx(j)
nb_radical = nb_atom.GetNumRadicalElectrons()
if nb_radical > 0:
bond = rdmol.GetBondBetweenAtoms(idx, j)
bond.SetBondType(UPGRADE_BOND_ORDER[bond.GetBondType()])
nb_atom.SetNumRadicalElectrons(nb_radical - 1)
num_radical -= 1
atom.SetNumRadicalElectrons(num_radical)
num_radical = atom.GetNumRadicalElectrons()
if num_radical > 0:
atom.SetNumRadicalElectrons(0)
num_hs = atom.GetNumExplicitHs()
atom.SetNumExplicitHs(num_hs + num_radical)
return rdmol
def postprocess_rd_mol_2(rdmol):
rdmol_edit = Chem.RWMol(rdmol)
ring_info = rdmol.GetRingInfo()
ring_info.AtomRings()
rings = [set(r) for r in ring_info.AtomRings()]
for i, ring_a in enumerate(rings):
if len(ring_a) == 3:
non_carbon = []
atom_by_symb = {}
for atom_idx in ring_a:
symb = rdmol.GetAtomWithIdx(atom_idx).GetSymbol()
if symb != 'C':
non_carbon.append(atom_idx)
if symb not in atom_by_symb:
atom_by_symb[symb] = [atom_idx]
else:
atom_by_symb[symb].append(atom_idx)
if len(non_carbon) == 2:
rdmol_edit.RemoveBond(*non_carbon)
if 'O' in atom_by_symb and len(atom_by_symb['O']) == 2:
rdmol_edit.RemoveBond(*atom_by_symb['O'])
rdmol_edit.GetAtomWithIdx(atom_by_symb['O'][0]).SetNumExplicitHs(
rdmol_edit.GetAtomWithIdx(atom_by_symb['O'][0]).GetNumExplicitHs() + 1
)
rdmol_edit.GetAtomWithIdx(atom_by_symb['O'][1]).SetNumExplicitHs(
rdmol_edit.GetAtomWithIdx(atom_by_symb['O'][1]).GetNumExplicitHs() + 1
)
rdmol = rdmol_edit.GetMol()
for atom in rdmol.GetAtoms():
if atom.GetFormalCharge() > 0:
atom.SetFormalCharge(0)
return rdmol
def reconstruct_from_generated(data):
xyz = data.ligand_context_pos.clone().cpu().tolist()
atomic_nums = data.ligand_context_element.clone().cpu().tolist()
indicators = data.ligand_context_feature_full[:, -len(ATOM_FAMILIES_ID):].clone().cpu().bool().tolist()
mol, atoms = make_obmol(xyz, atomic_nums)
fixup(atoms, mol, indicators)
connect_the_dots(mol, atoms, indicators, 2)
fixup(atoms, mol, indicators)
mol.EndModify()
fixup(atoms, mol, indicators)
mol.AddPolarHydrogens()
mol.PerceiveBondOrders()
fixup(atoms, mol, indicators)
for (i,a) in enumerate(atoms):
ob.OBAtomAssignTypicalImplicitHydrogens(a)
fixup(atoms, mol, indicators)
mol.AddHydrogens()
fixup(atoms, mol, indicators)
#make rings all aromatic if majority of carbons are aromatic
for ring in ob.OBMolRingIter(mol):
if 5 <= ring.Size() <= 6:
carbon_cnt = 0
aromatic_ccnt = 0
for ai in ring._path:
a = mol.GetAtom(ai)
if a.GetAtomicNum() == 6:
carbon_cnt += 1
if a.IsAromatic():
aromatic_ccnt += 1
if aromatic_ccnt >= carbon_cnt/2 and aromatic_ccnt != ring.Size():
#set all ring atoms to be aromatic
for ai in ring._path:
a = mol.GetAtom(ai)
a.SetAromatic(True)
#bonds must be marked aromatic for smiles to match
for bond in ob.OBMolBondIter(mol):
a1 = bond.GetBeginAtom()
a2 = bond.GetEndAtom()
if a1.IsAromatic() and a2.IsAromatic():
bond.SetAromatic(True)
mol.PerceiveBondOrders()
rd_mol = convert_ob_mol_to_rd_mol(mol)
# Post-processing
rd_mol = postprocess_rd_mol_1(rd_mol)
rd_mol = postprocess_rd_mol_2(rd_mol)
return rd_mol
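# Illustrative usage sketch (assumes `data` is a generated sample object that
# carries the torch tensors referenced above, e.g. ligand_context_pos and
# ligand_context_feature_full; the attribute names come from this module, the
# object itself is hypothetical):
#   rd_mol = reconstruct_from_generated(data)
#   smiles = Chem.MolToSmiles(rd_mol)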
| 36.273267
| 143
| 0.609455
|
57206868362f89b168d73742319f64e0c10709a8
| 2,516
|
py
|
Python
|
tests/test_binary.py
|
tchaye59/torchutils
|
ca7b01bf63b6c3adaa36a4a66dfd87e927ef2460
|
[
"MIT"
] | null | null | null |
tests/test_binary.py
|
tchaye59/torchutils
|
ca7b01bf63b6c3adaa36a4a66dfd87e927ef2460
|
[
"MIT"
] | null | null | null |
tests/test_binary.py
|
tchaye59/torchutils
|
ca7b01bf63b6c3adaa36a4a66dfd87e927ef2460
|
[
"MIT"
] | null | null | null |
import os
import pandas as pd
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchmetrics as tm
from torch.utils.data import random_split
from torch.utils.data.dataloader import DataLoader
from torchvision import transforms as T
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor
from torchutils.losses import binary_cross_entropy_weighted_focal_loss
from torchutils.metrics import accuracy, binary_accuracy
from torchutils.models import BaseModel
dataset = MNIST(root='data', download=True, transform=ToTensor(),
target_transform=T.Lambda(lambda y: torch.tensor([int(y == 8), ])), )
val_size = 10000
train_size = len(dataset) - val_size
train_ds, val_ds = random_split(dataset, [train_size, val_size])
len(train_ds), len(val_ds)
batch_size = 128
train_loader = DataLoader(train_ds, batch_size, shuffle=True, num_workers=0, pin_memory=True)
val_loader = DataLoader(val_ds, batch_size * 2, num_workers=0, pin_memory=True)
for x, y in train_loader:
break
class MnistModel(BaseModel):
"""Feedfoward neural network with 1 hidden layer"""
def __init__(self, in_size, hidden_size, out_size):
super().__init__()
# hidden layer
self.linear1 = nn.Linear(in_size, hidden_size)
# output layer
self.linear2 = nn.Linear(hidden_size, out_size)
def forward(self, xb):
# Flatten the image tensors
xb = xb.view(xb.size(0), -1)
# Get intermediate outputs using hidden layer
out = self.linear1(xb)
# Apply activation function
out = F.relu(out)
# Get predictions using output layer
out = self.linear2(out)
return torch.sigmoid(out)
input_size = 784
hidden_size = 32
num_classes = 1
model = MnistModel(input_size, hidden_size, num_classes)
optim = torch.optim.Adam(model.parameters(), 0.0001)
callbacks = [
]
metrics = {
"acc": tm.Accuracy(),
'precision': tm.Precision(),
'recall': tm.Recall(),
'f1': tm.F1(),
# 'ss': tm.StatScores(),
}
model.compile(loss=binary_cross_entropy_weighted_focal_loss,
optimizer=optim,
metrics=metrics)
trainer = pl.Trainer(logger=False, max_epochs=5, callbacks=callbacks)
trainer.fit(model, train_loader, val_loader)
print(model.get_history())
df = pd.DataFrame(model.get_history())
df.to_csv('pretrained.csv', index=False)
# test (pass in the loader)
# trainer.test(model=model, dataloaders=val_loader)
| 27.955556
| 93
| 0.715024
|
a0242e302235693d21bc12dfab659059bbaad25b
| 3,021
|
py
|
Python
|
boardencoder/snapshotencoder.py
|
luxunxiansheng/DRLGP
|
85b08186fbf189b625dcfce2b5c3bf6c3f428bbe
|
[
"MIT"
] | null | null | null |
boardencoder/snapshotencoder.py
|
luxunxiansheng/DRLGP
|
85b08186fbf189b625dcfce2b5c3bf6c3f428bbe
|
[
"MIT"
] | null | null | null |
boardencoder/snapshotencoder.py
|
luxunxiansheng/DRLGP
|
85b08186fbf189b625dcfce2b5c3bf6c3f428bbe
|
[
"MIT"
] | 1
|
2020-08-05T01:39:38.000Z
|
2020-08-05T01:39:38.000Z
|
# #### BEGIN LICENSE BLOCK #####
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
#
# Contributor(s):
#
# Bin.Li (ornot2008@yahoo.com)
#
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# #### END LICENSE BLOCK #####
#
# /
import numpy as np
from common.encoder import Encoder
from common.point import Point
class SnapshotEncoder(Encoder):
def __init__(self, num_plane, board_size):
self._board_size = board_size
self._board_width = board_size
self._board_height = board_size
self._num_plane = num_plane
def name(self):
return 'SnapshotEncoder'
@property
def num_plane(self):
return self._num_plane
@property
def board_width(self):
return self._board_width
@property
def board_height(self):
return self._board_height
def encode(self, boards, player_in_action, previous_move=None):
board_matrix = np.zeros(self.shape(), dtype=int)
for plane in range(len(boards)):
for row in range(self._board_height):
for col in range(self._board_width):
point = Point(row+1, col+1)
piece = boards[plane].get_piece_at_point(point)
if piece.owner_id != -1:
board_matrix[plane, row, col] = piece.owner_id
return board_matrix
def shape(self):
return self._num_plane, self._board_height, self._board_width
def encode_point(self, point):
return self._board_width*(point.row-1)+(point.col-1)
def decode_point_index(self, index):
row = index // self._board_width
col = index % self._board_width
return Point(row=row+1, col=col+1)
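    # Worked example (illustrative): on a 9x9 board, Point(row=2, col=3)
    # encodes to 9*(2-1) + (3-1) = 11, and decode_point_index(11) maps back
    # to Point(row=2, col=3).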
def num_points(self):
return self._board_width*self._board_height
| 34.724138
| 77
| 0.684211
|
8800820d9dccf0330e6e37ad2382b038d148d339
| 861
|
py
|
Python
|
problem_39.py
|
alfonsokim/project-euler
|
cdc5a271c22f3ad78681ac920f2d9be6e75cdbc5
|
[
"Unlicense"
] | null | null | null |
problem_39.py
|
alfonsokim/project-euler
|
cdc5a271c22f3ad78681ac920f2d9be6e75cdbc5
|
[
"Unlicense"
] | null | null | null |
problem_39.py
|
alfonsokim/project-euler
|
cdc5a271c22f3ad78681ac920f2d9be6e75cdbc5
|
[
"Unlicense"
] | null | null | null |
import itertools
from collections import defaultdict
# =======================================================================================
def next_triangle(max_perimeter):
for a, b in itertools.product(range(1, max_perimeter), range(1, max_perimeter)):
c = ((a*a) + (b*b)) ** 0.5
if c.is_integer() and (a + b + c) <= max_perimeter:
yield a, b, int(c)
# =======================================================================================
def solve():
solutions = defaultdict(list)
for sides in next_triangle(1000):
solutions[sum(sides)].append(sides)
max_solutions = sorted(solutions.items(), key=lambda e: len(e[1]) * -1)[0]
return max_solutions[0]
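# Example (from the problem statement): p = 120 admits exactly three right
# triangles with integral sides, {20,48,52}, {24,45,51} and {30,40,50};
# solve() returns the p <= 1000 with the largest number of such solutions.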
# =======================================================================================
if __name__ == '__main__':
    print(solve())
| 37.434783
| 89
| 0.432056
|
adbdddeda45cdc228058a4b3cb55b954ed8d7051
| 4,670
|
py
|
Python
|
tests/garage/sampler/test_off_policy_vectorized_sampler_integration.py
|
bainro/garage
|
c5afbb19524792d9bbad9b9741f45e1d48ddca3d
|
[
"MIT"
] | null | null | null |
tests/garage/sampler/test_off_policy_vectorized_sampler_integration.py
|
bainro/garage
|
c5afbb19524792d9bbad9b9741f45e1d48ddca3d
|
[
"MIT"
] | null | null | null |
tests/garage/sampler/test_off_policy_vectorized_sampler_integration.py
|
bainro/garage
|
c5afbb19524792d9bbad9b9741f45e1d48ddca3d
|
[
"MIT"
] | null | null | null |
import gym
import numpy as np
import pytest
import tensorflow as tf
from garage.envs import normalize
from garage.np.exploration_strategies import OUStrategy
from garage.replay_buffer import SimpleReplayBuffer
from garage.sampler import OffPolicyVectorizedSampler
from garage.tf.algos import DDPG
from garage.tf.envs import TfEnv
from garage.tf.experiment import LocalTFRunner
from garage.tf.policies import ContinuousMLPPolicy
from garage.tf.q_functions import ContinuousMLPQFunction
from tests.fixtures import snapshot_config, TfGraphTestCase
from tests.fixtures.envs.dummy import DummyDictEnv
from tests.fixtures.policies import DummyPolicy
from tests.fixtures.tf.algos.dummy_off_policy_algo import DummyOffPolicyAlgo
class TestOffPolicyVectorizedSampler(TfGraphTestCase):
@pytest.mark.mujoco
def test_no_reset(self):
with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
# This tests if off-policy sampler respect batch_size
# when no_reset is set to True
env = TfEnv(normalize(gym.make('InvertedDoublePendulum-v2')))
action_noise = OUStrategy(env.spec, sigma=0.2)
policy = ContinuousMLPPolicy(env_spec=env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=tf.nn.relu,
output_nonlinearity=tf.nn.tanh)
qf = ContinuousMLPQFunction(env_spec=env.spec,
hidden_sizes=[64, 64],
hidden_nonlinearity=tf.nn.relu)
replay_buffer = SimpleReplayBuffer(env_spec=env.spec,
size_in_transitions=int(1e6),
time_horizon=100)
algo = DDPG(
env_spec=env.spec,
policy=policy,
policy_lr=1e-4,
qf_lr=1e-3,
qf=qf,
replay_buffer=replay_buffer,
target_update_tau=1e-2,
n_train_steps=50,
discount=0.9,
min_buffer_size=int(1e4),
exploration_strategy=action_noise,
)
sampler = OffPolicyVectorizedSampler(algo, env, 1, no_reset=True)
sampler.start_worker()
runner.initialize_tf_vars()
paths1 = sampler.obtain_samples(0, 5)
paths2 = sampler.obtain_samples(0, 5)
len1 = sum([len(path['rewards']) for path in paths1])
len2 = sum([len(path['rewards']) for path in paths2])
assert len1 == 5 and len2 == 5, 'Sampler should respect batch_size'
# yapf: disable
# When done is False in 1st sampling, the next sampling should be
# stacked with the last batch in 1st sampling
case1 = (len(paths1[-1]['rewards']) + len(paths2[0]['rewards'])
== paths2[0]['running_length'])
# When done is True in 1st sampling, the next sampling should be
# separated
case2 = len(paths2[0]['rewards']) == paths2[0]['running_length']
done = paths1[-1]['dones'][-1]
assert (
(not done and case1) or (done and case2)
), 'Running length should be the length of full path'
# yapf: enable
case1 = np.isclose(
paths1[-1]['rewards'].sum() + paths2[0]['rewards'].sum(),
paths2[0]['undiscounted_return'])
case2 = np.isclose(paths2[0]['rewards'].sum(),
paths2[0]['undiscounted_return'])
assert (
(not done and case1) or (done and case2)
), 'Undiscounted_return should be the sum of rewards of full path'
def test_algo_with_goal_without_es(self):
        # This tests if the sampler works properly when the algorithm
        # includes a goal but has no exploration policy
env = DummyDictEnv()
policy = DummyPolicy(env)
replay_buffer = SimpleReplayBuffer(env_spec=env,
size_in_transitions=int(1e6),
time_horizon=100)
algo = DummyOffPolicyAlgo(env_spec=env,
qf=None,
replay_buffer=replay_buffer,
policy=policy,
exploration_strategy=None)
sampler = OffPolicyVectorizedSampler(algo, env, 1, no_reset=True)
sampler.start_worker()
sampler.obtain_samples(0, 30)
| 44.056604
| 79
| 0.571949
|
502d43fea9653e2fc0be16d73a85ac9c685f9873
| 334
|
py
|
Python
|
models/backbone/__init__.py
|
killf/FarSeg
|
a696576bfe76ad4b2c5fea842830ae2e60e0b867
|
[
"MIT"
] | 7
|
2020-10-22T08:27:12.000Z
|
2021-11-14T15:27:18.000Z
|
models/backbone/__init__.py
|
killf/FarSeg
|
a696576bfe76ad4b2c5fea842830ae2e60e0b867
|
[
"MIT"
] | 1
|
2020-10-29T02:13:04.000Z
|
2020-10-29T13:27:58.000Z
|
models/backbone/__init__.py
|
killf/FarSeg
|
a696576bfe76ad4b2c5fea842830ae2e60e0b867
|
[
"MIT"
] | 1
|
2021-05-05T05:32:28.000Z
|
2021-05-05T05:32:28.000Z
|
from .resnet import *
BACKBONES = {
"ResNet18": resnet18,
"ResNet34": resnet34,
"ResNet50": resnet50,
"ResNet101": resnet101,
"ResNet152": resnet152,
"ResNext50_32x4d": resnext50_32x4d,
"ResNeXt101_32x8d": resnext101_32x8d,
"WideResNet50_2": wide_resnet50_2,
"WideResNet101_2": wide_resnet101_2
}
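# --- illustrative usage, not part of the original file ---
# A minimal sketch of how this registry might be consumed. The keyword
# arguments forwarded to the factory (e.g. `pretrained`) are an assumption
# about the resnet constructors re-exported above and may differ in the real
# `.resnet` module.
def build_backbone(name, **kwargs):
    if name not in BACKBONES:
        raise KeyError(f"Unknown backbone '{name}', choose from {sorted(BACKBONES)}")
    return BACKBONES[name](**kwargs)
# Example: backbone = build_backbone("ResNet50", pretrained=False)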
| 23.857143
| 41
| 0.691617
|
41433f3c17fb6ab214e7490d9731bffbd3df1648
| 21,010
|
py
|
Python
|
test/functional/tests/cli/test_cli_standby.py
|
kmajzero/open-cas-linux
|
9d7afc467494cc6a929c00c1b938d9894e96ec8b
|
[
"BSD-3-Clause"
] | null | null | null |
test/functional/tests/cli/test_cli_standby.py
|
kmajzero/open-cas-linux
|
9d7afc467494cc6a929c00c1b938d9894e96ec8b
|
[
"BSD-3-Clause"
] | null | null | null |
test/functional/tests/cli/test_cli_standby.py
|
kmajzero/open-cas-linux
|
9d7afc467494cc6a929c00c1b938d9894e96ec8b
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Copyright(c) 2019-2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import pytest
from api.cas import casadm, casadm_parser, dmesg
from api.cas.casadm import standby_init
from api.cas.cli import casadm_bin
from core.test_run import TestRun
from storage_devices.device import Device
from storage_devices.disk import DiskType, DiskTypeSet
from test_tools.dd import Dd
from test_utils.filesystem.file import File
from test_utils.os_utils import sync
from test_utils.output import CmdException
from test_utils.size import Size, Unit
from api.cas.cli_messages import (
check_stderr_msg,
missing_param,
disallowed_param,
operation_forbiden_in_standby,
mutually_exclusive_params_init,
mutually_exclusive_params_load,
activate_without_detach,
cache_line_size_mismatch,
)
from api.cas.cache_config import CacheLineSize, CacheStatus
from api.cas import cli
from api.cas.ioclass_config import IoClass
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
def test_standby_neg_cli_params():
"""
title: Verifying parameters for starting a standby cache instance
description: |
Try executing the standby init command with required arguments missing or
disallowed arguments present.
pass_criteria:
- The execution is unsuccessful for all improper argument combinations
- A proper error message is displayed for unsuccessful executions
"""
with TestRun.step("Prepare the device for the cache."):
cache_device = TestRun.disks["cache"]
cache_device.create_partitions([Size(500, Unit.MebiByte)])
cache_device = cache_device.partitions[0]
with TestRun.step("Prepare config for testing standby init without required params"):
init_required_params = dict(
[("--cache-device", cache_device.path), ("--cache-id", 5), ("--cache-line-size", 32)]
)
# Prepare full valid `standby init` command
valid_cmd = casadm_bin + " --standby --init"
for name, value in init_required_params.items():
valid_cmd += f" {name} {value}"
# Try to initialize standby instance with one missing param at the time
for name, value in init_required_params.items():
with TestRun.step(f'Try to init standby instance without "{name}" param'):
tested_param = f"{name} {value}"
tested_cmd = valid_cmd.replace(tested_param, "")
output = TestRun.executor.run(tested_cmd)
if output.exit_code == 0:
TestRun.LOGGER.error(
f'"{tested_cmd}" command succeeded despite missing required "{name}" parameter!'
)
if not check_stderr_msg(output, missing_param) or name not in output.stderr:
TestRun.LOGGER.error(
f'Expected error message in format "{missing_param[0]}" with "{name}" '
f'(the missing param). Got "{output.stderr}" instead.'
)
with TestRun.step("Prepare config for testing standby init with disallowed params"):
init_disallowed_params = dict(
[
("--core-device", "/dev/disk/by-id/core_dev_id"),
("--core-id", 5),
("--cache-mode", 32),
("--file", "/etc/opencas/ioclass-config.csv"),
("--io-class-id", "0"),
]
)
for name, value in init_disallowed_params.items():
with TestRun.step(f'Try to init standby instance with disallowed "{name}" param'):
tested_param = f"{name} {value}"
tested_cmd = f"{valid_cmd} {tested_param}"
output = TestRun.executor.run(tested_cmd)
if output.exit_code == 0:
TestRun.LOGGER.error(
f'"{tested_cmd}" command succeeded despite disallowed "{name}" parameter!'
)
if not check_stderr_msg(output, disallowed_param):
TestRun.LOGGER.error(
f'Expected error message in format "{disallowed_param[0]}" '
f'Got "{output.stderr}" instead.'
)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
def test_activate_neg_cli_params():
"""
title: Verifying parameters for activating a standby cache instance.
description: |
Try executing the standby activate command with required arguments missing or disallowed
arguments present.
pass_criteria:
    - The execution is unsuccessful for all improper argument combinations
    - A proper error message is displayed for unsuccessful executions
"""
with TestRun.step("Prepare the device for the cache."):
cache_device = TestRun.disks["cache"]
cache_device.create_partitions([Size(500, Unit.MebiByte)])
cache_device = cache_device.partitions[0]
cache_id = 1
cache_line_size = 32
with TestRun.step("Init standby cache"):
cache_dev = Device(cache_device.path)
cache = standby_init(
cache_dev=cache_dev, cache_id=cache_id, cache_line_size=cache_line_size, force=True
)
with TestRun.step("Detach standby cache"):
cache.standby_detach()
# Test standby activate
with TestRun.step("Prepare config for testing standby activate with required params"):
standby_activate_required_params = dict(
[("--cache-device", cache_device.path), ("--cache-id", cache_id)]
)
# Prepare full valid `standby activate` command
valid_cmd = casadm_bin + " --standby --activate"
for name, value in standby_activate_required_params.items():
valid_cmd += f" {name} {value}"
for name, value in standby_activate_required_params.items():
with TestRun.step(f'Try to standby activate instance without "{name}" param'):
tested_param = f"{name} {value}"
tested_cmd = valid_cmd.replace(tested_param, "")
output = TestRun.executor.run(tested_cmd)
if output.exit_code == 0:
TestRun.LOGGER.error(
f'"{tested_cmd}" command succeeded despite missing obligatory'
f' "{name}" parameter!'
)
if not check_stderr_msg(output, missing_param) or name not in output.stderr:
TestRun.LOGGER.error(
f'Expected error message in format "{missing_param[0]}" with "{name}" '
f'(the missing param). Got "{output.stderr}" instead.'
)
with TestRun.step("Prepare config for testing standby activate with disallowed params"):
activate_disallowed_params = dict(
[
("--core-device", "/dev/disk/by-id/core_dev_id"),
("--core-id", 5),
("--cache-mode", 32),
("--file", "/etc/opencas/ioclass-config.csv"),
("--io-class-id", "0"),
("--cache-line-size", 32),
]
)
for name, value in activate_disallowed_params.items():
with TestRun.step(f'Try to activate standby instance with disallowed "{name}" param'):
tested_param = f"{name} {value}"
tested_cmd = f"{valid_cmd} {tested_param}"
output = TestRun.executor.run(tested_cmd)
if output.exit_code == 0:
TestRun.LOGGER.error(
f'"{tested_cmd}" command succeeded despite disallowed "{name}" parameter!'
)
if not check_stderr_msg(output, disallowed_param):
TestRun.LOGGER.error(
f'Expected error message in format "{disallowed_param[0]}" '
f'Got "{output.stderr}" instead.'
)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
def test_standby_neg_cli_management():
"""
title: Blocking management commands in standby state
description: |
Try executing management commands for a cache in standby state
pass_criteria:
- The execution is unsuccessful for blocked management commands
- The execution is successful for allowed management commands
- A proper error message is displayed for unsuccessful executions
"""
with TestRun.step("Prepare the device for the cache."):
device = TestRun.disks["cache"]
device.create_partitions([Size(500, Unit.MebiByte), Size(500, Unit.MebiByte)])
cache_device = device.partitions[0]
core_device = device.partitions[1]
with TestRun.step("Prepare the standby instance"):
cache_id = 1
cache = casadm.standby_init(
cache_dev=cache_device, cache_id=cache_id, cache_line_size=32, force=True
)
ioclass_config_path = "/tmp/standby_cli_neg_mngt_test_ioclass_config_file.csv"
TestRun.executor.run(f"rm -rf {ioclass_config_path}")
random_ioclass_config = IoClass.generate_random_ioclass_list(5)
IoClass.save_list_to_config_file(
random_ioclass_config, ioclass_config_path=ioclass_config_path
)
blocked_mngt_commands = [
cli.get_param_cutoff_cmd(str(cache_id), "1"),
cli.get_param_cleaning_cmd(str(cache_id)),
cli.get_param_cleaning_alru_cmd(str(cache_id)),
cli.get_param_cleaning_acp_cmd(str(cache_id)),
cli.set_param_cutoff_cmd(str(cache_id), "1", threshold="1"),
cli.set_param_cutoff_cmd(str(cache_id), policy="never"),
cli.set_param_cleaning_cmd(str(cache_id), policy="nop"),
cli.set_param_cleaning_alru_cmd(str(cache_id), wake_up="30"),
cli.set_param_cleaning_acp_cmd(str(cache_id), wake_up="100"),
cli.set_param_promotion_cmd(str(cache_id), policy="nhit"),
cli.set_param_promotion_nhit_cmd(str(cache_id), threshold="5"),
cli.set_cache_mode_cmd("wb", str(cache_id)),
cli.add_core_cmd(str(cache_id), core_device.path),
cli.remove_core_cmd(str(cache_id), "1"),
cli.remove_inactive_cmd(str(cache_id), "1"),
cli.reset_counters_cmd(str(cache_id)),
cli.flush_cache_cmd(str(cache_id)),
cli.flush_core_cmd(str(cache_id), "1"),
cli.load_io_classes_cmd(str(cache_id), ioclass_config_path),
cli.list_io_classes_cmd(str(cache_id), output_format="csv"),
cli.script_try_add_cmd(str(cache_id), core_device.path, core_id=1),
cli.script_purge_cache_cmd(str(cache_id)),
cli.script_purge_core_cmd(str(cache_id), "1"),
cli.script_detach_core_cmd(str(cache_id), "1"),
cli.script_remove_core_cmd(str(cache_id), "1"),
]
with TestRun.step("Try to execute forbidden management commands in standby mode"):
for cmd in blocked_mngt_commands:
TestRun.LOGGER.info(f"Verify {cmd}")
output = TestRun.executor.run_expect_fail(cmd)
if not check_stderr_msg(output, operation_forbiden_in_standby):
TestRun.LOGGER.error(
f'Expected the following error message "{operation_forbiden_in_standby[0]}" '
f'Got "{output.stderr}" instead.'
)
with TestRun.step("Stop the standby instance"):
TestRun.executor.run(f"rm -rf {ioclass_config_path}")
cache.stop()
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
def test_start_neg_cli_flags():
"""
title: Blocking standby start command with mutually exclusive flags
description: |
Try executing the standby start command with different combinations of mutually
exclusive flags.
pass_criteria:
- The command execution is unsuccessful for commands with mutually exclusive flags
- A proper error message is displayed
"""
with TestRun.step("Prepare the device for the cache."):
cache_device = TestRun.disks["cache"]
cache_device.create_partitions([Size(500, Unit.MebiByte)])
cache_device = cache_device.partitions[0]
cache_id = 1
cache_line_size = 32
with TestRun.step("Try to start standby cache with mutually exclusive parameters"):
init_required_params = f' --cache-device {cache_device.path}' \
f' --cache-id {cache_id}' \
f' --cache-line-size {cache_line_size}'
mutually_exclusive_cmd_init = f"{casadm_bin} --standby --init --load" \
f" {init_required_params}"
output = TestRun.executor.run_expect_fail(mutually_exclusive_cmd_init)
if not check_stderr_msg(output, mutually_exclusive_params_init):
TestRun.LOGGER.error(
f'Expected error message in format '
f'"{mutually_exclusive_params_init[0]}"'
f'Got "{output.stderr}" instead.'
)
mutually_exclusive_cmd_load = [
f"{casadm_bin} --standby --load --cache-device {cache_device.path}"
f" --cache-id {cache_id}",
f"{casadm_bin} --standby --load --cache-device {cache_device.path}"
f" --cache-line-size {cache_line_size}",
f"{casadm_bin} --standby --load --cache-device {cache_device.path}"
f" --force"
]
for cmd in mutually_exclusive_cmd_load:
output = TestRun.executor.run_expect_fail(cmd)
if not check_stderr_msg(output, mutually_exclusive_params_load):
TestRun.LOGGER.error(
f'Expected error message in format '
f'"{mutually_exclusive_params_load[0]}"'
f'Got "{output.stderr}" instead.'
)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
def test_activate_without_detach():
"""
title: Activate cache without detach command.
description: |
        Try to activate a passive cache without running the detach command first.
pass_criteria:
- The activation is not possible
- The cache remains in Standby state after unsuccessful activation
- The cache exported object is present after an unsuccessful activation
"""
with TestRun.step("Prepare the device for the cache."):
cache_dev = TestRun.disks["cache"]
cache_dev.create_partitions([Size(500, Unit.MebiByte)])
cache_dev = cache_dev.partitions[0]
cache_id = 1
cache_exp_obj_name = f"cas-cache-{cache_id}"
with TestRun.step("Start cache instance."):
cache = casadm.start_cache(cache_dev=cache_dev, cache_id=cache_id)
with TestRun.step("Stop cache instance."):
cache.stop()
with TestRun.step("Load standby cache instance."):
casadm.standby_load(cache_dev=cache_dev)
with TestRun.step("Verify if the cache exported object appeared in the system"):
output = TestRun.executor.run_expect_success(f"ls -la /dev/ | grep {cache_exp_obj_name}")
if output.stdout[0] != "b":
TestRun.fail("The cache exported object is not a block device")
with TestRun.step("Try to activate cache instance"):
cmd = f"{casadm_bin} --standby --activate --cache-id {cache_id} --cache-device " \
f"{cache_dev.path}"
output = TestRun.executor.run(cmd)
if not check_stderr_msg(output, activate_without_detach):
TestRun.LOGGER.error(
f'Expected error message in format '
f'"{activate_without_detach[0]}"'
f'Got "{output.stderr}" instead.'
)
with TestRun.step("Verify if cache is in standby state after failed activation"):
caches = casadm_parser.get_caches()
if len(caches) < 1:
TestRun.LOGGER.error(f'Cache not present in system')
else:
cache_status = caches[0].get_status()
if cache_status != CacheStatus.standby:
TestRun.LOGGER.error(
f'Expected Cache state: "{CacheStatus.standby.value}" '
f'Got "{cache_status.value}" instead.'
)
with TestRun.step("Verify if the cache exported object remains in the system"):
output = TestRun.executor.run_expect_success(f"ls -la /dev/ | grep {cache_exp_obj_name}")
if output.stdout[0] != "b":
TestRun.fail("The cache exported object is not a block device")
@pytest.mark.require_disk("active_cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
@pytest.mark.require_disk("standby_cache", DiskTypeSet([DiskType.nand, DiskType.optane]))
def test_activate_neg_cache_line_size():
"""
title: Blocking cache with mismatching cache line size activation.
description: |
Try restoring cache operations from a replicated cache that was initialized
        with a different cache line size than the original cache.
pass_criteria:
- The activation is cancelled
- The cache remains in Standby detached state after an unsuccessful activation
- A proper error message is displayed
"""
with TestRun.step("Prepare cache devices"):
active_cache_dev = TestRun.disks["active_cache"]
active_cache_dev.create_partitions([Size(500, Unit.MebiByte)])
active_cache_dev = active_cache_dev.partitions[0]
standby_cache_dev = TestRun.disks["standby_cache"]
standby_cache_dev.create_partitions([Size(500, Unit.MebiByte)])
standby_cache_dev = standby_cache_dev.partitions[0]
cache_id = 1
active_cls, standby_cls = CacheLineSize.LINE_4KiB, CacheLineSize.LINE_16KiB
cache_exp_obj_name = f"cas-cache-{cache_id}"
with TestRun.step("Start active cache instance."):
active_cache = casadm.start_cache(cache_dev=active_cache_dev, cache_id=cache_id,
cache_line_size=active_cls)
with TestRun.step("Create dump file with cache metadata"):
with TestRun.step("Get metadata size"):
dmesg_out = TestRun.executor.run_expect_success("dmesg").stdout
md_size = dmesg.get_metadata_size(dmesg_out)
with TestRun.step("Dump the metadata of the cache"):
dump_file_path = "/tmp/test_activate_corrupted.dump"
md_dump = File(dump_file_path)
md_dump.remove(force=True, ignore_errors=True)
dd_count = int(md_size / Size(1, Unit.MebiByte)) + 1
(
Dd().input(active_cache_dev.path)
.output(md_dump.full_path)
.block_size(Size(1, Unit.MebiByte))
.count(dd_count)
.run()
)
md_dump.refresh_item()
with TestRun.step("Stop cache instance."):
active_cache.stop()
with TestRun.step("Start standby cache instance."):
standby_cache = casadm.standby_init(cache_dev=standby_cache_dev, cache_id=cache_id,
cache_line_size=int(
standby_cls.value.value / Unit.KibiByte.value),
force=True)
with TestRun.step("Verify if the cache exported object appeared in the system"):
output = TestRun.executor.run_expect_success(
f"ls -la /dev/ | grep {cache_exp_obj_name}"
)
if output.stdout[0] != "b":
TestRun.fail("The cache exported object is not a block device")
with TestRun.step("Detach standby cache instance"):
standby_cache.standby_detach()
with TestRun.step(f"Copy changed metadata to the standby instance"):
Dd().input(md_dump.full_path).output(standby_cache_dev.path).run()
sync()
with TestRun.step("Try to activate cache instance"):
with pytest.raises(CmdException) as cmdExc:
output = standby_cache.standby_activate(standby_cache_dev)
if not check_stderr_msg(output, cache_line_size_mismatch):
TestRun.LOGGER.error(
f'Expected error message in format '
f'"{cache_line_size_mismatch[0]}"'
f'Got "{output.stderr}" instead.'
)
assert "Failed to activate standby cache." in str(cmdExc.value)
with TestRun.step("Verify if cache is in standby detached state after failed activation"):
cache_status = standby_cache.get_status()
if cache_status != CacheStatus.standby_detached:
TestRun.LOGGER.error(
f'Expected Cache state: "{CacheStatus.standby.value}" '
f'Got "{cache_status.value}" instead.'
)
| 45.47619
| 100
| 0.625083
|
03ba26d7075b9d70df7043455f1ff1dc3c87c65d
| 399
|
py
|
Python
|
exercicios_resolvidos/ex015.py
|
WagnerAndrade-DEV/Python-Basics
|
77b6f4b48721809c6a13ddbb7b7bc4c3bc9f712f
|
[
"MIT"
] | null | null | null |
exercicios_resolvidos/ex015.py
|
WagnerAndrade-DEV/Python-Basics
|
77b6f4b48721809c6a13ddbb7b7bc4c3bc9f712f
|
[
"MIT"
] | null | null | null |
exercicios_resolvidos/ex015.py
|
WagnerAndrade-DEV/Python-Basics
|
77b6f4b48721809c6a13ddbb7b7bc4c3bc9f712f
|
[
"MIT"
] | null | null | null |
# Write a program that asks how many km a rented car was driven and for how many days it was rented. Compute the amount to pay, knowing that the car costs R$60 per day plus R$0.15 per km driven
dias = int(input('How many days was the car rented?: '))
km = int(input('How many km were driven?: '))
valor = (dias * 60) + (km * 0.15)
print('The total to pay is R${:.2f}'.format(valor))
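# Worked example (illustrative): for 3 days and 100 km driven the total is
# 3 * 60 + 100 * 0.15 = 180 + 15 = R$195.00.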
| 49.875
| 224
| 0.706767
|
3671bc35da7afa873db50671fafe420e51c0e587
| 3,117
|
py
|
Python
|
oakling/oakling/settings.py
|
zym1115718204/oakling
|
e925e324c0a18b4cb246a1811f2dca522c4e2892
|
[
"Apache-2.0"
] | 1
|
2018-03-22T10:45:22.000Z
|
2018-03-22T10:45:22.000Z
|
oakling/oakling/settings.py
|
zym1115718204/oakling
|
e925e324c0a18b4cb246a1811f2dca522c4e2892
|
[
"Apache-2.0"
] | null | null | null |
oakling/oakling/settings.py
|
zym1115718204/oakling
|
e925e324c0a18b4cb246a1811f2dca522c4e2892
|
[
"Apache-2.0"
] | null | null | null |
"""
Django settings for oakling project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
from common import *
# --------------------------------------------------------------------
# Version Info
# --------------------------------------------------------------------
# Version Info
VERSION = 'Oakling 1.0'
# --------------------------------------------------------------------
# MongoDB Settings
# --------------------------------------------------------------------
# Mongodb settings
MongoDBS = {
'oakling_project': {
'host': 'mongodb://localhost/oakling_project',
},
'oakling_task': {
'host': 'mongodb://localhost/oakling_task',
}
}
from mongoengine import connect # noqa
for name, db in MongoDBS.items():
connect(host=db['host'], alias=name)
# --------------------------------------------------------------------
# APP Tree Settings
# --------------------------------------------------------------------
REGISTER_DATASYSTEMS = [
"LOCAL",
"HDFS",
]
# default, Tree root url;
BASETREE_URL = "/dashboard/data/"
# default, Local File data directory
LOCAL_DATAFILE_DIRS = os.path.join(os.path.dirname(BASE_DIR), "data")
# default hdfs data settings
HDFS_NAMENODE_HOST = "namenode"
HDFS_NAMENODE_PORT = 8020
HDFS_DATAFILE_DIRS = os.path.join("/tmp", "data")
# --------------------------------------------------------------------
# Utils Settings
# --------------------------------------------------------------------
# Spiders Path
PROJECTS_PATH = os.path.join(os.path.dirname(BASE_DIR), "projects")
# Execute Path
EXECUTE_PATH = os.path.join(BASE_DIR, "execute")
# --------------------------------------------------------------------
# Celery settings
# --------------------------------------------------------------------
# BROKER_URL = 'amqp://guest:guest@localhost//'
BROKER_URL = 'redis://localhost:6379/0'
ANALYSIS_REDIS = 'redis://localhost:6379/1'
NODES_REDIS = 'redis://localhost:6379/1'
#: Only add pickle to this list if your broker is secured
#: from unwanted access (see userguide/security.html)
# BROKER_URL = 'amqp://'
CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TIMEZONE = 'Europe/Oslo'
CELERY_ENABLE_UTC = True
CELERY_ROUTES = {
'oakling.celery.debug_task': 'test',
'collector.tasks.low_processor': 'low_processor',
'collector.tasks.mid_processor': 'mid_processor',
'collector.tasks.high_processor': 'high_processor',
}
CELERY_ANNOTATIONS = {
'collector.tasks.low_processor': {'rate_limit': '6000/m'},
'collector.tasks.mid_processor': {'rate_limit': '6000/m'},
'collector.tasks.high_processor': {'rate_limit': '6000/m'},
'oakling.celery.debug_task': {'rate_limit': '6000/m'},
}
CELERY_IMPORTS = (
'oakling.celery',
'collector.tasks',
)
| 28.59633
| 70
| 0.545075
|
59827d555348300a1d315c0f322126b508a33533
| 8,637
|
py
|
Python
|
snsim/utils.py
|
bcarreres/snsim
|
86ffc49f254cd89c74be9c3350c00982e3d216e2
|
[
"BSD-3-Clause"
] | 5
|
2021-07-14T18:23:59.000Z
|
2022-02-02T13:09:55.000Z
|
snsim/utils.py
|
bcarreres/snsim
|
86ffc49f254cd89c74be9c3350c00982e3d216e2
|
[
"BSD-3-Clause"
] | 7
|
2021-02-25T15:19:59.000Z
|
2021-11-24T08:24:55.000Z
|
snsim/utils.py
|
bcarreres/snsim
|
86ffc49f254cd89c74be9c3350c00982e3d216e2
|
[
"BSD-3-Clause"
] | 1
|
2021-05-19T11:25:18.000Z
|
2021-05-19T11:25:18.000Z
|
"""This module contains usefull function for the simulation."""
import numpy as np
import sncosmo as snc
import astropy.time as atime
from astropy.coordinates import SkyCoord
from astropy import cosmology as acosmo
import astropy.units as u
from .constants import C_LIGHT_KMS
def set_cosmo(cosmo_dic):
"""Load an astropy cosmological model.
Parameters
----------
cosmo_dic : dict
A dict containing cosmology parameters.
Returns
-------
astropy.cosmology.object
An astropy cosmological model.
"""
astropy_mod = list(map(lambda x: x.lower(), acosmo.parameters.available))
if 'name' in cosmo_dic.keys():
name = cosmo_dic['name'].lower()
if name in astropy_mod:
if name == 'planck18':
return acosmo.Planck18
elif name == 'planck18_arxiv_v2':
return acosmo.Planck18_arXiv_v2
elif name == 'planck15':
return acosmo.Planck15
elif name == 'planck13':
return acosmo.Planck13
elif name == 'wmap9':
return acosmo.WMAP9
elif name == 'wmap7':
return acosmo.WMAP7
elif name == 'wmap5':
return acosmo.WMAP5
else:
raise ValueError(f'Available model are {astropy_mod}')
else:
if 'Ode0' not in cosmo_dic.keys():
cosmo_dic['Ode0'] = 1 - cosmo_dic['Om0']
return acosmo.w0waCDM(**cosmo_dic)
def scale_M0_jla(H0):
"""Compute a value of M0 corresponding to JLA results.
Parameters
----------
H0 : float
The H0 constant to scale M0.
Returns
-------
float
Scaled SN absolute magnitude.
"""
# mb = 5 * log10(c/H0_jla * Dl(z)) + 25 + MB_jla
# mb = 5 * log10(c/HO_True * Dl(z)) + 25 + MB_jla - 5 * log10(1 + dH0)
# with dH0 = (H0_jla - H0_True)/ H0_True
# MB_True = MB_jla - 5 * log10(1 + dH0)
# Scale the H0 value of JLA to the H0 value of sim
H0_jla = 70 # km/s/Mpc
M0_jla = -19.05
dH0 = (H0_jla - H0) / H0
return M0_jla - 5 * np.log10(1 + dH0)
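# Illustrative check (not part of the original module): when the simulation H0
# equals the JLA value of 70 km/s/Mpc, dH0 = 0 and the JLA magnitude comes back
# unchanged, i.e. scale_M0_jla(70) == -19.05; a larger H0, e.g. 73, gives a
# slightly less negative (intrinsically fainter) M0 of about -18.96.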
def init_astropy_time(date):
"""Take a date and give a astropy.time.Time object.
Parameters
----------
date : int, float or str
The date in MJD number or YYYY-MM-DD string.
Returns
-------
astropy.time.Time
An astropy.time Time object of the given date.
"""
if isinstance(date, (int, float)):
date_format = 'mjd'
elif isinstance(date, str):
date_format = 'iso'
return atime.Time(date, format=date_format)
def compute_z_cdf(z_shell, shell_time_rate):
"""Compute the cumulative distribution function of redshift.
Parameters
----------
z_shell : numpy.ndarray(float)
The redshift of the shell edges.
shell_time_rate : numpy.ndarray(float)
The time rate of each shell.
Returns
-------
list(numpy.ndarray(float), numpy.ndarray(float))
redshift, CDF(redshift).
"""
dist = np.append(0, np.cumsum(shell_time_rate))
norm = dist[-1]
return [z_shell, dist / norm]
def asym_gauss(mean, sig_low, sig_high=None, rand_gen=None, size=1):
"""Generate random parameters using an asymetric Gaussian distribution.
Parameters
----------
mean : float
The central value of the Gaussian.
sig_low : float
The low sigma.
sig_high : float
The high sigma.
rand_gen : numpy.random.default_rng, optional
Numpy random generator.
size: int
Number of numbers to generate
Returns
-------
numpy.ndarray(float)
        The generated random variable(s).
"""
if sig_high is None:
sig_high = sig_low
if rand_gen is None:
low_or_high = np.random.random(size=size)
nbr = abs(np.random.normal(size=size))
else:
low_or_high = rand_gen.random(size)
nbr = abs(rand_gen.normal(size=size))
cond = low_or_high < sig_low / (sig_high + sig_low)
nbr *= -sig_low * cond + sig_high * ~cond
return mean + nbr
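# Illustrative usage (not part of the original module): drawing from an
# asymmetric Gaussian centred on 0 with sig_low=1 and sig_high=2 via a seeded
# generator; roughly one third of the draws fall below the mean.
# >>> rng = np.random.default_rng(0)
# >>> draws = asym_gauss(0.0, 1.0, 2.0, rand_gen=rng, size=10000)
# >>> (draws < 0).mean()  # close to sig_low / (sig_low + sig_high) = 1/3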
def compute_z2cmb(ra, dec, cmb):
"""Compute the redshifts of a list of objects relative to the CMB.
Parameters
----------
ra : np.ndarray(float)
Right Ascension of the objects.
dec : np.ndarray(float)
        Declination of the objects.
cmb : dict
Dict containing cmb coords and velocity.
Returns
-------
np.ndarray(float)
Redshifts relative to cmb.
"""
l_cmb = cmb['l_cmb']
b_cmb = cmb['b_cmb']
v_cmb = cmb['v_cmb']
# use ra dec to simulate the effect of our motion
coordfk5 = SkyCoord(ra * u.rad,
dec * u.rad,
frame='fk5') # coord in fk5 frame
galac_coord = coordfk5.transform_to('galactic')
l_gal = galac_coord.l.rad - 2 * np.pi * \
np.sign(galac_coord.l.rad) * (abs(galac_coord.l.rad) > np.pi)
b_gal = galac_coord.b.rad
ss = np.sin(b_gal) * np.sin(b_cmb * np.pi / 180)
ccc = np.cos(b_gal) * np.cos(b_cmb * np.pi / 180) * np.cos(l_gal - l_cmb * np.pi / 180)
return (1 - v_cmb * (ss + ccc) / C_LIGHT_KMS) - 1.
def init_sn_model(name, model_dir=None):
"""Initialise a sncosmo model.
Parameters
----------
name : str
Name of the model.
model_dir : str
Path to the model files.
Returns
-------
sncosmo.Model
sncosmo Model corresponding to input configuration.
"""
if model_dir is None:
return snc.Model(source=name)
else:
if name == 'salt2':
return snc.Model(source=snc.SALT2Source(model_dir, name='salt2'))
elif name == 'salt3':
return snc.Model(source=snc.SALT3Source(model_dir, name='salt3'))
return None
def snc_fitter(lc, fit_model, fit_par, **kwargs):
"""Fit a given lightcurve with sncosmo.
Parameters
----------
lc : astropy.Table
The SN lightcurve.
fit_model : sncosmo.Model
        Model used to fit the lightcurve.
fit_par : list(str)
The parameters to fit.
Returns
-------
sncosmo.utils.Result (numpy.nan if no result)
sncosmo dict of fit results.
"""
try:
res = snc.fit_lc(data=lc, model=fit_model,
vparam_names=fit_par, **kwargs)
if res[0]['covariance'] is None:
res[0]['covariance'] = np.empty((len(res[0]['vparam_names']),
len(res[0]['vparam_names'])))
res[0]['covariance'][:] = np.nan
res[0]['param_names'] = np.append(res[0]['param_names'], 'mb')
res[0]['parameters'] = np.append(res[0]['parameters'],
res[1].source_peakmag('bessellb', 'ab'))
res_dic = {k: v for k, v in zip(res[0]['param_names'], res[0]['parameters'])}
res = np.append(res, res_dic)
except (RuntimeError, snc.fitting.DataQualityError):
res = ['NaN', 'NaN', 'NaN']
return res
def norm_flux(flux_table, zp):
"""Rescale the flux to a given zeropoint.
Parameters
----------
flux_table : astropy.Table
A table containing at least flux and fluxerr.
zp : float
The zeropoint to rescale the flux.
Returns
-------
np.ndarray(float), np.ndarray(float)
        Rescaled flux and fluxerr arrays.
"""
norm_factor = 10**(0.4 * (zp - flux_table['zp']))
flux_norm = flux_table['flux'] * norm_factor
fluxerr_norm = flux_table['fluxerr'] * norm_factor
return flux_norm, fluxerr_norm
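# Worked example (illustrative): rescaling from a table zeropoint of 25 to a
# target zeropoint of 27.5 multiplies the flux by 10**(0.4 * 2.5) = 10, and the
# flux errors are scaled by the same factor.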
def flux_to_Jansky(zp, band):
"""Give the factor to convert flux in uJy.
Parameters
----------
zp : float
The actual zeropoint of flux.
band : str
        The sncosmo band in which to compute the factor.
Returns
-------
float
The conversion factor.
"""
magsys = snc.get_magsystem('ab')
b = snc.get_bandpass(band)
nu, dnu = snc.utils.integration_grid(
snc.constants.C_AA_PER_S / b.maxwave(),
snc.constants.C_AA_PER_S / b.minwave(),
snc.constants.C_AA_PER_S / snc.constants.MODEL_BANDFLUX_SPACING)
trans = b(snc.constants.C_AA_PER_S / nu)
trans_int = np.sum(trans / nu) * dnu / snc.constants.H_ERG_S
norm = 10**(-0.4 * zp) * magsys.zpbandflux(b) / trans_int * 10**23 * 10**6
return norm
def print_dic(dic, prefix=''):
indent = ' '
for K in dic:
if isinstance(dic[K], dict):
print(prefix + K + ':')
print_dic(dic[K], prefix=prefix + indent)
else:
print(prefix + f'{K}: {dic[K]}')
| 27.506369
| 91
| 0.582031
|
87fe37fb12032f1b9127840d796ca56a211169f5
| 14,527
|
py
|
Python
|
nailgun/nailgun/test/integration/test_network_manager.py
|
Axam/nsx-web
|
4f60d71c05e08740cbdf19b6c9bb0c4cb1e29ad5
|
[
"Apache-2.0"
] | 1
|
2021-04-06T16:13:35.000Z
|
2021-04-06T16:13:35.000Z
|
nailgun/nailgun/test/integration/test_network_manager.py
|
Axam/nsx-web
|
4f60d71c05e08740cbdf19b6c9bb0c4cb1e29ad5
|
[
"Apache-2.0"
] | null | null | null |
nailgun/nailgun/test/integration/test_network_manager.py
|
Axam/nsx-web
|
4f60d71c05e08740cbdf19b6c9bb0c4cb1e29ad5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from mock import Mock
from mock import patch
from netaddr import IPAddress
from netaddr import IPNetwork
from netaddr import IPRange
from sqlalchemy import not_
import nailgun
from nailgun.db.sqlalchemy.models import IPAddr
from nailgun.db.sqlalchemy.models import IPAddrRange
from nailgun.db.sqlalchemy.models import NetworkGroup
from nailgun.db.sqlalchemy.models import Node
from nailgun.db.sqlalchemy.models import NodeNICInterface
from nailgun.network.neutron import NeutronManager
from nailgun.network.nova_network import NovaNetworkManager
from nailgun.openstack.common import jsonutils
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import fake_tasks
class TestNetworkManager(BaseIntegrationTest):
@fake_tasks(fake_rpc=False, mock_rpc=False)
@patch('nailgun.rpc.cast')
def test_assign_ips(self, mocked_rpc):
self.env.create(
cluster_kwargs={},
nodes_kwargs=[
{"pending_addition": True, "api": True},
{"pending_addition": True, "api": True}
]
)
nailgun.task.task.Cobbler = Mock()
self.env.network_manager.assign_ips(
[n.id for n in self.env.nodes],
"management"
)
management_net = self.db.query(NetworkGroup).\
filter(
NetworkGroup.cluster_id == self.env.clusters[0].id
).filter_by(
name='management'
).first()
assigned_ips = []
for node in self.env.nodes:
ips = self.db.query(IPAddr).\
filter_by(node=node.id).\
filter_by(network=management_net.id).all()
self.assertEqual(1, len(ips))
self.assertEqual(
True,
self.env.network_manager.check_ip_belongs_to_net(
ips[0].ip_addr,
management_net
)
)
assigned_ips.append(ips[0].ip_addr)
# check for uniqueness of IPs:
self.assertEqual(len(assigned_ips), len(list(set(assigned_ips))))
# check it doesn't contain broadcast and other special IPs
net_ip = IPNetwork(management_net.cidr)[0]
gateway = management_net.gateway
broadcast = IPNetwork(management_net.cidr)[-1]
self.assertEqual(False, net_ip in assigned_ips)
self.assertEqual(False, gateway in assigned_ips)
self.assertEqual(False, broadcast in assigned_ips)
@fake_tasks(fake_rpc=False, mock_rpc=False)
@patch('nailgun.rpc.cast')
def test_assign_ips_idempotent(self, mocked_rpc):
self.env.create(
cluster_kwargs={},
nodes_kwargs=[
{
"pending_addition": True,
"api": True,
"status": "discover"
}
]
)
node_db = self.env.nodes[0]
self.env.network_manager.assign_ips(
[node_db.id],
"management"
)
self.env.network_manager.assign_ips(
[node_db.id],
"management"
)
self.db.refresh(node_db)
self.assertEqual(
len(
filter(
lambda n: n['name'] == 'management',
self.env.network_manager.get_node_networks(
node_db
)
)
),
1
)
def test_assign_vip_is_idempotent(self):
cluster = self.env.create_cluster(api=True)
vip = self.env.network_manager.assign_vip(
cluster['id'],
"management"
)
vip2 = self.env.network_manager.assign_vip(
cluster['id'],
"management"
)
self.assertEqual(vip, vip2)
def test_get_node_networks_for_vlan_manager(self):
cluster = self.env.create(
cluster_kwargs={},
nodes_kwargs=[
{"pending_addition": True},
]
)
networks_data = \
{'networking_parameters': {'net_manager': 'VlanManager'}}
resp = self.env.nova_networks_put(cluster['id'], networks_data)
task = jsonutils.loads(resp.body)
self.assertEqual(task['status'], 'ready')
network_data = self.env.network_manager.get_node_networks(
self.env.nodes[0]
)
self.assertEqual(len(network_data), 4)
fixed_nets = filter(lambda net: net['name'] == 'fixed', network_data)
self.assertEqual(fixed_nets, [])
def test_ipaddr_joinedload_relations(self):
self.env.create(
cluster_kwargs={},
nodes_kwargs=[
{"pending_addition": True, "api": True},
{"pending_addition": True, "api": True}
]
)
self.env.network_manager.assign_ips(
[n.id for n in self.env.nodes],
"management"
)
ips = self.env.network_manager._get_ips_except_admin(joined=True)
self.assertEqual(len(ips), 2)
self.assertTrue(isinstance(ips[0].node_data, Node))
self.assertTrue(isinstance(ips[0].network_data, NetworkGroup))
def test_nets_empty_list_if_node_does_not_belong_to_cluster(self):
node = self.env.create_node(api=False)
network_data = self.env.network_manager.get_node_networks(node)
self.assertEqual(network_data, [])
def test_assign_admin_ips(self):
node = self.env.create_node()
self.env.network_manager.assign_admin_ips(node.id, 2)
admin_ng_id = self.env.network_manager.get_admin_network_group_id()
admin_network_range = self.db.query(IPAddrRange).\
filter_by(network_group_id=admin_ng_id).all()[0]
admin_ips = self.db.query(IPAddr).\
filter_by(node=node.id).\
filter_by(network=admin_ng_id).all()
self.assertEqual(len(admin_ips), 2)
map(
lambda x: self.assertIn(
IPAddress(x.ip_addr),
IPRange(
admin_network_range.first,
admin_network_range.last
)
),
admin_ips
)
def test_assign_admin_ips_large_range(self):
map(self.db.delete, self.db.query(IPAddrRange).all())
admin_ng_id = self.env.network_manager.get_admin_network_group_id()
mock_range = IPAddrRange(
first='10.0.0.1',
last='10.255.255.254',
network_group_id=admin_ng_id
)
self.db.add(mock_range)
self.db.commit()
# Creating two nodes
n1 = self.env.create_node()
n2 = self.env.create_node()
nc = zip([n1.id, n2.id], [2048, 2])
        # Assigning admin IPs on the created nodes
map(lambda (n, c): self.env.network_manager.assign_admin_ips(n, c), nc)
# Asserting count of admin node IPs
def asserter(x):
n, c = x
l = len(self.db.query(IPAddr).filter_by(network=admin_ng_id).
filter_by(node=n).all())
self.assertEqual(l, c)
map(asserter, nc)
def test_assign_admin_ips_idempotent(self):
node = self.env.create_node()
self.env.network_manager.assign_admin_ips(node.id, 2)
admin_net_id = self.env.network_manager.get_admin_network_group_id()
admin_ips = set([i.ip_addr for i in self.db.query(IPAddr).
filter_by(node=node.id).
filter_by(network=admin_net_id).all()])
self.env.network_manager.assign_admin_ips(node.id, 2)
admin_ips2 = set([i.ip_addr for i in self.db.query(IPAddr).
filter_by(node=node.id).
filter_by(network=admin_net_id).all()])
self.assertEqual(admin_ips, admin_ips2)
def test_assign_admin_ips_only_one(self):
map(self.db.delete, self.db.query(IPAddrRange).all())
admin_net_id = self.env.network_manager.get_admin_network_group_id()
mock_range = IPAddrRange(
first='10.0.0.1',
last='10.0.0.1',
network_group_id=admin_net_id
)
self.db.add(mock_range)
self.db.commit()
node = self.env.create_node()
self.env.network_manager.assign_admin_ips(node.id, 1)
admin_net_id = self.env.network_manager.get_admin_network_group_id()
admin_ips = self.db.query(IPAddr).\
filter_by(node=node.id).\
filter_by(network=admin_net_id).all()
self.assertEqual(len(admin_ips), 1)
self.assertEqual(admin_ips[0].ip_addr, '10.0.0.1')
@fake_tasks(fake_rpc=False, mock_rpc=False)
@patch('nailgun.rpc.cast')
def test_admin_ip_cobbler(self, mocked_rpc):
node_1_meta = {}
self.env.set_interfaces_in_meta(node_1_meta, [{
"name": "eth0",
"mac": "00:00:00:00:00:00",
}, {
"name": "eth1",
"mac": "00:00:00:00:00:01"}])
node_2_meta = {}
self.env.set_interfaces_in_meta(node_2_meta, [{
"name": "eth0",
"mac": "00:00:00:00:00:02",
}, {
"name": "eth1",
"mac": "00:00:00:00:00:03"}])
self.env.create(
cluster_kwargs={},
nodes_kwargs=[
{
"api": True,
"pending_addition": True,
"mac": "00:00:00:00:00:00",
"meta": node_1_meta
},
{
"api": True,
"pending_addition": True,
"mac": "00:00:00:00:00:02",
"meta": node_2_meta
}
]
)
self.env.launch_deployment()
rpc_nodes_provision = nailgun.task.manager.rpc.cast. \
call_args_list[0][0][1][0]['args']['provisioning_info']['nodes']
admin_ng_id = self.env.network_manager.get_admin_network_group_id()
admin_network_range = self.db.query(IPAddrRange).\
filter_by(network_group_id=admin_ng_id).all()[0]
map(
lambda (x, y): self.assertIn(
IPAddress(
rpc_nodes_provision[x]['interfaces'][y]['ip_address']
),
IPRange(
admin_network_range.first,
admin_network_range.last
)
),
itertools.product((0, 1), ('eth0',))
)
class TestNovaNetworkManager(BaseIntegrationTest):
def setUp(self):
super(TestNovaNetworkManager, self).setUp()
self.env.create(
cluster_kwargs={},
nodes_kwargs=[
{'api': True,
'pending_addition': True}
])
self.node_db = self.env.nodes[0]
def test_get_default_nic_assignment(self):
admin_nic_id = self.node_db.admin_interface.id
admin_nets = [n.name for n in self.db.query(
NodeNICInterface).get(admin_nic_id).assigned_networks_list]
other_nic = self.db.query(NodeNICInterface).filter_by(
node_id=self.node_db.id
).filter(
not_(NodeNICInterface.id == admin_nic_id)
).first()
other_nets = [n.name for n in other_nic.assigned_networks_list]
nics = NovaNetworkManager.get_default_networks_assignment(self.node_db)
def_admin_nic = [n for n in nics if n['id'] == admin_nic_id]
def_other_nic = [n for n in nics if n['id'] == other_nic.id]
self.assertEqual(len(def_admin_nic), 1)
self.assertEqual(len(def_other_nic), 1)
self.assertEqual(
set(admin_nets),
set([n['name'] for n in def_admin_nic[0]['assigned_networks']]))
self.assertEqual(
set(other_nets),
set([n['name'] for n in def_other_nic[0]['assigned_networks']]))
class TestNeutronManager(BaseIntegrationTest):
def check_networks_assignment(self, node_db):
node_nics = self.db.query(NodeNICInterface).filter_by(
node_id=node_db.id
).all()
def_nics = NeutronManager.get_default_networks_assignment(node_db)
self.assertEqual(len(node_nics), len(def_nics))
for n_nic in node_nics:
n_assigned = set(n['name'] for n in n_nic.assigned_networks)
for d_nic in def_nics:
if d_nic['id'] == n_nic.id:
d_assigned = set(n['name']
for n in d_nic['assigned_networks']) \
if d_nic.get('assigned_networks') else set()
self.assertEqual(n_assigned, d_assigned)
break
else:
self.fail("NIC is not found")
def test_gre_get_default_nic_assignment(self):
self.env.create(
cluster_kwargs={
'net_provider': 'neutron',
'net_segment_type': 'gre'},
nodes_kwargs=[
{'api': True,
'pending_addition': True}
])
self.check_networks_assignment(self.env.nodes[0])
def test_vlan_get_default_nic_assignment(self):
meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(
meta,
[{'name': 'eth0', 'mac': '00:00:00:00:00:11'},
{'name': 'eth1', 'mac': '00:00:00:00:00:22'},
{'name': 'eth2', 'mac': '00:00:00:00:00:33'}])
self.env.create(
cluster_kwargs={
'net_provider': 'neutron',
'net_segment_type': 'vlan'},
nodes_kwargs=[
{'api': True,
'meta': meta,
'pending_addition': True}
])
self.check_networks_assignment(self.env.nodes[0])
| 34.588095
| 79
| 0.570799
|
07b0009a020c1a5dc29d6c312813895952d1e6ad
| 1,797
|
py
|
Python
|
src/plugins/pcr/config.py
|
cdlaimin/CoolQBot
|
eb77046dd9f8c53c4e7b2e8419d2e447261ade97
|
[
"MIT"
] | 72
|
2019-10-23T08:07:58.000Z
|
2022-03-31T12:02:08.000Z
|
src/plugins/pcr/config.py
|
cdlaimin/CoolQBot
|
eb77046dd9f8c53c4e7b2e8419d2e447261ade97
|
[
"MIT"
] | 87
|
2019-03-11T09:52:31.000Z
|
2022-03-21T21:56:48.000Z
|
src/plugins/pcr/config.py
|
cdlaimin/CoolQBot
|
eb77046dd9f8c53c4e7b2e8419d2e447261ade97
|
[
"MIT"
] | 24
|
2019-03-08T08:15:17.000Z
|
2021-12-24T05:25:58.000Z
|
""" 配置文件
"""
from typing import List
from nonebot import get_driver
from pydantic import BaseSettings, validator
from src.utils.helpers import groupidtostr, strtogroupid
from src.utils.plugin import PluginData
DATA = PluginData("pcr")
class Config(BaseSettings):
    # News push related settings
    # Interval between automatic news pushes, in minutes
push_news_interval: int = int(DATA.config.get("news", "push_news_interval", "30"))
    # Release ID of the last pushed news item
push_news_last_news_id: int = int(
DATA.config.get("news", "push_news_last_news_id", "0")
)
@validator("push_news_last_news_id")
def push_news_last_news_id_validator(cls, v):
"""验证并保存配置"""
DATA.config.set("news", "push_news_last_news_id", str(v))
return v
    # Groups with news push enabled
push_news_group_id: List[int] = strtogroupid(DATA.config.get("news", "group_id"))
@validator("push_news_group_id", always=True)
def push_news_group_id_validator(cls, v: List[int]):
"""验证并保存配置"""
DATA.config.set("news", "group_id", groupidtostr(v))
return v
    # Calendar push feature
calender_hour: int = int(DATA.config.get("calender", "hour", fallback="7"))
calender_minute: int = int(DATA.config.get("calender", "minute", fallback="30"))
calender_second: int = int(DATA.config.get("calender", "second", fallback="0"))
    # Groups with calendar push enabled
push_calender_group_id: List[int] = strtogroupid(
DATA.config.get("calender", "group_id")
)
@validator("push_calender_group_id", always=True)
def push_calender_group_id_validator(cls, v: List[int]):
"""验证并保存配置"""
DATA.config.set("calender", "group_id", groupidtostr(v))
return v
class Config:
extra = "ignore"
validate_assignment = True
global_config = get_driver().config
plugin_config = Config(**global_config.dict())
| 29.459016
| 86
| 0.671675
|
44df28fae9a71b9af5ad8e7864a6ecbd5ac4ebae
| 614
|
py
|
Python
|
Prime3.py
|
rashidulhasanhridoy/Prime-Number-Problem-in-Python-3
|
15edc619b6d282cf482dc312fc01aa5d4a9ee6d1
|
[
"Apache-2.0"
] | 1
|
2020-07-21T18:01:04.000Z
|
2020-07-21T18:01:04.000Z
|
Prime3.py
|
rashidulhasanhridoy/Prime-Number-Problem-in-Python-3
|
15edc619b6d282cf482dc312fc01aa5d4a9ee6d1
|
[
"Apache-2.0"
] | null | null | null |
Prime3.py
|
rashidulhasanhridoy/Prime-Number-Problem-in-Python-3
|
15edc619b6d282cf482dc312fc01aa5d4a9ee6d1
|
[
"Apache-2.0"
] | null | null | null |
# This program will show the n-th prime number.
def prime(X):
if X == 1:
return 0
else:
for i in range(2, X):
if X % i == 0:
return 0
else:
return 1
P = []
count = 0
i = 1
while True:
Y = prime(i)
if Y == 1:
P.append(i)
count += 1
i += 1
if count == 1500:
        # Here you can change the total number of primes you want to generate;
        # 1500 means this program will show the first 1500 prime numbers.
break
N = int(input(''))
for i in range(N):
M = int(input(''))
print(P[M - 1])
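# Worked example (illustrative): the first six primes are 2, 3, 5, 7, 11, 13,
# so with N = 1 and M = 6 the program prints 13.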
| 21.928571
| 86
| 0.480456
|
40214ade0843fb29c7958bb6e5eb25440757e5e5
| 50,130
|
py
|
Python
|
gplearn/genetic.py
|
hofesh/gplearn
|
2f93916a134fc0a2e4410025aa31f7805848c9d5
|
[
"BSD-3-Clause"
] | null | null | null |
gplearn/genetic.py
|
hofesh/gplearn
|
2f93916a134fc0a2e4410025aa31f7805848c9d5
|
[
"BSD-3-Clause"
] | null | null | null |
gplearn/genetic.py
|
hofesh/gplearn
|
2f93916a134fc0a2e4410025aa31f7805848c9d5
|
[
"BSD-3-Clause"
] | null | null | null |
"""Genetic Programming in Python, with a scikit-learn inspired API
The :mod:`gplearn.genetic` module implements Genetic Programming. These
are supervised learning methods based on applying evolutionary operations on
computer programs.
"""
# Author: Trevor Stephens <trevorstephens.com>
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
from time import time
from warnings import warn
import numpy as np
from scipy.stats import rankdata
from sklearn.base import BaseEstimator, RegressorMixin, TransformerMixin
from sklearn.externals import six
from sklearn.externals.joblib import Parallel, delayed
from sklearn.utils.validation import check_X_y, check_array
from ._program import _Program
from .fitness import _fitness_map, _Fitness
from .functions import _function_map, _Function
from .utils import _partition_estimators
from .utils import check_random_state, NotFittedError
__all__ = ['SymbolicRegressor', 'SymbolicTransformer']
MAX_INT = np.iinfo(np.int32).max
def _parallel_evolve(n_programs, parents, X, y, sample_weight, seeds, params):
"""Private function used to build a batch of programs within a job."""
n_samples, n_features = X.shape
# Unpack parameters
tournament_size = params['tournament_size']
function_set = params['function_set']
arities = params['arities']
init_depth = params['init_depth']
init_method = params['init_method']
const_range = params['const_range']
metric = params['_metric']
parsimony_coefficient = params['parsimony_coefficient']
method_probs = params['method_probs']
p_point_replace = params['p_point_replace']
max_samples = params['max_samples']
feature_names = params['feature_names']
max_samples = int(max_samples * n_samples)
def _tournament():
"""Find the fittest individual from a sub-population."""
contenders = random_state.randint(0, len(parents), tournament_size)
fitness = [parents[p].fitness_ for p in contenders]
if metric.greater_is_better:
parent_index = contenders[np.argmax(fitness)]
else:
parent_index = contenders[np.argmin(fitness)]
return parents[parent_index], parent_index
# Build programs
programs = []
for i in range(n_programs):
random_state = check_random_state(seeds[i])
if parents is None:
program = None
genome = None
else:
method = random_state.uniform()
parent, parent_index = _tournament()
if method < method_probs[0]:
# crossover
donor, donor_index = _tournament()
program, removed, remains = parent.crossover(donor.program,
random_state)
genome = {'method': 'Crossover',
'parent_idx': parent_index,
'parent_nodes': removed,
'donor_idx': donor_index,
'donor_nodes': remains}
elif method < method_probs[1]:
# subtree_mutation
program, removed, _ = parent.subtree_mutation(random_state)
genome = {'method': 'Subtree Mutation',
'parent_idx': parent_index,
'parent_nodes': removed}
elif method < method_probs[2]:
# hoist_mutation
program, removed = parent.hoist_mutation(random_state)
genome = {'method': 'Hoist Mutation',
'parent_idx': parent_index,
'parent_nodes': removed}
elif method < method_probs[3]:
# point_mutation
program, mutated = parent.point_mutation(random_state)
genome = {'method': 'Point Mutation',
'parent_idx': parent_index,
'parent_nodes': mutated}
else:
# reproduction
program = parent.reproduce()
genome = {'method': 'Reproduction',
'parent_idx': parent_index,
'parent_nodes': []}
program = _Program(function_set=function_set,
arities=arities,
init_depth=init_depth,
init_method=init_method,
n_features=n_features,
metric=metric,
const_range=const_range,
p_point_replace=p_point_replace,
parsimony_coefficient=parsimony_coefficient,
feature_names=feature_names,
random_state=random_state,
program=program)
program.parents = genome
# Draw samples, using sample weights, and then fit
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,))
else:
curr_sample_weight = sample_weight.copy()
oob_sample_weight = curr_sample_weight.copy()
indices, not_indices = program.get_all_indices(n_samples,
max_samples,
random_state)
curr_sample_weight[not_indices] = 0
oob_sample_weight[indices] = 0
program.raw_fitness_ = program.raw_fitness(X, y, curr_sample_weight)
if max_samples < n_samples:
# Calculate OOB fitness
program.oob_fitness_ = program.raw_fitness(X, y, oob_sample_weight)
programs.append(program)
return programs
class BaseSymbolic(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for symbolic regression / classification estimators.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
population_size=1000,
hall_of_fame=None,
n_components=None,
generations=20,
tournament_size=20,
stopping_criteria=0.0,
const_range=(-1., 1.),
init_depth=(2, 6),
init_method='half and half',
function_set=('add', 'sub', 'mul', 'div'),
metric='mean absolute error',
parsimony_coefficient=0.001,
p_crossover=0.9,
p_subtree_mutation=0.01,
p_hoist_mutation=0.01,
p_point_mutation=0.01,
p_point_replace=0.05,
max_samples=1.0,
feature_names=None,
warm_start=False,
low_memory=False,
n_jobs=1,
verbose=0,
random_state=None):
self.population_size = population_size
self.hall_of_fame = hall_of_fame
self.n_components = n_components
self.generations = generations
self.tournament_size = tournament_size
self.stopping_criteria = stopping_criteria
self.const_range = const_range
self.init_depth = init_depth
self.init_method = init_method
self.function_set = function_set
self.metric = metric
self.parsimony_coefficient = parsimony_coefficient
self.p_crossover = p_crossover
self.p_subtree_mutation = p_subtree_mutation
self.p_hoist_mutation = p_hoist_mutation
self.p_point_mutation = p_point_mutation
self.p_point_replace = p_point_replace
self.max_samples = max_samples
self.feature_names = feature_names
self.warm_start = warm_start
self.low_memory = low_memory
self.n_jobs = n_jobs
self.verbose = verbose
self.random_state = random_state
def _verbose_reporter(self, run_details=None):
"""A report of the progress of the evolution process.
Parameters
----------
run_details : dict
Information about the evolution.
"""
if run_details is None:
print(' |{:^25}|{:^42}|'.format('Population Average',
'Best Individual'))
print('-' * 4 + ' ' + '-' * 25 + ' ' + '-' * 42 + ' ' + '-' * 10)
line_format = '{:>4} {:>8} {:>16} {:>8} {:>16} {:>16} {:>10}'
print(line_format.format('Gen', 'Length', 'Fitness', 'Length',
'Fitness', 'OOB Fitness', 'Time Left'))
else:
# Estimate remaining time for run
gen = run_details['generation'][-1]
generation_time = np.mean(run_details['generation_time'][-3:])
remaining_time = (self.generations - gen - 1) * generation_time
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
oob_fitness = 'N/A'
line_format = '{:4d} {:8.2f} {:16g} {:8d} {:16g} {:>16} {:>10}'
if self.max_samples < 1.0:
oob_fitness = run_details['best_oob_fitness'][-1]
line_format = '{:4d} {:8.2f} {:16.8f} {:8d} {:16.8f} {:16.8f} {:>10}'
# line_format = '{:4d} {:8.2f} {:16g} {:8d} {:16g} {:16g} {:>10}'
print(line_format.format(run_details['generation'][-1],
run_details['average_length'][-1],
run_details['average_fitness'][-1],
run_details['best_length'][-1],
run_details['best_fitness'][-1],
oob_fitness,
remaining_time))
def fit(self, X, y, sample_weight=None):
"""Fit the Genetic Program according to X, y.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
# Check arrays
X, y = check_X_y(X, y, y_numeric=True)
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
_, self.n_features_ = X.shape
hall_of_fame = self.hall_of_fame
if hall_of_fame is None:
hall_of_fame = self.population_size
if hall_of_fame > self.population_size or hall_of_fame < 1:
raise ValueError('hall_of_fame (%d) must be less than or equal to '
'population_size (%d).' % (self.hall_of_fame,
self.population_size))
n_components = self.n_components
if n_components is None:
n_components = hall_of_fame
if n_components > hall_of_fame or n_components < 1:
raise ValueError('n_components (%d) must be less than or equal to '
'hall_of_fame (%d).' % (self.n_components,
self.hall_of_fame))
self._function_set = []
for function in self.function_set:
if isinstance(function, six.string_types):
if function not in _function_map:
raise ValueError('invalid function name %s found in '
'`function_set`.' % function)
self._function_set.append(_function_map[function])
elif isinstance(function, _Function):
self._function_set.append(function)
else:
raise ValueError('invalid type %s found in `function_set`.'
% type(function))
if not self._function_set:
raise ValueError('No valid functions found in `function_set`.')
# For point-mutation to find a compatible replacement node
self._arities = {}
for function in self._function_set:
arity = function.arity
self._arities[arity] = self._arities.get(arity, [])
self._arities[arity].append(function)
if isinstance(self.metric, _Fitness):
self._metric = self.metric
elif isinstance(self, RegressorMixin):
if self.metric not in ('mean absolute error', 'mse', 'rmse',
'pearson', 'spearman'):
raise ValueError('Unsupported metric: %s' % self.metric)
else:
self._metric = _fitness_map[self.metric]
elif isinstance(self, TransformerMixin):
if self.metric not in ('pearson', 'spearman'):
raise ValueError('Unsupported metric: %s' % self.metric)
else:
self._metric = _fitness_map[self.metric]
self._method_probs = np.array([self.p_crossover,
self.p_subtree_mutation,
self.p_hoist_mutation,
self.p_point_mutation])
self._method_probs = np.cumsum(self._method_probs)
if self._method_probs[-1] > 1:
raise ValueError('The sum of p_crossover, p_subtree_mutation, '
'p_hoist_mutation and p_point_mutation should '
'total to 1.0 or less.')
if self.init_method not in ('half and half', 'grow', 'full'):
raise ValueError('Valid program initializations methods include '
'"grow", "full" and "half and half". Given %s.'
% self.init_method)
if not((isinstance(self.const_range, tuple) and
len(self.const_range) == 2) or self.const_range is None):
raise ValueError('const_range should be a tuple with length two, '
'or None.')
if (not isinstance(self.init_depth, tuple) or
len(self.init_depth) != 2):
raise ValueError('init_depth should be a tuple with length two.')
if self.init_depth[0] > self.init_depth[1]:
raise ValueError('init_depth should be in increasing numerical '
'order: (min_depth, max_depth).')
if self.feature_names is not None:
if self.n_features_ != len(self.feature_names):
raise ValueError('The supplied `feature_names` has different '
'length to n_features. Expected %d, got %d.'
% (self.n_features_, len(self.feature_names)))
for feature_name in self.feature_names:
if not isinstance(feature_name, six.string_types):
raise ValueError('invalid type %s found in '
'`feature_names`.' % type(feature_name))
params = self.get_params()
params['_metric'] = self._metric
params['function_set'] = self._function_set
params['arities'] = self._arities
params['method_probs'] = self._method_probs
if not self.warm_start or not hasattr(self, '_programs'):
# Free allocated memory, if any
self._programs = []
self.run_details_ = {'generation': [],
'average_length': [],
'average_fitness': [],
'best_length': [],
'best_fitness': [],
'best_program': [],
'best_oob_fitness': [],
'generation_time': []}
prior_generations = len(self._programs)
n_more_generations = self.generations - prior_generations
if n_more_generations < 0:
raise ValueError('generations=%d must be larger or equal to '
'len(_programs)=%d when warm_start==True'
% (self.generations, len(self._programs)))
elif n_more_generations == 0:
fitness = [program.raw_fitness_ for program in self._programs[-1]]
            warn('Warm-start fitting without increasing generations does not '
                 'fit new programs.')
if self.warm_start:
# Generate and discard seeds that would have been produced on the
# initial fit call.
for i in range(len(self._programs)):
_ = random_state.randint(MAX_INT, size=self.population_size)
if self.verbose:
# Print header fields
self._verbose_reporter()
for gen in range(prior_generations, self.generations):
start_time = time()
if gen == 0:
parents = None
else:
parents = self._programs[gen - 1]
# Parallel loop
n_jobs, n_programs, starts = _partition_estimators(
self.population_size, self.n_jobs)
seeds = random_state.randint(MAX_INT, size=self.population_size)
population = Parallel(n_jobs=n_jobs,
verbose=int(self.verbose > 1))(
delayed(_parallel_evolve)(n_programs[i],
parents,
X,
y,
sample_weight,
seeds[starts[i]:starts[i + 1]],
params)
for i in range(n_jobs))
# Reduce, maintaining order across different n_jobs
population = list(itertools.chain.from_iterable(population))
fitness = [program.raw_fitness_ for program in population]
length = [program.length_ for program in population]
parsimony_coefficient = None
if self.parsimony_coefficient == 'auto':
parsimony_coefficient = (np.cov(length, fitness)[1, 0] /
np.var(length))
# parsimony_coefficient = parsimony_coefficient * parsimony_coefficient
# print('parsimony_coefficient:', parsimony_coefficient)
for program in population:
program.fitness_ = program.fitness(parsimony_coefficient)
self._programs.append(population)
# Remove old programs that didn't make it into the new population.
if not self.low_memory:
for old_gen in np.arange(gen, 0, -1):
indices = []
for program in self._programs[old_gen]:
if program is not None:
for idx in program.parents:
if 'idx' in idx:
indices.append(program.parents[idx])
indices = set(indices)
for idx in range(self.population_size):
if idx not in indices:
self._programs[old_gen - 1][idx] = None
elif gen > 0:
# Remove old generations
self._programs[gen - 1] = None
# Record run details
if self._metric.greater_is_better:
best_program = population[np.argmax(fitness)]
else:
best_program = population[np.argmin(fitness)]
self.run_details_['generation'].append(gen)
self.run_details_['average_length'].append(np.mean(length))
self.run_details_['average_fitness'].append(np.mean(fitness))
self.run_details_['best_length'].append(best_program.length_)
self.run_details_['best_fitness'].append(best_program.raw_fitness_)
self.run_details_['best_program'].append(best_program)
oob_fitness = np.nan
if self.max_samples < 1.0:
oob_fitness = best_program.oob_fitness_
self.run_details_['best_oob_fitness'].append(oob_fitness)
generation_time = time() - start_time
self.run_details_['generation_time'].append(generation_time)
if self.verbose:
self._verbose_reporter(self.run_details_)
# Check for early stopping
if self._metric.greater_is_better:
best_fitness = fitness[np.argmax(fitness)]
if best_fitness >= self.stopping_criteria:
break
else:
best_fitness = fitness[np.argmin(fitness)]
if best_fitness <= self.stopping_criteria:
break
if isinstance(self, RegressorMixin):
# Find the best individual in the final generation
if self._metric.greater_is_better:
self._program = self._programs[-1][np.argmax(fitness)]
else:
self._program = self._programs[-1][np.argmin(fitness)]
if isinstance(self, TransformerMixin):
# Find the best individuals in the final generation
fitness = np.array(fitness)
if self._metric.greater_is_better:
hall_of_fame = fitness.argsort()[::-1][:self.hall_of_fame]
else:
hall_of_fame = fitness.argsort()[:self.hall_of_fame]
evaluation = np.array([gp.execute(X) for gp in
[self._programs[-1][i] for
i in hall_of_fame]])
if self.metric == 'spearman':
evaluation = np.apply_along_axis(rankdata, 1, evaluation)
with np.errstate(divide='ignore', invalid='ignore'):
correlations = np.abs(np.corrcoef(evaluation))
np.fill_diagonal(correlations, 0.)
components = list(range(self.hall_of_fame))
indices = list(range(self.hall_of_fame))
# Iteratively remove least fit individual of most correlated pair
while len(components) > self.n_components:
most_correlated = np.unravel_index(np.argmax(correlations),
correlations.shape)
# The correlation matrix is sorted by fitness, so identifying
# the least fit of the pair is simply getting the higher index
worst = max(most_correlated)
components.pop(worst)
indices.remove(worst)
correlations = correlations[:, indices][indices, :]
indices = list(range(len(components)))
self._best_programs = [self._programs[-1][i] for i in
hall_of_fame[components]]
return self
class SymbolicRegressor(BaseSymbolic, RegressorMixin):
"""A Genetic Programming symbolic regressor.
A symbolic regressor is an estimator that begins by building a population
of naive random formulas to represent a relationship. The formulas are
represented as tree-like structures with mathematical functions being
recursively applied to variables and constants. Each successive generation
of programs is then evolved from the one that came before it by selecting
the fittest individuals from the population to undergo genetic operations
such as crossover, mutation or reproduction.
Parameters
----------
    population_size : integer, optional (default=1000)
The number of programs in each generation.
    generations : integer, optional (default=20)
The number of generations to evolve.
tournament_size : integer, optional (default=20)
The number of programs that will compete to become part of the next
generation.
stopping_criteria : float, optional (default=0.0)
        The metric value required in order to stop evolution early.
const_range : tuple of two floats, or None, optional (default=(-1., 1.))
The range of constants to include in the formulas. If None then no
constants will be included in the candidate programs.
init_depth : tuple of two ints, optional (default=(2, 6))
The range of tree depths for the initial population of naive formulas.
Individual trees will randomly choose a maximum depth from this range.
When combined with `init_method='half and half'` this yields the well-
known 'ramped half and half' initialization method.
init_method : str, optional (default='half and half')
- 'grow' : Nodes are chosen at random from both functions and
terminals, allowing for smaller trees than `init_depth` allows. Tends
to grow asymmetrical trees.
- 'full' : Functions are chosen until the `init_depth` is reached, and
then terminals are selected. Tends to grow 'bushy' trees.
- 'half and half' : Trees are grown through a 50/50 mix of 'full' and
'grow', making for a mix of tree shapes in the initial population.
function_set : iterable, optional (default=('add', 'sub', 'mul', 'div'))
The functions to use when building and evolving programs. This iterable
can include strings to indicate either individual functions as outlined
below, or you can also include your own functions as built using the
``make_function`` factory from the ``functions`` module.
Available individual functions are:
- 'add' : addition, arity=2.
- 'sub' : subtraction, arity=2.
- 'mul' : multiplication, arity=2.
- 'div' : protected division where a denominator near-zero returns 1.,
arity=2.
- 'sqrt' : protected square root where the absolute value of the
argument is used, arity=1.
- 'log' : protected log where the absolute value of the argument is
used and a near-zero argument returns 0., arity=1.
- 'abs' : absolute value, arity=1.
- 'neg' : negative, arity=1.
- 'inv' : protected inverse where a near-zero argument returns 0.,
arity=1.
- 'max' : maximum, arity=2.
- 'min' : minimum, arity=2.
- 'sin' : sine (radians), arity=1.
- 'cos' : cosine (radians), arity=1.
- 'tan' : tangent (radians), arity=1.
metric : str, optional (default='mean absolute error')
The name of the raw fitness metric. Available options include:
- 'mean absolute error'.
- 'mse' for mean squared error.
- 'rmse' for root mean squared error.
- 'pearson', for Pearson's product-moment correlation coefficient.
- 'spearman' for Spearman's rank-order correlation coefficient.
Note that 'pearson' and 'spearman' will not directly predict the target
but could be useful as value-added features in a second-step estimator.
        This would allow the user to generate one engineered feature at a time;
        using the SymbolicTransformer would allow creation of multiple features
at once.
parsimony_coefficient : float or "auto", optional (default=0.001)
This constant penalizes large programs by adjusting their fitness to
be less favorable for selection. Larger values penalize the program
more which can control the phenomenon known as 'bloat'. Bloat is when
evolution is increasing the size of programs without a significant
increase in fitness, which is costly for computation time and makes for
a less understandable final result. This parameter may need to be tuned
over successive runs.
If "auto" the parsimony coefficient is recalculated for each generation
        using c = Cov(l,f)/Var(l), where Cov(l,f) is the covariance between
program size l and program fitness f in the population, and Var(l) is
the variance of program sizes.
p_crossover : float, optional (default=0.9)
The probability of performing crossover on a tournament winner.
Crossover takes the winner of a tournament and selects a random subtree
from it to be replaced. A second tournament is performed to find a
donor. The donor also has a subtree selected at random and this is
inserted into the original parent to form an offspring in the next
generation.
p_subtree_mutation : float, optional (default=0.01)
The probability of performing subtree mutation on a tournament winner.
Subtree mutation takes the winner of a tournament and selects a random
subtree from it to be replaced. A donor subtree is generated at random
and this is inserted into the original parent to form an offspring in
the next generation.
p_hoist_mutation : float, optional (default=0.01)
The probability of performing hoist mutation on a tournament winner.
Hoist mutation takes the winner of a tournament and selects a random
subtree from it. A random subtree of that subtree is then selected
        and this is 'hoisted' into the original subtree's location to form an
offspring in the next generation. This method helps to control bloat.
p_point_mutation : float, optional (default=0.01)
The probability of performing point mutation on a tournament winner.
Point mutation takes the winner of a tournament and selects random
nodes from it to be replaced. Terminals are replaced by other terminals
and functions are replaced by other functions that require the same
number of arguments as the original node. The resulting tree forms an
offspring in the next generation.
Note : The above genetic operation probabilities must sum to less than
one. The balance of probability is assigned to 'reproduction', where a
tournament winner is cloned and enters the next generation unmodified.
p_point_replace : float, optional (default=0.05)
For point mutation only, the probability that any given node will be
mutated.
max_samples : float, optional (default=1.0)
The fraction of samples to draw from X to evaluate each program on.
feature_names : list, optional (default=None)
Optional list of feature names, used purely for representations in
        the `print` operation or `export_graphviz`. If None, then X0, X1, etc.
will be used for representations.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more generations to the evolution, otherwise, just fit a new
evolution.
low_memory : bool, optional (default=False)
When set to ``True``, only the current generation is retained. Parent
information is discarded. For very large populations or runs with many
generations, this can result in substantial memory use reduction.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for `fit`. If -1, then the number
of jobs is set to the number of cores.
verbose : int, optional (default=0)
Controls the verbosity of the evolution building process.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
run_details_ : dict
Details of the evolution process. Includes the following elements:
- 'generation' : The generation index.
- 'average_length' : The average program length of the generation.
- 'average_fitness' : The average program fitness of the generation.
- 'best_length' : The length of the best program in the generation.
- 'best_fitness' : The fitness of the best program in the generation.
- 'best_oob_fitness' : The out of bag fitness of the best program in
the generation (requires `max_samples` < 1.0).
- 'generation_time' : The time it took for the generation to evolve.
See Also
--------
SymbolicTransformer
References
----------
.. [1] J. Koza, "Genetic Programming", 1992.
.. [2] R. Poli, et al. "A Field Guide to Genetic Programming", 2008.
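    Examples
    --------
    A minimal, illustrative sketch only: the data below is arbitrary and the
    parameter values are chosen purely for brevity; the call pattern simply
    follows the scikit-learn style ``fit``/``predict`` interface defined by
    this class.
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X = rng.uniform(-1., 1., (50, 2))
    >>> y = X[:, 0] ** 2 - X[:, 1] + 0.5
    >>> est = SymbolicRegressor(population_size=200, generations=5,
    ...                         random_state=0)
    >>> _ = est.fit(X, y)
    >>> y_pred = est.predict(X)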
"""
def __init__(self,
population_size=1000,
generations=20,
tournament_size=20,
stopping_criteria=0.0,
const_range=(-1., 1.),
init_depth=(2, 6),
init_method='half and half',
function_set=('add', 'sub', 'mul', 'div'),
metric='mean absolute error',
parsimony_coefficient=0.001,
p_crossover=0.9,
p_subtree_mutation=0.01,
p_hoist_mutation=0.01,
p_point_mutation=0.01,
p_point_replace=0.05,
max_samples=1.0,
feature_names=None,
warm_start=False,
low_memory=False,
n_jobs=1,
verbose=0,
random_state=None):
super(SymbolicRegressor, self).__init__(
population_size=population_size,
generations=generations,
tournament_size=tournament_size,
stopping_criteria=stopping_criteria,
const_range=const_range,
init_depth=init_depth,
init_method=init_method,
function_set=function_set,
metric=metric,
parsimony_coefficient=parsimony_coefficient,
p_crossover=p_crossover,
p_subtree_mutation=p_subtree_mutation,
p_hoist_mutation=p_hoist_mutation,
p_point_mutation=p_point_mutation,
p_point_replace=p_point_replace,
max_samples=max_samples,
feature_names=feature_names,
warm_start=warm_start,
low_memory=low_memory,
n_jobs=n_jobs,
verbose=verbose,
random_state=random_state)
def __str__(self):
"""Overloads `print` output of the object to resemble a LISP tree."""
if not hasattr(self, '_program'):
return self.__repr__()
return self._program.__str__()
def predict(self, X):
"""Perform regression on test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
y : array, shape = [n_samples]
Predicted values for X.
"""
if not hasattr(self, '_program'):
raise NotFittedError('SymbolicRegressor not fitted.')
X = check_array(X)
_, n_features = X.shape
if self.n_features_ != n_features:
raise ValueError('Number of features of the model must match the '
'input. Model n_features is %s and input '
'n_features is %s.'
% (self.n_features_, n_features))
y = self._program.execute(X)
return y
class SymbolicTransformer(BaseSymbolic, TransformerMixin):
"""A Genetic Programming symbolic transformer.
A symbolic transformer is a supervised transformer that begins by building
a population of naive random formulas to represent a relationship. The
formulas are represented as tree-like structures with mathematical
functions being recursively applied to variables and constants. Each
successive generation of programs is then evolved from the one that came
before it by selecting the fittest individuals from the population to
undergo genetic operations such as crossover, mutation or reproduction.
The final population is searched for the fittest individuals with the least
correlation to one another.
Parameters
----------
    population_size : integer, optional (default=1000)
The number of programs in each generation.
hall_of_fame : integer, or None, optional (default=100)
The number of fittest programs to compare from when finding the
least-correlated individuals for the n_components. If `None`, the
entire final generation will be used.
n_components : integer, or None, optional (default=10)
The number of best programs to return after searching the hall_of_fame
for the least-correlated individuals. If `None`, the entire
hall_of_fame will be used.
    generations : integer, optional (default=20)
The number of generations to evolve.
tournament_size : integer, optional (default=20)
The number of programs that will compete to become part of the next
generation.
stopping_criteria : float, optional (default=1.0)
        The metric value required in order to stop evolution early.
const_range : tuple of two floats, or None, optional (default=(-1., 1.))
The range of constants to include in the formulas. If None then no
constants will be included in the candidate programs.
init_depth : tuple of two ints, optional (default=(2, 6))
The range of tree depths for the initial population of naive formulas.
Individual trees will randomly choose a maximum depth from this range.
When combined with `init_method='half and half'` this yields the well-
known 'ramped half and half' initialization method.
init_method : str, optional (default='half and half')
- 'grow' : Nodes are chosen at random from both functions and
terminals, allowing for smaller trees than `init_depth` allows. Tends
to grow asymmetrical trees.
- 'full' : Functions are chosen until the `init_depth` is reached, and
then terminals are selected. Tends to grow 'bushy' trees.
- 'half and half' : Trees are grown through a 50/50 mix of 'full' and
'grow', making for a mix of tree shapes in the initial population.
function_set : iterable, optional (default=('add', 'sub', 'mul', 'div'))
The functions to use when building and evolving programs. This iterable
can include strings to indicate either individual functions as outlined
below, or you can also include your own functions as built using the
``make_function`` factory from the ``functions`` module.
Available individual functions are:
- 'add' : addition, arity=2.
- 'sub' : subtraction, arity=2.
- 'mul' : multiplication, arity=2.
- 'div' : protected division where a denominator near-zero returns 1.,
arity=2.
- 'sqrt' : protected square root where the absolute value of the
argument is used, arity=1.
- 'log' : protected log where the absolute value of the argument is
used and a near-zero argument returns 0., arity=1.
- 'abs' : absolute value, arity=1.
- 'neg' : negative, arity=1.
- 'inv' : protected inverse where a near-zero argument returns 0.,
arity=1.
- 'max' : maximum, arity=2.
- 'min' : minimum, arity=2.
- 'sin' : sine (radians), arity=1.
- 'cos' : cosine (radians), arity=1.
- 'tan' : tangent (radians), arity=1.
metric : str, optional (default='pearson')
The name of the raw fitness metric. Available options include:
- 'pearson', for Pearson's product-moment correlation coefficient.
- 'spearman' for Spearman's rank-order correlation coefficient.
parsimony_coefficient : float or "auto", optional (default=0.001)
This constant penalizes large programs by adjusting their fitness to
be less favorable for selection. Larger values penalize the program
more which can control the phenomenon known as 'bloat'. Bloat is when
evolution is increasing the size of programs without a significant
increase in fitness, which is costly for computation time and makes for
a less understandable final result. This parameter may need to be tuned
over successive runs.
If "auto" the parsimony coefficient is recalculated for each generation
        using c = Cov(l,f)/Var(l), where Cov(l,f) is the covariance between
program size l and program fitness f in the population, and Var(l) is
the variance of program sizes.
p_crossover : float, optional (default=0.9)
The probability of performing crossover on a tournament winner.
Crossover takes the winner of a tournament and selects a random subtree
from it to be replaced. A second tournament is performed to find a
donor. The donor also has a subtree selected at random and this is
inserted into the original parent to form an offspring in the next
generation.
p_subtree_mutation : float, optional (default=0.01)
The probability of performing subtree mutation on a tournament winner.
Subtree mutation takes the winner of a tournament and selects a random
subtree from it to be replaced. A donor subtree is generated at random
and this is inserted into the original parent to form an offspring in
the next generation.
p_hoist_mutation : float, optional (default=0.01)
The probability of performing hoist mutation on a tournament winner.
Hoist mutation takes the winner of a tournament and selects a random
subtree from it. A random subtree of that subtree is then selected
        and this is 'hoisted' into the original subtree's location to form an
offspring in the next generation. This method helps to control bloat.
p_point_mutation : float, optional (default=0.01)
The probability of performing point mutation on a tournament winner.
Point mutation takes the winner of a tournament and selects random
nodes from it to be replaced. Terminals are replaced by other terminals
and functions are replaced by other functions that require the same
number of arguments as the original node. The resulting tree forms an
offspring in the next generation.
Note : The above genetic operation probabilities must sum to less than
one. The balance of probability is assigned to 'reproduction', where a
tournament winner is cloned and enters the next generation unmodified.
p_point_replace : float, optional (default=0.05)
For point mutation only, the probability that any given node will be
mutated.
max_samples : float, optional (default=1.0)
The fraction of samples to draw from X to evaluate each program on.
feature_names : list, optional (default=None)
Optional list of feature names, used purely for representations in
        the `print` operation or `export_graphviz`. If None, then X0, X1, etc.
will be used for representations.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more generations to the evolution, otherwise, just fit a new
evolution.
low_memory : bool, optional (default=False)
When set to ``True``, only the current generation is retained. Parent
information is discarded. For very large populations or runs with many
generations, this can result in substantial memory use reduction.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for `fit`. If -1, then the number
of jobs is set to the number of cores.
verbose : int, optional (default=0)
Controls the verbosity of the evolution building process.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
run_details_ : dict
Details of the evolution process. Includes the following elements:
- 'generation' : The generation index.
- 'average_length' : The average program length of the generation.
- 'average_fitness' : The average program fitness of the generation.
- 'best_length' : The length of the best program in the generation.
- 'best_fitness' : The fitness of the best program in the generation.
- 'best_oob_fitness' : The out of bag fitness of the best program in
the generation (requires `max_samples` < 1.0).
- 'generation_time' : The time it took for the generation to evolve.
See Also
--------
SymbolicRegressor
References
----------
.. [1] J. Koza, "Genetic Programming", 1992.
.. [2] R. Poli, et al. "A Field Guide to Genetic Programming", 2008.
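    Examples
    --------
    A minimal, illustrative sketch only: the data below is arbitrary and the
    parameter values are chosen purely for brevity; the call pattern simply
    follows the ``fit``/``transform`` interface defined by this class.
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X = rng.uniform(-1., 1., (50, 3))
    >>> y = X[:, 0] - 2. * X[:, 1]
    >>> trans = SymbolicTransformer(population_size=200, generations=5,
    ...                             hall_of_fame=25, n_components=5,
    ...                             random_state=0)
    >>> _ = trans.fit(X, y)
    >>> X_new = trans.transform(X)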
"""
def __init__(self,
population_size=1000,
hall_of_fame=100,
n_components=10,
generations=20,
tournament_size=20,
stopping_criteria=1.0,
const_range=(-1., 1.),
init_depth=(2, 6),
init_method='half and half',
function_set=('add', 'sub', 'mul', 'div'),
metric='pearson',
parsimony_coefficient=0.001,
p_crossover=0.9,
p_subtree_mutation=0.01,
p_hoist_mutation=0.01,
p_point_mutation=0.01,
p_point_replace=0.05,
max_samples=1.0,
feature_names=None,
warm_start=False,
low_memory=False,
n_jobs=1,
verbose=0,
random_state=None):
super(SymbolicTransformer, self).__init__(
population_size=population_size,
hall_of_fame=hall_of_fame,
n_components=n_components,
generations=generations,
tournament_size=tournament_size,
stopping_criteria=stopping_criteria,
const_range=const_range,
init_depth=init_depth,
init_method=init_method,
function_set=function_set,
metric=metric,
parsimony_coefficient=parsimony_coefficient,
p_crossover=p_crossover,
p_subtree_mutation=p_subtree_mutation,
p_hoist_mutation=p_hoist_mutation,
p_point_mutation=p_point_mutation,
p_point_replace=p_point_replace,
max_samples=max_samples,
feature_names=feature_names,
warm_start=warm_start,
low_memory=low_memory,
n_jobs=n_jobs,
verbose=verbose,
random_state=random_state)
def __len__(self):
"""Overloads `len` output to be the number of fitted components."""
if not hasattr(self, '_best_programs'):
return 0
return self.n_components
def __getitem__(self, item):
"""Return the ith item of the fitted components."""
if item >= len(self):
raise IndexError
return self._best_programs[item]
def __str__(self):
"""Overloads `print` output of the object to resemble LISP trees."""
if not hasattr(self, '_best_programs'):
return self.__repr__()
output = str([gp.__str__() for gp in self])
return output.replace("',", ",\n").replace("'", "")
def transform(self, X):
"""Transform X according to the fitted transformer.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape = [n_samples, n_components]
Transformed array.
"""
if not hasattr(self, '_best_programs'):
raise NotFittedError('SymbolicTransformer not fitted.')
X = check_array(X)
_, n_features = X.shape
if self.n_features_ != n_features:
raise ValueError('Number of features of the model must match the '
'input. Model n_features is %s and input '
'n_features is %s.'
% (self.n_features_, n_features))
X_new = np.array([gp.execute(X) for gp in self._best_programs]).T
return X_new
def fit_transform(self, X, y, sample_weight=None):
"""Fit to data, then transform it.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples.
Returns
-------
X_new : array-like, shape = [n_samples, n_components]
Transformed array.
"""
return self.fit(X, y, sample_weight).transform(X)
| 43.629243
| 87
| 0.601456
|
6f23cf58cc6c297356a20b4a8e6a1b00fa2e9db3
| 4,575
|
py
|
Python
|
micropython-maixpy-0_6_2-66/stubs/fpioa_manager.py
|
mongonta0716/stub_for_maixpy
|
f8f29454668919873f9a0f14bb5a9b01ab103bc8
|
[
"MIT"
] | 1
|
2021-08-22T09:10:43.000Z
|
2021-08-22T09:10:43.000Z
|
micropython-maixpy-0_6_2-66/stubs/fpioa_manager.py
|
mongonta0716/stub_for_maixpy
|
f8f29454668919873f9a0f14bb5a9b01ab103bc8
|
[
"MIT"
] | null | null | null |
micropython-maixpy-0_6_2-66/stubs/fpioa_manager.py
|
mongonta0716/stub_for_maixpy
|
f8f29454668919873f9a0f14bb5a9b01ab103bc8
|
[
"MIT"
] | null | null | null |
"""
Module: 'fpioa_manager' on micropython-maixpy-0.6.2-66
"""
# MCU: {'ver': '0.6.2-66', 'build': '66', 'sysname': 'MaixPy', 'platform': 'MaixPy', 'version': '0.6.2', 'release': '0.6.2', 'port': 'MaixPy', 'family': 'micropython', 'name': 'micropython', 'machine': 'Sipeed_M1 with kendryte-k210', 'nodename': 'MaixPy'}
# Stubber: 1.3.9
class FPIOA:
''
CLK_I2C1 = 23
CLK_I2C2 = 203
CLK_SPI1 = 22
CLK_SPI2 = 202
CMOS_D0 = 138
CMOS_D1 = 139
CMOS_D2 = 140
CMOS_D3 = 141
CMOS_D4 = 142
CMOS_D5 = 143
CMOS_D6 = 144
CMOS_D7 = 145
CMOS_HREF = 136
CMOS_PCLK = 137
CMOS_PWDN = 134
CMOS_RST = 133
CMOS_VSYNC = 135
CMOS_XCLK = 132
GPIO0 = 56
GPIO1 = 57
GPIO2 = 58
GPIO3 = 59
GPIO4 = 60
GPIO5 = 61
GPIO6 = 62
GPIO7 = 63
GPIOHS0 = 24
GPIOHS1 = 25
GPIOHS10 = 34
GPIOHS11 = 35
GPIOHS12 = 36
GPIOHS13 = 37
GPIOHS14 = 38
GPIOHS15 = 39
GPIOHS16 = 40
GPIOHS17 = 41
GPIOHS18 = 42
GPIOHS19 = 43
GPIOHS2 = 26
GPIOHS20 = 44
GPIOHS21 = 45
GPIOHS22 = 46
GPIOHS23 = 47
GPIOHS24 = 48
GPIOHS25 = 49
GPIOHS26 = 50
GPIOHS27 = 51
GPIOHS28 = 52
GPIOHS29 = 53
GPIOHS3 = 27
GPIOHS30 = 54
GPIOHS31 = 55
GPIOHS4 = 28
GPIOHS5 = 29
GPIOHS6 = 30
GPIOHS7 = 31
GPIOHS8 = 32
GPIOHS9 = 33
I2C0_SCLK = 126
I2C0_SDA = 127
I2C1_SCLK = 128
I2C1_SDA = 129
I2C2_SCLK = 130
I2C2_SDA = 131
I2S0_IN_D0 = 90
I2S0_IN_D1 = 91
I2S0_IN_D2 = 92
I2S0_IN_D3 = 93
I2S0_MCLK = 87
I2S0_OUT_D0 = 94
I2S0_OUT_D1 = 95
I2S0_OUT_D2 = 96
I2S0_OUT_D3 = 97
I2S0_SCLK = 88
I2S0_WS = 89
I2S1_IN_D0 = 101
I2S1_IN_D1 = 102
I2S1_IN_D2 = 103
I2S1_IN_D3 = 104
I2S1_MCLK = 98
I2S1_OUT_D0 = 105
I2S1_OUT_D1 = 106
I2S1_OUT_D2 = 107
I2S1_OUT_D3 = 108
I2S1_SCLK = 99
I2S1_WS = 100
I2S2_IN_D0 = 112
I2S2_IN_D1 = 113
I2S2_IN_D2 = 114
I2S2_IN_D3 = 115
I2S2_MCLK = 109
I2S2_OUT_D0 = 116
I2S2_OUT_D1 = 117
I2S2_OUT_D2 = 118
I2S2_OUT_D3 = 119
I2S2_SCLK = 110
I2S2_WS = 111
JTAG_TCLK = 0
JTAG_TDI = 1
JTAG_TDO = 3
JTAG_TMS = 2
RESV0 = 120
RESV1 = 121
RESV2 = 122
RESV3 = 123
RESV4 = 124
RESV5 = 125
RESV6 = 20
RESV7 = 21
SCCB_SCLK = 146
SCCB_SDA = 147
SPI0_ARB = 16
SPI0_D0 = 4
SPI0_D1 = 5
SPI0_D2 = 6
SPI0_D3 = 7
SPI0_D4 = 8
SPI0_D5 = 9
SPI0_D6 = 10
SPI0_D7 = 11
SPI0_SCLK = 17
SPI0_SS0 = 12
SPI0_SS1 = 13
SPI0_SS2 = 14
SPI0_SS3 = 15
SPI1_ARB = 82
SPI1_D0 = 70
SPI1_D1 = 71
SPI1_D2 = 72
SPI1_D3 = 73
SPI1_D4 = 74
SPI1_D5 = 75
SPI1_D6 = 76
SPI1_D7 = 77
SPI1_SCLK = 83
SPI1_SS0 = 78
SPI1_SS1 = 79
SPI1_SS2 = 80
SPI1_SS3 = 81
SPI_SLAVE_D0 = 84
SPI_SLAVE_SCLK = 86
SPI_SLAVE_SS = 85
TIMER0_TOGGLE1 = 190
TIMER0_TOGGLE2 = 191
TIMER0_TOGGLE3 = 192
TIMER0_TOGGLE4 = 193
TIMER1_TOGGLE1 = 194
TIMER1_TOGGLE2 = 195
TIMER1_TOGGLE3 = 196
TIMER1_TOGGLE4 = 197
TIMER2_TOGGLE1 = 198
TIMER2_TOGGLE2 = 199
TIMER2_TOGGLE3 = 200
TIMER2_TOGGLE4 = 201
UART1_BAUD = 158
UART1_CTS = 148
UART1_DCD = 150
UART1_DE = 160
UART1_DSR = 149
UART1_DTR = 153
UART1_OUT1 = 156
UART1_OUT2 = 155
UART1_RE = 159
UART1_RI = 151
UART1_RS485_EN = 161
UART1_RTS = 154
UART1_RX = 64
UART1_SIR_IN = 152
UART1_SIR_OUT = 157
UART1_TX = 65
UART2_BAUD = 172
UART2_CTS = 162
UART2_DCD = 164
UART2_DE = 174
UART2_DSR = 163
UART2_DTR = 167
UART2_OUT1 = 170
UART2_OUT2 = 169
UART2_RE = 173
UART2_RI = 165
UART2_RS485_EN = 175
UART2_RTS = 168
UART2_RX = 66
UART2_SIR_IN = 166
UART2_SIR_OUT = 171
UART2_TX = 67
UART3_BAUD = 186
UART3_CTS = 176
UART3_DCD = 178
UART3_DE = 188
UART3_DSR = 177
UART3_DTR = 181
UART3_OUT1 = 184
UART3_OUT2 = 183
UART3_RE = 187
UART3_RI = 179
UART3_RS485_EN = 189
UART3_RTS = 182
UART3_RX = 68
UART3_SIR_IN = 180
UART3_SIR_OUT = 185
UART3_TX = 69
UARTHS_RX = 18
UARTHS_TX = 19
def get_Pin_num():
pass
def help():
pass
def set_function():
pass
class fm:
''
fpioa = None
get_gpio_used = None
get_pin_by_function = None
help = None
register = None
def str_function():
pass
unregister = None
| 19.551282
| 255
| 0.582951
|
2bc2bfcf615486f068ed5f13a90cea210ddff6de
| 1,589
|
py
|
Python
|
src/wai/bynning/binners/_MinSizeBinner.py
|
waikato-datamining/bynning
|
01b7368d4dc1094651d7cbe067576dfb3756a1d3
|
[
"MIT"
] | null | null | null |
src/wai/bynning/binners/_MinSizeBinner.py
|
waikato-datamining/bynning
|
01b7368d4dc1094651d7cbe067576dfb3756a1d3
|
[
"MIT"
] | null | null | null |
src/wai/bynning/binners/_MinSizeBinner.py
|
waikato-datamining/bynning
|
01b7368d4dc1094651d7cbe067576dfb3756a1d3
|
[
"MIT"
] | null | null | null |
from typing import List
from .._Binnable import Binnable
from ._TwoPassBinner import TwoPassBinner
class MinSizeBinner(TwoPassBinner[int, int]):
"""
Binner which bins items by their size, placing items in
indexed bins until they exceed a certain minimum total
size.
"""
def __init__(self, min_size: int):
# Minimum size must be positive
if min_size < 1:
raise ValueError(f"Min size of bins must be positive, got {min_size}")
self.min_size: int = min_size
self._bin_index: int = 0
self._remaining_size: int = 0
self._current_size: int = 0
def _configure(self, items: List[Binnable[int]]):
# Calculate the total size of all items
self._remaining_size = sum(Binnable.map_bin_keys(items))
# Check there is enough size available
if self._remaining_size < self.min_size:
raise ValueError(f"Not enough total size in given items ({self._remaining_size}) "
f"to meet minimum size requirement of {self.min_size}")
def _reset(self):
self._bin_index = 0
self._current_size = 0
def _bin(self, key: int) -> int:
# Move to the next bin if the current bin is full and
# we can guarantee to fill another bin
if self._current_size >= self.min_size and self._remaining_size >= self.min_size:
self._bin_index += 1
self._current_size = 0
# Update the sizes
self._current_size += key
self._remaining_size -= key
return self._bin_index
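# Illustrative sketch only, not part of the bynning API: a standalone function
# reproducing the bin-assignment rule of MinSizeBinner._bin above on a plain
# list of integer sizes. With min_size=5, sizes [3, 3, 3, 3] map to bins
# [0, 0, 1, 1]: the first bin closes once it holds at least 5 and at least 5
# more remains to guarantee the next bin can also be filled.
def _min_size_assignment_sketch(sizes: List[int], min_size: int) -> List[int]:
    remaining = sum(sizes)
    bin_index = 0
    current = 0
    assignment = []
    for size in sizes:
        # Advance to a new bin only when the current bin has met the minimum
        # and enough size remains to fill another bin to the minimum
        if current >= min_size and remaining >= min_size:
            bin_index += 1
            current = 0
        current += size
        remaining -= size
        assignment.append(bin_index)
    return assignment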
| 32.428571
| 94
| 0.638137
|
3db62ca594a470366b81fcae9762bd06b120655e
| 683
|
py
|
Python
|
tests/runners/lib/env.py
|
CyberFlameGO/tilck
|
4c32541874102e524374ab79d46b68af9d759390
|
[
"BSD-2-Clause"
] | 1,059
|
2018-07-30T14:48:42.000Z
|
2022-03-30T19:54:49.000Z
|
tests/runners/lib/env.py
|
CyberFlameGO/tilck
|
4c32541874102e524374ab79d46b68af9d759390
|
[
"BSD-2-Clause"
] | 15
|
2019-06-17T13:58:08.000Z
|
2021-10-16T18:19:25.000Z
|
tests/runners/lib/env.py
|
CyberFlameGO/tilck
|
4c32541874102e524374ab79d46b68af9d759390
|
[
"BSD-2-Clause"
] | 47
|
2020-03-09T16:54:07.000Z
|
2022-03-12T08:53:56.000Z
|
# SPDX-License-Identifier: BSD-2-Clause
import os
import sys
from .lang_aux import Const, ReloadAsConstModule
def env_bool(x):
return Const(os.environ.get(x, '0') == '1')
def env_int(x, val):
return Const(int(os.environ.get(x, str(val))))
VM_MEMORY_SIZE_IN_MB = env_int('TILCK_VM_MEM', 128)
GEN_TEST_DATA = env_bool('GEN_TEST_DATA')
IN_TRAVIS = env_bool('TRAVIS')
IN_CIRCLECI = env_bool('CIRCLECI')
IN_AZURE = env_bool('AZURE_HTTP_USER_AGENT')
CI = env_bool('CI')
DUMP_COV = env_bool('DUMP_COV')
REPORT_COV = env_bool('REPORT_COV')
VERBOSE = env_bool('VERBOSE')
IN_ANY_CI = Const(IN_TRAVIS.val or IN_CIRCLECI.val or IN_AZURE.val or CI.val)
ReloadAsConstModule(__name__)
| 25.296296
| 77
| 0.751098
|
a50d92ea7ad239e15d642bffd39cff71238d0877
| 3,042
|
py
|
Python
|
main.py
|
guntata/-2-
|
87814dbddc4e95b5413b09ceec6527c896e3eb66
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
guntata/-2-
|
87814dbddc4e95b5413b09ceec6527c896e3eb66
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
guntata/-2-
|
87814dbddc4e95b5413b09ceec6527c896e3eb66
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import matplotlib; matplotlib.use('Agg')
import os
import os.path as osp
import argparse
from train import train
from test import test
from test_beam import test_beam
parser = argparse.ArgumentParser(description='PyTorch Convolutional Image Captioning Model')
parser.add_argument('model_dir', help='output directory to save models & results')
parser.add_argument('-g', '--gpu', type=int, default=1,\
help='gpu device id')
parser.add_argument('--coco_root', type=str, default= './data/coco/',\
help='directory containing coco dataset train2014, val2014, & annotations')
parser.add_argument('-t', '--is_train', type=int, default=1,\
help='use 1 to train model')
parser.add_argument('-e', '--epochs', type=int, default=30,\
help='number of training epochs')
parser.add_argument('-b', '--batchsize', type=int, default=20,\
help='number of images per training batch')
parser.add_argument('-c', '--ncap_per_img', type=int, default=5,\
help='ground-truth captions per image in training batch')
parser.add_argument('-n', '--num_layers', type=int, default=3,\
help='depth of convcap network')
parser.add_argument('-m', '--nthreads', type=int, default=4,\
help='pytorch data loader threads')
parser.add_argument('-ft', '--finetune_after', type=int, default=8,\
help='epochs after which vgg16 is fine-tuned')
parser.add_argument('-lr', '--learning_rate', type=float, default=5e-5,\
help='learning rate for convcap')
parser.add_argument('-st', '--lr_step_size', type=int, default=15,\
help='epochs to decay learning rate after')
parser.add_argument('-sc', '--score_select', type=str, default='CIDEr',\
help='metric to pick best model')
parser.add_argument('--beam_size', type=int, default=1, \
help='beam size to use for test')
parser.add_argument('--attention', dest='attention', action='store_true', \
help='Use this for convcap with attention (by default set)')
parser.add_argument('--no-attention', dest='attention', action='store_false', \
help='Use this for convcap without attention')
parser.set_defaults(attention=True)
args = parser.parse_args()
def main():
"""Train model and run inference on coco test set to output metrics"""
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
if(args.is_train == 1):
train(args)
bestmodelfn = osp.join(args.model_dir, 'bestmodel.pth')
if(osp.exists(bestmodelfn)):
if(args.beam_size == 1):
scores = test(args, 'test', modelfn=bestmodelfn)
else:
scores = test_beam(args, 'test', modelfn=bestmodelfn)
print('TEST set scores')
    for k, v in scores[0].items():
print('%s: %f' % (k, v))
else:
raise Exception('No checkpoint found %s' % bestmodelfn)
if __name__ == '__main__':
main()
| 33.8
| 95
| 0.642669
|
19802b903dba28a18c9f316c2f7adb7edac2c1db
| 12,091
|
py
|
Python
|
galloper/api/sequrity_report.py
|
borysvorona/galloper
|
09d5e78f0e17c8f309666db7bcf3f7bf6a766ffa
|
[
"Apache-2.0"
] | 1
|
2020-03-11T13:36:16.000Z
|
2020-03-11T13:36:16.000Z
|
galloper/api/sequrity_report.py
|
borysvorona/galloper
|
09d5e78f0e17c8f309666db7bcf3f7bf6a766ffa
|
[
"Apache-2.0"
] | null | null | null |
galloper/api/sequrity_report.py
|
borysvorona/galloper
|
09d5e78f0e17c8f309666db7bcf3f7bf6a766ffa
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 getcarrier.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
from datetime import datetime
from flask import request
from flask_restful import Resource
from sqlalchemy import or_, and_
from galloper.database.models.project import Project
from galloper.database.models.security_details import SecurityDetails
from galloper.database.models.security_reports import SecurityReport
from galloper.database.models.security_results import SecurityResults
from galloper.utils.api_utils import build_req_parser
class SecurityReportAPI(Resource):
get_rules = (
dict(name="offset", type=int, default=0, location="args"),
dict(name="limit", type=int, default=0, location="args"),
dict(name="search", type=str, default="", location="args"),
dict(name="sort", type=str, default="", location="args"),
dict(name="order", type=str, default="", location="args"),
)
delete_rules = (
dict(name="id[]", type=int, action="append", location="args"),
)
post_rules = (
dict(name="project_name", type=str, location="json"),
dict(name="app_name", type=str, location="json"),
dict(name="scan_time", type=float, location="json"),
dict(name="dast_target", type=str, location="json"),
dict(name="sast_code", type=str, location="json"),
dict(name="scan_type", type=str, location="json"),
dict(name="findings", type=int, location="json"),
dict(name="false_positives", type=int, location="json"),
dict(name="excluded", type=int, location="json"),
dict(name="info_findings", type=int, location="json"),
dict(name="environment", type=str, location="json")
)
def __init__(self):
self.__init_req_parsers()
def __init_req_parsers(self):
self._parser_get = build_req_parser(rules=self.get_rules)
self._parser_post = build_req_parser(rules=self.post_rules)
self._parser_delete = build_req_parser(rules=self.delete_rules)
def get(self, project_id):
reports = []
args = self._parser_get.parse_args(strict=False)
search_ = args.get("search")
limit_ = args.get("limit")
offset_ = args.get("offset")
if args.get("sort"):
sort_rule = getattr(getattr(SecurityResults, args["sort"]), args["order"])()
else:
sort_rule = SecurityResults.id.desc()
if not args.get("search") and not args.get("sort"):
total = SecurityResults.query.filter_by(project_id=project_id).order_by(sort_rule).count()
res = SecurityResults.query.filter_by(project_id=project_id).\
order_by(sort_rule).limit(limit_).offset(offset_).all()
else:
            filter_ = and_(SecurityResults.project_id == project_id,
or_(SecurityResults.project_name.like(f"%{search_}%"),
SecurityResults.app_name.like(f"%{search_}%"),
SecurityResults.scan_type.like(f"%{search_}%"),
SecurityResults.environment.like(f"%{search_}%")))
res = SecurityResults.query.filter(filter_).order_by(sort_rule).limit(limit_).offset(offset_).all()
total = SecurityResults.query.filter(filter_).order_by(sort_rule).count()
for each in res:
each_json = each.to_json()
each_json["scan_time"] = each_json["scan_time"].replace("T", " ").split(".")[0]
each_json["scan_duration"] = float(each_json["scan_duration"])
reports.append(each_json)
return {"total": total, "rows": reports}
def delete(self, project_id: int):
args = self._parser_delete.parse_args(strict=False)
project = Project.query.get_or_404(project_id)
for each in SecurityReport.query.filter(
and_(SecurityReport.project_id == project.id, SecurityReport.report_id.in_(args["id[]"]))
).order_by(SecurityReport.id.asc()).all():
each.delete()
for each in SecurityResults.query.filter(
SecurityResults.id.in_(args["id[]"])
).order_by(SecurityResults.id.asc()).all():
each.delete()
return {"message": "deleted"}
def post(self, project_id: int):
args = self._parser_post.parse_args(strict=False)
project = Project.query.get_or_404(project_id)
# TODO DAST scans limit check
report = SecurityResults(scan_time=datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"),
project_id=project.id,
scan_duration=args["scan_time"],
project_name=args["project_name"],
app_name=args["app_name"],
dast_target=args["dast_target"],
sast_code=args["sast_code"],
scan_type=args["scan_type"],
findings=args["findings"],
false_positives=args["false_positives"],
excluded=args["excluded"],
info_findings=args["info_findings"],
environment=args["environment"])
report.insert()
return {"id": report.id}
class FindingsAPI(Resource):
get_rules = (
dict(name="id", type=int, location="args"),
dict(name="type", type=str, location="args")
)
put_rules = (
dict(name="id", type=int, location="json"),
dict(name="action", type=str, location="json"),
dict(name="issue_id", type=int, location="json")
)
def __init__(self):
self.__init_req_parsers()
def __init_req_parsers(self):
self._parser_get = build_req_parser(rules=self.get_rules)
self._parser_put = build_req_parser(rules=self.put_rules)
def get(self, project_id: int):
args = self._parser_get.parse_args(strict=False)
if args["type"] == "false_positives":
filt = and_(SecurityReport.project_id == project_id,
SecurityReport.report_id == args["id"],
SecurityReport.false_positive == 1)
elif args["type"] == "findings":
filt = and_(SecurityReport.project_id == project_id,
SecurityReport.report_id == args["id"],
SecurityReport.info_finding == 0,
SecurityReport.false_positive == 0,
SecurityReport.excluded_finding == 0)
elif args["type"] == "info_findings":
filt = and_(SecurityReport.project_id == project_id,
SecurityReport.report_id == args["id"],
SecurityReport.info_finding == 1)
elif args["type"] == "excluded_finding":
filt = and_(SecurityReport.project_id == project_id,
SecurityReport.report_id == args["id"],
SecurityReport.excluded_finding == 1)
else:
filt = and_(SecurityReport.project_id == project_id,
SecurityReport.report_id == args["id"])
issues = SecurityReport.query.filter(filt).all()
results = []
for issue in issues:
_res = issue.to_json()
_res["details"] = SecurityDetails.query.filter_by(id=_res["details"]).first().details
results.append(_res)
return results
def post(self, project_id: int):
finding_db = None
for finding in request.json:
md5 = hashlib.md5(finding["details"].encode("utf-8")).hexdigest()
hash_id = SecurityDetails.query.filter(
and_(SecurityDetails.project_id == project_id, SecurityDetails.detail_hash == md5)
).first()
if not hash_id:
hash_id = SecurityDetails(detail_hash=md5, project_id=project_id, details=finding["details"])
hash_id.insert()
# Verify issue is false_positive or ignored
finding["details"] = hash_id.id
finding['project_id'] = project_id
entrypoints = ""
if finding.get("endpoints"):
for each in finding.get("endpoints"):
if isinstance(each, list):
entrypoints += "<br />".join(each)
else:
entrypoints += f"<br />{each}"
finding["endpoints"] = entrypoints
if not (finding["false_positive"] == 1 or finding["excluded_finding"] == 1):
                # TODO: add validation that finding is a part of project, application, etc.
issues = SecurityReport.query.filter(
and_(SecurityReport.issue_hash == finding["issue_hash"],
or_(SecurityReport.false_positive == 1,
SecurityReport.excluded_finding == 1)
)).all()
false_positive = sum(issue.false_positive for issue in issues)
excluded_finding = sum(issue.excluded_finding for issue in issues)
finding["false_positive"] = 1 if false_positive > 0 else 0
finding["excluded_finding"] = 1 if excluded_finding > 0 else 0
finding_db = SecurityReport(**finding)
finding_db.add()
if finding_db:
finding_db.commit()
def put(self, project_id: int):
args = self._parser_put.parse_args(strict=False)
issue_hash = SecurityReport.query.filter(and_(SecurityReport.project_id == project_id,
SecurityReport.id == args["issue_id"])
).first().issue_hash
if args["action"] in ("false_positive", "excluded_finding"):
upd = {args["action"]: 1}
else:
upd = {"false_positive": 0, "info_finding": 0}
        # TODO: add validation that finding is a part of project, application, etc.
SecurityReport.query.filter(and_(
SecurityReport.project_id == project_id,
SecurityReport.issue_hash == issue_hash)
).update(upd)
SecurityReport.commit()
return {"message": "accepted"}
class FindingsAnalysisAPI(Resource):
get_rules = (
dict(name="project_name", type=str, location="args"),
dict(name="app_name", type=str, location="args"),
dict(name="scan_type", type=str, location="args"),
dict(name="type", type=str, default="false-positive", location="args")
)
def __init__(self):
self.__init_req_parsers()
def __init_req_parsers(self):
self._parser_get = build_req_parser(rules=self.get_rules)
def get(self, project_id: int):
args = self._parser_get.parse_args(strict=False)
projects_filter = and_(SecurityResults.project_id == project_id,
SecurityResults.project_name == args["project_name"],
SecurityResults.app_name == args["app_name"],
SecurityResults.scan_type == args["scan_type"])
ids = SecurityResults.query.filter(projects_filter).all()
ids = [each.id for each in ids]
hashs = SecurityReport.query.filter(
and_(SecurityReport.false_positive == 1, SecurityReport.report_id.in_(ids))
).with_entities(SecurityReport.issue_hash).distinct()
return [_.issue_hash for _ in hashs]
| 47.415686
| 111
| 0.592259
|
1673dff8123bbcb4f2d081f9e0d321fb5e76890b
| 1,887
|
py
|
Python
|
src/maintenance/setup.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 1
|
2022-02-18T00:16:47.000Z
|
2022-02-18T00:16:47.000Z
|
src/maintenance/setup.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 9
|
2022-03-25T19:35:49.000Z
|
2022-03-31T06:09:47.000Z
|
src/maintenance/setup.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 1
|
2022-03-10T22:13:02.000Z
|
2022-03-10T22:13:02.000Z
|
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from codecs import open
from setuptools import setup, find_packages
# HISTORY.rst entry.
VERSION = '1.3.0'
try:
from azext_maintenance.manual.version import VERSION
except ImportError:
pass
# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'License :: OSI Approved :: MIT License',
]
DEPENDENCIES = []
try:
from azext_maintenance.manual.dependency import DEPENDENCIES
except ImportError:
pass
with open('README.md', 'r', encoding='utf-8') as f:
README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
HISTORY = f.read()
setup(
name='maintenance',
version=VERSION,
description='Microsoft Azure Command-Line Tools MaintenanceManagementClient Extension',
author='Microsoft Corporation',
author_email='azpycli@microsoft.com',
url='https://github.com/Azure/azure-cli-extensions/tree/main/src/maintenance',
long_description=README + '\n\n' + HISTORY,
license='MIT',
classifiers=CLASSIFIERS,
packages=find_packages(),
install_requires=DEPENDENCIES,
package_data={'azext_maintenance': ['azext_metadata.json']},
)
| 31.983051
| 94
| 0.6354
|
88ba44a50899b0dd7d69b492ce481c7870b373f0
| 56,972
|
py
|
Python
|
clifter_slam/structures/pointclouds.py
|
slowy07/clifter_slam
|
4b4fc2dde07bb4d66084c09f53b88a87c8cbf319
|
[
"MIT"
] | 12
|
2021-09-05T10:56:42.000Z
|
2021-11-21T07:38:17.000Z
|
clifter_slam/structures/pointclouds.py
|
slowy07/clifter_slam
|
4b4fc2dde07bb4d66084c09f53b88a87c8cbf319
|
[
"MIT"
] | null | null | null |
clifter_slam/structures/pointclouds.py
|
slowy07/clifter_slam
|
4b4fc2dde07bb4d66084c09f53b88a87c8cbf319
|
[
"MIT"
] | 2
|
2021-09-05T10:56:46.000Z
|
2021-10-23T00:46:43.000Z
|
from typing import List, Optional, Union
import open3d as o3d
import plotly.graph_objects as go
import torch
from ..geometry import projutils
from . import structutils
__all__ = ["Pointclouds"]
class Pointclouds(object):
r"""Batch of pointclouds (with varying numbers of points), enabling conversion between 2 representations:
- List: Store points of each pointcloud of shape :math:`(N_b, 3)` in a list of length :math:`B`.
- Padded: Store all points in a :math:`(B, max(N_b), 3)` tensor with zero padding as required.
Args:
points (torch.Tensor or list of torch.Tensor or None): :math:`(X, Y, Z)` coordinates of each point.
Default: None
normals (torch.Tensor or list of torch.Tensor or None): Normals :math:`(N_x, N_y, N_z)` of each point.
Default: None
colors (torch.Tensor or list of torch.Tensor or None): :math:`(R, G, B)` color of each point.
Default: None
features (torch.Tensor or list of torch.Tensor or None): :math:`C` features of each point.
Default: None
device (torch.device or str or None): The desired device of internal tensors. If None, sets device to be
same as `points` device. Default: None
Shape:
- points: Can either be a list of tensors of shape :math:`(N_b, 3)` or a padded tensor of shape
:math:`(B, N, 3)`.
- normals: Can either be a list of tensors of shape :math:`(N_b, 3)` or a padded tensor of shape
:math:`(B, N, 3)`.
- colors: Can either be a list of tensors of shape :math:`(N_b, 3)` or a padded tensor of shape
:math:`(B, N, 3)`.
- features: Can either be a list of tensors of shape :math:`(N_b, C)` or a padded tensor of shape
:math:`(B, N, C)`.
Examples::
>>> points_list = [torch.rand(1, 3), torch.rand(4, 3)]
>>> pcs1 = clifter_slam.Pointclouds(points_list)
>>> print(pcs1.points_padded.shape)
torch.Size([2, 4, 3])
>>> print(len(pcs1.points_list))
2
>>> pcs2 = clifter_slam.Pointclouds(torch.rand((2, 4, 3)))
>>> print(pcs2.points_padded.shape)
torch.Size([2, 4, 3])
"""
_INTERNAL_TENSORS = [
"_points_padded",
"_normals_padded",
"_colors_padded",
"_features_padded",
"_nonpad_mask",
"_num_points_per_pointcloud",
]
def __init__(
self,
points: Union[List[torch.Tensor], torch.Tensor, None] = None,
normals: Union[List[torch.Tensor], torch.Tensor, None] = None,
colors: Union[List[torch.Tensor], torch.Tensor, None] = None,
features: Union[List[torch.Tensor], torch.Tensor, None] = None,
device: Union[torch.device, str, None] = None,
):
super().__init__()
# input types: list or tensor or None
if not (points is None or isinstance(points, list) or torch.is_tensor(points)):
msg = "Expected points to be of type list or tensor or None; got %r"
raise TypeError(msg % type(points))
if not (normals is None or isinstance(normals, type(points))):
msg = "Expected normals to be of same type as points (%r); got %r"
raise TypeError(msg % (type(points), type(normals)))
if not (colors is None or isinstance(colors, type(points))):
msg = "Expected colors to be of same type as points (%r); got %r"
raise TypeError(msg % (type(points), type(colors)))
if not (features is None or isinstance(features, type(points))):
msg = "Expected features to be of same type as points (%r); got %r"
raise TypeError(msg % (type(points), type(features)))
if points is not None and len(points) == 0:
raise ValueError("len(points) (= 0) should be > 0")
self._points_list = None
self._normals_list = None
self._colors_list = None
self._features_list = None
self._points_padded = None
self._normals_padded = None
self._colors_padded = None
self._features_padded = None
self._nonpad_mask = None
self._has_points = None
self._has_normals = None
self._has_colors = None
self._has_features = None
self._num_points_per_pointcloud = None
self.equisized = False
if isinstance(points, list):
# points shape check
points_shape_per_pointcloud = [p.shape for p in points]
if any([p.ndim != 2 for p in points]):
raise ValueError("ndim of all tensors in points list should be 2")
if any([x[-1] != 3 for x in points_shape_per_pointcloud]):
raise ValueError(
"last dim of all tensors in points should have shape 3 (X, Y, Z)"
)
self.device = (
torch.Tensor().to(device).device
if device is not None
else points[0].device
)
self._points_list = [p.to(self.device) for p in points]
num_points_per_pointcloud = [x[0] for x in points_shape_per_pointcloud]
# attributes shape check
if not (
normals is None
or [n.shape for n in normals] == points_shape_per_pointcloud
):
raise ValueError(
"normals tensors should have same shape as points tensors, but didn't"
)
if not (
colors is None
or [c.shape for c in colors] == points_shape_per_pointcloud
):
raise ValueError(
"colors tensors should have same shape as points tensors, but didn't"
)
if not (features is None or all([f.ndim == 2 for f in features])):
raise ValueError("ndim of all tensors in features list should be 2")
if not (
features is None
or [len(f) for f in features] == num_points_per_pointcloud
):
raise ValueError(
"number of features per pointcloud has to be equal to number of points"
)
if not (features is None or len(set([f.shape[-1] for f in features])) == 1):
raise ValueError("number of features per pointcloud has to be the same")
self._normals_list = (
None if normals is None else [n.to(self.device) for n in normals]
)
self._colors_list = (
None if colors is None else [c.to(self.device) for c in colors]
)
self._features_list = (
None if features is None else [f.to(self.device) for f in features]
)
self._B = len(self._points_list)
self._num_points_per_pointcloud = torch.tensor(
num_points_per_pointcloud, device=self.device
)
self._N = self._num_points_per_pointcloud.max().item()
self.equisized = len(self._num_points_per_pointcloud.unique()) == 1
elif torch.is_tensor(points):
self.device = (
torch.Tensor().to(device).device
if device is not None
else points.device
)
# check points shape (B, N, 3)
if points.ndim != 3:
msg = "points should have ndim=3, but had ndim={}".format(points.ndim)
raise ValueError(msg)
if points.shape[-1] != 3:
msg = (
"last dim of points should have shape 3 (X, Y, Z) but had shape %r"
)
raise ValueError(msg % (points.shape[-1]))
if points.shape[0] == 0:
msg = "Batch size of 0 not supported yet. Got input points shape {}.".format(
points.shape
)
raise ValueError(msg)
# check attribute shapes match points shape
if not (normals is None or normals.shape == points.shape):
msg = "normals tensor should have same shape as points tensor, but didn't: %r != %r"
raise ValueError(msg % (normals.shape, points.shape))
if not (colors is None or colors.shape == points.shape):
msg = "colors tensor should have same shape as points tensor, but didn't: %r != %r"
raise ValueError(msg % (colors.shape, points.shape))
if not (features is None or features.ndim == 3):
msg = "features should have ndim=3, but had ndim={}".format(
features.ndim
)
raise ValueError(msg)
if not (features is None or features.shape[:-1] == points.shape[:-1]):
msg = "first 2 dims of features tensor and points tensor should have same shape, but didn't: %r != %r"
raise ValueError(msg % (features.shape[:-1], points.shape[:-1]))
self._points_padded = points.to(self.device)
self._normals_padded = None if normals is None else normals.to(self.device)
self._colors_padded = None if colors is None else colors.to(self.device)
self._features_padded = (
None if features is None else features.to(self.device)
)
self._B = self._points_padded.shape[0]
self._N = self._points_padded.shape[1]
self._num_points_per_pointcloud = torch.tensor(
[self._N for _ in range(self._B)], device=self.device
)
self.equisized = True
elif points is None:
self.device = (
torch.Tensor().to(device).device
if device is not None
else torch.device("cpu")
)
self._B = 0
self._N = 0
self._num_points_per_pointcloud = torch.tensor([0], device=self.device)
self.equisized = None
else:
raise ValueError(
"points must either be None, a list, or a tensor with shape (batch_size, N, 3) where N is \
the maximum number of points."
)
def __len__(self):
return self._B
def __getitem__(self, index):
r"""
Args:
index (int or slice or list of int or torch.Tensor): Specifying the index of the pointclouds to retrieve.
Can be an int, slice, list of ints or a boolean tensor.
Returns:
clifter_slam.Pointclouds: Selected pointclouds. The pointclouds tensors are not cloned.
"""
if not self.has_points:
raise IndexError("Cannot index empty pointclouds object")
if isinstance(index, (int, slice)):
points = self.points_list[index]
normals = self.normals_list[index] if self.has_normals else None
colors = self.colors_list[index] if self.has_colors else None
features = self.features_list[index] if self.has_features else None
elif isinstance(index, list):
points = [self.points_list[i] for i in index]
normals = (
[self.normals_list[i] for i in index] if self.has_normals else None
)
colors = [self.colors_list[i] for i in index] if self.has_colors else None
features = (
[self.features_list[i] for i in index] if self.has_features else None
)
elif isinstance(index, torch.Tensor):
if index.dim() != 1 or index.dtype.is_floating_point:
raise IndexError(index)
if index.dtype == torch.bool:
index = index.nonzero()
index = index.squeeze(1) if index.numel() > 0 else index
index = index.tolist()
points = [self.points_list[i] for i in index]
normals = (
[self.normals_list[i] for i in index] if self.has_normals else None
)
colors = [self.colors_list[i] for i in index] if self.has_colors else None
features = (
[self.features_list[i] for i in index] if self.has_features else None
)
else:
raise IndexError(index)
if isinstance(points, list):
return Pointclouds(
points=points, normals=normals, colors=colors, features=features
)
elif torch.is_tensor(points):
points = [points]
normals = None if normals is None else [normals]
colors = None if colors is None else [colors]
features = None if features is None else [features]
return Pointclouds(
points=points, normals=normals, colors=colors, features=features
)
else:
raise ValueError("points not defined correctly")
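    # Illustrative note (not part of the original class): `pcs[0]`, `pcs[1:]`,
    # `pcs[[0, 2]]` and `pcs[torch.tensor([True, False, True])]` all return a
    # new Pointclouds built from the selected clouds without cloning their
    # underlying tensors.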
def __add__(self, other):
r"""Out-of-place implementation of `Pointclouds.offset_`"""
try:
return self.clone().offset_(other)
except TypeError:
raise NotImplementedError(
"Pointclouds + {} currently not implemented.".format(type(other))
)
def __sub__(self, other):
r"""Subtracts `other` from all Pointclouds' points (`Pointclouds` - `other`).
Args:
other (torch.Tensor or float or int): Value(s) to subtract from all points.
        Returns:
clifter_slam.Pointclouds: Subtracted Pointclouds
"""
try:
return self.clone().offset_(other * -1)
except TypeError:
raise NotImplementedError(
"Pointclouds - {} currently not implemented.".format(type(other))
)
def __mul__(self, other):
r"""Out-of-place implementation of `Pointclouds.scale_`"""
try:
return self.clone().scale_(other)
except TypeError:
raise NotImplementedError(
"Pointclouds * {} currently not implemented.".format(type(other))
)
def __truediv__(self, other):
r"""Divides all Pointclouds' points by `other`.
Args:
other (torch.Tensor or float or int): Value(s) to divide all points by.
Returns:
self
Shape:
- other: Any. Must be compatible with :math:`(B, N, 3)`.
"""
try:
return self.__mul__(1.0 / other)
except TypeError:
raise NotImplementedError(
"Pointclouds / {} currently not implemented.".format(type(other))
)
def __matmul__(self, other):
r"""Post-multiplication :math:`SE(3)` transformation or :math:`SO(3)` rotation of Pointclouds' points and
normals.
Args:
other (torch.Tensor): Either :math:`SE(3)` transformation or :math:`SO(3)` rotation
Returns:
self
Shape:
- other: Either :math:`SE(3)` transformation of shape :math:`(4, 4)` or :math:`(B, 4, 4)`, or :math:`SO(3)`
rotation of shape :math:`(3, 3)` or :math:`(B, 3, 3)`
"""
if not torch.is_tensor(other):
raise NotImplementedError(
"Pointclouds @ {} currently not implemented.".format(type(other))
)
if not (
(other.ndim == 2 or other.ndim == 3)
and (other.shape[-2:] == (3, 3) or other.shape[-2:] == (4, 4))
):
msg = "Unsupported shape for Pointclouds @ operand: {}\n".format(
other.shape
)
msg += "Use tensor of shape (3, 3) or (B, 3, 3) for rotations, or (4, 4) or (B, 4, 4) for transformations"
raise ValueError(msg)
if other.shape[-2:] == (3, 3):
return self.clone().rotate_(other, pre_multiplication=False)
if other.shape[-2:] == (4, 4):
return self.clone().transform_(other, pre_multiplication=False)
def rotate(self, rmat: torch.Tensor, *, pre_multiplication=True):
r"""Out-of-place implementation of `Pointclouds.rotate_`"""
return self.clone().rotate_(rmat, pre_multiplication=pre_multiplication)
def transform(self, transform: torch.Tensor, *, pre_multiplication=True):
r"""Out-of-place implementation of `Pointclouds.transform_`"""
return self.clone().transform_(transform, pre_multiplication=pre_multiplication)
def pinhole_projection(self, intrinsics: torch.Tensor):
r"""Out-of-place implementation of `Pointclouds.pinhole_projection_`"""
return self.clone().pinhole_projection_(intrinsics)
def offset_(self, offset: Union[torch.Tensor, float, int]):
r"""Adds :math:`offset` to all Pointclouds' points. In place operation.
Args:
offset (torch.Tensor or float or int): Value(s) to add to all points.
Returns:
self
Shape:
- offset: Any. Must be compatible with :math:`(B, N, 3)`.
"""
if not (
torch.is_tensor(offset)
or isinstance(offset, float)
or isinstance(offset, int)
):
raise TypeError(
"Operand should be tensor, float or int but was %r instead"
% type(offset)
)
if not self.has_points:
return self
# update padded representation
self._points_padded = self.points_padded + (
offset * self.nonpad_mask.to(self.points_padded.dtype).unsqueeze(-1)
)
# update list representation when inferred
self._points_list = None
return self
def scale_(self, scale: Union[torch.Tensor, float, int]):
r"""Scales all Pointclouds' points by `scale`. In place operation.
Args:
scale (torch.Tensor or float or int): Value(s) to scale all points by.
Returns:
self
Shape:
- scale: Any. Must be compatible with :math:`(B, N, 3)`.
"""
if not (
torch.is_tensor(scale) or isinstance(scale, float) or isinstance(scale, int)
):
raise TypeError(
"Operand should be tensor, float or int but was %r instead"
% type(scale)
)
if not self.has_points:
return self
# update padded representation
self._points_padded = (
self.points_padded
* scale
* self.nonpad_mask.to(self.points_padded.dtype).unsqueeze(-1)
)
# update list representation when inferred
self._points_list = None
return self
def rotate_(self, rmat: torch.Tensor, *, pre_multiplication=True):
r"""Applies batch or single :math:`SO(3)` rotation to all Pointclouds' points and normals. In place operation.
Args:
rmat (torch.Tensor): Either batch or single :math:`SO(3)` rotation matrix
pre_multiplication (torch.Tensor): If True, will pre-multiply the rotation. Otherwise will
post-multiply the rotation. Default: True
Returns:
self
Shape:
- rmat: :math:`(3, 3)` or :math:`(B, 3, 3)`
"""
if not torch.is_tensor(rmat):
raise TypeError(
"Rotation matrix should be tensor, but was %r instead" % type(rmat)
)
if not ((rmat.ndim == 2 or rmat.ndim == 3) and rmat.shape[-2:] == (3, 3)):
raise ValueError(
"Rotation matrix should be of shape (3, 3) or (B, 3, 3), but was {} instead.".format(
rmat.shape
)
)
if rmat.ndim == 3 and rmat.shape[0] != self._B:
raise ValueError(
"Rotation matrix batch size ({}) != Pointclouds batch size ({})".format(
rmat.shape[0], self._B
)
)
if not self.has_points:
return self
if pre_multiplication:
rmat = rmat.transpose(-1, -2)
# update padded representation
if rmat.ndim == 2:
self._points_padded = torch.einsum("bij,jk->bik", self.points_padded, rmat)
self._normals_padded = (
None
if self.normals_padded is None
else torch.einsum("bij,jk->bik", self.normals_padded, rmat)
)
elif rmat.ndim == 3:
self._points_padded = torch.einsum("bij,bjk->bik", self.points_padded, rmat)
self._normals_padded = (
None
if self.normals_padded is None
else torch.einsum("bij,bjk->bik", self.normals_padded, rmat)
)
# force update of list representation
self._points_list = None
self._normals_list = None
return self
def transform_(self, transform: torch.Tensor, *, pre_multiplication=True):
r"""Applies batch or single :math:`SE(3)` transformation to all Pointclouds' points and normals. In place
operation.
Args:
transform (torch.Tensor): Either batch or single :math:`SE(3)` transformation tensor
pre_multiplication (torch.Tensor): If True, will pre-multiply the transformation. Otherwise will
post-multiply the transformation. Default: True
Returns:
self
Shape:
- transform: :math:`(4, 4)` or :math:`(B, 4, 4)`
"""
if not torch.is_tensor(transform):
raise TypeError(
"transform should be tensor, but was %r instead" % type(transform)
)
if not (
(transform.ndim == 2 or transform.ndim == 3)
and transform.shape[-2:] == (4, 4)
):
raise ValueError(
"transform should be of shape (4, 4) or (B, 4, 4), but was {} instead.".format(
transform.shape
)
)
if transform.ndim == 3 and transform.shape[0] != self._B:
raise ValueError(
"transform batch size ({}) != Pointclouds batch size ({})".format(
transform.shape[0], self._B
)
)
if not self.has_points:
return self
# rotation and translation matrix
rmat = transform[..., :3, :3]
tvec = transform[..., :3, 3]
# expand dims to ensure correct broadcasting of offset
while tvec.ndim < self.points_padded.ndim:
tvec = tvec.unsqueeze(-2)
return self.rotate_(rmat, pre_multiplication=pre_multiplication).offset_(tvec)
def pinhole_projection_(self, intrinsics: torch.Tensor):
r"""Projects Pointclouds' points onto :math:`z=1` plane using intrinsics of a pinhole camera. In place
operation.
Args:
intrinsics (torch.Tensor): Either batch or single intrinsics matrix
Returns:
self
Shape:
- intrinsics: :math:`(4, 4)` or :math:`(B, 4, 4)`
"""
if not torch.is_tensor(intrinsics):
raise TypeError(
"intrinsics should be tensor, but was {} instead".format(
type(intrinsics)
)
)
if not (
(intrinsics.ndim == 2 or intrinsics.ndim == 3)
and intrinsics.shape[-2:] == (4, 4)
):
msg = "intrinsics should be of shape (4, 4) or (B, 4, 4), but was {} instead.".format(
intrinsics.shape
)
raise ValueError(msg)
if not self.has_points:
return self
projected_2d = projutils.project_points(self.points_padded, intrinsics)
self._points_padded = projutils.homogenize_points(
projected_2d
) * self.nonpad_mask.to(projected_2d.dtype).unsqueeze(-1)
# force update of list representation
self._points_list = None
return self
@property
def has_points(self):
r"""Determines whether pointclouds have points or not
Returns:
bool
"""
if self._has_points is None:
self._has_points = (
self._points_list is not None or self._points_padded is not None
)
return self._has_points
@property
def has_normals(self):
r"""Determines whether pointclouds have normals or not
Returns:
bool
"""
if self._has_normals is None:
self._has_normals = (
self._normals_list is not None or self._normals_padded is not None
)
return self._has_normals
@property
def has_colors(self):
r"""Determines whether pointclouds have colors or not
Returns:
bool
"""
if self._has_colors is None:
self._has_colors = (
self._colors_list is not None or self._colors_padded is not None
)
return self._has_colors
@property
def has_features(self):
r"""Determines whether pointclouds have features or not
Returns:
bool
"""
if self._has_features is None:
self._has_features = (
self._features_list is not None or self._features_padded is not None
)
return self._has_features
@property
def num_features(self):
r"""Determines number of features in pointclouds
Returns:
int
"""
if not self.has_features:
return 0
if self._features_padded is not None:
return self._features_padded.shape[-1]
if self._features_list is not None:
return self._features_list[0].shape[-1]
@property
def points_list(self):
r"""Gets the list representation of the points.
Returns:
list of torch.Tensor: list of :math:`B` tensors of points of shape :math:`(N_b, 3)`.
"""
if self._points_list is None and self._points_padded is not None:
self._points_list = [
p[0, : self._num_points_per_pointcloud[b]]
for b, p in enumerate(self._points_padded.split([1] * self._B, 0))
]
return self._points_list
@property
def normals_list(self):
r"""Gets the list representation of the point normals.
Returns:
list of torch.Tensor: list of :math:`B` tensors of normals of shape :math:`(N_b, 3)`.
"""
if self._normals_list is None and self._normals_padded is not None:
self._normals_list = [
n[0, : self._num_points_per_pointcloud[b]]
for b, n in enumerate(self._normals_padded.split([1] * self._B, 0))
]
return self._normals_list
@property
def colors_list(self):
r"""Gets the list representation of the point colors.
Returns:
list of torch.Tensor: list of :math:`B` tensors of colors of shape :math:`(N_b, 3)`.
"""
if self._colors_list is None and self._colors_padded is not None:
self._colors_list = [
c[0, : self._num_points_per_pointcloud[b]]
for b, c in enumerate(self._colors_padded.split([1] * self._B, 0))
]
return self._colors_list
@property
def features_list(self):
r"""Gets the list representation of the point features.
Returns:
            list of torch.Tensor: list of :math:`B` tensors of features of shape :math:`(N_b, C)`.
"""
if self._features_list is None and self._features_padded is not None:
self._features_list = [
f[0, : self._num_points_per_pointcloud[b]]
for b, f in enumerate(self._features_padded.split([1] * self._B, 0))
]
return self._features_list
@property
def points_padded(self):
r"""Gets the padded representation of the points.
Returns:
torch.Tensor: tensor representation of points with zero padding as required
Shape:
- Output: :math:`(B, max(N_b), 3)`
"""
self._compute_padded()
return self._points_padded
@property
def normals_padded(self):
r"""Gets the padded representation of the normals.
Returns:
torch.Tensor: tensor representation of normals with zero padding as required
Shape:
- Output: :math:`(B, max(N_b), 3)`
"""
self._compute_padded()
return self._normals_padded
@property
def colors_padded(self):
r"""Gets the padded representation of the colors.
Returns:
torch.Tensor: tensor representation of colors with zero padding as required
Shape:
- Output: :math:`(B, max(N_b), 3)`
"""
self._compute_padded()
return self._colors_padded
@property
def features_padded(self):
r"""Gets the padded representation of the features.
Returns:
torch.Tensor: tensor representation of features with zero padding as required
Shape:
- Output: :math:`(B, max(N_b), C)`
"""
self._compute_padded()
return self._features_padded
@property
def nonpad_mask(self):
r"""Returns tensor of `bool` values which are True wherever points exist and False wherever there is padding.
Returns:
torch.Tensor: 2d `bool` mask
Shape:
- Output: :math:`(B, N)`
"""
if self._nonpad_mask is None and self.has_points:
self._nonpad_mask = torch.ones(
(self._B, self._N), dtype=torch.bool, device=self.device
)
if self.equisized:
self._nonpad_mask[:, self._num_points_per_pointcloud[0] :] = 0
else:
for b in range(self._B):
self._nonpad_mask[b, self._num_points_per_pointcloud[b] :] = 0
return self._nonpad_mask
@property
def num_points_per_pointcloud(self):
r"""Returns a 1D tensor with length equal to the number of pointclouds giving the number of points in each
pointcloud.
Returns:
torch.Tensor: 1D tensor of sizes
Shape:
- Output: tensor of shape :math:`(B)`.
"""
return self._num_points_per_pointcloud
@points_list.setter
def points_list(self, value: List[torch.Tensor]):
r"""Updates `points_list` representation.
.. note:: The number of pointclouds and the number of points per pointcloud can not change.
Args:
value (list of torch.Tensor): list of :math:`B` tensors of points of shape :math:`(N_b, 3)`.
Shape of tensors in `value` and `pointclouds.points_list` must match.
"""
self._assert_set_list(value)
self._points_list = [v.clone().to(self.device) for v in value]
self._points_padded = None
@normals_list.setter
def normals_list(self, value: List[torch.Tensor]):
r"""Updates `normals_list` representation.
.. note:: The number of pointclouds and the number of points per pointcloud can not change.
Args:
            value (list of torch.Tensor): list of :math:`B` tensors of normals of shape :math:`(N_b, 3)`.
Shape of tensors in `value` and `pointclouds.points_list` must match.
"""
self._assert_set_list(value)
self._normals_list = [v.clone().to(self.device) for v in value]
        self._normals_padded = None
@colors_list.setter
def colors_list(self, value: List[torch.Tensor]):
r"""Updates `colors_list` representation.
.. note:: The number of pointclouds and the number of points per pointcloud can not change.
Args:
            value (list of torch.Tensor): list of :math:`B` tensors of colors of shape :math:`(N_b, 3)`.
Shape of tensors in `value` and `pointclouds.points_list` must match.
"""
self._assert_set_list(value)
self._colors_list = [v.clone().to(self.device) for v in value]
        self._colors_padded = None
@features_list.setter
def features_list(self, value: List[torch.Tensor]):
r"""Updates `features_list` representation.
.. note:: The number of pointclouds and the number of points per pointcloud can not change.
Args:
            value (list of torch.Tensor): list of :math:`B` tensors of features of shape :math:`(N_b, C)`.
                The number of points per pointcloud in `value` must match `pointclouds.points_list`.
"""
self._assert_set_list(value, first_dim_only=True)
self._features_list = [v.clone().to(self.device) for v in value]
        self._features_padded = None
@points_padded.setter
def points_padded(self, value: torch.Tensor):
r"""Updates `points_padded` representation.
.. note:: The number of pointclouds and the number of points per pointcloud can not change
(can not change the shape or padding of `points_padded`).
Args:
value (torch.Tensor): tensor representation of (zero padded) points with the same shape and number of
points per pointcloud as `self.points_padded`
Shape:
- value: :math:`(B, max(N_b), 3)`
"""
self._assert_set_padded(value)
self._points_padded = value.clone().to(self.device)
self._points_list = None
@normals_padded.setter
def normals_padded(self, value: torch.Tensor):
r"""Updates `normals_padded` representation.
.. note:: The number of pointclouds and the number of points per pointcloud can not change
(can not change the shape or padding of `normals_padded`).
Args:
value (torch.Tensor): tensor representation of (zero padded) normals with the same shape and number of
points per pointcloud as `self.points_padded`
Shape:
- value: :math:`(B, max(N_b), 3)`
"""
self._assert_set_padded(value)
self._normals_padded = value.clone().to(self.device)
self._normals_list = None
@colors_padded.setter
def colors_padded(self, value: torch.Tensor):
r"""Updates `colors_padded` representation.
.. note:: The number of pointclouds and the number of points per pointcloud can not change
(can not change the shape or padding of `colors_padded`).
Args:
value (torch.Tensor): tensor representation of (zero padded) colors with the same shape and number of
points per pointcloud as `self.points_padded`
Shape:
- value: :math:`(B, max(N_b), 3)`
"""
self._assert_set_padded(value)
self._colors_padded = value.clone().to(self.device)
self._colors_list = None
@features_padded.setter
def features_padded(self, value: torch.Tensor):
r"""Updates `features_padded` representation.
.. note:: The number of pointclouds and the number of points per pointcloud can not change
(can not change the shape or padding of `features_padded`).
Args:
value (torch.Tensor): tensor representation of (zero padded) features with the same shape and number of
points per pointcloud as `self.points_padded`
Shape:
- value: :math:`(B, max(N_b), C)`
"""
self._assert_set_padded(value, first_2_dims_only=True)
self._features_padded = value.clone().to(self.device)
self._features_list = None
def _compute_padded(self, refresh: bool = False):
r"""Computes the padded version of pointclouds.
Args:
refresh (bool): If True, will recompute padded representation even if it already exists
"""
if not self.has_points:
return
if not (refresh or self._points_padded is None):
return
self._points_padded = structutils.list_to_padded(
self._points_list,
(self._N, 3),
pad_value=0.0,
equisized=self.equisized,
)
self._normals_padded = (
None
if self._normals_list is None
else structutils.list_to_padded(
self._normals_list,
(self._N, 3),
pad_value=0.0,
equisized=self.equisized,
)
)
self._colors_padded = (
None
if self._colors_list is None
else structutils.list_to_padded(
self._colors_list,
(self._N, 3),
pad_value=0.0,
equisized=self.equisized,
)
)
self._features_padded = (
None
if self._features_list is None
else structutils.list_to_padded(
self._features_list,
(self._N, self.num_features),
pad_value=0.0,
equisized=self.equisized,
)
)
def clone(self):
r"""Returns deep copy of Pointclouds object. All internal tensors are cloned individually.
Returns:
clifter_slam.Pointclouds: cloned clifter_slam.Pointclouds object
"""
if not self.has_points:
return Pointclouds(device=self.device)
elif self._points_list is not None:
new_points = [p.clone() for p in self.points_list]
new_normals = (
None
if self._normals_list is None
else [n.clone() for n in self._normals_list]
)
new_colors = (
None
if self._colors_list is None
else [c.clone() for c in self._colors_list]
)
new_features = (
None
if self._features_list is None
else [f.clone() for f in self._features_list]
)
elif self._points_padded is not None:
new_points = self._points_padded.clone()
new_normals = (
None if self._normals_padded is None else self._normals_padded.clone()
)
new_colors = (
None if self._colors_padded is None else self._colors_padded.clone()
)
new_features = (
None if self._features_padded is None else self._features_padded.clone()
)
other = Pointclouds(
points=new_points,
normals=new_normals,
colors=new_colors,
features=new_features,
)
for k in self._INTERNAL_TENSORS:
v = getattr(self, k)
if torch.is_tensor(v):
setattr(other, k, v.clone())
return other
def detach(self):
r"""Detachs Pointclouds object. All internal tensors are detached individually.
Returns:
clifter_slam.Pointclouds: detached clifter_slam.Pointclouds object
"""
other = self.clone()
if other._points_list is not None:
other._points_list = [p.detach() for p in other._points_list]
if other._normals_list is not None:
other._normals_list = [n.detach() for n in other._normals_list]
if other._colors_list is not None:
other._colors_list = [c.detach() for c in other._colors_list]
if other._features_list is not None:
other._features_list = [f.detach() for f in other._features_list]
for k in self._INTERNAL_TENSORS:
v = getattr(self, k)
if torch.is_tensor(v):
setattr(other, k, v.detach())
return other
def to(self, device: Union[torch.device, str], copy: bool = False):
r"""Match functionality of torch.Tensor.to(device)
If copy = True or the self Tensor is on a different device, the returned tensor is a copy of self with the
desired torch.device.
If copy = False and the self Tensor already has the correct torch.device, then self is returned.
Args:
device (torch.device or str): Device id for the new tensor.
copy (bool): Boolean indicator whether or not to clone self. Default False.
Returns:
clifter_slam.Pointclouds
"""
if not copy and self.device == device:
return self
other = self.clone()
if self.device != device:
# hack to know which gpu is used when device("cuda")
other.device = torch.Tensor().to(device).device
if other._points_list is not None:
other._points_list = [p.to(device) for p in other._points_list]
if other._normals_list is not None:
other._normals_list = [n.to(device) for n in other._normals_list]
if other._colors_list is not None:
other._colors_list = [c.to(device) for c in other._colors_list]
if other._features_list is not None:
other._features_list = [f.to(device) for f in other._features_list]
for k in self._INTERNAL_TENSORS:
v = getattr(self, k)
if torch.is_tensor(v):
setattr(other, k, v.to(device))
return other
def cpu(self):
r"""Match functionality of torch.Tensor.cpu()
Returns:
clifter_slam.Pointclouds
"""
return self.to(torch.device("cpu"))
def cuda(self):
r"""Match functionality of torch.Tensor.cuda()
Returns:
clifter_slam.Pointclouds
"""
return self.to(torch.device("cuda"))
def append_points(self, pointclouds: "Pointclouds"):
r"""Appends points, normals, colors and features of a clifter_slam.Pointclouds object to the current pointclouds.
Both Pointclouds must have/not have the same attributes. In place operation.
Args:
pointclouds (clifter_slam.Pointclouds): Pointclouds to get appended to self. Must have same batch size as self.
Returns:
self
"""
if not isinstance(pointclouds, type(self)):
raise TypeError(
"Append object must be of type clifter_slam.Pointclouds, but was of type {}.".format(
type(pointclouds)
)
)
if not (pointclouds.device == self.device):
raise ValueError(
"Device of pointclouds to append and to be appended must match: ({0} != {1})".format(
pointclouds.device, self.device
)
)
if not pointclouds.has_points:
return self
if not self.has_points:
if pointclouds.has_points:
self._points_list = [
p.clone().to(self.device) for p in pointclouds.points_list
]
if pointclouds.has_normals:
self._normals_list = [
n.clone().to(self.device) for n in pointclouds.normals_list
]
if pointclouds.has_colors:
self._colors_list = [
c.clone().to(self.device) for c in pointclouds.colors_list
]
if pointclouds.has_features:
self._features_list = [
f.clone().to(self.device) for f in pointclouds.features_list
]
self._has_points = pointclouds._has_points
self._has_normals = pointclouds._has_normals
self._has_colors = pointclouds._has_colors
self._has_features = pointclouds._has_features
self._B = pointclouds._B
self._N = pointclouds._N
self.equisized = pointclouds.equisized
for k in self._INTERNAL_TENSORS:
v = getattr(pointclouds, k)
if torch.is_tensor(v):
setattr(self, k, v.clone())
return self
if not (len(pointclouds) == len(self)):
raise ValueError(
"Batch size of pointclouds to append and to be appended must match: ({0} != {1})".format(
len(pointclouds), len(self)
)
)
if self.has_normals != pointclouds.has_normals:
raise ValueError(
"pointclouds to append and to be appended must either both have or not have normals: ({0} != {1})".format(
pointclouds.has_normals, self.has_normals
)
)
if self.has_colors != pointclouds.has_colors:
raise ValueError(
"pointclouds to append and to be appended must either both have or not have colors: ({0} != {1})".format(
pointclouds.has_colors, self.has_colors
)
)
if self.has_features != pointclouds.has_features:
raise ValueError(
"pointclouds to append and to be appended must either both have or not have features: ({0} != {1})".format(
pointclouds.has_features, self.has_features
)
)
if self.has_features and self.num_features != pointclouds.num_features:
raise ValueError(
"pointclouds to append and to be appended must have the same number of features: ({0} != {1})".format(
pointclouds.num_features, self.num_features
)
)
self._points_list = [
torch.cat([self.points_list[b], pointclouds.points_list[b]], 0)
for b in range(self._B)
]
self._points_padded = None
if self.has_normals:
self._normals_list = [
torch.cat([self.normals_list[b], pointclouds.normals_list[b]], 0)
for b in range(self._B)
]
self._normals_padded = None
if self.has_colors:
self._colors_list = [
torch.cat([self.colors_list[b], pointclouds.colors_list[b]], 0)
for b in range(self._B)
]
self._colors_padded = None
if self.has_features:
self._features_list = [
torch.cat([self.features_list[b], pointclouds.features_list[b]], 0)
for b in range(self._B)
]
self._features_padded = None
self._num_points_per_pointcloud = (
self._num_points_per_pointcloud + pointclouds._num_points_per_pointcloud
)
self.equisized = len(self._num_points_per_pointcloud.unique()) == 1
        self._N = self._num_points_per_pointcloud.max().item()
self._nonpad_mask = None
return self
def open3d(
self,
index: int,
include_colors: bool = True,
max_num_points: Optional[int] = None,
include_normals: bool = False,
):
r"""Converts `index`-th pointcloud to a `open3d.geometry.Pointcloud` object (e.g. for visualization).
Args:
index (int): Index of which pointcloud (from the batch of pointclouds) to convert to
`open3d.geometry.Pointcloud`.
include_colors (bool): If True, will include colors in the `o3d.geometry.Pointcloud`
objects. Default: True
max_num_points (int): Maximum number of points to include in the returned object. If None,
will not set a max size (will not downsample). Default: None
include_normals (bool): If True, will include normal vectors in the `o3d.geometry.Pointcloud`
objects. Default: False
Returns:
pcd (open3d.geometry.Pointcloud): `open3d.geometry.Pointcloud` object from `index`-th pointcloud.
"""
if not isinstance(index, int):
raise TypeError("Index should be int, but was {}.".format(type(index)))
pcd = o3d.geometry.PointCloud()
num_points = self.num_points_per_pointcloud[index]
torch_points = self.points_list[index]
subsample = max_num_points is not None and max_num_points < num_points
if subsample:
perm = torch.randperm(num_points)
point_inds = perm[:max_num_points]
torch_points = torch_points[point_inds]
numpy_points = torch_points.detach().cpu().numpy()
pcd.points = o3d.utility.Vector3dVector(numpy_points)
if self.has_colors and include_colors:
torch_colors = self.colors_list[index]
if subsample:
torch_colors = torch_colors[point_inds]
# if colors > 1, assume 255 range
if (torch_colors.max() > 1.1).item():
torch_colors = torch_colors / 255
torch_colors = torch.clamp(torch_colors, min=0.0, max=1.0)
numpy_colors = torch_colors.detach().cpu().numpy()
pcd.colors = o3d.utility.Vector3dVector(numpy_colors)
if self.has_normals and include_normals:
torch_normals = self.normals_list[index]
if subsample:
torch_normals = torch_normals[point_inds]
numpy_normals = torch_normals.detach().cpu().numpy()
pcd.normals = o3d.utility.Vector3dVector(numpy_normals)
return pcd
def plotly(
self,
index: int,
include_colors: bool = True,
max_num_points: Optional[int] = 200000,
as_figure: bool = True,
point_size: int = 2,
):
r"""Converts `index`-th pointcloud to either a `plotly.graph_objects.Figure` or a
`plotly.graph_objects.Scatter3d` object (for visualization).
Args:
index (int): Index of which pointcloud (from the batch of pointclouds) to convert to plotly
representation.
include_colors (bool): If True, will include point colors in the returned object. Default: True
max_num_points (int): Maximum number of points to include in the returned object. If None,
will not set a max size (will not downsample). Default: 200000
as_figure (bool): If True, returns a `plotly.graph_objects.Figure` object which can easily
be visualized by calling `.show()` on. Otherwise, returns a
`plotly.graph_objects.Scatter3d` object. Default: True
point_size (int): Point size radius (for visualization). Default: 2
Returns:
plotly.graph_objects.Figure or plotly.graph_objects.Scatter3d: If `as_figure` is True, will return
`plotly.graph_objects.Figure` object from the `index`-th pointcloud. Else,
returns `plotly.graph_objects.Scatter3d` object from the `index`-th pointcloud.
"""
if not isinstance(index, int):
raise TypeError("Index should be int, but was {}.".format(type(index)))
num_points = self.num_points_per_pointcloud[index]
torch_points = self.points_list[index]
subsample = max_num_points is not None and max_num_points < num_points
if subsample:
perm = torch.randperm(num_points)
point_inds = perm[:max_num_points]
torch_points = torch_points[point_inds]
numpy_points = torch_points.detach().cpu().numpy()
marker_dict = {"size": point_size}
if self.has_colors and include_colors:
torch_colors = self.colors_list[index]
if subsample:
torch_colors = torch_colors[point_inds]
            # if colors are in [0, 1], scale them up to the [0, 255] range
if (torch_colors.max() < 1.1).item():
torch_colors = torch_colors * 255
torch_colors = torch.clamp(torch_colors, min=0.0, max=255.0)
numpy_colors = torch_colors.detach().cpu().numpy().astype("uint8")
marker_dict["color"] = numpy_colors
scatter3d = go.Scatter3d(
x=numpy_points[..., 0],
y=numpy_points[..., 1],
z=numpy_points[..., 2],
mode="markers",
marker=marker_dict,
)
if not as_figure:
return scatter3d
fig = go.Figure(data=[scatter3d])
fig.update_layout(
showlegend=False,
scene=dict(
xaxis=dict(
showticklabels=False,
showgrid=False,
zeroline=False,
visible=False,
),
yaxis=dict(
showticklabels=False,
showgrid=False,
zeroline=False,
visible=False,
),
zaxis=dict(
showticklabels=False,
showgrid=False,
zeroline=False,
visible=False,
),
),
)
return fig
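    # Illustrative note (not part of the original class): a typical call is
    # `pointclouds.plotly(0).show()` for an interactive figure, or
    # `o3d.visualization.draw_geometries([pointclouds.open3d(0)])` for an
    # Open3D window.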
def _assert_set_padded(self, value: torch.Tensor, first_2_dims_only: bool = False):
r"""Checks if value can be set as a padded representation attribute
Args:
value (torch.Tensor): value we want to set as one of the padded representation attributes
first_2_dims_only (bool): If True, will only check if first 2 dimensions of value are the same as
`self.points_padded`. Otherwise will check the entire shape. Default: False
"""
if not isinstance(value, torch.Tensor):
raise TypeError("value must be torch.Tensor. Got {}".format(type(value)))
if not self.has_points:
raise ValueError(
"cannot set padded representation for an empty pointclouds object"
)
if self.device != torch.device(value.device):
raise ValueError(
"value must have the same device as pointclouds object: {} != {}".format(
value.device, torch.device(self.device)
)
)
if value.ndim != 3:
raise ValueError("value.ndim should be 3. Got {}".format(value.ndim))
if first_2_dims_only and self.points_padded.shape[:2] != value.shape[:2]:
raise ValueError(
"first 2 dims of value tensor and points tensor should have same shape, but didn't: {} != {}.".format(
value.shape[:2], self.points_padded.shape[:2]
)
)
if (not first_2_dims_only) and self.points_padded.shape != value.shape:
raise ValueError(
"value tensor and points tensor should have same shape, but didn't: {} != {}.".format(
value.shape, self.points_padded.shape
)
)
if not all(
[
value[b][N_b:].eq(0).all().item()
for b, N_b in enumerate(self.num_points_per_pointcloud)
]
):
raise ValueError(
"value must have zeros wherever pointclouds.points_padded has zero padding."
)
def _assert_set_list(self, value: List[torch.Tensor], first_dim_only: bool = False):
r"""Checks if value can be set as a list representation attribute
Args:
value (list of torch.Tensor): value we want to set as one of the list representation attributes
            first_dim_only (bool): If True, will only check that the first dimension of each tensor in value
                matches that of the corresponding tensor in `self.points_list`. Otherwise will check the entire
                shape. Default: False
"""
if not isinstance(value, list):
raise TypeError(
"value must be list of torch.Tensors. Got {}".format(type(value))
)
if not self.has_points:
raise ValueError(
"cannot set list representation for an empty pointclouds object"
)
if len(self) != len(value):
raise ValueError(
"value must have same length as pointclouds.points_list. Got {} != {}.".format(
len(value), len(self)
)
)
if any([v.ndim != 2 for v in value]):
raise ValueError("ndim of all tensors in value list should be 2")
if first_dim_only and any(
[
self.points_list[b].shape[:1] != value[b].shape[:1]
for b in range(len(self))
]
):
raise ValueError(
"shape of first 2 dims of tensors in value and pointclouds.points_list must match"
)
if (not first_dim_only) and any(
[self.points_list[b].shape != value[b].shape for b in range(len(self))]
):
raise ValueError(
"shape of tensors in value and pointclouds.points_list must match"
)
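# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal example of
# how the Pointclouds container above could be exercised. It assumes the
# imports at the top of this module (torch etc.) are available and is kept
# under a __main__ guard so importing the module has no side effects.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Two pointclouds of different sizes, given as the list representation.
    pts = [torch.rand(5, 3), torch.rand(8, 3)]
    pcs = Pointclouds(points=pts)
    # Padded representation is (B, max(N_b), 3); rows past N_b are zero padded.
    print(pcs.points_padded.shape)          # torch.Size([2, 8, 3])
    print(pcs.num_points_per_pointcloud)    # tensor([5, 8])
    # Out-of-place operators built on the in-place offset_/scale_ methods.
    shifted = pcs + 1.0      # equivalent to pcs.clone().offset_(1.0)
    scaled = pcs * 2.0       # equivalent to pcs.clone().scale_(2.0)
    # A single SE(3) transform (identity here) applied to every cloud.
    T = torch.eye(4)
    transformed = pcs @ T    # post-multiplied via __matmul__ -> transform_
    # Indexing returns a new Pointclouds over the selected clouds.
    first = pcs[0]
    print(len(first))        # 1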
| 38.809264
| 123
| 0.568771
|
aeb6ca53a6eda4a7ad863c9fdebbcc8ac7c9f404
| 2,036
|
py
|
Python
|
version.py
|
KD-Group/prett
|
b605a5637958eb9a475494bba2622b712b23c680
|
[
"MIT"
] | 1
|
2017-11-28T10:31:45.000Z
|
2017-11-28T10:31:45.000Z
|
version.py
|
KD-Group/prett
|
b605a5637958eb9a475494bba2622b712b23c680
|
[
"MIT"
] | 1
|
2018-05-21T04:53:15.000Z
|
2018-05-21T04:53:15.000Z
|
version.py
|
KD-Group/prett
|
b605a5637958eb9a475494bba2622b712b23c680
|
[
"MIT"
] | 2
|
2018-01-03T12:13:13.000Z
|
2018-01-19T06:19:10.000Z
|
# Fetch the latest git tag and store it in RELEASE-VERSION so that every build
# automatically picks up the newest tag and publishes it as the version.
# If there is no RELEASE-VERSION file and no git tag, "0.0.1" is used by default.
# Before use, MANIFEST.in must contain:
# include RELEASE-VERSION
# include version.py
__all__ = ("get_git_version",)
import subprocess
import os
def get_git_latest_tag():
def _minimal_ext_cmd(cmd: str):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd("git describe --abbrev=0 --tags")
git_tag = out.strip().decode('ascii')
        # strip a leading "v"/"V" from the tag
if str(git_tag).startswith("v") or str(git_tag).startswith("V"):
git_tag = str(git_tag)[1:]
if git_tag == "":
git_tag = None
except Exception:
git_tag = None
return git_tag
def read_release_version():
try:
f = open("RELEASE-VERSION", "r")
try:
version = f.readlines()[0].strip()
if version == "":
return None
else:
return version
finally:
f.close()
except Exception:
return None
def write_release_version(version):
f = open("RELEASE-VERSION", "w")
f.write("%s\n" % version)
f.close()
def get_git_version():
release_version = read_release_version()
version = get_git_latest_tag()
if version is None:
version = release_version
    # if there is no RELEASE-VERSION file and git has no tag, default to "0.0.1"
if version is None:
version = "0.0.1"
# raise ValueError("Cannot find the version number!")
if version != release_version:
write_release_version(version)
return version
if __name__ == "__main__":
print(get_git_version())
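# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): packaging scripts would
# typically consume this helper as version=get_git_version() inside setup(),
# for example (illustrative, hypothetical project name):
#     from setuptools import setup
#     from version import get_git_version
#     setup(name="example-package", version=get_git_version())
# The RELEASE-VERSION file then ships with the sdist (via MANIFEST.in) so the
# version can still be resolved when no .git directory is present.
# ---------------------------------------------------------------------------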
| 24.53012
| 96
| 0.582515
|
d9ee75336dc50fa9d021fbb61b5dad52534edc05
| 4,716
|
py
|
Python
|
Virtual_Piano.py
|
PrathameshDeshpande/Virtual_Piano_OpenCV
|
a5587b3ebe795c1fa4e2a816ef02f1e8e700444c
|
[
"Apache-2.0"
] | 6
|
2020-07-30T19:12:02.000Z
|
2020-08-06T16:04:48.000Z
|
Virtual_Piano.py
|
PrathameshDeshpande/Virtual_Piano_OpenCV
|
a5587b3ebe795c1fa4e2a816ef02f1e8e700444c
|
[
"Apache-2.0"
] | null | null | null |
Virtual_Piano.py
|
PrathameshDeshpande/Virtual_Piano_OpenCV
|
a5587b3ebe795c1fa4e2a816ef02f1e8e700444c
|
[
"Apache-2.0"
] | 1
|
2020-08-02T05:09:55.000Z
|
2020-08-02T05:09:55.000Z
|
import cv2
import numpy as np
import time
import pygame
pygame.init()
w,h = 78,110
x1,y1= 10,10
x2,y2 = 10+w,10
x3,y3 = 10+2*w,10
x4,y4 = 10+3*w,10
x5,y5 = 10+4*w,10
x6,y6 = 10+5*w,10
x7,y7 = 10+6*w,10
x8,y8 =10+7*w,10
def draw_piano(frame):
cv2.rectangle(frame, (x1, y1), (x1 + w, y1 + h), (255, 255, 255), -1)
cv2.rectangle(frame, (x2, y2), (x2 + w, y2 + h), (255, 255, 255), -1)
cv2.rectangle(frame, (x3, y3), (x3 + w, y3 + h), (255, 255, 255), -1)
cv2.rectangle(frame, (x4, y4), (x4 + w, y4 + h), (255, 255, 255), -1)
cv2.rectangle(frame, (x5, y5), (x5 + w, y5 + h), (255, 255, 255), -1)
cv2.rectangle(frame, (x6, y6), (x6 + w, y6 + h), (255, 255, 255), -1)
cv2.rectangle(frame, (x7, y7), (x7 + w, y7 + h), (255, 255, 255), -1)
cv2.rectangle(frame, (x8, y8), (x8 + w, y8 + h), (255, 255, 255), -1)
cv2.rectangle(frame,(x1, y1),(x8 + w, y8 + h),(0,0,0),1)
cv2.line(frame, (x2, y2), (x2, y2+h) , (0,0,0) ,1)
cv2.line(frame, (x3, y3), (x3, y3 + h), (0, 0, 0), 1)
cv2.line(frame, (x4, y4), (x4, y4 + h), (0,0,0), 1)
cv2.line(frame, (x5, y5), (x5, y5 + h), (0,0,0,), 1)
cv2.line(frame, (x6, y6), (x6, y6 + h), (0,0,0), 1)
cv2.line(frame, (x7, y7), (x7, y7 + h), (0,0,0), 1)
cv2.line(frame, (x8, y8), (x8, y8 + h), (0, 0, 0), 1)
def key_press(frame,x,y,w1,h1):
if x>x1 and y>y1 and x+w1<(x1 + w) and y+h1<(y1+h):
cv2.rectangle(frame, (x1, y1), (x1 + w, y1 + h), (211,211,211), -1)
pygame.mixer.Sound('wav/a1.wav').play()
time.sleep(0.10)
pygame.mixer.Sound('wav/a1.wav').stop()
elif x>x2 and y>y2 and x+w1<(x2 + w) and y+h1<(y2+h):
cv2.rectangle(frame, (x2, y2), (x2 + w, y2 + h), (211,211,211), -1)
pygame.mixer.Sound('wav/b1.wav').play()
time.sleep(0.10)
pygame.mixer.Sound('wav/b1.wav').stop()
elif x>x3 and y>y3 and x+w1<(x3 + w) and y+h1<(y3+h):
cv2.rectangle(frame, (x3, y3), (x3 + w, y3 + h), (211,211,211), -1)
pygame.mixer.Sound('wav/c1.wav').play()
time.sleep(0.10)
pygame.mixer.Sound('wav/c1.wav').stop()
elif x>x4 and y>y4 and x+w1<(x4 + w) and y+h1<(y4+h):
cv2.rectangle(frame, (x4, y4), (x4 + w, y4 + h), (211,211,211), -1)
pygame.mixer.Sound('wav/c2.wav').play()
time.sleep(0.10)
pygame.mixer.Sound('wav/c2.wav').stop()
elif x>x5 and y>y5 and x+w1<(x5 + w) and y+h1<(y5+h):
cv2.rectangle(frame, (x5, y5), (x5 + w, y5 + h), (211,211,211), -1)
pygame.mixer.Sound('wav/d1.wav').play()
time.sleep(0.10)
pygame.mixer.Sound('wav/d1.wav').stop()
elif x>x6 and y>y6 and x+w1<(x6 + w) and y+h1<(y6+h):
cv2.rectangle(frame, (x6, y6), (x6 + w, y6 + h), (211,211,211), -1)
pygame.mixer.Sound('wav/e1.wav').play()
time.sleep(0.10)
pygame.mixer.Sound('wav/e1.wav').stop()
elif x>x7 and y>y7 and x+w1<(x7 + w) and y+h1<(y7+h):
cv2.rectangle(frame, (x7, y7), (x7 + w, y7 + h), (211,211,211), -1)
pygame.mixer.Sound('wav/f1.wav').play()
time.sleep(0.10)
pygame.mixer.Sound('wav/f1.wav').stop()
elif x>x8 and y>y8 and x+w1<(x8 + w) and y+h1<(y8+h):
cv2.rectangle(frame, (x8, y8), (x8 + w, y8 + h), (211,211,211), -1)
pygame.mixer.Sound('wav/g1.wav').play()
time.sleep(0.10)
pygame.mixer.Sound('wav/g1.wav').stop()
cap = cv2.VideoCapture(0)
while True:
ret,frame = cap.read()
frame = cv2.flip(frame, 1)
frame =cv2.GaussianBlur(frame,(9,9),0)
frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
draw_piano(frame)
lower_red = np.array([132, 90, 120]) # creating the mask for red color
upper_red = np.array([179, 255, 255])
mask_1 = cv2.inRange(frame_hsv, lower_red, upper_red)
lower_red = np.array([0, 110, 100])
upper_red = np.array([3, 255, 255])
mask_2 = cv2.inRange(frame_hsv, lower_red, upper_red)
masked = mask_1 + mask_2
kernel_1 = np.ones((4,4),np.uint8)
kernel_2 = np.ones((15,15),np.uint8)
masked=cv2.erode(masked,kernel_1,iterations = 1)
masked=cv2.morphologyEx(masked,cv2.MORPH_CLOSE,kernel_2)
xr, yr, wr, hr = 0, 0, 0, 0
contours, hierarchy = cv2.findContours(masked, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
try:
for i in range(0,10):
xr, yr, wr, hr = cv2.boundingRect(contours[i])
if wr*hr > 1000:
break
except:
pass
cv2.rectangle(frame, (xr, yr), (xr + wr, yr + hr), (0, 0, 255), 2)
key_press(frame, xr, yr, wr, hr)
frame = cv2.resize(frame, (800, 800))
cv2.imshow('frame', frame)
cv2.imshow('mask',masked)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
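# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original script): the containment test that
# key_press() repeats for every key, factored into one helper for clarity.
# The defaults mirror the key layout globals defined at the top of the script
# (w, h = 78, 110 and the (10, 10) origin); the function is illustrative only
# and is not called by the main loop above.
# ---------------------------------------------------------------------------
def find_pressed_key(x, y, w1, h1, key_w=78, key_h=110, origin=(10, 10), n_keys=8):
    # Return the 0-based index of the key whose rectangle fully contains the
    # detected bounding box (x, y, w1, h1), or None if no key contains it.
    ox, oy = origin
    for k in range(n_keys):
        kx = ox + k * key_w
        # Strict inequalities, matching the checks used in key_press().
        if x > kx and y > oy and x + w1 < kx + key_w and y + h1 < oy + key_h:
            return k
    return None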
| 43.266055
| 90
| 0.553223
|
d5a1cd165c93547dcc99d7272d1224eefc73e114
| 7,907
|
py
|
Python
|
ctypesgencore/parser/pplexer.py
|
kernsuite-debian/ctypesgen
|
391c5eecac347e91c720295866c9d2431a378ee1
|
[
"BSD-3-Clause"
] | null | null | null |
ctypesgencore/parser/pplexer.py
|
kernsuite-debian/ctypesgen
|
391c5eecac347e91c720295866c9d2431a378ee1
|
[
"BSD-3-Clause"
] | null | null | null |
ctypesgencore/parser/pplexer.py
|
kernsuite-debian/ctypesgen
|
391c5eecac347e91c720295866c9d2431a378ee1
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
'''Preprocess a C source file using gcc and convert the result into
a token stream
Reference is C99:
* http://www.open-std.org/JTC1/SC22/WG14/www/docs/n1124.pdf
'''
__docformat__ = 'restructuredtext'
import os, re, shlex, sys, tokenize, lex, yacc, traceback
import ctypes
from lex import TOKEN
tokens = (
'HEADER_NAME', 'IDENTIFIER', 'PP_NUMBER', 'CHARACTER_CONSTANT',
'STRING_LITERAL', 'OTHER',
'PTR_OP', 'INC_OP', 'DEC_OP', 'LEFT_OP', 'RIGHT_OP', 'LE_OP', 'GE_OP',
'EQ_OP', 'NE_OP', 'AND_OP', 'OR_OP', 'MUL_ASSIGN', 'DIV_ASSIGN',
'MOD_ASSIGN', 'ADD_ASSIGN', 'SUB_ASSIGN', 'LEFT_ASSIGN', 'RIGHT_ASSIGN',
'AND_ASSIGN', 'XOR_ASSIGN', 'OR_ASSIGN', 'PERIOD', 'ELLIPSIS',
'LPAREN', 'NEWLINE',
'PP_DEFINE', 'PP_DEFINE_NAME', 'PP_DEFINE_MACRO_NAME', 'PP_MACRO_PARAM',
'PP_STRINGIFY', 'PP_IDENTIFIER_PASTE', 'PP_END_DEFINE'
)
states = [('DEFINE',"exclusive")]
subs = {
'D': '[0-9]',
'L': '[a-zA-Z_]',
'H': '[a-fA-F0-9]',
'E': '[Ee][+-]?\s*{D}+',
'FS': '([FfLl]|d[dfl]|D[DFL]|[fFdD][0-9]+x?)',
'IS': '[uUlL]*',
}
# Helper: substitute {foo} with subs[foo] in string (makes regexes more lexy)
sub_pattern = re.compile('{([^}]*)}')
def sub_repl_match(m):
return subs[m.groups()[0]]
def sub(s):
return sub_pattern.sub(sub_repl_match, s)
# --------------------------------------------------------------------------
# Token value types
# --------------------------------------------------------------------------
# Numbers represented as int and float types.
# For all other tokens, type is just str representation.
class StringLiteral(str):
def __new__(cls, value):
assert value[0] == '"' and value[-1] == '"'
# Unescaping probably not perfect but close enough.
value = value[1:-1].decode('string_escape')
return str.__new__(cls, value)
# --------------------------------------------------------------------------
# Token declarations
# --------------------------------------------------------------------------
punctuators = {
# value: (regex, type)
r'...': (r'\.\.\.', 'ELLIPSIS'),
r'>>=': (r'>>=', 'RIGHT_ASSIGN'),
r'<<=': (r'<<=', 'LEFT_ASSIGN'),
r'+=': (r'\+=', 'ADD_ASSIGN'),
r'-=': (r'-=', 'SUB_ASSIGN'),
r'*=': (r'\*=', 'MUL_ASSIGN'),
r'/=': (r'/=', 'DIV_ASSIGN'),
r'%=': (r'%=', 'MOD_ASSIGN'),
r'&=': (r'&=', 'AND_ASSIGN'),
r'^=': (r'\^=', 'XOR_ASSIGN'),
r'|=': (r'\|=', 'OR_ASSIGN'),
r'>>': (r'>>', 'RIGHT_OP'),
r'<<': (r'<<', 'LEFT_OP'),
r'++': (r'\+\+', 'INC_OP'),
r'--': (r'--', 'DEC_OP'),
r'->': (r'->', 'PTR_OP'),
r'&&': (r'&&', 'AND_OP'),
r'||': (r'\|\|', 'OR_OP'),
r'<=': (r'<=', 'LE_OP'),
r'>=': (r'>=', 'GE_OP'),
r'==': (r'==', 'EQ_OP'),
r'!=': (r'!=', 'NE_OP'),
r'<:': (r'<:', '['),
r':>': (r':>', ']'),
r'<%': (r'<%', '{'),
r'%>': (r'%>', '}'),
r';': (r';', ';'),
r'{': (r'{', '{'),
r'}': (r'}', '}'),
r',': (r',', ','),
r':': (r':', ':'),
r'=': (r'=', '='),
r')': (r'\)', ')'),
r'[': (r'\[', '['),
r']': (r']', ']'),
r'.': (r'\.', 'PERIOD'),
r'&': (r'&', '&'),
r'!': (r'!', '!'),
r'~': (r'~', '~'),
r'-': (r'-', '-'),
r'+': (r'\+', '+'),
r'*': (r'\*', '*'),
r'/': (r'/', '/'),
r'%': (r'%', '%'),
r'<': (r'<', '<'),
r'>': (r'>', '>'),
r'^': (r'\^', '^'),
r'|': (r'\|', '|'),
r'?': (r'\?', '?')
}
def punctuator_regex(punctuators):
punctuator_regexes = [v[0] for v in punctuators.values()]
punctuator_regexes.sort(lambda a, b: -cmp(len(a), len(b)))
return '(%s)' % '|'.join(punctuator_regexes)
# Process line-number directives from the preprocessor
# See http://docs.freebsd.org/info/cpp/cpp.info.Output.html
DIRECTIVE = r'\#\s+(\d+)\s+"([^"]+)"[ \d]*\n'
@TOKEN(DIRECTIVE)
def t_ANY_directive(t):
t.lexer.filename = t.groups[2]
t.lexer.lineno = int(t.groups[1])
return None
@TOKEN(punctuator_regex(punctuators))
def t_ANY_punctuator(t):
t.type = punctuators[t.value][1]
return t
IDENTIFIER = sub('{L}({L}|{D})*')
@TOKEN(IDENTIFIER)
def t_INITIAL_identifier(t):
t.type = 'IDENTIFIER'
return t
@TOKEN(IDENTIFIER)
def t_DEFINE_identifier(t):
if t.lexer.next_is_define_name:
# This identifier is the name of a macro
# We need to look ahead and see if this macro takes parameters or not.
if t.lexpos + len(t.value) < t.lexer.lexlen and \
t.lexer.lexdata[t.lexpos + len(t.value)] == '(':
t.type = 'PP_DEFINE_MACRO_NAME'
# Look ahead and read macro parameter list
lexdata = t.lexer.lexdata
pos = t.lexpos + len(t.value) + 1
while lexdata[pos] not in '\n)':
pos+=1
params = lexdata[t.lexpos+len(t.value)+1 : pos]
paramlist = [x.strip() for x in params.split(",") if x.strip()]
t.lexer.macro_params = paramlist
else:
t.type = 'PP_DEFINE_NAME'
t.lexer.next_is_define_name = False
elif t.value in t.lexer.macro_params:
t.type = 'PP_MACRO_PARAM'
else:
t.type = 'IDENTIFIER'
return t
FLOAT_LITERAL = sub(r"(?P<p1>{D}+)?(?P<dp>[.]?)(?P<p2>(?(p1){D}*|{D}+))" \
r"(?P<exp>(?:[Ee][+-]?{D}+)?)(?P<suf>{FS}?)(?!\w)")
@TOKEN(FLOAT_LITERAL)
def t_ANY_float(t):
t.type = 'PP_NUMBER'
m = t.lexer.lexmatch
p1 = m.group("p1")
dp = m.group("dp")
p2 = m.group("p2")
exp = m.group("exp")
suf = m.group("suf")
if dp or exp or (suf and suf not in ("Ll")):
s = m.group(0)
if suf:
s = s[:-len(suf)]
# Attach a prefix so the parser can figure out if should become an
# integer, float, or long
t.value = "f" + s
elif (suf and suf in ("Ll")):
t.value = "l" + p1
else:
t.value = "i" + p1
return t
INT_LITERAL = sub(r"(?P<p1>(?:0x{H}+)|(?:0[0-7]+)|(?:[1-9]{D}+))(?P<suf>{IS})")
@TOKEN(INT_LITERAL)
def t_ANY_int(t):
t.type = 'PP_NUMBER'
m = t.lexer.lexmatch
if "L" in m.group(3) or "l" in m.group(2):
prefix = "l"
else:
prefix = "i"
g1 = m.group(2)
if g1.startswith("0x"):
# Convert base from hexadecimal
g1 = str(long(g1[2:],16))
elif g1[0]=="0":
# Convert base from octal
g1 = str(long(g1,8))
t.value = prefix + g1
return t
CHARACTER_CONSTANT = sub(r"L?'(\\.|[^\\'])+'")
@TOKEN(CHARACTER_CONSTANT)
def t_ANY_character_constant(t):
t.type = 'CHARACTER_CONSTANT'
return t
STRING_LITERAL = sub(r'L?"(\\.|[^\\"])*"')
@TOKEN(STRING_LITERAL)
def t_ANY_string_literal(t):
t.type = 'STRING_LITERAL'
t.value = StringLiteral(t.value)
return t
@TOKEN(r'\(')
def t_ANY_lparen(t):
if t.lexpos == 0 or t.lexer.lexdata[t.lexpos-1] not in (' \t\f\v\n'):
t.type = 'LPAREN'
else:
t.type = '('
return t
@TOKEN(r'\n')
def t_INITIAL_newline(t):
t.lexer.lineno += 1
return None
@TOKEN(r'\#define')
def t_INITIAL_pp_define(t):
t.type = 'PP_DEFINE'
t.lexer.begin("DEFINE")
t.lexer.next_is_define_name = True
t.lexer.macro_params = set()
return t
@TOKEN(r'\n')
def t_DEFINE_newline(t):
t.type = 'PP_END_DEFINE'
t.lexer.begin("INITIAL")
t.lexer.lineno += 1
del t.lexer.macro_params
# Damage control in case the token immediately after the #define failed
# to handle this
t.lexer.next_is_define_name = False
return t
@TOKEN(r'(\#\#)|(\#)')
def t_DEFINE_pp_param_op(t):
if t.value=='#':
t.type = 'PP_STRINGIFY'
else:
t.type = 'PP_IDENTIFIER_PASTE'
return t
def t_INITIAL_error(t):
t.type = 'OTHER'
return t
def t_DEFINE_error(t):
t.type = 'OTHER'
t.value = t.value[0]
t.lexer.lexpos+=1 # Skip it if it's an error in a #define
return t
t_ANY_ignore = ' \t\v\f\r'
| 27.359862
| 79
| 0.502846
|
e773dd39ce91e41bae2b1b2a5885788456933304
| 13,500
|
py
|
Python
|
test/models/test_model_list_gp_regression.py
|
talesa/botorch
|
ab04dd39a2d4c7734e41c5f26eb2dbba5b0e1771
|
[
"MIT"
] | null | null | null |
test/models/test_model_list_gp_regression.py
|
talesa/botorch
|
ab04dd39a2d4c7734e41c5f26eb2dbba5b0e1771
|
[
"MIT"
] | null | null | null |
test/models/test_model_list_gp_regression.py
|
talesa/botorch
|
ab04dd39a2d4c7734e41c5f26eb2dbba5b0e1771
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import warnings
import torch
from botorch.acquisition.objective import ScalarizedPosteriorTransform
from botorch.exceptions.errors import BotorchTensorDimensionError
from botorch.exceptions.warnings import OptimizationWarning
from botorch.fit import fit_gpytorch_model
from botorch.models import ModelListGP
from botorch.models.gp_regression import FixedNoiseGP, SingleTaskGP
from botorch.models.transforms import Standardize
from botorch.models.transforms.input import Normalize
from botorch.posteriors import GPyTorchPosterior
from botorch.utils.testing import _get_random_data, BotorchTestCase
from gpytorch.distributions import MultitaskMultivariateNormal, MultivariateNormal
from gpytorch.kernels import MaternKernel, ScaleKernel
from gpytorch.likelihoods import LikelihoodList
from gpytorch.means import ConstantMean
from gpytorch.mlls import SumMarginalLogLikelihood
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from gpytorch.priors import GammaPrior
def _get_model(fixed_noise=False, use_octf=False, use_intf=False, **tkwargs):
train_x1, train_y1 = _get_random_data(
batch_shape=torch.Size(), m=1, n=10, **tkwargs
)
train_x2, train_y2 = _get_random_data(
batch_shape=torch.Size(), m=1, n=11, **tkwargs
)
octfs = [Standardize(m=1), Standardize(m=1)] if use_octf else [None, None]
intfs = [Normalize(d=1), Normalize(d=1)] if use_intf else [None, None]
if fixed_noise:
train_y1_var = 0.1 + 0.1 * torch.rand_like(train_y1, **tkwargs)
train_y2_var = 0.1 + 0.1 * torch.rand_like(train_y2, **tkwargs)
model1 = FixedNoiseGP(
train_X=train_x1,
train_Y=train_y1,
train_Yvar=train_y1_var,
outcome_transform=octfs[0],
input_transform=intfs[0],
)
model2 = FixedNoiseGP(
train_X=train_x2,
train_Y=train_y2,
train_Yvar=train_y2_var,
outcome_transform=octfs[1],
input_transform=intfs[1],
)
else:
model1 = SingleTaskGP(
train_X=train_x1,
train_Y=train_y1,
outcome_transform=octfs[0],
input_transform=intfs[0],
)
model2 = SingleTaskGP(
train_X=train_x2,
train_Y=train_y2,
outcome_transform=octfs[1],
input_transform=intfs[1],
)
model = ModelListGP(model1, model2)
return model.to(**tkwargs)
class TestModelListGP(BotorchTestCase):
def test_ModelListGP(self):
for dtype, use_octf in itertools.product(
(torch.float, torch.double), (False, True)
):
tkwargs = {"device": self.device, "dtype": dtype}
model = _get_model(use_octf=use_octf, **tkwargs)
self.assertIsInstance(model, ModelListGP)
self.assertIsInstance(model.likelihood, LikelihoodList)
for m in model.models:
self.assertIsInstance(m.mean_module, ConstantMean)
self.assertIsInstance(m.covar_module, ScaleKernel)
matern_kernel = m.covar_module.base_kernel
self.assertIsInstance(matern_kernel, MaternKernel)
self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)
if use_octf:
self.assertIsInstance(m.outcome_transform, Standardize)
# test constructing likelihood wrapper
mll = SumMarginalLogLikelihood(model.likelihood, model)
for mll_ in mll.mlls:
self.assertIsInstance(mll_, ExactMarginalLogLikelihood)
# test model fitting (sequential)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=OptimizationWarning)
mll = fit_gpytorch_model(mll, options={"maxiter": 1}, max_retries=1)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=OptimizationWarning)
# test model fitting (joint)
mll = fit_gpytorch_model(
mll, options={"maxiter": 1}, max_retries=1, sequential=False
)
# test subset outputs
subset_model = model.subset_output([1])
self.assertIsInstance(subset_model, ModelListGP)
self.assertEqual(len(subset_model.models), 1)
sd_subset = subset_model.models[0].state_dict()
sd = model.models[1].state_dict()
self.assertTrue(set(sd_subset.keys()) == set(sd.keys()))
self.assertTrue(all(torch.equal(v, sd[k]) for k, v in sd_subset.items()))
# test posterior
test_x = torch.tensor([[0.25], [0.75]], **tkwargs)
posterior = model.posterior(test_x)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertIsInstance(posterior.mvn, MultitaskMultivariateNormal)
if use_octf:
# ensure un-transformation is applied
submodel = model.models[0]
p0 = submodel.posterior(test_x)
tmp_tf = submodel.outcome_transform
del submodel.outcome_transform
p0_tf = submodel.posterior(test_x)
submodel.outcome_transform = tmp_tf
expected_var = tmp_tf.untransform_posterior(p0_tf).variance
self.assertTrue(torch.allclose(p0.variance, expected_var))
# test observation_noise
posterior = model.posterior(test_x, observation_noise=True)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertIsInstance(posterior.mvn, MultitaskMultivariateNormal)
# test output_indices
posterior = model.posterior(
test_x, output_indices=[0], observation_noise=True
)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertIsInstance(posterior.mvn, MultivariateNormal)
# test condition_on_observations
f_x = torch.rand(2, 1, **tkwargs)
f_y = torch.rand(2, 2, **tkwargs)
cm = model.condition_on_observations(f_x, f_y)
self.assertIsInstance(cm, ModelListGP)
# test condition_on_observations batched
f_x = torch.rand(3, 2, 1, **tkwargs)
f_y = torch.rand(3, 2, 2, **tkwargs)
cm = model.condition_on_observations(f_x, f_y)
self.assertIsInstance(cm, ModelListGP)
# test condition_on_observations batched (fast fantasies)
f_x = torch.rand(2, 1, **tkwargs)
f_y = torch.rand(3, 2, 2, **tkwargs)
cm = model.condition_on_observations(f_x, f_y)
self.assertIsInstance(cm, ModelListGP)
# test condition_on_observations (incorrect input shape error)
with self.assertRaises(BotorchTensorDimensionError):
model.condition_on_observations(f_x, torch.rand(3, 2, 3, **tkwargs))
# test posterior transform
X = torch.rand(3, 1, **tkwargs)
weights = torch.tensor([1, 2], **tkwargs)
post_tf = ScalarizedPosteriorTransform(weights=weights)
posterior_tf = model.posterior(X, posterior_transform=post_tf)
self.assertTrue(
torch.allclose(
posterior_tf.mean,
model.posterior(X).mean @ weights.unsqueeze(-1),
)
)
def test_ModelListGP_fixed_noise(self):
for dtype, use_octf in itertools.product(
(torch.float, torch.double), (False, True)
):
tkwargs = {"device": self.device, "dtype": dtype}
model = _get_model(fixed_noise=True, use_octf=use_octf, **tkwargs)
self.assertIsInstance(model, ModelListGP)
self.assertIsInstance(model.likelihood, LikelihoodList)
for m in model.models:
self.assertIsInstance(m.mean_module, ConstantMean)
self.assertIsInstance(m.covar_module, ScaleKernel)
matern_kernel = m.covar_module.base_kernel
self.assertIsInstance(matern_kernel, MaternKernel)
self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)
# test model fitting
mll = SumMarginalLogLikelihood(model.likelihood, model)
for mll_ in mll.mlls:
self.assertIsInstance(mll_, ExactMarginalLogLikelihood)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=OptimizationWarning)
mll = fit_gpytorch_model(mll, options={"maxiter": 1}, max_retries=1)
# test posterior
test_x = torch.tensor([[0.25], [0.75]], **tkwargs)
posterior = model.posterior(test_x)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertIsInstance(posterior.mvn, MultitaskMultivariateNormal)
if use_octf:
# ensure un-transformation is applied
submodel = model.models[0]
p0 = submodel.posterior(test_x)
tmp_tf = submodel.outcome_transform
del submodel.outcome_transform
p0_tf = submodel.posterior(test_x)
submodel.outcome_transform = tmp_tf
expected_var = tmp_tf.untransform_posterior(p0_tf).variance
self.assertTrue(torch.allclose(p0.variance, expected_var))
# test output_indices
posterior = model.posterior(
test_x, output_indices=[0], observation_noise=True
)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertIsInstance(posterior.mvn, MultivariateNormal)
# test condition_on_observations
f_x = torch.rand(2, 1, **tkwargs)
f_y = torch.rand(2, 2, **tkwargs)
noise = 0.1 + 0.1 * torch.rand_like(f_y)
cm = model.condition_on_observations(f_x, f_y, noise=noise)
self.assertIsInstance(cm, ModelListGP)
# test condition_on_observations batched
f_x = torch.rand(3, 2, 1, **tkwargs)
f_y = torch.rand(3, 2, 2, **tkwargs)
noise = 0.1 + 0.1 * torch.rand_like(f_y)
cm = model.condition_on_observations(f_x, f_y, noise=noise)
self.assertIsInstance(cm, ModelListGP)
# test condition_on_observations batched (fast fantasies)
f_x = torch.rand(2, 1, **tkwargs)
f_y = torch.rand(3, 2, 2, **tkwargs)
noise = 0.1 + 0.1 * torch.rand(2, 2, **tkwargs)
cm = model.condition_on_observations(f_x, f_y, noise=noise)
self.assertIsInstance(cm, ModelListGP)
# test condition_on_observations (incorrect input shape error)
with self.assertRaises(BotorchTensorDimensionError):
model.condition_on_observations(
f_x, torch.rand(3, 2, 3, **tkwargs), noise=noise
)
# test condition_on_observations (incorrect noise shape error)
f_y = torch.rand(2, 2, **tkwargs)
with self.assertRaises(BotorchTensorDimensionError):
model.condition_on_observations(
f_x, f_y, noise=torch.rand(2, 3, **tkwargs)
)
def test_ModelListGP_single(self):
tkwargs = {"device": self.device, "dtype": torch.float}
train_x1, train_y1 = _get_random_data(
batch_shape=torch.Size(), m=1, n=10, **tkwargs
)
model1 = SingleTaskGP(train_X=train_x1, train_Y=train_y1)
model = ModelListGP(model1)
model.to(**tkwargs)
test_x = torch.tensor([[0.25], [0.75]], **tkwargs)
posterior = model.posterior(test_x)
self.assertIsInstance(posterior, GPyTorchPosterior)
self.assertIsInstance(posterior.mvn, MultivariateNormal)
def test_transform_revert_train_inputs(self):
tkwargs = {"device": self.device, "dtype": torch.float}
model_list = _get_model(use_intf=True, **tkwargs)
org_inputs = [m.train_inputs[0] for m in model_list.models]
model_list.eval()
for i, m in enumerate(model_list.models):
self.assertTrue(
torch.allclose(
m.train_inputs[0],
m.input_transform.preprocess_transform(org_inputs[i]),
)
)
self.assertTrue(m._has_transformed_inputs)
self.assertTrue(torch.equal(m._original_train_inputs, org_inputs[i]))
model_list.train(mode=True)
for i, m in enumerate(model_list.models):
self.assertTrue(torch.equal(m.train_inputs[0], org_inputs[i]))
self.assertFalse(m._has_transformed_inputs)
model_list.train(mode=False)
for i, m in enumerate(model_list.models):
self.assertTrue(
torch.allclose(
m.train_inputs[0],
m.input_transform.preprocess_transform(org_inputs[i]),
)
)
self.assertTrue(m._has_transformed_inputs)
self.assertTrue(torch.equal(m._original_train_inputs, org_inputs[i]))
| 45.302013
| 85
| 0.625259
|
0f9b908cfe4b72a0ec925317c7359c3bc6b0bc88
| 3,364
|
py
|
Python
|
portality/blog.py
|
gaybro8777/doaj
|
27d9d98ce4f496ae52acbaba6ee8e42c84cf1a58
|
[
"Apache-2.0"
] | null | null | null |
portality/blog.py
|
gaybro8777/doaj
|
27d9d98ce4f496ae52acbaba6ee8e42c84cf1a58
|
[
"Apache-2.0"
] | null | null | null |
portality/blog.py
|
gaybro8777/doaj
|
27d9d98ce4f496ae52acbaba6ee8e42c84cf1a58
|
[
"Apache-2.0"
] | null | null | null |
from portality.core import app
import feedparser
from portality.dao import DomainObject as DomainObject
from copy import deepcopy
from datetime import datetime
class FeedError(Exception):
pass
class News(DomainObject):
__type__ = "news"
@classmethod
def by_remote_id(cls, remote_id):
q = NewsQuery(remote_id)
es_result = cls.query(q=q.query())
records = [News(**r.get("_source")) for r in es_result.get("hits", {}).get("hits", [])]
return records
@classmethod
def latest(cls, n):
q = NewsQuery(size=n)
es_result = cls.query(q=q.query())
records = [News(**r.get("_source")) for r in es_result.get("hits", {}).get("hits", [])]
return records
@property
def remote_id(self): return self.data.get("remote_id")
@remote_id.setter
def remote_id(self, rid): self.data["remote_id"] = rid
@property
def url(self): return self.data.get("url")
@url.setter
def url(self, link): self.data["url"] = link
@property
def title(self): return self.data.get("title")
@title.setter
def title(self, t): self.data["title"] = t
@property
def updated(self): return self.data.get("updated")
@updated.setter
def updated(self, date): self.data["updated"] = date
@property
def published(self): return self.data.get("published")
@published.setter
def published(self, date): self.data["published"] = date
@property
def summary(self): return self.data.get("summary")
@summary.setter
def summary(self, s): self.data["summary"] = s
def published_formatted(self, format="%-d %B %Y"):
try:
dt = datetime.strptime(self.published, "%Y-%m-%dT%H:%M:%SZ")
return dt.strftime(format)
        except (TypeError, ValueError):
return self.published
class NewsQuery(object):
_remote_term = { "term" : { "remote_id.exact" : "<remote id>" } }
def __init__(self, remote_id=None, size=5):
self.remote_id = remote_id
self.size = size
def query(self):
q = {"query" : {}, "size" : self.size, "sort" : {"published" : {"order" : "desc"}}}
if self.remote_id is not None:
rt = deepcopy(self._remote_term)
rt["term"]["remote_id.exact"] = self.remote_id
q["query"].update(rt)
else:
q["query"]["match_all"] = {}
return q
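# Illustration (added by the editor, not part of the original module): for example,
# NewsQuery("abc123", size=3).query() produces a query of roughly this shape:
#   {"query": {"term": {"remote_id.exact": "abc123"}},
#    "size": 3,
#    "sort": {"published": {"order": "desc"}}}
# while NewsQuery(size=5).query() falls back to a match_all query.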
def read_feed():
feed_url = app.config.get("BLOG_FEED_URL")
if feed_url is None:
raise FeedError("No BLOG_FEED_URL defined in settings")
f = feedparser.parse(feed_url)
if f.bozo > 0:
raise FeedError(f.bozo_exception)
for e in f.entries:
save_entry(e)
def save_entry(entry):
news = None
existing = News.by_remote_id(entry.id)
if len(existing) > 1:
raise FeedError("There is more than one object with this id in the index: " + entry.id)
elif len(existing) == 1:
news = existing[0]
else:
news = News()
alts = [l.get("href") for l in entry.links if l.get("rel") == "alternate"]
if len(alts) == 0:
raise FeedError("Unable to get url of post from link@rel=alternate")
news.remote_id = entry.id
news.url = alts[0]
news.title = entry.title
news.updated = entry.updated
news.summary = entry.summary
news.published = entry.published
news.save()
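# Editor's note (illustrative only): read_feed() is the entry point; it raises FeedError
# when BLOG_FEED_URL is unset or the feed fails to parse, and upserts each entry by its
# remote_id via save_entry().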
| 29.252174
| 95
| 0.611772
|
73732cf9a7098402f3caf0cf3b6697f868149a08
| 2,319
|
py
|
Python
|
osplugin/config.py
|
mkr1481/mecm-applcm
|
87538ac4aa5d5607597d3bf43b0ac0f675cc292b
|
[
"Apache-2.0"
] | null | null | null |
osplugin/config.py
|
mkr1481/mecm-applcm
|
87538ac4aa5d5607597d3bf43b0ac0f675cc292b
|
[
"Apache-2.0"
] | null | null | null |
osplugin/config.py
|
mkr1481/mecm-applcm
|
87538ac4aa5d5607597d3bf43b0ac0f675cc292b
|
[
"Apache-2.0"
] | null | null | null |
"""
# Copyright 2021 21CN Corporation Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# -*- coding: utf-8 -*-
import os
ssl_enabled = os.getenv('ENABLE_SSL', 'true') != 'false'
listen_ip = os.getenv('LISTEN_IP', '[::]')
base_dir = os.getenv('BASE_DIR', '/usr/app')
log_dir = os.getenv("LOG_DIR", base_dir + '/log')
private_key_certificate_chain_pairs = (
base_dir + '/ssl/server_tls.key',
base_dir + '/ssl/server_tls.crt',
)
root_certificates = base_dir + '/ssl/ca.crt'
_JWT_PUBLIC_KEY_DEF = '-----BEGIN PUBLIC KEY-----\n' \
'MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmesVPVWJmsRIzitiu6rs\n' \
'bbIfBbt3t97qiJ4yQH1bCHpYu+ab+Xs5heSnfFjHH8nZDAR0n2zvliztIvTDwl/2\n' \
'NF9+/loFvmQMrSv1dQQCOBc5qZ5rw/0o7Cq3buXHHJ7CwP0NnreK4N1sZ4oLBTQQ\n' \
'e4ERkXhiBNVxAmnbgl7QuhemMV0gxPABSLLKGIrzYR7n8OFDCuSAyOcaoyxJihA/\n' \
'4Tkh+Vs82tWlFglV7UxtU2+3e5sN9u/TJ5J3qRZnYq/NWymix9RRD53vp1RGUMCg\n' \
'kT40wK5Ak9qdVkr82JTR1g7AtXm9SxlgMNr0rD35WSacioFwECWun+VPL4FyzZ30\n' \
'BwIDAQAB\n'\
'-----END PUBLIC KEY-----'
jwt_public_key = os.getenv('JWT_PUBLIC_KEY', _JWT_PUBLIC_KEY_DEF)
db_user = os.getenv('DB_USER', 'osplugin')
db_password = os.getenv('DB_PASSWORD', '')
db_host = os.getenv('DB_HOST', 'mepm-postgres')
db_port = int(os.getenv('DB_PORT', '5432'))
db_name = os.getenv('DB_NAME', 'osplugindb')
# default chunk_size 2M
_DEFAULT_IMAGE_CHUNK_SIZE = 1024 * 1024 * 2
chunk_size = int(os.getenv("IMAGE_CHUNK_SIZE", str(_DEFAULT_IMAGE_CHUNK_SIZE)))
_SERVER_CA_VERIFY = os.getenv('SERVER_CA_VERIFY_DIR', 'false')
if _SERVER_CA_VERIFY == 'false':
_SERVER_CA_VERIFY = False
elif _SERVER_CA_VERIFY == 'true':
_SERVER_CA_VERIFY = True
server_ca_verify = _SERVER_CA_VERIFY
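# Illustrative note (added by the editor): every value above is resolved once at import
# time from the process environment, so overrides must be set before the module is
# imported, e.g. `export ENABLE_SSL=false DB_HOST=localhost` would yield
# ssl_enabled == False and db_host == 'localhost'.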
| 38.016393
| 87
| 0.710651
|
04c3908a2fb7ca0310b228c1394661d40d9c5143
| 6,681
|
py
|
Python
|
src/driving_node/src/deeplearning_driving_node.py
|
mommy79/AuDi-GIT-turtlebot3_autorace
|
fd1382246f1ee74ee70857006563184d672a6666
|
[
"Apache-2.0"
] | 1
|
2021-06-13T06:20:15.000Z
|
2021-06-13T06:20:15.000Z
|
src/driving_node/src/deeplearning_driving_node.py
|
taening/AuDi-GIT-turtlebot3_autorace
|
fd1382246f1ee74ee70857006563184d672a6666
|
[
"Apache-2.0"
] | null | null | null |
src/driving_node/src/deeplearning_driving_node.py
|
taening/AuDi-GIT-turtlebot3_autorace
|
fd1382246f1ee74ee70857006563184d672a6666
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from std_msgs.msg import Float32, UInt8
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image, CompressedImage
from cv_bridge import CvBridge
import tensorflow as tf
import numpy as np
import enum
import rospy
import cv2
import threading
import time
# Hyper Parameters
training_epochs = 10
batch_size = 100
learning_rate = 0.0001
img_height = 64
img_width = 120
img_channel = 3
total_data = 50000
class DeeplearningDrivingNode:
def __init__(self, sess, name):
self.cvBridge = CvBridge()
self.sess = sess
self.name = name
self._build_net()
self.driving_mode_step = enum.Enum('step_of_driving_mode', 'manual_mode lane_mode right_lane_mode left_lane_mode intensity_lane_mode deeplearning_lane_mode tunnel_mode spectate_mode')
self.driving_mode = self.driving_mode_step.manual_mode.value
self.sub_driving_mode = rospy.Subscriber('/mission/mod/driving', UInt8, self.cb_driving_mode, queue_size=1)
self.sub_img_rev = rospy.Subscriber('/controller/image/driving', CompressedImage, self.cb_image_receive, queue_size=1)
self.pub_vel = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
rospy.on_shutdown(self.fn_stop)
def fn_driving_deeplearning(self, linear_vel, angular_vel):
twist_msg = Twist()
twist_msg.linear.x = linear_vel
twist_msg.angular.z = angular_vel
self.pub_vel.publish(twist_msg)
def cb_image_receive(self, msg):
np_arr = np.fromstring(msg.data, np.uint8)
img_ori = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
if self.driving_mode == self.driving_mode_step.deeplearning_lane_mode.value:
driving_time_pre = time.time()
process_time_pre = time.time()
driving_time_now = time.time()
rospy.loginfo('dt: ' + str(driving_time_now - driving_time_pre))
driving_time_pre = driving_time_now
try:
src = img_ori
src = src[240:480, 0:640]
re_frame = cv2.resize(src, (120, 64), interpolation=cv2.INTER_AREA)
re_frame = [re_frame]
result = self.predict(re_frame)
rospy.loginfo(result)
linear_vel = 0.18
angular_vel = result[0][0] * 1.7
self.fn_driving_deeplearning(linear_vel, angular_vel)
except Exception as e:
rospy.logerr("Fail : " + str(e))
process_time_now = time.time()
rospy.loginfo('process time: ' + str(process_time_now - process_time_pre))
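    # Editor's note (descriptive only): the crop src[240:480, 0:640] keeps the lower half
    # of a 640x480 camera frame, and the resize to (120, 64) matches the
    # (img_width, img_height) the network expects; the single regression output is scaled
    # by 1.7 and published as the angular velocity at a fixed 0.18 m/s linear speed.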
def cb_driving_mode(self, msg):
self.driving_mode = msg.data
def fn_stop(self):
rospy.loginfo("[Process End] Shut down")
rospy.sleep(0.3)
twist_msg = Twist()
twist_msg.linear.x = 0.0
twist_msg.angular.z = 0.0
self.pub_vel.publish(twist_msg)
def _build_net(self):
        # Scope the variables under the name passed to the constructor.
with tf.variable_scope(self.name):
            # Create a boolean tensor used to toggle dropout
            # tf.layers.dropout(training=True/False) switches between training and inference behaviour
            # default = False
self.training = tf.placeholder(tf.bool)
            # Create the input graph (placeholders)
self.X = tf.placeholder(tf.float32, [None, img_height, img_width, img_channel], name='X_im')
self.Y = tf.placeholder(tf.float32, [None, 1])
# Convolutional Layer1
conv1 = tf.layers.conv2d(inputs=self.X, filters=32, kernel_size=[3, 3], padding='SAME',
activation=tf.nn.relu)
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2, padding="SAME")
# Convolutional Layer2
conv2 = tf.layers.conv2d(inputs=pool1, filters=64, kernel_size=[3, 3], padding='SAME',
activation=tf.nn.relu)
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2, padding='SAME')
# Convolutional Layer3
conv3 = tf.layers.conv2d(inputs=pool2, filters=128, kernel_size=[5, 5], padding='SAME',
activation=tf.nn.relu)
pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2, padding='SAME')
# Convolutional Layer4
conv4 = tf.layers.conv2d(inputs=pool3, filters=128, kernel_size=[3, 3], padding='SAME',
activation=tf.nn.relu)
# Dropout Layer
# Dense Layer4 with Relu
flat = tf.reshape(conv4, [-1, 128 * 15 * 8])
dropout1 = tf.layers.dropout(inputs=flat, rate=0.2, training=self.training)
dense4 = tf.layers.dense(inputs=dropout1, units=128, activation=tf.nn.relu)
dropout2 = tf.layers.dropout(inputs=dense4, rate=0.5, training=self.training)
dense5 = tf.layers.dense(inputs=dropout2, units=128, activation=tf.nn.relu)
dropout3 = tf.layers.dropout(inputs=dense5, rate=0.5, training=self.training)
dense6 = tf.layers.dense(inputs=dropout3, units=64, activation=tf.nn.relu)
self.logits = tf.layers.dense(inputs=dense6, units=1, name='logits')
#self.softmax = tf.nn.softmax(self.logits, axis=None, name=None, dim=None)
# Cost Function
self.cost = tf.reduce_mean(tf.square(self.logits-self.Y))
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)
def train(self, x_data, y_data, training=False):
return self.sess.run([self.cost, self.optimizer],
feed_dict={self.X: x_data, self.Y: y_data, self.training: training})
def predict(self, x_test, training=False):
return self.sess.run(self.logits, feed_dict={self.X: x_test, self.training: training})
def restore(self, mode):
# save_file = './dcgan_model/DCGAN_cnn.ckpt'
if mode == 'test':
save_file = '/home/nvidia/Auto-Mobile-Robot/deeplearning/driving_model/auto.ckpt'
saver = tf.train.Saver()
saver.restore(self.sess, save_file)
@staticmethod
def main():
rospy.spin()
if __name__ == "__main__":
rospy.init_node('Deeplearning_Driving_Node')
print('\n' + 'Learning started...' + '\n')
sess = tf.Session()
m = DeeplearningDrivingNode(sess, "model")
m.restore('test')
m.main()
# ret, frame = src.read()
# re_frame =cv2.resize(frame,(120,64),interpolation=cv2.INTER_AREA)
# cv2.imshow('d', re_frame)
# re_frame = [re_frame]
#res = m.predict(re_frame)
| 38.177143
| 191
| 0.624457
|
0d08d871cdab4cdc8b0b7bd3d39ec380b184b88d
| 804
|
py
|
Python
|
python-scrips/PDF extraction-elsie/main.py
|
emetikos/chatbot
|
b28378661afda009e8b5e93d856ada6f34ef151a
|
[
"MIT"
] | null | null | null |
python-scrips/PDF extraction-elsie/main.py
|
emetikos/chatbot
|
b28378661afda009e8b5e93d856ada6f34ef151a
|
[
"MIT"
] | null | null | null |
python-scrips/PDF extraction-elsie/main.py
|
emetikos/chatbot
|
b28378661afda009e8b5e93d856ada6f34ef151a
|
[
"MIT"
] | null | null | null |
import PyPDF2
# open the pdf file
pdfFileObj = open("Hello.pdf", "rb")
# creating an object reader to read file
pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
# print out no. of pages in the file
print("Number of Pages: ",pdfReader.numPages)
pageObj = pdfReader.getPage(0)
# extract the text from the first page
print(pageObj.extractText())
# loop to extract the text of every page in the file/doc
p = 0
while p < pdfReader.getNumPages():
    pageinfo = pdfReader.getPage(p)
    print(pageinfo.extractText())
    p = p + 1
# close the file only after all pages have been read
pdfFileObj.close()
# search_keywords=['chatbot', 'testing', 'open']
# # ['project', 'group', 'interest', 'report', 'submitted']
#
# for sentence in sentences:
#     lst = []
#     for word in search_keywords:
#         if word in sentence:
#             lst.append(word)
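# A compact, runnable form of the sketch above (illustrative; assumes `sentences` is a
# list of strings extracted from the PDF pages):
# matches = [word for sentence in sentences for word in search_keywords if word in sentence]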
| 22.971429
| 63
| 0.665423
|
502acc91902f829c346d2af504a0cd0526fc4629
| 5,822
|
py
|
Python
|
rldb/db/paper__acktr/algo__acktr/entries.py
|
seungjaeryanlee/sotarl
|
8c471c4666d6210c68f3cb468e439a2b168c785d
|
[
"MIT"
] | 45
|
2019-05-13T17:39:33.000Z
|
2022-03-07T23:44:13.000Z
|
rldb/db/paper__acktr/algo__acktr/entries.py
|
seungjaeryanlee/sotarl
|
8c471c4666d6210c68f3cb468e439a2b168c785d
|
[
"MIT"
] | 2
|
2019-03-29T01:41:59.000Z
|
2019-07-02T02:48:31.000Z
|
rldb/db/paper__acktr/algo__acktr/entries.py
|
seungjaeryanlee/sotarl
|
8c471c4666d6210c68f3cb468e439a2b168c785d
|
[
"MIT"
] | 2
|
2020-04-07T20:57:30.000Z
|
2020-07-08T12:55:15.000Z
|
atari_entries = [
{
'env-title': 'atari-alien',
'env-variant': 'No-op start',
'score': 3197.1,
},
{
'env-title': 'atari-amidar',
'env-variant': 'No-op start',
'score': 1059.4,
},
{
'env-title': 'atari-assault',
'env-variant': 'No-op start',
'score': 10777.7,
},
{
'env-title': 'atari-asterix',
'env-variant': 'No-op start',
'score': 31583.0,
},
{
'env-title': 'atari-asteroids',
'env-variant': 'No-op start',
'score': 34171.6,
},
{
'env-title': 'atari-atlantis',
'env-variant': 'No-op start',
'score': 3433182.0,
},
{
'env-title': 'atari-bank-heist',
'env-variant': 'No-op start',
'score': 1289.7,
},
{
'env-title': 'atari-battle-zone',
'env-variant': 'No-op start',
'score': 8910.0,
},
{
'env-title': 'atari-beam-rider',
'env-variant': 'No-op start',
'score': 13581.4,
},
{
'env-title': 'atari-berzerk',
'env-variant': 'No-op start',
'score': 927.2,
},
{
'env-title': 'atari-bowling',
'env-variant': 'No-op start',
'score': 24.3,
},
{
'env-title': 'atari-boxing',
'env-variant': 'No-op start',
'score': 1.45,
},
{
'env-title': 'atari-breakout',
'env-variant': 'No-op start',
'score': 735.7,
},
{
'env-title': 'atari-centipede',
'env-variant': 'No-op start',
'score': 7125.28,
},
{
'env-title': 'atari-crazy-climber',
'env-variant': 'No-op start',
'score': 150444.0
},
{
'env-title': 'atari-demon-attack',
'env-variant': 'No-op start',
'score': 274176.7
},
{
'env-title': 'atari-double-dunk',
'env-variant': 'No-op start',
'score': -0.54,
},
{
'env-title': 'atari-enduro',
'env-variant': 'No-op start',
'score': 0.0,
},
{
'env-title': 'atari-fishing-derby',
'env-variant': 'No-op start',
'score': 33.73,
},
{
'env-title': 'atari-freeway',
'env-variant': 'No-op start',
'score': 0.0,
},
{
'env-title': 'atari-gopher',
'env-variant': 'No-op start',
'score': 47730.8,
},
{
'env-title': 'atari-ice-hockey',
'env-variant': 'No-op start',
'score': -4.2,
},
{
'env-title': 'atari-jamesbond',
'env-variant': 'No-op start',
'score': 490.0,
},
{
'env-title': 'atari-kangaroo',
'env-variant': 'No-op start',
'score': 3150.0,
},
{
'env-title': 'atari-krull',
'env-variant': 'No-op start',
'score': 9686.9,
},
{
'env-title': 'atari-kung-fu-master',
'env-variant': 'No-op start',
'score': 34954.0,
},
{
'env-title': 'atari-phoenix',
'env-variant': 'No-op start',
'score': 133433.7,
},
{
'env-title': 'atari-pitfall',
'env-variant': 'No-op start',
'score': -1.1,
},
{
'env-title': 'atari-pong',
'env-variant': 'No-op start',
'score': 20.9,
},
{
'env-title': 'atari-qbert',
'env-variant': 'No-op start',
'score': 23151.5,
},
{
'env-title': 'atari-riverraid',
'env-variant': 'No-op start',
'score': 17762.8,
},
{
'env-title': 'atari-road-runner',
'env-variant': 'No-op start',
'score': 53446.0,
},
{
'env-title': 'atari-robotank',
'env-variant': 'No-op start',
'score': 16.5,
},
{
'env-title': 'atari-seaquest',
'env-variant': 'No-op start',
'score': 1776.0,
},
{
'env-title': 'atari-solaris',
'env-variant': 'No-op start',
'score': 2368.6,
},
{
'env-title': 'atari-space-invaders',
'env-variant': 'No-op start',
'score': 19723.0,
},
{
'env-title': 'atari-star-gunner',
'env-variant': 'No-op start',
'score': 82920.0,
},
{
'env-title': 'atari-time-pilot',
'env-variant': 'No-op start',
'score': 22286.0,
},
{
'env-title': 'atari-tutankham',
'env-variant': 'No-op start',
'score': 314.3,
},
{
'env-title': 'atari-up-n-down',
'env-variant': 'No-op start',
'score': 436665.8,
},
{
'env-title': 'atari-video-pinball',
'env-variant': 'No-op start',
'score': 100496.6,
},
{
'env-title': 'atari-wizard-of-wor',
'env-variant': 'No-op start',
'score': 702.0,
},
{
'env-title': 'atari-yars-revenge',
'env-variant': 'No-op start',
'score': 125169.0,
},
{
'env-title': 'atari-zaxxon',
'env-variant': 'No-op start',
'score': 17448.0,
},
]
mujoco_entries = [
{
'env-title': 'mujoco-ant',
'score': 4621.6,
},
{
'env-title': 'mujoco-half-cheetah',
'score': 5586.3,
},
{
'env-title': 'mujoco-hopper',
'score': 3915.9,
},
{
'env-title': 'mujoco-inverted-pendulum',
'score': 1000.0,
},
{
'env-title': 'mujoco-inverted-double-pendulum',
'score': 9356.0,
},
{
'env-title': 'mujoco-reacher',
'score': -1.5,
},
{
'env-title': 'mujoco-swimmer',
'score': 138.0,
},
{
'env-title': 'mujoco-walker2d',
'score': 6198.8,
},
]
entries = atari_entries + mujoco_entries
| 22.392308
| 55
| 0.437135
|
f1d07b906971c12d57649f79c1464352dac2a01c
| 2,287
|
py
|
Python
|
tests/unit/test_diffusion2d_functions.py
|
LarissaBrencher/testing-python-exercise
|
84787d0ee8905addd6c738e737532465780c0733
|
[
"CC-BY-4.0"
] | null | null | null |
tests/unit/test_diffusion2d_functions.py
|
LarissaBrencher/testing-python-exercise
|
84787d0ee8905addd6c738e737532465780c0733
|
[
"CC-BY-4.0"
] | null | null | null |
tests/unit/test_diffusion2d_functions.py
|
LarissaBrencher/testing-python-exercise
|
84787d0ee8905addd6c738e737532465780c0733
|
[
"CC-BY-4.0"
] | null | null | null |
"""
Tests for functions in class SolveDiffusion2D
"""
from diffusion2d import SolveDiffusion2D
import pytest
import numpy as np
from unittest import TestCase
class TestDiffusion2D(TestCase):
def setUp(self):
# fixture
self.solver = SolveDiffusion2D()
def test_initialize_domain(self):
"""
Check function SolveDiffusion2D.initialize_domain
"""
w = 1.
h = 2.
dx = 0.2
dy = 0.5
# expected result
expected_nx = 5
expected_ny = 4
# actual result
self.solver.initialize_domain(w, h, dx, dy)
# test
self.assertEqual(self.solver.nx, expected_nx)
self.assertEqual(self.solver.ny, expected_ny)
def test_initialize_physical_parameters(self):
"""
        Checks function SolveDiffusion2D.initialize_physical_parameters
"""
# fixture
d = 2.
T_cold = 350.
T_hot = 420.
self.solver.dx = 1.
self.solver.dy = 1.
# expected result
expected_dt = 0.125
# actual result
self.solver.initialize_physical_parameters(d, T_cold, T_hot)
# test
self.assertAlmostEqual(expected_dt, self.solver.dt)
self.assertAlmostEqual(T_cold, self.solver.T_cold)
self.assertAlmostEqual(T_hot, self.solver.T_hot)
def test_set_initial_condition(self):
"""
        Checks function SolveDiffusion2D.set_initial_condition
"""
# fixture
self.solver.nx = 2
self.solver.ny = 2
self.solver.dx = 4.5
self.solver.dy = 5.5
self.solver.T_cold = 200.
self.solver.T_hot = 420.
# expected result
# expected_u = np.array([[200., 200.], [200., 420.]])
expected_u = self.solver.T_cold * np.ones((self.solver.nx,self.solver.ny))
expected_u[1,1] = self.solver.T_hot
# actual result
actual_u = self.solver.set_initial_condition()
# expected_u_approx = pytest.approx(expected_u, abs=0.01)
# test
# self.assertAlmostEqual(expected_u_approx, actual_u)
for idx_x in range(self.solver.nx):
for idx_y in range(self.solver.ny):
self.assertEqual(expected_u[idx_x, idx_y], actual_u[idx_x, idx_y])
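    # Editor's note (suggestion only): the element-wise loop above could also be written
    # as a single vectorised check, e.g.
    #   np.testing.assert_allclose(actual_u, expected_u)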
| 30.493333
| 82
| 0.602973
|
72564a7635f7a6cf2d4863f61957b787f1bfc3e3
| 2,751
|
py
|
Python
|
tensorpack/utils/timer.py
|
ChriPo92/tensorpack
|
45d2155850d3870bbf110c94c73508c707e1ae42
|
[
"Apache-2.0"
] | 121
|
2019-06-04T08:30:53.000Z
|
2021-12-17T13:27:54.000Z
|
tensorpack/utils/timer.py
|
lkn123/tensorpack
|
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
|
[
"Apache-2.0"
] | 1
|
2019-11-21T04:29:09.000Z
|
2019-11-21T04:29:09.000Z
|
tensorpack/utils/timer.py
|
lkn123/tensorpack
|
d7a13cb74c9066bc791d7aafc3b744b60ee79a9f
|
[
"Apache-2.0"
] | 22
|
2019-10-10T15:35:47.000Z
|
2021-09-13T12:46:09.000Z
|
# -*- coding: utf-8 -*-
# File: timer.py
import atexit
from collections import defaultdict
from contextlib import contextmanager
from time import time as timer
import six
from . import logger
from .stats import StatCounter
if six.PY3:
from time import perf_counter as timer # noqa
__all__ = ['total_timer', 'timed_operation',
'print_total_timer', 'IterSpeedCounter']
@contextmanager
def timed_operation(msg, log_start=False):
"""
Surround a context with a timer.
Args:
msg(str): the log to print.
log_start(bool): whether to print also at the beginning.
Example:
.. code-block:: python
with timed_operation('Good Stuff'):
time.sleep(1)
Will print:
.. code-block:: python
Good stuff finished, time:1sec.
"""
assert len(msg)
if log_start:
logger.info('Start {} ...'.format(msg))
start = timer()
yield
msg = msg[0].upper() + msg[1:]
logger.info('{} finished, time:{:.4f} sec.'.format(
msg, timer() - start))
_TOTAL_TIMER_DATA = defaultdict(StatCounter)
@contextmanager
def total_timer(msg):
""" A context which add the time spent inside to TotalTimer. """
start = timer()
yield
t = timer() - start
_TOTAL_TIMER_DATA[msg].feed(t)
def print_total_timer():
"""
Print the content of the TotalTimer, if it's not empty. This function will automatically get
called when program exits.
"""
if len(_TOTAL_TIMER_DATA) == 0:
return
for k, v in six.iteritems(_TOTAL_TIMER_DATA):
logger.info("Total Time: {} -> {:.2f} sec, {} times, {:.3g} sec/time".format(
k, v.sum, v.count, v.average))
atexit.register(print_total_timer)
class IterSpeedCounter(object):
""" Test how often some code gets reached.
Example:
Print the speed of the iteration every 100 times.
.. code-block:: python
speed = IterSpeedCounter(100)
for k in range(1000):
# do something
speed()
"""
def __init__(self, print_every, name=None):
"""
Args:
print_every(int): interval to print.
name(str): name to used when print.
"""
self.cnt = 0
self.print_every = int(print_every)
self.name = name if name else 'IterSpeed'
def reset(self):
self.start = timer()
def __call__(self):
if self.cnt == 0:
self.reset()
self.cnt += 1
if self.cnt % self.print_every != 0:
return
t = timer() - self.start
logger.info("{}: {:.2f} sec, {} times, {:.3g} sec/time".format(
self.name, t, self.cnt, t / self.cnt))
| 23.715517
| 96
| 0.586696
|
1748197936ae4cc554cc49a1cbd241171b134478
| 55,316
|
py
|
Python
|
tests/unit/utils/test_jinja.py
|
ContextLogic/salt
|
f98839c72df2294cdd1670835d10904b12089622
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/utils/test_jinja.py
|
ContextLogic/salt
|
f98839c72df2294cdd1670835d10904b12089622
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/utils/test_jinja.py
|
ContextLogic/salt
|
f98839c72df2294cdd1670835d10904b12089622
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Tests for salt.utils.jinja
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
from jinja2 import Environment, DictLoader, exceptions
import ast
import copy
import datetime
import os
import pprint
import re
import tempfile
# Import Salt Testing libs
from tests.support.unit import skipIf, TestCase
from tests.support.case import ModuleCase
from tests.support.helpers import flaky
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch, MagicMock, Mock
from tests.support.paths import BASE_FILES, TMP, TMP_CONF_DIR
# Import Salt libs
import salt.config
import salt.loader
from salt.exceptions import SaltRenderError
from salt.ext import six
from salt.ext.six.moves import builtins
import salt.utils.json
from salt.utils.decorators.jinja import JinjaFilter
from salt.utils.jinja import (
SaltCacheLoader,
SerializerExtension,
ensure_sequence_filter,
tojson
)
from salt.utils.odict import OrderedDict
from salt.utils.templates import JINJA, render_jinja_tmpl
# dateutils is needed so that the strftime jinja filter is loaded
import salt.utils.dateutils # pylint: disable=unused-import
import salt.utils.files
import salt.utils.stringutils
import salt.utils.yaml
# Import 3rd party libs
try:
import timelib # pylint: disable=W0611
HAS_TIMELIB = True
except ImportError:
HAS_TIMELIB = False
CACHEDIR = os.path.join(TMP, 'jinja-template-cache')
BLINESEP = salt.utils.stringutils.to_bytes(os.linesep)
class JinjaTestCase(TestCase):
def test_tojson(self):
'''
Test the tojson filter for those using Jinja < 2.9. Non-ascii unicode
content should be dumped with ensure_ascii=True.
'''
data = {'Non-ascii words': ['süß', 'спам', 'яйца']}
result = tojson(data)
expected = '{"Non-ascii words": ["s\\u00fc\\u00df", "\\u0441\\u043f\\u0430\\u043c", "\\u044f\\u0439\\u0446\\u0430"]}'
assert result == expected, result
class MockFileClient(object):
'''
Does not download files but records any file request for testing
'''
def __init__(self, loader=None):
if loader:
loader._file_client = self
self.requests = []
def get_file(self, template, dest='', makedirs=False, saltenv='base'):
self.requests.append({
'path': template,
'dest': dest,
'makedirs': makedirs,
'saltenv': saltenv
})
def _setup_test_dir(src_dir, test_dir):
os.makedirs(test_dir)
salt.utils.files.recursive_copy(src_dir, test_dir)
filename = os.path.join(test_dir, 'non_ascii')
with salt.utils.files.fopen(filename, 'wb') as fp:
fp.write(b'Assun\xc3\xa7\xc3\xa3o' + BLINESEP)
filename = os.path.join(test_dir, 'hello_simple')
with salt.utils.files.fopen(filename, 'wb') as fp:
fp.write(b'world' + BLINESEP)
filename = os.path.join(test_dir, 'hello_import')
lines = [
r"{% from 'macro' import mymacro -%}",
r"{% from 'macro' import mymacro -%}",
r"{{ mymacro('Hey') ~ mymacro(a|default('a'), b|default('b')) }}",
]
with salt.utils.files.fopen(filename, 'wb') as fp:
for line in lines:
fp.write(line.encode('utf-8') + BLINESEP)
class TestSaltCacheLoader(TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.template_dir = os.path.join(self.tempdir, 'files', 'test')
_setup_test_dir(
os.path.join(BASE_FILES, 'templates'),
self.template_dir
)
self.opts = {
'cachedir': self.tempdir,
'file_roots': {
'test': [self.template_dir]
},
'pillar_roots': {
'test': [self.template_dir]
}
}
super(TestSaltCacheLoader, self).setUp()
def tearDown(self):
salt.utils.files.rm_rf(self.tempdir)
def test_searchpath(self):
'''
The searchpath is based on the cachedir option and the saltenv parameter
'''
tmp = tempfile.gettempdir()
opts = copy.deepcopy(self.opts)
opts.update({'cachedir': tmp})
loader = self.get_loader(opts=opts, saltenv='test')
assert loader.searchpath == [os.path.join(tmp, 'files', 'test')]
def test_mockclient(self):
'''
A MockFileClient is used that records all file requests normally sent
to the master.
'''
loader = self.get_loader(opts=self.opts, saltenv='test')
res = loader.get_source(None, 'hello_simple')
assert len(res) == 3
# res[0] on Windows is unicode and use os.linesep so it works cross OS
self.assertEqual(six.text_type(res[0]), 'world' + os.linesep)
tmpl_dir = os.path.join(self.template_dir, 'hello_simple')
self.assertEqual(res[1], tmpl_dir)
assert res[2](), 'Template up to date?'
assert len(loader._file_client.requests)
self.assertEqual(loader._file_client.requests[0]['path'], 'salt://hello_simple')
def get_loader(self, opts=None, saltenv='base'):
'''
Now that we instantiate the client in the __init__, we need to mock it
'''
if opts is None:
opts = self.opts
with patch.object(SaltCacheLoader, 'file_client', Mock()):
loader = SaltCacheLoader(opts, saltenv)
# Create a mock file client and attach it to the loader
MockFileClient(loader)
return loader
def get_test_saltenv(self):
'''
Setup a simple jinja test environment
'''
loader = self.get_loader(saltenv='test')
jinja = Environment(loader=loader)
return loader._file_client, jinja
def test_import(self):
'''
You can import and use macros from other files
'''
fc, jinja = self.get_test_saltenv()
result = jinja.get_template('hello_import').render()
self.assertEqual(result, 'Hey world !a b !')
assert len(fc.requests) == 2
self.assertEqual(fc.requests[0]['path'], 'salt://hello_import')
self.assertEqual(fc.requests[1]['path'], 'salt://macro')
def test_relative_import(self):
'''
You can import using relative paths
issue-13889
'''
fc, jinja = self.get_test_saltenv()
tmpl = jinja.get_template('relative/rhello')
result = tmpl.render()
self.assertEqual(result, 'Hey world !a b !')
assert len(fc.requests) == 3
self.assertEqual(fc.requests[0]['path'], 'salt://relative/rhello')
self.assertEqual(fc.requests[1]['path'], 'salt://relative/rmacro')
self.assertEqual(fc.requests[2]['path'], 'salt://macro')
# This must fail when rendered: attempts to import from outside file root
template = jinja.get_template('relative/rescape')
self.assertRaises(exceptions.TemplateNotFound, template.render)
def test_include(self):
'''
You can also include a template that imports and uses macros
'''
fc, jinja = self.get_test_saltenv()
result = jinja.get_template('hello_include').render()
self.assertEqual(result, 'Hey world !a b !')
assert len(fc.requests) == 3
self.assertEqual(fc.requests[0]['path'], 'salt://hello_include')
self.assertEqual(fc.requests[1]['path'], 'salt://hello_import')
self.assertEqual(fc.requests[2]['path'], 'salt://macro')
def test_include_context(self):
'''
        Context variables are passed to the included template by default.
'''
_, jinja = self.get_test_saltenv()
result = jinja.get_template('hello_include').render(a='Hi', b='Salt')
self.assertEqual(result, 'Hey world !Hi Salt !')
class TestGetTemplate(TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.template_dir = os.path.join(self.tempdir, 'files', 'test')
_setup_test_dir(
os.path.join(BASE_FILES, 'templates'),
self.template_dir
)
self.local_opts = {
'cachedir': self.tempdir,
'file_client': 'local',
'file_ignore_regex': None,
'file_ignore_glob': None,
'file_roots': {
'test': [self.template_dir]
},
'pillar_roots': {
'test': [self.template_dir]
},
'fileserver_backend': ['roots'],
'hash_type': 'md5',
'extension_modules': os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'extmods'),
}
self.local_salt = {}
super(TestGetTemplate, self).setUp()
def tearDown(self):
salt.utils.files.rm_rf(self.tempdir)
def test_fallback(self):
'''
A Template with a filesystem loader is returned as fallback
if the file is not contained in the searchpath
'''
fn_ = os.path.join(self.template_dir, 'hello_simple')
with salt.utils.files.fopen(fn_) as fp_:
out = render_jinja_tmpl(
salt.utils.stringutils.to_unicode(fp_.read()),
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt)
)
self.assertEqual(out, 'world' + os.linesep)
def test_fallback_noloader(self):
'''
A Template with a filesystem loader is returned as fallback
if the file is not contained in the searchpath
'''
filename = os.path.join(self.template_dir, 'hello_import')
with salt.utils.files.fopen(filename) as fp_:
out = render_jinja_tmpl(
salt.utils.stringutils.to_unicode(fp_.read()),
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt)
)
self.assertEqual(out, 'Hey world !a b !' + os.linesep)
def test_saltenv(self):
'''
If the template is within the searchpath it can
import, include and extend other templates.
        The initial template is expected to be already cached;
get_template does not request it from the master again.
'''
fc = MockFileClient()
with patch.object(SaltCacheLoader, 'file_client', MagicMock(return_value=fc)):
filename = os.path.join(self.template_dir, 'hello_import')
with salt.utils.files.fopen(filename) as fp_:
out = render_jinja_tmpl(
salt.utils.stringutils.to_unicode(fp_.read()),
dict(opts={'cachedir': self.tempdir, 'file_client': 'remote',
'file_roots': self.local_opts['file_roots'],
'pillar_roots': self.local_opts['pillar_roots']},
a='Hi', b='Salt', saltenv='test', salt=self.local_salt))
self.assertEqual(out, 'Hey world !Hi Salt !' + os.linesep)
self.assertEqual(fc.requests[0]['path'], 'salt://macro')
def test_macro_additional_log_for_generalexc(self):
'''
If we failed in a macro because of e.g. a TypeError, get
more output from trace.
'''
expected = r'''Jinja error:.*division.*
.*macrogeneral\(2\):
---
\{% macro mymacro\(\) -%\}
\{\{ 1/0 \}\} <======================
\{%- endmacro %\}
---.*'''
filename = os.path.join(self.template_dir, 'hello_import_generalerror')
fc = MockFileClient()
with patch.object(SaltCacheLoader, 'file_client', MagicMock(return_value=fc)):
with salt.utils.files.fopen(filename) as fp_:
self.assertRaisesRegex(
SaltRenderError,
expected,
render_jinja_tmpl,
salt.utils.stringutils.to_unicode(fp_.read()),
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
def test_macro_additional_log_for_undefined(self):
'''
If we failed in a macro because of undefined variables, get
more output from trace.
'''
expected = r'''Jinja variable 'b' is undefined
.*macroundefined\(2\):
---
\{% macro mymacro\(\) -%\}
\{\{b.greetee\}\} <-- error is here <======================
\{%- endmacro %\}
---'''
filename = os.path.join(self.template_dir, 'hello_import_undefined')
fc = MockFileClient()
with patch.object(SaltCacheLoader, 'file_client', MagicMock(return_value=fc)):
with salt.utils.files.fopen(filename) as fp_:
self.assertRaisesRegex(
SaltRenderError,
expected,
render_jinja_tmpl,
salt.utils.stringutils.to_unicode(fp_.read()),
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
def test_macro_additional_log_syntaxerror(self):
'''
If we failed in a macro, get more output from trace.
'''
expected = r'''Jinja syntax error: expected token .*end.*got '-'.*
.*macroerror\(2\):
---
# macro
\{% macro mymacro\(greeting, greetee='world'\) -\} <-- error is here <======================
\{\{ greeting ~ ' ' ~ greetee \}\} !
\{%- endmacro %\}
---.*'''
filename = os.path.join(self.template_dir, 'hello_import_error')
fc = MockFileClient()
with patch.object(SaltCacheLoader, 'file_client', MagicMock(return_value=fc)):
with salt.utils.files.fopen(filename) as fp_:
self.assertRaisesRegex(
SaltRenderError,
expected,
render_jinja_tmpl,
salt.utils.stringutils.to_unicode(fp_.read()),
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
def test_non_ascii_encoding(self):
fc = MockFileClient()
with patch.object(SaltCacheLoader, 'file_client', MagicMock(return_value=fc)):
filename = os.path.join(self.template_dir, 'hello_import')
with salt.utils.files.fopen(filename) as fp_:
out = render_jinja_tmpl(
salt.utils.stringutils.to_unicode(fp_.read()),
dict(opts={'cachedir': self.tempdir, 'file_client': 'remote',
'file_roots': self.local_opts['file_roots'],
'pillar_roots': self.local_opts['pillar_roots']},
a='Hi', b='Sàlt', saltenv='test', salt=self.local_salt))
self.assertEqual(out, salt.utils.stringutils.to_unicode('Hey world !Hi Sàlt !' + os.linesep))
self.assertEqual(fc.requests[0]['path'], 'salt://macro')
filename = os.path.join(self.template_dir, 'non_ascii')
with salt.utils.files.fopen(filename, 'rb') as fp_:
out = render_jinja_tmpl(
salt.utils.stringutils.to_unicode(fp_.read(), 'utf-8'),
dict(opts={'cachedir': self.tempdir, 'file_client': 'remote',
'file_roots': self.local_opts['file_roots'],
'pillar_roots': self.local_opts['pillar_roots']},
a='Hi', b='Sàlt', saltenv='test', salt=self.local_salt))
self.assertEqual('Assunção' + os.linesep, out)
self.assertEqual(fc.requests[0]['path'], 'salt://macro')
@skipIf(HAS_TIMELIB is False, 'The `timelib` library is not installed.')
def test_strftime(self):
response = render_jinja_tmpl(
'{{ "2002/12/25"|strftime }}',
dict(
opts=self.local_opts,
saltenv='test',
salt=self.local_salt
))
self.assertEqual(response, '2002-12-25')
objects = (
datetime.datetime(2002, 12, 25, 12, 00, 00, 00),
'2002/12/25',
1040814000,
'1040814000'
)
for object in objects:
response = render_jinja_tmpl(
'{{ object|strftime }}',
dict(
object=object,
opts=self.local_opts,
saltenv='test',
salt=self.local_salt
))
self.assertEqual(response, '2002-12-25')
response = render_jinja_tmpl(
'{{ object|strftime("%b %d, %Y") }}',
dict(
object=object,
opts=self.local_opts,
saltenv='test',
salt=self.local_salt
))
self.assertEqual(response, 'Dec 25, 2002')
response = render_jinja_tmpl(
'{{ object|strftime("%y") }}',
dict(
object=object,
opts=self.local_opts,
saltenv='test',
salt=self.local_salt
))
self.assertEqual(response, '02')
def test_non_ascii(self):
fn = os.path.join(self.template_dir, 'non_ascii')
out = JINJA(
fn,
opts=self.local_opts,
saltenv='test',
salt=self.local_salt
)
with salt.utils.files.fopen(out['data'], 'rb') as fp:
result = salt.utils.stringutils.to_unicode(fp.read(), 'utf-8')
self.assertEqual(salt.utils.stringutils.to_unicode('Assunção' + os.linesep), result)
def test_get_context_has_enough_context(self):
template = '1\n2\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\ne\nf'
context = salt.utils.stringutils.get_context(template, 8)
expected = '---\n[...]\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\n[...]\n---'
self.assertEqual(expected, context)
def test_get_context_at_top_of_file(self):
template = '1\n2\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\ne\nf'
context = salt.utils.stringutils.get_context(template, 1)
expected = '---\n1\n2\n3\n4\n5\n6\n[...]\n---'
self.assertEqual(expected, context)
def test_get_context_at_bottom_of_file(self):
template = '1\n2\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\ne\nf'
context = salt.utils.stringutils.get_context(template, 15)
expected = '---\n[...]\na\nb\nc\nd\ne\nf\n---'
self.assertEqual(expected, context)
def test_get_context_2_context_lines(self):
template = '1\n2\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\ne\nf'
context = salt.utils.stringutils.get_context(template, 8, num_lines=2)
expected = '---\n[...]\n6\n7\n8\n9\na\n[...]\n---'
self.assertEqual(expected, context)
def test_get_context_with_marker(self):
template = '1\n2\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\ne\nf'
context = salt.utils.stringutils.get_context(template, 8, num_lines=2, marker=' <---')
expected = '---\n[...]\n6\n7\n8 <---\n9\na\n[...]\n---'
self.assertEqual(expected, context)
def test_render_with_syntax_error(self):
template = 'hello\n\n{{ bad\n\nfoo'
expected = r'.*---\nhello\n\n{{ bad\n\nfoo <======================\n---'
self.assertRaisesRegex(
SaltRenderError,
expected,
render_jinja_tmpl,
template,
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt)
)
@skipIf(six.PY3, 'Not applicable to Python 3')
@skipIf(NO_MOCK, NO_MOCK_REASON)
def test_render_with_unicode_syntax_error(self):
with patch.object(builtins, '__salt_system_encoding__', 'utf-8'):
template = 'hello\n\n{{ bad\n\nfoo한'
expected = r'.*---\nhello\n\n{{ bad\n\nfoo\xed\x95\x9c <======================\n---'
self.assertRaisesRegex(
SaltRenderError,
expected,
render_jinja_tmpl,
template,
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt)
)
@skipIf(NO_MOCK, NO_MOCK_REASON)
def test_render_with_utf8_syntax_error(self):
with patch.object(builtins, '__salt_system_encoding__', 'utf-8'):
template = 'hello\n\n{{ bad\n\nfoo한'
expected = salt.utils.stringutils.to_str(
r'.*---\nhello\n\n{{ bad\n\nfoo한 <======================\n---'
)
self.assertRaisesRegex(
SaltRenderError,
expected,
render_jinja_tmpl,
template,
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt)
)
def test_render_with_undefined_variable(self):
template = "hello\n\n{{ foo }}\n\nfoo"
expected = r'Jinja variable \'foo\' is undefined'
self.assertRaisesRegex(
SaltRenderError,
expected,
render_jinja_tmpl,
template,
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt)
)
def test_render_with_undefined_variable_utf8(self):
template = "hello\xed\x95\x9c\n\n{{ foo }}\n\nfoo"
expected = r'Jinja variable \'foo\' is undefined'
self.assertRaisesRegex(
SaltRenderError,
expected,
render_jinja_tmpl,
template,
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt)
)
def test_render_with_undefined_variable_unicode(self):
template = 'hello한\n\n{{ foo }}\n\nfoo'
expected = r'Jinja variable \'foo\' is undefined'
self.assertRaisesRegex(
SaltRenderError,
expected,
render_jinja_tmpl,
template,
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt)
)
class TestJinjaDefaultOptions(TestCase):
def __init__(self, *args, **kws):
TestCase.__init__(self, *args, **kws)
self.local_opts = {
'cachedir': CACHEDIR,
'file_client': 'local',
'file_ignore_regex': None,
'file_ignore_glob': None,
'file_roots': {
'test': [os.path.join(BASE_FILES, 'templates')]
},
'pillar_roots': {
'test': [os.path.join(BASE_FILES, 'templates')]
},
'fileserver_backend': ['roots'],
'hash_type': 'md5',
'extension_modules': os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'extmods'),
'jinja_env': {
'line_comment_prefix': '##',
'line_statement_prefix': '%',
},
}
self.local_salt = {
'myvar': 'zero',
'mylist': [0, 1, 2, 3],
}
def test_comment_prefix(self):
template = """
%- set myvar = 'one'
## ignored comment 1
{{- myvar -}}
{%- set myvar = 'two' %} ## ignored comment 2
{{- myvar }} ## ignored comment 3
%- if myvar == 'two':
%- set myvar = 'three'
%- endif
{{- myvar -}}
"""
rendered = render_jinja_tmpl(template,
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'onetwothree')
def test_statement_prefix(self):
template = """
{%- set mylist = ['1', '2', '3'] %}
%- set mylist = ['one', 'two', 'three']
%- for item in mylist:
{{- item }}
%- endfor
"""
rendered = render_jinja_tmpl(template,
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'onetwothree')
class TestCustomExtensions(TestCase):
def __init__(self, *args, **kws):
super(TestCustomExtensions, self).__init__(*args, **kws)
self.local_opts = {
'cachedir': CACHEDIR,
'file_client': 'local',
'file_ignore_regex': None,
'file_ignore_glob': None,
'file_roots': {
'test': [os.path.join(BASE_FILES, 'templates')]
},
'pillar_roots': {
'test': [os.path.join(BASE_FILES, 'templates')]
},
'fileserver_backend': ['roots'],
'hash_type': 'md5',
'extension_modules': os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'extmods'),
}
self.local_salt = {
# 'dns.A': dnsutil.A,
# 'dns.AAAA': dnsutil.AAAA,
# 'file.exists': filemod.file_exists,
# 'file.basename': filemod.basename,
# 'file.dirname': filemod.dirname
}
def test_regex_escape(self):
dataset = 'foo?:.*/\\bar'
env = Environment(extensions=[SerializerExtension])
env.filters.update(JinjaFilter.salt_jinja_filters)
rendered = env.from_string('{{ dataset|regex_escape }}').render(dataset=dataset)
self.assertEqual(rendered, re.escape(dataset))
def test_unique_string(self):
dataset = 'foo'
unique = set(dataset)
env = Environment(extensions=[SerializerExtension])
env.filters.update(JinjaFilter.salt_jinja_filters)
if six.PY3:
rendered = env.from_string('{{ dataset|unique }}').render(dataset=dataset).strip("'{}").split("', '")
self.assertEqual(sorted(rendered), sorted(list(unique)))
else:
rendered = env.from_string('{{ dataset|unique }}').render(dataset=dataset)
self.assertEqual(rendered, "{0}".format(unique))
def test_unique_tuple(self):
dataset = ('foo', 'foo', 'bar')
unique = set(dataset)
env = Environment(extensions=[SerializerExtension])
env.filters.update(JinjaFilter.salt_jinja_filters)
if six.PY3:
rendered = env.from_string('{{ dataset|unique }}').render(dataset=dataset).strip("'{}").split("', '")
self.assertEqual(sorted(rendered), sorted(list(unique)))
else:
rendered = env.from_string('{{ dataset|unique }}').render(dataset=dataset)
self.assertEqual(rendered, "{0}".format(unique))
def test_unique_list(self):
dataset = ['foo', 'foo', 'bar']
unique = ['foo', 'bar']
env = Environment(extensions=[SerializerExtension])
env.filters.update(JinjaFilter.salt_jinja_filters)
if six.PY3:
rendered = env.from_string('{{ dataset|unique }}').render(dataset=dataset).strip("'[]").split("', '")
self.assertEqual(rendered, unique)
else:
rendered = env.from_string('{{ dataset|unique }}').render(dataset=dataset)
self.assertEqual(rendered, "{0}".format(unique))
def test_serialize_json(self):
dataset = {
"foo": True,
"bar": 42,
"baz": [1, 2, 3],
"qux": 2.0
}
env = Environment(extensions=[SerializerExtension])
rendered = env.from_string('{{ dataset|json }}').render(dataset=dataset)
self.assertEqual(dataset, salt.utils.json.loads(rendered))
def test_serialize_yaml(self):
dataset = {
"foo": True,
"bar": 42,
"baz": [1, 2, 3],
"qux": 2.0,
"spam": OrderedDict([
('foo', OrderedDict([
('bar', 'baz'),
('qux', 42)
])
)
])
}
env = Environment(extensions=[SerializerExtension])
rendered = env.from_string('{{ dataset|yaml }}').render(dataset=dataset)
self.assertEqual(dataset, salt.utils.yaml.safe_load(rendered))
def test_serialize_yaml_str(self):
dataset = "str value"
env = Environment(extensions=[SerializerExtension])
rendered = env.from_string('{{ dataset|yaml }}').render(dataset=dataset)
self.assertEqual(dataset, rendered)
def test_serialize_yaml_unicode(self):
dataset = 'str value'
env = Environment(extensions=[SerializerExtension])
rendered = env.from_string('{{ dataset|yaml }}').render(dataset=dataset)
if six.PY3:
self.assertEqual("str value", rendered)
else:
# Due to a bug in the equality handler, this check needs to be split
# up into several different assertions. We need to check that the various
# string segments are present in the rendered value, as well as the
# type of the rendered variable (should be unicode, which is the same as
# six.text_type). This should cover all use cases but also allow the test
# to pass on CentOS 6 running Python 2.7.
self.assertIn('str value', rendered)
self.assertIsInstance(rendered, six.text_type)
def test_serialize_python(self):
dataset = {
"foo": True,
"bar": 42,
"baz": [1, 2, 3],
"qux": 2.0
}
env = Environment(extensions=[SerializerExtension])
rendered = env.from_string('{{ dataset|python }}').render(dataset=dataset)
self.assertEqual(rendered, pprint.pformat(dataset))
def test_load_yaml(self):
env = Environment(extensions=[SerializerExtension])
rendered = env.from_string('{% set document = "{foo: it works}"|load_yaml %}{{ document.foo }}').render()
self.assertEqual(rendered, "it works")
rendered = env.from_string('{% set document = document|load_yaml %}'
'{{ document.foo }}').render(document="{foo: it works}")
self.assertEqual(rendered, "it works")
with self.assertRaises((TypeError, exceptions.TemplateRuntimeError)):
env.from_string('{% set document = document|load_yaml %}'
'{{ document.foo }}').render(document={"foo": "it works"})
def test_load_tag(self):
env = Environment(extensions=[SerializerExtension])
source = '{{ bar }}, ' + \
'{% load_yaml as docu %}{foo: it works, {{ bar }}: baz}{% endload %}' + \
'{{ docu.foo }}'
rendered = env.from_string(source).render(bar="barred")
self.assertEqual(rendered, "barred, it works")
source = '{{ bar }}, {% load_json as docu %}{"foo": "it works", "{{ bar }}": "baz"}{% endload %}' + \
'{{ docu.foo }}'
rendered = env.from_string(source).render(bar="barred")
self.assertEqual(rendered, "barred, it works")
with self.assertRaises(exceptions.TemplateSyntaxError):
env.from_string('{% load_yamle as document %}{foo, bar: it works}{% endload %}').render()
with self.assertRaises(exceptions.TemplateRuntimeError):
env.from_string('{% load_json as document %}{foo, bar: it works}{% endload %}').render()
def test_load_json(self):
env = Environment(extensions=[SerializerExtension])
rendered = env.from_string('{% set document = \'{"foo": "it works"}\'|load_json %}'
'{{ document.foo }}').render()
self.assertEqual(rendered, "it works")
rendered = env.from_string('{% set document = document|load_json %}'
'{{ document.foo }}').render(document='{"foo": "it works"}')
self.assertEqual(rendered, "it works")
# bad quotes
with self.assertRaises(exceptions.TemplateRuntimeError):
env.from_string("{{ document|load_json }}").render(document="{'foo': 'it works'}")
# not a string
with self.assertRaises(exceptions.TemplateRuntimeError):
env.from_string('{{ document|load_json }}').render(document={"foo": "it works"})
def test_load_yaml_template(self):
loader = DictLoader({'foo': '{bar: "my god is blue", foo: [1, 2, 3]}'})
env = Environment(extensions=[SerializerExtension], loader=loader)
rendered = env.from_string('{% import_yaml "foo" as doc %}{{ doc.bar }}').render()
self.assertEqual(rendered, "my god is blue")
with self.assertRaises(exceptions.TemplateNotFound):
env.from_string('{% import_yaml "does not exists" as doc %}').render()
def test_load_json_template(self):
loader = DictLoader({'foo': '{"bar": "my god is blue", "foo": [1, 2, 3]}'})
env = Environment(extensions=[SerializerExtension], loader=loader)
rendered = env.from_string('{% import_json "foo" as doc %}{{ doc.bar }}').render()
self.assertEqual(rendered, "my god is blue")
with self.assertRaises(exceptions.TemplateNotFound):
env.from_string('{% import_json "does not exists" as doc %}').render()
def test_load_text_template(self):
loader = DictLoader({'foo': 'Foo!'})
env = Environment(extensions=[SerializerExtension], loader=loader)
rendered = env.from_string('{% import_text "foo" as doc %}{{ doc }}').render()
self.assertEqual(rendered, "Foo!")
with self.assertRaises(exceptions.TemplateNotFound):
env.from_string('{% import_text "does not exists" as doc %}').render()
def test_catalog(self):
loader = DictLoader({
'doc1': '{bar: "my god is blue"}',
'doc2': '{% import_yaml "doc1" as local2 %} never exported',
'doc3': '{% load_yaml as local3 %}{"foo": "it works"}{% endload %} me neither',
'main1': '{% from "doc2" import local2 %}{{ local2.bar }}',
'main2': '{% from "doc3" import local3 %}{{ local3.foo }}',
'main3': '''
{% import "doc2" as imported2 %}
{% import "doc3" as imported3 %}
{{ imported2.local2.bar }}
''',
'main4': '''
{% import "doc2" as imported2 %}
{% import "doc3" as imported3 %}
{{ imported3.local3.foo }}
''',
'main5': '''
{% from "doc2" import local2 as imported2 %}
{% from "doc3" import local3 as imported3 %}
{{ imported2.bar }}
''',
'main6': '''
{% from "doc2" import local2 as imported2 %}
{% from "doc3" import local3 as imported3 %}
{{ imported3.foo }}
'''
})
env = Environment(extensions=[SerializerExtension], loader=loader)
rendered = env.get_template('main1').render()
self.assertEqual(rendered, "my god is blue")
rendered = env.get_template('main2').render()
self.assertEqual(rendered, "it works")
rendered = env.get_template('main3').render().strip()
self.assertEqual(rendered, "my god is blue")
rendered = env.get_template('main4').render().strip()
self.assertEqual(rendered, "it works")
rendered = env.get_template('main5').render().strip()
self.assertEqual(rendered, "my god is blue")
rendered = env.get_template('main6').render().strip()
self.assertEqual(rendered, "it works")
def test_nested_structures(self):
env = Environment(extensions=[SerializerExtension])
rendered = env.from_string('{{ data }}').render(data="foo")
self.assertEqual(rendered, "foo")
data = OrderedDict([
('foo', OrderedDict([
('bar', 'baz'),
('qux', 42)
])
)
])
rendered = env.from_string('{{ data }}').render(data=data)
self.assertEqual(
rendered,
"{u'foo': {u'bar': u'baz', u'qux': 42}}" if six.PY2
else "{'foo': {'bar': 'baz', 'qux': 42}}"
)
rendered = env.from_string('{{ data }}').render(data=[
OrderedDict(
foo='bar',
),
OrderedDict(
baz=42,
)
])
self.assertEqual(
rendered,
"[{'foo': u'bar'}, {'baz': 42}]" if six.PY2
else "[{'foo': 'bar'}, {'baz': 42}]"
)
def test_sequence(self):
env = Environment()
env.filters['sequence'] = ensure_sequence_filter
rendered = env.from_string('{{ data | sequence | length }}') \
.render(data='foo')
self.assertEqual(rendered, '1')
rendered = env.from_string('{{ data | sequence | length }}') \
.render(data=['foo', 'bar'])
self.assertEqual(rendered, '2')
rendered = env.from_string('{{ data | sequence | length }}') \
.render(data=('foo', 'bar'))
self.assertEqual(rendered, '2')
rendered = env.from_string('{{ data | sequence | length }}') \
.render(data=set(['foo', 'bar']))
self.assertEqual(rendered, '2')
rendered = env.from_string('{{ data | sequence | length }}') \
.render(data={'foo': 'bar'})
self.assertEqual(rendered, '1')
def test_is_ip(self):
'''
Test the `is_ip` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ '192.168.0.1' | is_ip }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'True')
rendered = render_jinja_tmpl("{{ 'FE80::' | is_ip }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'True')
rendered = render_jinja_tmpl("{{ 'random' | is_ip }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'False')
def test_is_ipv4(self):
'''
Test the `is_ipv4` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ '192.168.0.1' | is_ipv4 }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'True')
rendered = render_jinja_tmpl("{{ 'FE80::' | is_ipv4 }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'False')
rendered = render_jinja_tmpl("{{ 'random' | is_ipv4 }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'False')
def test_is_ipv6(self):
'''
Test the `is_ipv6` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ '192.168.0.1' | is_ipv6 }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'False')
rendered = render_jinja_tmpl("{{ 'FE80::' | is_ipv6 }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'True')
rendered = render_jinja_tmpl("{{ 'random' | is_ipv6 }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'False')
def test_ipaddr(self):
'''
Test the `ipaddr` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ '::' | ipaddr }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, '::')
rendered = render_jinja_tmpl("{{ '192.168.0.1' | ipaddr }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, '192.168.0.1')
# provides a list with valid IP addresses only
rendered = render_jinja_tmpl("{{ ['192.168.0.1', '172.17.17.1', 'foo', 'bar', '::'] | ipaddr | join(', ') }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, '192.168.0.1, 172.17.17.1, ::')
# return only multicast addresses
rendered = render_jinja_tmpl("{{ ['224.0.0.1', 'FF01::1', '::'] | ipaddr(options='multicast') | join(', ') }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, '224.0.0.1, ff01::1')
def test_ipv4(self):
'''
Test the `ipv4` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ '192.168.0.1' | ipv4 }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, '192.168.0.1')
rendered = render_jinja_tmpl("{{ ['192.168.0.1', '172.17.17.1'] | ipv4 | join(', ')}}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, '192.168.0.1, 172.17.17.1')
rendered = render_jinja_tmpl("{{ 'fe80::' | ipv4 }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'None')
rendered = render_jinja_tmpl("{{ 'random' | ipv4 }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'None')
rendered = render_jinja_tmpl("{{ '192.168.0.1' | ipv4(options='lo') }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'None')
rendered = render_jinja_tmpl("{{ '127.0.0.1' | ipv4(options='lo') }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, '127.0.0.1')
def test_ipv6(self):
'''
Test the `ipv6` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ '192.168.0.1' | ipv6 }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'None')
rendered = render_jinja_tmpl("{{ 'random' | ipv6 }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'None')
# returns the standard format value
rendered = render_jinja_tmpl("{{ 'FE80:0:0::0' | ipv6 }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'fe80::')
        # fe80:: is link-local, therefore it will be returned
rendered = render_jinja_tmpl("{{ 'fe80::' | ipv6(options='ll') }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'fe80::')
# fe80:: is not loopback
rendered = render_jinja_tmpl("{{ 'fe80::' | ipv6(options='lo') }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'None')
# returns only IPv6 addresses in the list
rendered = render_jinja_tmpl("{{ ['fe80::', '192.168.0.1'] | ipv6 | join(', ') }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'fe80::')
rendered = render_jinja_tmpl("{{ ['fe80::', '::'] | ipv6 | join(', ') }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'fe80::, ::')
def test_network_hosts(self):
'''
Test the `network_hosts` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ '192.168.0.1/30' | network_hosts | join(', ') }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, '192.168.0.1, 192.168.0.2')
def test_network_size(self):
'''
Test the `network_size` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ '192.168.0.1' | network_size }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, '1')
rendered = render_jinja_tmpl("{{ '192.168.0.1/8' | network_size }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, '16777216')
@flaky
def test_http_query(self):
'''
Test the `http_query` Jinja filter.
'''
for backend in ('requests', 'tornado', 'urllib2'):
rendered = render_jinja_tmpl("{{ 'http://icanhazip.com' | http_query(backend='" + backend + "') }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertIsInstance(rendered, six.text_type, 'Failed with backend: {}'.format(backend))
dict_reply = ast.literal_eval(rendered)
self.assertIsInstance(dict_reply, dict, 'Failed with backend: {}'.format(backend))
self.assertIsInstance(dict_reply['body'], six.string_types, 'Failed with backend: {}'.format(backend))
def test_to_bool(self):
'''
Test the `to_bool` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ 1 | to_bool }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'True')
rendered = render_jinja_tmpl("{{ 'True' | to_bool }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'True')
rendered = render_jinja_tmpl("{{ 0 | to_bool }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'False')
rendered = render_jinja_tmpl("{{ 'Yes' | to_bool }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'True')
def test_quote(self):
'''
Test the `quote` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ 'random' | quote }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'random')
def test_regex_search(self):
'''
Test the `regex_search` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ 'abcdefabcdef' | regex_search('BC(.*)', ignorecase=True) }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, "('defabcdef',)") # because search looks only at the beginning
def test_regex_match(self):
'''
Test the `regex_match` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ 'abcdefabcdef' | regex_match('BC(.*)', ignorecase=True)}}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, "None")
def test_regex_replace(self):
'''
Test the `regex_replace` Jinja filter.
'''
rendered = render_jinja_tmpl(r"{{ 'lets replace spaces' | regex_replace('\s+', '__') }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'lets__replace__spaces')
def test_uuid(self):
'''
Test the `uuid` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ 'random' | uuid }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, '3652b285-26ad-588e-a5dc-c2ee65edc804')
def test_min(self):
'''
Test the `min` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ [1, 2, 3] | min }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, '1')
def test_max(self):
'''
Test the `max` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ [1, 2, 3] | max }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, '3')
def test_avg(self):
'''
Test the `avg` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ [1, 2, 3] | avg }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, '2.0')
def test_union(self):
'''
Test the `union` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ [1, 2, 3] | union([2, 3, 4]) | join(', ') }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, '1, 2, 3, 4')
def test_intersect(self):
'''
Test the `intersect` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ [1, 2, 3] | intersect([2, 3, 4]) | join(', ') }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, '2, 3')
def test_difference(self):
'''
Test the `difference` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ [1, 2, 3] | difference([2, 3, 4]) | join(', ') }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, '1')
def test_symmetric_difference(self):
'''
Test the `symmetric_difference` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ [1, 2, 3] | symmetric_difference([2, 3, 4]) | join(', ') }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, '1, 4')
def test_md5(self):
'''
Test the `md5` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ 'random' | md5 }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, '7ddf32e17a6ac5ce04a8ecbf782ca509')
def test_sha256(self):
'''
Test the `sha256` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ 'random' | sha256 }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'a441b15fe9a3cf56661190a0b93b9dec7d04127288cc87250967cf3b52894d11')
def test_sha512(self):
'''
Test the `sha512` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ 'random' | sha512 }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, six.text_type(('811a90e1c8e86c7b4c0eef5b2c0bf0ec1b19c4b1b5a242e6455be93787cb473cb7bc'
'9b0fdeb960d00d5c6881c2094dd63c5c900ce9057255e2a4e271fc25fef1')))
def test_hmac(self):
'''
Test the `hmac` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ 'random' | hmac('secret', 'blah') }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'False')
rendered = render_jinja_tmpl(("{{ 'get salted' | "
"hmac('shared secret', 'eBWf9bstXg+NiP5AOwppB5HMvZiYMPzEM9W5YMm/AmQ=') }}"),
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'True')
def test_base64_encode(self):
'''
Test the `base64_encode` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ 'random' | base64_encode }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'cmFuZG9t')
def test_base64_decode(self):
'''
Test the `base64_decode` Jinja filter.
'''
rendered = render_jinja_tmpl("{{ 'cmFuZG9t' | base64_decode }}",
dict(opts=self.local_opts, saltenv='test', salt=self.local_salt))
self.assertEqual(rendered, 'random')
# def test_print(self):
# env = Environment(extensions=[SerializerExtension])
# source = '{% import_yaml "toto.foo" as docu %}'
# name, filename = None, '<filename>'
# parsed = env._parse(source, name, filename)
# print parsed
# print
# compiled = env._generate(parsed, name, filename)
# print compiled
# return
class TestDotNotationLookup(ModuleCase):
'''
Tests to call Salt functions via Jinja with various lookup syntaxes
'''
def setUp(self, *args, **kwargs):
functions = {
'mocktest.ping': lambda: True,
'mockgrains.get': lambda x: 'jerry',
}
minion_opts = salt.config.minion_config(os.path.join(TMP_CONF_DIR, 'minion'))
render = salt.loader.render(minion_opts, functions)
self.jinja = render.get('jinja')
def tearDown(self):
del self.jinja
def render(self, tmpl_str, context=None):
return self.jinja(tmpl_str, context=context or {}, from_str=True).read()
def test_normlookup(self):
'''
Sanity-check the normal dictionary-lookup syntax for our stub function
'''
tmpl_str = '''Hello, {{ salt['mocktest.ping']() }}.'''
with patch.object(SaltCacheLoader, 'file_client', Mock()):
ret = self.render(tmpl_str)
self.assertEqual(ret, 'Hello, True.')
def test_dotlookup(self):
'''
Check calling a stub function using awesome dot-notation
'''
tmpl_str = '''Hello, {{ salt.mocktest.ping() }}.'''
with patch.object(SaltCacheLoader, 'file_client', Mock()):
ret = self.render(tmpl_str)
self.assertEqual(ret, 'Hello, True.')
def test_shadowed_dict_method(self):
'''
Check calling a stub function with a name that shadows a ``dict``
method name
'''
tmpl_str = '''Hello, {{ salt.mockgrains.get('id') }}.'''
with patch.object(SaltCacheLoader, 'file_client', Mock()):
ret = self.render(tmpl_str)
self.assertEqual(ret, 'Hello, jerry.')
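The filter and lookup tests above all lean on Salt's test helpers (render_jinja_tmpl, ensure_sequence_filter, the mocked SaltCacheLoader). Below is a rough, self-contained sketch of the same Environment/custom-filter pattern using plain Jinja2 only; the ensure_sequence helper is an assumed stand-in, not the suite's implementation.
# Self-contained sketch of the custom-filter pattern exercised by the tests above.
from jinja2 import Environment

def ensure_sequence(value):
    # Assumed stand-in for ensure_sequence_filter: wrap scalars so '| length' is uniform.
    return value if isinstance(value, (list, tuple, set, dict)) else [value]

env = Environment()
env.filters['sequence'] = ensure_sequence
assert env.from_string('{{ data | sequence | length }}').render(data='foo') == '1'
assert env.from_string('{{ data | sequence | length }}').render(data=['a', 'b']) == '2'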
| 41.127138
| 125
| 0.553258
|
44f6ef45a2ddaf43310c6cb20183d83d527b6c2f
| 1,823
|
py
|
Python
|
var/spack/repos/builtin/packages/votca-csgapps/package.py
|
gwagenbreth/spack
|
e10c0a6a340956a8626de747036a745cd10d606d
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1
|
2021-09-19T10:20:43.000Z
|
2021-09-19T10:20:43.000Z
|
var/spack/repos/builtin/packages/votca-csgapps/package.py
|
gwagenbreth/spack
|
e10c0a6a340956a8626de747036a745cd10d606d
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 9
|
2021-05-12T05:42:26.000Z
|
2022-03-30T17:06:14.000Z
|
var/spack/repos/builtin/packages/votca-csgapps/package.py
|
gwagenbreth/spack
|
e10c0a6a340956a8626de747036a745cd10d606d
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class VotcaCsgapps(CMakePackage):
"""Versatile Object-oriented Toolkit for Coarse-graining
Applications (VOTCA) is a package intended to reduce the amount of
routine work when doing systematic coarse-graining of various
systems. The core is written in C++.
This package contains the VOTCA coarse-graining extra apps.
"""
homepage = "http://www.votca.org"
url = "https://github.com/votca/csgapps/tarball/v1.4"
git = "https://github.com/votca/csgapps.git"
maintainers = ['junghans']
version('master', branch='master')
version('stable', branch='stable')
version('1.6.3', sha256='fdb6a94eabdfe1bfae6002da16e364086d036c2dc24700a941b73d5bb1afc422')
version('1.6.2', sha256='f7db0bda27d4419c570f44dc60d04b1fd7b4cdcf10db6301005fca70111fcfe3')
version('1.6.1', sha256='03c7cef2a76e73cf953b2b5ea2cdca765ec1a2627d0a9d8869d46166e63d197c')
version('1.6', sha256='084bbc5b179bb7eb8f6671d2d5fa13e69e68946570c9120a7e4b10aff1866e2e')
version('1.5.1', sha256='b4946711e88a1745688b6cce5aad872e6e2ea200fededf38d77a864883e3750e')
version('1.5', sha256='18b40ce6222509bc70aa9d56b8c538cd5903edf7294d6f95530668e555206d5b')
version('1.4.1', sha256='095d9ee4cd49d2fd79c10e0e84e6890b755e54dec6a5cd580a2b4241ba230a2b')
version('1.4', sha256='4ea8348c2f7de3cc488f48fbd8652e69b52515441952766c06ff67ed1aaf69a0')
for v in ["1.4", "1.4.1", "1.5", "1.5.1", "1.6", "1.6.1", "1.6.2",
"1.6.3", "master", "stable"]:
depends_on('votca-csg@%s' % v, when="@%s:%s.0" % (v, v))
depends_on("boost")
| 47.973684
| 97
| 0.719144
|
1e5ff2c329009d0a50a8daff526b12d4949b452c
| 1,616
|
py
|
Python
|
main/sign.py
|
6688aa/-
|
e810e433fa5088d9df615881a475e5ceeccfe6ff
|
[
"Apache-2.0"
] | 27
|
2021-01-24T15:30:46.000Z
|
2022-03-22T04:28:40.000Z
|
main/sign.py
|
6688aa/-
|
e810e433fa5088d9df615881a475e5ceeccfe6ff
|
[
"Apache-2.0"
] | 13
|
2021-03-22T05:24:20.000Z
|
2022-03-26T18:12:29.000Z
|
main/sign.py
|
6688aa/-
|
e810e433fa5088d9df615881a475e5ceeccfe6ff
|
[
"Apache-2.0"
] | 39
|
2021-02-02T07:45:21.000Z
|
2022-03-25T04:06:02.000Z
|
# coding=utf-8
import requests
from urllib.parse import quote
def login(user, retry=False):
"""获取处理后的数据
:param user:用户信息
:return : 传回登陆成功的cookie
"""
# 姓名,学号,密码,学校编码
name = user.get("name")
stucode = user.get("stucode")
password = user.get("password")
schoolcode = user.get("schoolcode")
api = 'https://api.weishao.com.cn'
# 分析协议得出的
oauth = '/oauth/authorize?client_id=pqZ3wGM07i8R9mR3&redirect_uri=https%3A%2F%2Fyq.weishao.com.cn%2Fcheck%2Fquestionnaire&response_type=code&scope=base_api&state=ruijie'
# 直接获取登陆链接的cookie(该链接极大可能是固定的)
url = api + "/login?source=" + oauth
try:
# 得到初始cookie
session = requests.Session()
cook = session.get(url).headers['set-cookie']
# 提交的个人数据
dat = "schoolcode=" + schoolcode + "&username=" + stucode + "&password=" + quote(password, "utf-8") + "&verifyValue=&verifyKey=" + stucode + "_" + schoolcode + "&ssokey="
head = {
'Content-Type': 'application/x-www-form-urlencoded',
'Cookie': cook,
}
# 提交个人信息(模拟登录)
session.post(url, data=dat, headers=head)
url1 = session.get(api + oauth, headers=head, allow_redirects=False).headers['Location']
# 登陆成功,获取登陆cookie
cook = session.get(url1, headers=head, allow_redirects=False).headers['set-cookie']
return cook
except requests.exceptions.ConnectionError:
print("网络错误")
return "网络错误"
except KeyError:
if retry:
print(name + " 登录错误")
return "登录错误"
else:
return login(user, True)
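A rough usage sketch (not part of the original script); the dictionary keys are the ones login() reads, the values are placeholders.
if __name__ == '__main__':
    # Placeholder credentials; real values come from the user's school account.
    user = {
        "name": "Zhang San",
        "stucode": "20200001",
        "password": "secret",
        "schoolcode": "1234",
    }
    cookie = login(user)
    print(cookie)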
| 35.130435
| 178
| 0.612624
|
a89dd2cf7e546a9353b376e0ce96c56c443ca9bd
| 10,187
|
py
|
Python
|
python/taichi/misc/util.py
|
youyufeng92/taichi
|
c826de521d254745db556835e322dd2e0cfdbfa0
|
[
"MIT"
] | 1
|
2020-07-17T08:59:53.000Z
|
2020-07-17T08:59:53.000Z
|
python/taichi/misc/util.py
|
youyufeng92/taichi
|
c826de521d254745db556835e322dd2e0cfdbfa0
|
[
"MIT"
] | null | null | null |
python/taichi/misc/util.py
|
youyufeng92/taichi
|
c826de521d254745db556835e322dd2e0cfdbfa0
|
[
"MIT"
] | null | null | null |
import sys
import datetime
import platform
import random
import time
import taichi
def get_os_name():
name = platform.platform()
# in python 3.8, platform.platform() uses mac_ver() on macOS
# it will return 'macOS-XXXX' instead of 'Darwin-XXXX'
if name.lower().startswith('darwin') or name.lower().startswith('macos'):
return 'osx'
elif name.lower().startswith('windows'):
return 'win'
elif name.lower().startswith('linux'):
return 'linux'
assert False, "Unknown platform name %s" % name
def get_uuid():
print(
'Warning: get_uuid is deprecated. Please use get_unique_task_id instead.')
return get_unique_task_id()
def get_unique_task_id():
return datetime.datetime.now().strftime('task-%Y-%m-%d-%H-%M-%S-r') + (
'%05d' % random.randint(0, 10000))
import copy
import numpy as np
import ctypes
def config_from_dict(args):
from taichi.core import tc_core
d = copy.copy(args)
for k in d:
if isinstance(d[k], tc_core.Vector2f):
d[k] = '({}, {})'.format(d[k].x, d[k].y)
if isinstance(d[k], tc_core.Vector3f):
d[k] = '({}, {}, {})'.format(d[k].x, d[k].y, d[k].z)
d[k] = str(d[k])
return tc_core.config_from_dict(d)
def make_polygon(points, scale):
import taichi as tc
polygon = tc.core.Vector2fList()
for p in points:
if type(p) == list or type(p) == tuple:
polygon.append(scale * vec(p[0], p[1]))
else:
polygon.append(scale * p)
return polygon
def veci(*args):
from taichi.core import tc_core
if isinstance(args[0], tc_core.Vector2i):
return args[0]
if isinstance(args[0], tc_core.Vector3i):
return args[0]
if isinstance(args[0], tuple):
args = tuple(*args)
if len(args) == 2:
return tc_core.Vector2i(int(args[0]), int(args[1]))
elif len(args) == 3:
return tc_core.Vector3i(int(args[0]), int(args[1]), int(args[2]))
elif len(args) == 4:
return tc_core.Vector4i(
int(args[0]), int(args[1]), int(args[2]), int(args[3]))
else:
assert False, type(args[0])
def vec(*args):
from taichi.core import tc_core
if isinstance(args[0], tc_core.Vector2f):
return args[0]
if isinstance(args[0], tc_core.Vector3f):
return args[0]
if isinstance(args[0], tc_core.Vector4f):
return args[0]
if isinstance(args[0], tc_core.Vector2d):
return args[0]
if isinstance(args[0], tc_core.Vector3d):
return args[0]
if isinstance(args[0], tc_core.Vector4d):
return args[0]
if isinstance(args[0], tuple):
args = tuple(*args)
if tc_core.get_default_float_size() == 4:
if len(args) == 2:
return tc_core.Vector2f(float(args[0]), float(args[1]))
elif len(args) == 3:
return tc_core.Vector3f(float(args[0]), float(args[1]), float(args[2]))
elif len(args) == 4:
return tc_core.Vector4f(
float(args[0]), float(args[1]), float(args[2]), float(args[3]))
else:
assert False, type(args[0])
else:
if len(args) == 2:
return tc_core.Vector2d(float(args[0]), float(args[1]))
elif len(args) == 3:
return tc_core.Vector3d(float(args[0]), float(args[1]), float(args[2]))
elif len(args) == 4:
return tc_core.Vector4d(
float(args[0]), float(args[1]), float(args[2]), float(args[3]))
else:
assert False, type(args[0])
def default_const_or_evaluate(f, default, u, v):
if f == None:
return default
if type(f) in [float, int, tuple]:
return f
return f(u, v)
def const_or_evaluate(f, u, v):
import taichi as tc
if type(f) in [float, int, tuple, tc.core.Vector2, tc.core.Vector3]:
return f
return f(u, v)
# color_255: actual color
# arr: the transparency of the image, if transform is not 'levelset'
# transform: (x0, x1) as rescaling or simply 'levelset'
def array2d_to_image(arr,
width,
height,
color_255=None,
transform='levelset',
alpha_scale=1.0):
from taichi import tc_core
if color_255 is None:
assert isinstance(arr, tc_core.Array2DVector3) or isinstance(
arr, tc_core.Array2DVector4)
import pyglet
rasterized = arr.rasterize(width, height)
raw_data = np.empty((width, height, arr.get_channels()), dtype=np.float32)
rasterized.to_ndarray(raw_data.ctypes.data_as(ctypes.c_void_p).value)
if transform == 'levelset':
raw_data = (raw_data <= 0).astype(np.float32)
else:
x0, x1 = transform
raw_data = (np.clip(raw_data, x0, x1) - x0) / (x1 - x0)
raw_data = raw_data.swapaxes(0, 1).copy()
if isinstance(arr, tc_core.Array2DVector3):
dat = np.stack(
[raw_data,
np.ones(shape=(width, height, 1), dtype=np.float32)],
axis=2).flatten().reshape((height * width, 4))
dat = dat * 255.0
elif isinstance(arr, tc_core.Array2DVector4):
dat = raw_data.flatten().reshape((height * width, 4))
dat = dat * 255.0
else:
raw_data = raw_data.flatten()
dat = np.outer(np.ones_like(raw_data), color_255)
dat[:, 3] = (color_255[3] * raw_data)
dat[:, 3] *= alpha_scale
dat = np.clip(dat, 0.0, 255.0)
dat = dat.astype(np.uint8)
assert dat.shape == (height * width, 4)
image_data = pyglet.image.ImageData(width, height, 'RGBA', dat.tostring())
return image_data
def image_buffer_to_image(arr):
import pyglet
raw_data = np.empty((arr.get_width() * arr.get_height() * 3,),
dtype='float32')
arr.to_ndarray(raw_data.ctypes.data_as(ctypes.c_void_p).value)
dat = (raw_data * 255.0).astype('uint8')
  dat = dat.reshape((len(raw_data) // 3, 3))
data_string = dat.tostring()
image_data = pyglet.image.ImageData(arr.get_width(), arr.get_height(), 'RGB',
data_string)
return image_data
def image_buffer_to_ndarray(arr, bgr=False):
channels = arr.get_channels()
raw_data = np.empty((arr.get_width() * arr.get_height() * channels,),
dtype='float32')
arr.to_ndarray(raw_data.ctypes.data_as(ctypes.c_void_p).value)
dat = raw_data.astype('float32')
ret = dat.reshape((arr.get_width(), arr.get_height(), channels))
if bgr:
ret = ret[:, :, ::-1]
return ret
def arange(x, y, d):
while x < y:
yield x
x += d
# TODO: remove this...
def P(**kwargs):
return config_from_dict(kwargs)
def imread(fn, bgr=False):
img = taichi.core.Array2DVector3(taichi.veci(0, 0), taichi.vec(0.0, 0.0, 0.0))
img.read(fn)
return image_buffer_to_ndarray(img, bgr)[::-1]
def read_image(fn, linearize=False):
img = taichi.core.Array2DVector3(taichi.veci(0, 0), taichi.vec(0.0, 0.0, 0.0))
img.read(fn, linearize)
return img
def show_image(window_name, img):
from taichi.gui.image_viewer import show_image
show_image(window_name, img)
def save_image(fn, img):
img.write(fn)
def ndarray_to_array2d(array):
if array.dtype == np.uint8:
array = (array * (1 / 255.0)).astype(np.float32)
assert array.dtype == np.float32
array = array.copy()
input_ptr = array.ctypes.data_as(ctypes.c_void_p).value
if len(array.shape) == 2 or array.shape[2] == 1:
arr = taichi.core.Array2Dreal(Vectori(0, 0))
elif array.shape[2] == 3:
arr = taichi.core.Array2DVector3(Vectori(0, 0), taichi.Vector(0, 0, 0))
elif array.shape[2] == 4:
arr = taichi.core.Array2DVector4(Vectori(0, 0), taichi.Vector(0, 0, 0, 0))
else:
assert False, 'ndarray has to be n*m, n*m*3, or n*m*4'
arr.from_ndarray(input_ptr, array.shape[0], array.shape[1])
return arr
def array2d_to_ndarray(arr):
if isinstance(arr, taichi.core.Array2DVector3):
ndarray = np.empty((arr.get_width(), arr.get_height(), 3), dtype='float32')
elif isinstance(arr, taichi.core.Array2DVector4):
ndarray = np.empty((arr.get_width(), arr.get_height(), 4), dtype='float32')
elif isinstance(arr, taichi.core.Array2Dreal):
ndarray = np.empty((arr.get_width(), arr.get_height()), dtype='float32')
else:
assert False, 'Array2d must have type real, Vector3, or Vector4'
output_ptr = ndarray.ctypes.data_as(ctypes.c_void_p).value
arr.to_ndarray(output_ptr)
return ndarray
def opencv_img_to_taichi_img(img):
return (img.swapaxes(0, 1)[:, ::-1, ::-1] * (1 / 255.0)).astype(np.float32)
def sleep(seconds=-1):
if seconds == -1:
while True:
time.sleep(1) # Wait for Ctrl-C
else:
time.sleep(seconds)
class Tee():
def __init__(self, name):
self.file = open(name, 'w')
self.stdout = sys.stdout
self.stderr = sys.stderr
sys.stdout = self
sys.stderr = self
def __del__(self):
self.file.close()
def write(self, data):
self.file.write(data)
self.stdout.write(data)
self.file.flush()
self.stdout.flush()
def write_to_file(self, data):
self.file.write(data)
import inspect
def get_file_name(asc=0):
return inspect.stack()[1 + asc][1]
def get_function_name(asc=0):
return inspect.stack()[1 + asc][3]
def get_line_number(asc=0):
return inspect.stack()[1 + asc][2]
def get_logging(name):
def logger(msg, *args, **kwargs):
# Python inspection takes time (~0.1ms) so avoid it as much as possible
if taichi.tc_core.logging_effective(name):
msg_formatted = msg.format(*args, **kwargs)
func = getattr(taichi.tc_core, name)
frame = inspect.currentframe().f_back.f_back
file_name, lineno, func_name, _, _ = inspect.getframeinfo(frame)
msg = f'[{file_name}:{func_name}@{lineno}] {msg_formatted}'
func(msg)
return logger
DEBUG = 'debug'
TRACE = 'trace'
INFO = 'info'
WARN = 'warn'
ERROR = 'error'
CRITICAL = 'critical'
debug = get_logging(DEBUG)
trace = get_logging(TRACE)
info = get_logging(INFO)
warn = get_logging(WARN)
error = get_logging(ERROR)
critical = get_logging(CRITICAL)
def redirect_print_to_log():
class Logger:
def write(self, msg):
taichi.core.info('[{}:{}@{}] {}'.format(
get_file_name(1), get_function_name(1), get_line_number(1), msg))
def flush(self):
taichi.core.flush_log()
sys.stdout = Logger()
def duplicate_stdout_to_file(fn):
taichi.tc_core.duplicate_stdout_to_file(fn)
def set_logging_level(level):
taichi.tc_core.set_logging_level(level)
def set_gdb_trigger(on=True):
taichi.tc_core.set_core_trigger_gdb_when_crash(on)
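A rough usage sketch (not part of the module) of the Tee helper defined above; the log path is just an example.
if __name__ == '__main__':
    tee = Tee('/tmp/taichi_run.log')
    # Everything printed from here on goes to both the console and the log file.
    print('hello from taichi utils')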
| 27.833333
| 80
| 0.652596
|
5f8129d533d8a5052006008e39412acf7863788a
| 752
|
py
|
Python
|
distributed_frontera/messagebus/zeromq/socket_config.py
|
abael/ScrapyFronteraDistributed
|
50a636be9dbff1e27698f55968ffb0a0b53a6123
|
[
"BSD-3-Clause"
] | null | null | null |
distributed_frontera/messagebus/zeromq/socket_config.py
|
abael/ScrapyFronteraDistributed
|
50a636be9dbff1e27698f55968ffb0a0b53a6123
|
[
"BSD-3-Clause"
] | null | null | null |
distributed_frontera/messagebus/zeromq/socket_config.py
|
abael/ScrapyFronteraDistributed
|
50a636be9dbff1e27698f55968ffb0a0b53a6123
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
class SocketConfig(object):
hostname = None
base_port = None
def __init__(self, hostname, base_port):
self.hostname = hostname
self.base_port = base_port
def spiders_in(self):
return 'tcp://%s:%d' % (self.hostname, self.base_port)
def spiders_out(self):
return 'tcp://%s:%d' % (self.hostname, self.base_port + 1)
def sw_in(self):
return 'tcp://%s:%d' % (self.hostname, self.base_port + 2)
def sw_out(self):
return 'tcp://%s:%d' % (self.hostname, self.base_port + 3)
def db_in(self):
return 'tcp://%s:%d' % (self.hostname, self.base_port + 4)
def db_out(self):
return 'tcp://%s:%d' % (self.hostname, self.base_port + 5)
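An illustrative sketch (not part of the module) of how one SocketConfig spreads the six ZeroMQ endpoints over consecutive ports; hostname and base port are placeholders.
if __name__ == '__main__':
    cfg = SocketConfig('localhost', 5550)
    print(cfg.spiders_in())   # tcp://localhost:5550
    print(cfg.spiders_out())  # tcp://localhost:5551
    print(cfg.sw_in())        # tcp://localhost:5552
    print(cfg.sw_out())       # tcp://localhost:5553
    print(cfg.db_in())        # tcp://localhost:5554
    print(cfg.db_out())       # tcp://localhost:5555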
| 25.931034
| 66
| 0.582447
|
1f4a9b194306e44403bff0be0b7b6c948fe73861
| 291
|
py
|
Python
|
flow-control/Demos/add_nums.py
|
WebucatorTraining/classfiles-actionable-python
|
930c154a6dbfa6c54768557a998b4dbafb43df38
|
[
"MIT"
] | 2
|
2022-01-04T22:25:01.000Z
|
2022-01-16T16:50:23.000Z
|
flow-control/Demos/add_nums.py
|
WebucatorTraining/classfiles-actionable-python
|
930c154a6dbfa6c54768557a998b4dbafb43df38
|
[
"MIT"
] | null | null | null |
flow-control/Demos/add_nums.py
|
WebucatorTraining/classfiles-actionable-python
|
930c154a6dbfa6c54768557a998b4dbafb43df38
|
[
"MIT"
] | null | null | null |
def add_nums(num, *nums):
total = sum(nums, num)
nums_joined = ', '.join([str(n) for n in nums])
print(f"The sum of {nums_joined} and {num} is {total}.")
def main():
add_nums(1, 2)
add_nums(1, 2, 3, 4, 5)
add_nums(11, 12, 13, 14)
add_nums(101, 201, 301)
main()
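A short worked trace (not in the original file) of the sum(nums, num) idiom, where num serves as the start value.
# add_nums(1, 2, 3) binds num = 1 and nums = (2, 3), so the total is
# sum((2, 3), 1) == 6 and the printed line reads:
#   The sum of 2, 3 and 1 is 6.
assert sum((2, 3), 1) == 6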
| 22.384615
| 60
| 0.573883
|
4e2feca1d7b9f3231a38137e96b01c6af8ee06ed
| 374
|
py
|
Python
|
bin/uwin-lift-hlp/uwin_lift_hlp/dump_debug.py
|
DCNick3/uwin-remill
|
1434c3e102b781690c764fb8a21cdba3380a8b06
|
[
"Apache-2.0"
] | null | null | null |
bin/uwin-lift-hlp/uwin_lift_hlp/dump_debug.py
|
DCNick3/uwin-remill
|
1434c3e102b781690c764fb8a21cdba3380a8b06
|
[
"Apache-2.0"
] | null | null | null |
bin/uwin-lift-hlp/uwin_lift_hlp/dump_debug.py
|
DCNick3/uwin-remill
|
1434c3e102b781690c764fb8a21cdba3380a8b06
|
[
"Apache-2.0"
] | null | null | null |
from .watcom_debug_info import try_get_watcom_debug_info
import pefile
import sys
def main():
fname = sys.argv[1]
pe = pefile.PE(fname)
debug = try_get_watcom_debug_info(pe, fname)
for x in debug:
lo, hi = debug[x]
print(f"0x{lo:00000000x}{' ' if not hi else f' - 0x{hi:00000000x}'} {x}")
if __name__ == '__main__':
main()
| 23.375
| 93
| 0.622995
|
ed3b21943c2f9baae29dd5ad2e45a0e03bc0e035
| 4,284
|
py
|
Python
|
compliance/verify_submission/mlperf_submission_helper/crypto.py
|
sanyalington/mlperf_training_mitest
|
d07b360e475afb87c7da57f173952822d84ed212
|
[
"Apache-2.0"
] | 1
|
2019-02-19T09:53:42.000Z
|
2019-02-19T09:53:42.000Z
|
compliance/verify_submission/mlperf_submission_helper/crypto.py
|
sanyalington/mlperf_training_mitest
|
d07b360e475afb87c7da57f173952822d84ed212
|
[
"Apache-2.0"
] | 1
|
2018-11-06T06:03:30.000Z
|
2018-11-06T06:03:30.000Z
|
compliance/verify_submission/mlperf_submission_helper/crypto.py
|
sanyalington/mlperf_training_mitest
|
d07b360e475afb87c7da57f173952822d84ed212
|
[
"Apache-2.0"
] | 3
|
2019-01-14T13:57:03.000Z
|
2019-02-22T23:19:41.000Z
|
import fnmatch
import os
import shutil
from Cryptodome.PublicKey import RSA
from Cryptodome.Random import get_random_bytes
from Cryptodome.Cipher import AES, PKCS1_OAEP
def encrypt_file(public_key, src_file, dest_file):
try:
with open(src_file) as f:
rsa_key = RSA.import_key(open(public_key).read())
session_key = get_random_bytes(16)
# Encrypt session key
cipher_rsa = PKCS1_OAEP.new(rsa_key)
encrypted_session_key = cipher_rsa.encrypt(session_key)
# Encrypt data
cipher_aes = AES.new(session_key, AES.MODE_EAX)
ciphertext, tag = cipher_aes.encrypt_and_digest(f.read().encode("utf-8"))
except Exception as e:
print("Unable to encrypt file: {}".format(src_file))
raise e
try:
with open(dest_file, "wb") as f:
for x in (encrypted_session_key, cipher_aes.nonce, tag, ciphertext):
f.write(x)
except Exception as e:
print("Unable to write output file {}".format(dest_file))
raise e
def decrypt_file(private_key, src_file, dest_file):
try:
with open(src_file, "rb") as f:
rsa_key = RSA.import_key(open(private_key).read())
encrypted_session_key = f.read(rsa_key.size_in_bytes())
nonce = f.read(16)
tag = f.read(16)
ciphertext = f.read(-1)
# Decrypt session key
cipher_rsa = PKCS1_OAEP.new(rsa_key)
session_key = cipher_rsa.decrypt(encrypted_session_key)
# Decrypt data
cipher_aes = AES.new(session_key, AES.MODE_EAX, nonce)
data = cipher_aes.decrypt_and_verify(ciphertext, tag)
data = data.decode("utf-8")
except Exception as e:
print("Unable to decrypt file: {}".format(src_file))
raise e
try:
with open(dest_file, "w") as f:
f.write(data)
except Exception as e:
print("Unable to write output file: {}".format(dest_file))
raise e
def encrypt_submission(key, src_dir, dest_dir):
if os.path.isdir(dest_dir):
raise Exception("Output directory already exists.")
os.mkdir(dest_dir, mode=0o755)
for root, dirs, files in os.walk(src_dir):
# identify result files and encrypt, else directly copy
if fnmatch.fnmatch(root, os.path.join(src_dir, "results", "*", "*")):
for f in files:
from_file = os.path.join(root, f)
to_file = from_file.replace(src_dir.rstrip(os.sep),
dest_dir.rstrip(os.sep), 1)
encrypt_file(key, from_file, to_file)
else:
for d in dirs:
from_dir = os.path.join(root, d)
to_dir = from_dir.replace(src_dir.rstrip(os.sep),
dest_dir.rstrip(os.sep), 1)
os.mkdir(to_dir, mode=0o755)
for f in files:
from_file = os.path.join(root, f)
to_file = from_file.replace(src_dir.rstrip(os.sep),
dest_dir.rstrip(os.sep), 1)
shutil.copyfile(from_file, to_file)
def decrypt_submission(key, src_dir, dest_dir):
if os.path.isdir(dest_dir):
raise Exception("Output directory already exists.")
os.mkdir(dest_dir, mode=0o755)
for root, dirs, files in os.walk(src_dir):
        # identify result files and decrypt, else directly copy
if fnmatch.fnmatch(root, os.path.join(src_dir, "results", "*", "*")):
for f in files:
from_file = os.path.join(root, f)
to_file = from_file.replace(src_dir.rstrip(os.sep),
dest_dir.rstrip(os.sep), 1)
decrypt_file(key, from_file, to_file)
else:
for d in dirs:
from_dir = os.path.join(root, d)
to_dir = from_dir.replace(src_dir.rstrip(os.sep),
dest_dir.rstrip(os.sep), 1)
os.mkdir(to_dir, mode=0o755)
for f in files:
from_file = os.path.join(root, f)
to_file = from_file.replace(src_dir.rstrip(os.sep),
dest_dir.rstrip(os.sep), 1)
shutil.copyfile(from_file, to_file)
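A rough round-trip sketch (not part of the original module); the key and file names are placeholders and the key pair is generated on the fly with the Cryptodome RSA module already imported above.
if __name__ == '__main__':
    key = RSA.generate(2048)
    with open('private.pem', 'wb') as f:
        f.write(key.export_key())
    with open('public.pem', 'wb') as f:
        f.write(key.publickey().export_key())
    with open('plain.txt', 'w') as f:
        f.write('hello submission')
    encrypt_file('public.pem', 'plain.txt', 'cipher.bin')
    decrypt_file('private.pem', 'cipher.bin', 'roundtrip.txt')
    assert open('roundtrip.txt').read() == 'hello submission'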
| 39.302752
| 85
| 0.582166
|
1cc546629589d38106c72d66572f36ff499a6f28
| 129,828
|
py
|
Python
|
pandas/indexes/base.py
|
RTBHOUSE/pandas
|
e27b29697f0dcf9359f01a19edb2f20c6d728b6c
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | 1
|
2019-10-24T09:00:26.000Z
|
2019-10-24T09:00:26.000Z
|
pandas/indexes/base.py
|
RTBHOUSE/pandas
|
e27b29697f0dcf9359f01a19edb2f20c6d728b6c
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null |
pandas/indexes/base.py
|
RTBHOUSE/pandas
|
e27b29697f0dcf9359f01a19edb2f20c6d728b6c
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | 3
|
2019-12-24T18:46:58.000Z
|
2021-09-04T11:57:13.000Z
|
import datetime
import warnings
import operator
import numpy as np
import pandas.tslib as tslib
import pandas.lib as lib
import pandas._join as _join
import pandas.algos as _algos
import pandas.index as _index
from pandas.lib import Timestamp, Timedelta, is_datetime_array
from pandas.compat import range, u
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.types.generic import ABCSeries, ABCMultiIndex, ABCPeriodIndex
from pandas.types.missing import isnull, array_equivalent
from pandas.types.common import (_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_platform_int,
is_integer,
is_float,
is_dtype_equal,
is_object_dtype,
is_categorical_dtype,
is_bool_dtype,
is_integer_dtype, is_float_dtype,
is_datetime64_any_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
is_iterator, is_list_like,
is_scalar)
from pandas.types.cast import _coerce_indexer_dtype
from pandas.core.common import (is_bool_indexer,
_values_from_object,
_asarray_tuplesafe)
from pandas.core.base import (PandasObject, FrozenList, FrozenNDArray,
IndexOpsMixin)
import pandas.core.base as base
from pandas.util.decorators import (Appender, Substitution, cache_readonly,
deprecate, deprecate_kwarg)
import pandas.core.common as com
import pandas.types.concat as _concat
import pandas.core.missing as missing
import pandas.core.algorithms as algos
from pandas.formats.printing import pprint_thing
from pandas.core.ops import _comp_method_OBJECT_ARRAY
from pandas.core.strings import StringAccessorMixin
from pandas.core.config import get_option
# simplify
default_pprint = lambda x, max_seq_items=None: \
pprint_thing(x, escape_chars=('\t', '\r', '\n'), quote_strings=True,
max_seq_items=max_seq_items)
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
unique='Index', duplicated='np.ndarray')
_index_shared_docs = dict()
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, StringAccessorMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_arrmap = _algos.arrmap_object
_left_indexer_unique = _join.left_join_indexer_unique_object
_left_indexer = _join.left_join_indexer_object
_inner_indexer = _join.inner_join_indexer_object
_outer_indexer = _join.outer_join_indexer_object
_box_scalars = False
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_allow_index_ops = True
_allow_datetime_index_ops = False
_allow_period_index_ops = False
_is_numeric_dtype = False
_can_hold_na = True
# prioritize current class for _shallow_copy_with_infer,
# used to infer integers as datetime-likes
_infer_as_myclass = False
_engine_type = _index.ObjectEngine
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, tupleize_cols=True, **kwargs):
if name is None and hasattr(data, 'name'):
name = data.name
if fastpath:
return cls._simple_new(data, name)
from .range import RangeIndex
# range
if isinstance(data, RangeIndex):
return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
elif isinstance(data, range):
return RangeIndex.from_range(data, copy=copy, dtype=dtype,
name=name)
# categorical
if is_categorical_dtype(data) or is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(data, copy=copy, name=name, **kwargs)
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
if (is_datetime64_any_dtype(data) or
(dtype is not None and is_datetime64_any_dtype(dtype)) or
'tz' in kwargs):
from pandas.tseries.index import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name,
dtype=dtype, **kwargs)
if dtype is not None and is_dtype_equal(_o_dtype, dtype):
return Index(result.to_pydatetime(), dtype=_o_dtype)
else:
return result
elif (is_timedelta64_dtype(data) or
(dtype is not None and is_timedelta64_dtype(dtype))):
from pandas.tseries.tdi import TimedeltaIndex
result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pytimedelta(), dtype=_o_dtype)
else:
return result
if dtype is not None:
try:
# we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
# GH 11836
if is_integer_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'integer':
data = np.array(data, copy=copy, dtype=dtype)
elif inferred in ['floating', 'mixed-integer-float']:
# if we are actually all equal to integers
# then coerce to integer
from .numeric import Int64Index, Float64Index
try:
res = data.astype('i8')
if (res == data).all():
return Int64Index(res, copy=copy,
name=name)
except (TypeError, ValueError):
pass
# return an actual float index
return Float64Index(data, copy=copy, dtype=dtype,
name=name)
elif inferred == 'string':
pass
else:
data = data.astype(dtype)
elif is_float_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'string':
pass
else:
data = data.astype(dtype)
else:
data = np.array(data, dtype=dtype, copy=copy)
except (TypeError, ValueError):
pass
# maybe coerce to a sub-class
from pandas.tseries.period import (PeriodIndex,
IncompatibleFrequency)
if isinstance(data, PeriodIndex):
return PeriodIndex(data, copy=copy, name=name, **kwargs)
if issubclass(data.dtype.type, np.integer):
from .numeric import Int64Index
return Int64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.floating):
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):
subarr = data.astype('object')
else:
subarr = _asarray_tuplesafe(data, dtype=object)
# _asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
if copy:
subarr = subarr.copy()
if dtype is None:
inferred = lib.infer_dtype(subarr)
if inferred == 'integer':
from .numeric import Int64Index
return Int64Index(subarr.astype('i8'), copy=copy,
name=name)
elif inferred in ['floating', 'mixed-integer-float']:
from .numeric import Float64Index
return Float64Index(subarr, copy=copy, name=name)
elif inferred == 'boolean':
                    # don't support boolean explicitly ATM
pass
elif inferred != 'string':
if inferred.startswith('datetime'):
if (lib.is_datetime_with_singletz_array(subarr) or
'tz' in kwargs):
# only when subarr has the same tz
from pandas.tseries.index import DatetimeIndex
try:
return DatetimeIndex(subarr, copy=copy,
name=name, **kwargs)
except tslib.OutOfBoundsDatetime:
pass
elif inferred.startswith('timedelta'):
from pandas.tseries.tdi import TimedeltaIndex
return TimedeltaIndex(subarr, copy=copy, name=name,
**kwargs)
elif inferred == 'period':
try:
return PeriodIndex(subarr, name=name, **kwargs)
except IncompatibleFrequency:
pass
return cls._simple_new(subarr, name)
elif hasattr(data, '__array__'):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name,
**kwargs)
elif data is None or is_scalar(data):
cls._scalar_data_error(data)
else:
if (tupleize_cols and isinstance(data, list) and data and
isinstance(data[0], tuple)):
# we must be all tuples, otherwise don't construct
# 10697
if all(isinstance(e, tuple) for e in data):
try:
# must be orderable in py3
if compat.PY3:
sorted(data)
from .multi import MultiIndex
return MultiIndex.from_tuples(
data, names=name or kwargs.get('names'))
except (TypeError, KeyError):
# python2 - MultiIndex fails on mixed types
pass
# other iterable of some kind
subarr = _asarray_tuplesafe(data, dtype=object)
return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)
"""
NOTE for new Index creation:
- _simple_new: It returns new Index with the same type as the caller.
      All metadata (such as name) must be provided by the caller.
Using _shallow_copy is recommended because it fills these metadata
otherwise specified.
- _shallow_copy: It returns new Index with the same type (using
_simple_new), but fills caller's metadata otherwise specified. Passed
kwargs will overwrite corresponding metadata.
- _shallow_copy_with_infer: It returns new Index inferring its type
from passed values. It fills caller's metadata otherwise specified as the
same as _shallow_copy.
See each method's docstring.
"""
@classmethod
def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
        we require that we have a dtype compat for the values
if we are passed a non-dtype compat, then coerce using the constructor
Must be careful not to recurse.
"""
if not hasattr(values, 'dtype'):
if values is None and dtype is not None:
values = np.empty(0, dtype=dtype)
else:
values = np.array(values, copy=False)
if is_object_dtype(values):
values = cls(values, name=name, dtype=dtype,
**kwargs)._values
result = object.__new__(cls)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
return result._reset_identity()
_index_shared_docs['_shallow_copy'] = """
create a new Index with the same class as the caller, don't copy the
data, use the same object attributes with passed in attributes taking
precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
return self._simple_new(values, **attributes)
def _shallow_copy_with_infer(self, values=None, **kwargs):
"""
create a new Index inferring the class with passed value, don't copy
the data, use the same object attributes with passed in attributes
taking precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes['copy'] = False
if self._infer_as_myclass:
try:
return self._constructor(values, **attributes)
except (TypeError, ValueError):
pass
return Index(values, **attributes)
def _deepcopy_if_needed(self, orig, copy=False):
"""
.. versionadded:: 0.19.0
Make a copy of self if data coincides (in memory) with orig.
Subclasses should override this if self._base is not an ndarray.
Parameters
----------
orig : ndarray
other ndarray to compare self._data against
copy : boolean, default False
when False, do not run any check, just return self
Returns
-------
A copy of self if needed, otherwise self : Index
"""
if copy:
# Retrieve the "base objects", i.e. the original memory allocations
orig = orig if orig.base is None else orig.base
new = self._data if self._data.base is None else self._data.base
if orig is new:
return self.copy(deep=True)
return self
def _update_inplace(self, result, **kwargs):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
_index_shared_docs['_get_grouper_for_level'] = """
Get index grouper corresponding to an index level
Parameters
----------
mapper: Group mapping function or None
Function mapping index values to groups
level : int or None
Index level
Returns
-------
grouper : Index
Index of values to group on
labels : ndarray of int or None
Array of locations in level_index
uniques : Index or None
Index of unique values for level
"""
@Appender(_index_shared_docs['_get_grouper_for_level'])
def _get_grouper_for_level(self, mapper, level=None):
assert level is None or level == 0
if mapper is None:
grouper = self
else:
grouper = self.map(mapper)
return grouper, None, None
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
"""
# use something other than None to be clearer
return self._id is getattr(
other, '_id', Ellipsis) and self._id is not None
def _reset_identity(self):
"""Initializes or resets ``_id`` attribute with new object"""
self._id = _Identity()
return self
# ndarray compat
def __len__(self):
"""
return the length of the Index
"""
return len(self._data)
def __array__(self, dtype=None):
""" the array interface, return my values """
return self._data.view(np.ndarray)
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
if is_bool_dtype(result):
return result
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(result, **attrs)
@cache_readonly
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@cache_readonly
def dtype_str(self):
""" return the dtype str of the underlying data """
return str(self.dtype)
@property
def values(self):
""" return the underlying data as an ndarray """
return self._data.view(np.ndarray)
def get_values(self):
""" return the underlying data as an ndarray """
return self.values
# ops compat
def tolist(self):
"""
return a list of the Index values
"""
return list(self.values)
@deprecate_kwarg(old_arg_name='n', new_arg_name='repeats')
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an Index. Refer to `numpy.ndarray.repeat`
for more information about the `repeats` argument.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
return self._shallow_copy(self._values.repeat(repeats))
def where(self, cond, other=None):
"""
.. versionadded:: 0.19.0
Return an Index of same shape as self and whose corresponding
entries are from self where cond is True and otherwise are from
other.
Parameters
----------
cond : boolean same length as self
other : scalar, or array-like
"""
if other is None:
other = self._na_value
values = np.where(cond, self.values, other)
return self._shallow_copy_with_infer(values, dtype=self.dtype)
def ravel(self, order='C'):
"""
return an ndarray of the flattened values of the underlying data
See also
--------
numpy.ndarray.ravel
"""
return self._values.ravel(order=order)
# construction helpers
@classmethod
def _scalar_data_error(cls, data):
raise TypeError('{0}(...) must be called with a collection of some '
'kind, {1} was passed'.format(cls.__name__,
repr(data)))
@classmethod
def _string_data_error(cls, data):
raise TypeError('String dtype not supported, you may need '
'to explicitly cast to a numeric type')
@classmethod
def _coerce_to_ndarray(cls, data):
"""coerces data to ndarray, raises on scalar data. Converts other
iterables to list first and then to array. Does not touch ndarrays.
"""
if not isinstance(data, (np.ndarray, Index)):
if data is None or is_scalar(data):
cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
data = np.asarray(data)
return data
def _get_attributes_dict(self):
""" return an attributes dict for my class """
return dict([(k, getattr(self, k, None)) for k in self._attributes])
def view(self, cls=None):
# we need to see if we are subclassing an
# index type here
if cls is not None and not hasattr(cls, '_typ'):
result = self._data.view(cls)
else:
result = self._shallow_copy()
if isinstance(result, Index):
result._id = self._id
return result
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to a compat for our index type
Parameters
----------
item : scalar item to coerce
"""
return Index([item], dtype=self.dtype, **self._get_attributes_dict())
_index_shared_docs['copy'] = """
Make a copy of this object. Name and dtype sets those attributes on
the new object.
Parameters
----------
name : string, optional
deep : boolean, default False
dtype : numpy dtype or pandas type
Returns
-------
copy : Index
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
"""
@Appender(_index_shared_docs['copy'])
def copy(self, name=None, deep=False, dtype=None, **kwargs):
if deep:
new_index = self._shallow_copy(self._data.copy())
else:
new_index = self._shallow_copy()
names = kwargs.get('names')
names = self._validate_names(name=name, names=names, deep=deep)
new_index = new_index.set_names(names)
if dtype:
new_index = new_index.astype(dtype)
return new_index
__copy__ = copy
def _validate_names(self, name=None, names=None, deep=False):
"""
Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' parameter for MultiIndex.
"""
from copy import deepcopy
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
elif names is None and name is None:
return deepcopy(self.names) if deep else self.names
elif names is not None:
if not is_list_like(names):
raise TypeError("Must pass list-like as `names`.")
return names
else:
if not is_list_like(name):
return [name]
return name
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
klass = self.__class__.__name__
data = self._format_data()
attrs = self._format_attrs()
space = self._format_space()
prepr = (u(",%s") %
space).join([u("%s=%s") % (k, v) for k, v in attrs])
# no data provided, just attributes
if data is None:
data = ''
res = u("%s(%s%s)") % (klass, data, prepr)
return res
def _format_space(self):
# using space here controls if the attributes
# are line separated or not (the default)
# max_seq_items = get_option('display.max_seq_items')
# if len(self) > max_seq_items:
# space = "\n%s" % (' ' * (len(klass) + 1))
return " "
@property
def _formatter_func(self):
"""
Return the formatted data as a unicode string
"""
return default_pprint
def _format_data(self):
"""
Return the formatted data as a unicode string
"""
from pandas.formats.format import get_console_size, _get_adjustment
display_width, _ = get_console_size()
if display_width is None:
display_width = get_option('display.width') or 80
space1 = "\n%s" % (' ' * (len(self.__class__.__name__) + 1))
space2 = "\n%s" % (' ' * (len(self.__class__.__name__) + 2))
n = len(self)
sep = ','
max_seq_items = get_option('display.max_seq_items') or n
formatter = self._formatter_func
# do we want to justify (only do so for non-objects)
is_justify = not (self.inferred_type in ('string', 'unicode') or
(self.inferred_type == 'categorical' and
is_object_dtype(self.categories)))
# are we a truncated display
is_truncated = n > max_seq_items
# adj can optionally handle unicode East Asian width
adj = _get_adjustment()
def _extend_line(s, line, value, display_width, next_line_prefix):
if (adj.len(line.rstrip()) + adj.len(value.rstrip()) >=
display_width):
s += line.rstrip()
line = next_line_prefix
line += value
return s, line
def best_len(values):
if values:
return max([adj.len(x) for x in values])
else:
return 0
if n == 0:
summary = '[], '
elif n == 1:
first = formatter(self[0])
summary = '[%s], ' % first
elif n == 2:
first = formatter(self[0])
last = formatter(self[-1])
summary = '[%s, %s], ' % (first, last)
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in self[:n]]
tail = [formatter(x) for x in self[-n:]]
else:
head = []
tail = [formatter(x) for x in self]
# adjust all values to max length if needed
if is_justify:
# however, if we are not truncated and we are only a single
# line, then don't justify
if (is_truncated or
not (len(', '.join(head)) < display_width and
len(', '.join(tail)) < display_width)):
max_len = max(best_len(head), best_len(tail))
head = [x.rjust(max_len) for x in head]
tail = [x.rjust(max_len) for x in tail]
summary = ""
line = space2
for i in range(len(head)):
word = head[i] + sep + ' '
summary, line = _extend_line(summary, line, word,
display_width, space2)
if is_truncated:
# remove trailing space of last line
summary += line.rstrip() + space2 + '...'
line = space2
for i in range(len(tail) - 1):
word = tail[i] + sep + ' '
summary, line = _extend_line(summary, line, word,
display_width, space2)
# last value: no sep added + 1 space of width used for trailing ','
summary, line = _extend_line(summary, line, tail[-1],
display_width - 2, space2)
summary += line
summary += '],'
if len(summary) > (display_width):
summary += space1
else: # one row
summary += ' '
# remove initial space
summary = '[' + summary[len(space2):]
return summary
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
attrs = []
attrs.append(('dtype', "'%s'" % self.dtype))
if self.name is not None:
attrs.append(('name', default_pprint(self.name)))
max_seq_items = get_option('display.max_seq_items') or len(self)
if len(self) > max_seq_items:
attrs.append(('length', len(self)))
return attrs
def to_series(self, **kwargs):
"""
Create a Series with both index and values equal to the index keys,
useful with map for returning an indexer based on an index.
Returns
-------
Series : dtype will be based on the type of the Index values.
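Examples
--------
A minimal, illustrative sketch:
>>> idx = Index(['a', 'b'])
>>> s = idx.to_series()  # Series with values ['a', 'b'] and index ['a', 'b']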
"""
from pandas import Series
return Series(self._to_embed(), index=self, name=self.name)
def _to_embed(self, keep_tz=False):
"""
*this is an internal non-public method*
return an array repr of this object, potentially casting to object
"""
return self.values.copy()
_index_shared_docs['astype'] = """
Create an Index with values cast to dtypes. The class of a new Index
is determined by dtype. When conversion is impossible, a ValueError
exception is raised.
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and internal requirements on dtype are
satisfied, the original data is used to create a new Index
or the original Index is returned.
.. versionadded:: 0.19.0
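Examples
--------
Illustrative only (a small integer Index cast to float):
>>> Index([1, 2, 3]).astype('float64')  # -> a float64 Index of [1.0, 2.0, 3.0]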
"""
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
return Index(self.values.astype(dtype, copy=copy), name=self.name,
dtype=dtype)
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self
def to_datetime(self, dayfirst=False):
"""
DEPRECATED: use :meth:`pandas.to_datetime` instead.
For an Index containing strings or datetime.datetime objects, attempt
conversion to DatetimeIndex
"""
warnings.warn("to_datetime is deprecated. Use pd.to_datetime(...)",
FutureWarning, stacklevel=2)
from pandas.tseries.index import DatetimeIndex
if self.inferred_type == 'string':
from dateutil.parser import parse
parser = lambda x: parse(x, dayfirst=dayfirst)
parsed = lib.try_parse_dates(self.values, parser=parser)
return DatetimeIndex(parsed)
else:
return DatetimeIndex(self.values)
def _assert_can_do_setop(self, other):
if not is_list_like(other):
raise TypeError('Input must be Index or array-like')
return True
def _convert_can_do_setop(self, other):
if not isinstance(other, Index):
other = Index(other, name=self.name)
result_name = self.name
else:
result_name = self.name if self.name == other.name else None
return other, result_name
def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
return value
def _assert_can_do_op(self, value):
""" Check value is valid for scalar op """
if not lib.isscalar(value):
msg = "'value' must be a scalar, passed: {0}"
raise TypeError(msg.format(type(value).__name__))
@property
def nlevels(self):
return 1
def _get_names(self):
return FrozenList((self.name, ))
def _set_names(self, values, level=None):
if len(values) != 1:
raise ValueError('Length of new names must be 1, got %d' %
len(values))
self.name = values[0]
names = property(fset=_set_names, fget=_get_names)
def set_names(self, names, level=None, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
names : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
Examples
--------
>>> Index([1, 2, 3, 4]).set_names('foo')
Int64Index([1, 2, 3, 4], dtype='int64')
>>> Index([1, 2, 3, 4]).set_names(['foo'])
Int64Index([1, 2, 3, 4], dtype='int64')
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_names(['baz', 'quz'])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'quz'])
>>> idx.set_names('baz', level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'bar'])
"""
if level is not None and self.nlevels == 1:
raise ValueError('Level must be None for non-MultiIndex')
if level is not None and not is_list_like(level) and is_list_like(
names):
raise TypeError("Names must be a string")
if not is_list_like(names) and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
if not is_list_like(names):
names = [names]
if level is not None and not is_list_like(level):
level = [level]
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._set_names(names, level=level)
if not inplace:
return idx
def rename(self, name, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
name : str or list
name to set
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
"""
return self.set_names([name], inplace=inplace)
def reshape(self, *args, **kwargs):
"""
NOT IMPLEMENTED: do not call this method, as reshaping is not
supported for Index objects and will raise an error.
Reshape an Index.
"""
raise NotImplementedError("reshaping is not supported "
"for Index objects")
@property
def _has_complex_internals(self):
# to disable groupby tricks in MultiIndex
return False
def summary(self, name=None):
if len(self) > 0:
head = self[0]
if (hasattr(head, 'format') and
not isinstance(head, compat.string_types)):
head = head.format()
tail = self[-1]
if (hasattr(tail, 'format') and
not isinstance(tail, compat.string_types)):
tail = tail.format()
index_summary = ', %s to %s' % (pprint_thing(head),
pprint_thing(tail))
else:
index_summary = ''
if name is None:
name = type(self).__name__
return '%s: %s entries%s' % (name, len(self), index_summary)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.values
_na_value = np.nan
"""The expected NA value to use with this index."""
# introspection
@property
def is_monotonic(self):
""" alias for is_monotonic_increasing (deprecated) """
return self._engine.is_monotonic_increasing
@property
def is_monotonic_increasing(self):
"""
return if the index is monotonic increasing (only equal or
increasing) values.
"""
return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
"""
return self._engine.is_monotonic_decreasing
def is_lexsorted_for_tuple(self, tup):
return True
@cache_readonly(allow_setting=True)
def is_unique(self):
""" return if the index has unique values """
return self._engine.is_unique
@property
def has_duplicates(self):
return not self.is_unique
def is_boolean(self):
return self.inferred_type in ['boolean']
def is_integer(self):
return self.inferred_type in ['integer']
def is_floating(self):
return self.inferred_type in ['floating', 'mixed-integer-float']
def is_numeric(self):
return self.inferred_type in ['integer', 'floating']
def is_object(self):
return is_object_dtype(self.dtype)
def is_categorical(self):
return self.inferred_type in ['categorical']
def is_mixed(self):
return self.inferred_type in ['mixed']
def holds_integer(self):
return self.inferred_type in ['integer', 'mixed-integer']
# validate / convert indexers
def _convert_scalar_indexer(self, key, kind=None):
"""
convert a scalar indexer
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
if kind == 'iloc':
return self._validate_indexer('positional', key, kind)
if len(self) and not isinstance(self, ABCMultiIndex):
# we can raise here if we are definitive that this
# is positional indexing (eg. .ix on with a float)
# or label indexing if we are using a type able
# to be represented in the index
if kind in ['getitem', 'ix'] and is_float(key):
if not self.is_floating():
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_float(key):
# we want to raise KeyError on string/mixed here
# technically we *could* raise a TypeError
# on anything but mixed though
if self.inferred_type not in ['floating',
'mixed-integer-float',
'string',
'unicode',
'mixed']:
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_integer(key):
if not self.holds_integer():
return self._invalid_indexer('label', key)
return key
def _convert_slice_indexer(self, key, kind=None):
"""
convert a slice indexer. disallow floats in the start/stop/step
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# if we are not a slice, then we are done
if not isinstance(key, slice):
return key
# validate iloc
if kind == 'iloc':
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# potentially cast the bounds to integers
start, stop, step = key.start, key.stop, key.step
# figure out if this is a positional indexer
def is_int(v):
return v is None or is_integer(v)
is_null_slicer = start is None and stop is None
is_index_slice = is_int(start) and is_int(stop)
is_positional = is_index_slice and not self.is_integer()
if kind == 'getitem':
"""
called from the getitem slicers, validate that we are in fact
integers
"""
if self.is_integer() or is_index_slice:
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# convert the slice to an indexer here
# if we are mixed and have integers
try:
if is_positional and self.is_mixed():
# TODO: i, j are not used anywhere
if start is not None:
i = self.get_loc(start) # noqa
if stop is not None:
j = self.get_loc(stop) # noqa
is_positional = False
except KeyError:
if self.inferred_type == 'mixed-integer-float':
raise
if is_null_slicer:
indexer = key
elif is_positional:
indexer = key
else:
try:
indexer = self.slice_indexer(start, stop, step, kind=kind)
except Exception:
if is_index_slice:
if self.is_integer():
raise
else:
indexer = key
else:
raise
return indexer
def _convert_list_indexer(self, keyarr, kind=None):
"""
passed a key that is tuplesafe that is integer based
and we have a mixed index (e.g. number/labels). figure out
the indexer. return None if we can't help
"""
if (kind in [None, 'iloc', 'ix'] and
is_integer_dtype(keyarr) and not self.is_floating() and
not isinstance(keyarr, ABCPeriodIndex)):
if self.inferred_type == 'mixed-integer':
indexer = self.get_indexer(keyarr)
if (indexer >= 0).all():
return indexer
# missing values are flagged as -1 by get_indexer and negative
# indices are already converted to positive indices in the
# above if-statement, so the negative flags are changed to
# values outside the range of indices so as to trigger an
# IndexError in maybe_convert_indices
indexer[indexer < 0] = len(self)
from pandas.core.indexing import maybe_convert_indices
return maybe_convert_indices(indexer, len(self))
elif not self.inferred_type == 'integer':
keyarr = np.where(keyarr < 0, len(self) + keyarr, keyarr)
return keyarr
return None
def _invalid_indexer(self, form, key):
""" consistent invalid indexer message """
raise TypeError("cannot do {form} indexing on {klass} with these "
"indexers [{key}] of {kind}".format(
form=form, klass=type(self), key=key,
kind=type(key)))
def get_duplicates(self):
from collections import defaultdict
counter = defaultdict(lambda: 0)
for k in self.values:
counter[k] += 1
return sorted(k for k, v in compat.iteritems(counter) if v > 1)
_get_duplicates = get_duplicates
def _cleanup(self):
self._engine.clear_mapping()
@cache_readonly
def _constructor(self):
return type(self)
@cache_readonly
def _engine(self):
# property, for now, slow to look up
return self._engine_type(lambda: self._values, len(self))
def _validate_index_level(self, level):
"""
Validate index level.
For single-level Index getting level number is a no-op, but some
verification must be done like in MultiIndex.
"""
if isinstance(level, int):
if level < 0 and level != -1:
raise IndexError("Too many levels: Index has only 1 level,"
" %d is not a valid level number" % (level, ))
elif level > 0:
raise IndexError("Too many levels:"
" Index has only 1 level, not %d" %
(level + 1))
elif level != self.name:
raise KeyError('Level %s must be same as name (%s)' %
(level, self.name))
def _get_level_number(self, level):
self._validate_index_level(level)
return 0
@cache_readonly
def inferred_type(self):
""" return a string of the type inferred from the values """
return lib.infer_dtype(self)
def is_type_compatible(self, kind):
return kind == self.inferred_type
@cache_readonly
def is_all_dates(self):
if self._data is None:
return False
return is_datetime_array(_ensure_object(self.values))
def __iter__(self):
return iter(self.values)
def __reduce__(self):
d = dict(data=self._data)
d.update(self._get_attributes_dict())
return _new_Index, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
self._data = state.pop('data')
for k, v in compat.iteritems(state):
setattr(self, k, v)
elif isinstance(state, tuple):
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
self.name = own_state[0]
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(data, state)
self._data = data
self._reset_identity()
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
return self.copy(deep=True)
def __nonzero__(self):
raise ValueError("The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
.format(self.__class__.__name__))
__bool__ = __nonzero__
def __contains__(self, key):
hash(key)
# work around some kind of odd cython bug
try:
return key in self._engine
except TypeError:
return False
def __hash__(self):
raise TypeError("unhashable type: %r" % type(self).__name__)
def __setitem__(self, key, value):
raise TypeError("Index does not support mutable operations")
def __getitem__(self, key):
"""
Override numpy.ndarray's __getitem__ method to work as desired.
This function adds lists and Series as valid boolean indexers
(a plain ndarray only supports boolean ndarrays as indexers).
If resulting ndim != 1, plain ndarray is returned instead of
corresponding `Index` subclass.
"""
# There's no custom logic to be implemented in __getslice__, so it's
# not overloaded intentionally.
getitem = self._data.__getitem__
promote = self._shallow_copy
if is_scalar(key):
return getitem(key)
if isinstance(key, slice):
# This case is separated from the conditional above to avoid
# pessimization of basic indexing.
return promote(getitem(key))
if is_bool_indexer(key):
key = np.asarray(key)
key = _values_from_object(key)
result = getitem(key)
if not is_scalar(result):
return promote(result)
else:
return result
def append(self, other):
"""
Append a collection of Index objects together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
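Examples
--------
A hedged sketch with two small integer indexes:
>>> Index([1, 2]).append(Index([3, 4]))  # -> an Index of [1, 2, 3, 4]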
"""
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if not isinstance(obj, Index):
raise TypeError('all inputs must be Index')
names = set([obj.name for obj in to_concat])
name = None if len(names) > 1 else self.name
if self.is_categorical():
# if calling index is category, don't check dtype of others
from pandas.indexes.category import CategoricalIndex
return CategoricalIndex._append_same_dtype(self, to_concat, name)
typs = _concat.get_dtype_kinds(to_concat)
if len(typs) == 1:
return self._append_same_dtype(to_concat, name=name)
return _concat._concat_index_asobject(to_concat, name=name)
def _append_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class
"""
# must be overridden in specific classes
return _concat._concat_index_asobject(to_concat, name)
_index_shared_docs['take'] = """
return a new %(klass)s of the values selected by the indices
For internal compatibility with numpy arrays.
Parameters
----------
indices : list
Indices to be taken
axis : int, optional
The axis over which to select values, always 0.
allow_fill : bool, default True
fill_value : scalar, default None
If allow_fill=True and fill_value is not None, indices specified by
-1 are regarded as NA. If the Index doesn't hold NA, raise ValueError
See also
--------
numpy.ndarray.take
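Examples
--------
Illustrative only (positions chosen arbitrarily):
>>> idx = Index(['a', 'b', 'c'])
>>> idx.take([2, 0])  # -> Index(['c', 'a'])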
"""
@Appender(_index_shared_docs['take'])
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = _ensure_platform_int(indices)
if self._can_hold_na:
taken = self._assert_take_fillable(self.values, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=self._na_value)
else:
if allow_fill and fill_value is not None:
msg = 'Unable to fill values because {0} cannot contain NA'
raise ValueError(msg.format(self.__class__.__name__))
taken = self.values.take(indices)
return self._shallow_copy(taken)
def _assert_take_fillable(self, values, indices, allow_fill=True,
fill_value=None, na_value=np.nan):
""" Internal method to handle NA filling of take """
indices = _ensure_platform_int(indices)
# only fill if we are passing a non-None fill_value
if allow_fill and fill_value is not None:
if (indices < -1).any():
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
raise ValueError(msg)
taken = values.take(indices)
mask = indices == -1
if mask.any():
taken[mask] = na_value
else:
taken = values.take(indices)
return taken
@cache_readonly
def _isnan(self):
""" return if each value is nan"""
if self._can_hold_na:
return isnull(self)
else:
# shouldn't reach to this condition by checking hasnans beforehand
values = np.empty(len(self), dtype=np.bool_)
values.fill(False)
return values
@cache_readonly
def _nan_idxs(self):
if self._can_hold_na:
w, = self._isnan.nonzero()
return w
else:
return np.array([], dtype=np.int64)
@cache_readonly
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
if self._can_hold_na:
return self._isnan.any()
else:
return False
def putmask(self, mask, value):
"""
return a new Index of the values set with the mask
See also
--------
numpy.ndarray.putmask
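Examples
--------
A rough sketch (mask and replacement value are arbitrary):
>>> Index([1, 2, 3]).putmask([True, False, True], 9)  # -> an Index of [9, 2, 9]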
"""
values = self.values.copy()
try:
np.putmask(values, mask, self._convert_for_op(value))
return self._shallow_copy(values)
except (ValueError, TypeError):
# coerces to object
return self.astype(object).putmask(mask, value)
def format(self, name=False, formatter=None, **kwargs):
"""
Render a string representation of the Index
"""
header = []
if name:
header.append(pprint_thing(self.name,
escape_chars=('\t', '\r', '\n')) if
self.name is not None else '')
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header, **kwargs)
def _format_with_header(self, header, na_rep='NaN', **kwargs):
values = self.values
from pandas.formats.format import format_array
if is_categorical_dtype(values.dtype):
values = np.array(values)
elif is_object_dtype(values.dtype):
values = lib.maybe_convert_objects(values, safe=1)
if is_object_dtype(values.dtype):
result = [pprint_thing(x, escape_chars=('\t', '\r', '\n'))
for x in values]
# could have nans
mask = isnull(values)
if mask.any():
result = np.array(result)
result[mask] = na_rep
result = result.tolist()
else:
result = _trim_front(format_array(values, None, justify='left'))
return header + result
def to_native_types(self, slicer=None, **kwargs):
""" slice and dice then format """
values = self
if slicer is not None:
values = values[slicer]
return values._format_native_types(**kwargs)
def _format_native_types(self, na_rep='', quoting=None, **kwargs):
""" actually format my specific types """
mask = isnull(self)
if not self.is_object() and not quoting:
values = np.asarray(self).astype(str)
else:
values = np.array(self, dtype=object, copy=True)
values[mask] = na_rep
return values
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if is_object_dtype(self) and not is_object_dtype(other):
# if other is not object, use other's logic for coercion
return other.equals(self)
try:
return array_equivalent(_values_from_object(self),
_values_from_object(other))
except:
return False
def identical(self, other):
"""Similar to equals, but check that other comparable attributes are
also equal
"""
return (self.equals(other) and
all((getattr(self, c, None) == getattr(other, c, None)
for c in self._comparables)) and
type(self) == type(other))
def asof(self, label):
"""
For a sorted index, return the most recent label up to and including
the passed label. Return NaN if not found.
See also
--------
get_loc : asof is a thin wrapper around get_loc with method='pad'
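Examples
--------
Illustrative sketch on a small sorted integer index:
>>> idx = Index([10, 20, 30])
>>> idx.asof(25)  # most recent label <= 25 -> 20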
"""
try:
loc = self.get_loc(label, method='pad')
except KeyError:
return _get_na_value(self.dtype)
else:
if isinstance(loc, slice):
loc = loc.indices(len(self))[-1]
return self[loc]
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
locs = self.values[mask].searchsorted(where.values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where < self.values[first])] = -1
return result
def sort_values(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index
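Examples
--------
Illustrative only:
>>> idx = Index([3, 1, 2])
>>> idx.sort_values()  # -> an Index of [1, 2, 3]
>>> idx.sort_values(return_indexer=True)  # also returns the argsort indexer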
"""
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
if return_indexer:
return sorted_index, _as
else:
return sorted_index
def order(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index
DEPRECATED: use :meth:`Index.sort_values`
"""
warnings.warn("order is deprecated, use sort_values(...)",
FutureWarning, stacklevel=2)
return self.sort_values(return_indexer=return_indexer,
ascending=ascending)
def sort(self, *args, **kwargs):
raise TypeError("cannot sort an Index object in-place, use "
"sort_values instead")
def sortlevel(self, level=None, ascending=True, sort_remaining=None):
"""
For internal compatibility with the Index API
Sort the Index. This is for compat with MultiIndex
Parameters
----------
ascending : boolean, default True
False to sort in descending order
level, sort_remaining are compat parameters
Returns
-------
sorted_index : Index
"""
return self.sort_values(return_indexer=True, ascending=ascending)
def shift(self, periods=1, freq=None):
"""
Shift Index containing datetime objects by input number of periods and
DateOffset
Returns
-------
shifted : Index
"""
raise NotImplementedError("Not supported for type %s" %
type(self).__name__)
def argsort(self, *args, **kwargs):
"""
Returns the indices that would sort the index and its
underlying data.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
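Examples
--------
A minimal sketch:
>>> Index(['b', 'a', 'c']).argsort()  # -> array([1, 0, 2])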
"""
result = self.asi8
if result is None:
result = np.array(self)
return result.argsort(*args, **kwargs)
def __add__(self, other):
return Index(np.array(self) + other)
def __radd__(self, other):
return Index(other + np.array(self))
__iadd__ = __add__
def __sub__(self, other):
raise TypeError("cannot perform __sub__ with this index type: "
"{typ}".format(typ=type(self)))
def __and__(self, other):
return self.intersection(other)
def __or__(self, other):
return self.union(other)
def __xor__(self, other):
return self.symmetric_difference(other)
def _get_consensus_name(self, other):
"""
Given 2 indexes, give a consensus name meaning
we take the not None one, or None if the names differ.
Return a new object if we are resetting the name
"""
if self.name != other.name:
if self.name is None or other.name is None:
name = self.name or other.name
else:
name = None
if self.name != name:
return self._shallow_copy(name=name)
return self
def union(self, other):
"""
Form the union of two Index objects and sorts if possible.
Parameters
----------
other : Index or array-like
Returns
-------
union : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.union(idx2)
Int64Index([1, 2, 3, 4, 5, 6], dtype='int64')
"""
self._assert_can_do_setop(other)
other = _ensure_index(other)
if len(other) == 0 or self.equals(other):
return self._get_consensus_name(other)
if len(self) == 0:
return other._get_consensus_name(self)
if not is_dtype_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.union(other)
if self.is_monotonic and other.is_monotonic:
try:
result = self._outer_indexer(self._values, other._values)[0]
except TypeError:
# incomparable objects
result = list(self._values)
# worth making this faster? a very unusual case
value_set = set(self._values)
result.extend([x for x in other._values if x not in value_set])
else:
indexer = self.get_indexer(other)
indexer, = (indexer == -1).nonzero()
if len(indexer) > 0:
other_diff = algos.take_nd(other._values, indexer,
allow_fill=False)
result = _concat._concat_compat((self._values, other_diff))
try:
self._values[0] < other_diff[0]
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning,
stacklevel=3)
else:
types = frozenset((self.inferred_type,
other.inferred_type))
if not types & _unsortable_types:
result.sort()
else:
result = self._values
try:
result = np.sort(result)
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning,
stacklevel=3)
# for subclasses
return self._wrap_union_result(other, result)
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
return self.__class__(result, name=name)
def intersection(self, other):
"""
Form the intersection of two Index objects.
This returns a new Index with elements common to the index and `other`.
Sortedness of the result is not guaranteed.
Parameters
----------
other : Index or array-like
Returns
-------
intersection : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.intersection(idx2)
Int64Index([3, 4], dtype='int64')
"""
self._assert_can_do_setop(other)
other = _ensure_index(other)
if self.equals(other):
return self._get_consensus_name(other)
if not is_dtype_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.intersection(other)
if self.is_monotonic and other.is_monotonic:
try:
result = self._inner_indexer(self._values, other._values)[0]
return self._wrap_union_result(other, result)
except TypeError:
pass
try:
indexer = Index(self._values).get_indexer(other._values)
indexer = indexer.take((indexer != -1).nonzero()[0])
except:
# duplicates
indexer = Index(self._values).get_indexer_non_unique(
other._values)[0].unique()
indexer = indexer[indexer != -1]
taken = self.take(indexer)
if self.name != other.name:
taken.name = None
return taken
def difference(self, other):
"""
Return a new Index with elements from the index that are not in
`other`.
This is the set difference of two Index objects.
It's sorted if sorting is possible.
Parameters
----------
other : Index or array-like
Returns
-------
difference : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.difference(idx2)
Int64Index([1, 2], dtype='int64')
"""
self._assert_can_do_setop(other)
if self.equals(other):
return Index([], name=self.name)
other, result_name = self._convert_can_do_setop(other)
this = self._get_unique_index()
indexer = this.get_indexer(other)
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer,
assume_unique=True)
the_diff = this.values.take(label_diff)
try:
the_diff = algos.safe_sort(the_diff)
except TypeError:
pass
return this._shallow_copy(the_diff, name=result_name, freq=None)
def symmetric_difference(self, other, result_name=None):
"""
Compute the symmetric difference of two Index objects.
It's sorted if sorting is possible.
Parameters
----------
other : Index or array-like
result_name : str
Returns
-------
symmetric_difference : Index
Notes
-----
``symmetric_difference`` contains elements that appear in either
``idx1`` or ``idx2`` but not both. Equivalent to the Index created by
``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates
dropped.
Examples
--------
>>> idx1 = Index([1, 2, 3, 4])
>>> idx2 = Index([2, 3, 4, 5])
>>> idx1.symmetric_difference(idx2)
Int64Index([1, 5], dtype='int64')
You can also use the ``^`` operator:
>>> idx1 ^ idx2
Int64Index([1, 5], dtype='int64')
"""
self._assert_can_do_setop(other)
other, result_name_update = self._convert_can_do_setop(other)
if result_name is None:
result_name = result_name_update
this = self._get_unique_index()
other = other._get_unique_index()
indexer = this.get_indexer(other)
# {this} minus {other}
common_indexer = indexer.take((indexer != -1).nonzero()[0])
left_indexer = np.setdiff1d(np.arange(this.size), common_indexer,
assume_unique=True)
left_diff = this.values.take(left_indexer)
# {other} minus {this}
right_indexer = (indexer == -1).nonzero()[0]
right_diff = other.values.take(right_indexer)
the_diff = _concat._concat_compat([left_diff, right_diff])
try:
the_diff = algos.safe_sort(the_diff)
except TypeError:
pass
attribs = self._get_attributes_dict()
attribs['name'] = result_name
if 'freq' in attribs:
attribs['freq'] = None
return self._shallow_copy_with_infer(the_diff, **attribs)
sym_diff = deprecate('sym_diff', symmetric_difference)
def _get_unique_index(self, dropna=False):
"""
Returns an index containing unique values.
Parameters
----------
dropna : bool
If True, NaN values are dropped.
Returns
-------
uniques : index
"""
if self.is_unique and not dropna:
return self
values = self.values
if not self.is_unique:
values = self.unique()
if dropna:
try:
if self.hasnans:
values = values[~isnull(values)]
except NotImplementedError:
pass
return self._shallow_copy(values)
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Parameters
----------
key : label
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
tolerance : optional
Maximum distance from index value for inexact matches. The value of
the index at the matching location must satisfy the equation
``abs(index[loc] - key) <= tolerance``.
.. versionadded:: 0.17.0
Returns
-------
loc : int if unique index, possibly slice or mask if not
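Examples
--------
Illustrative only (unique indexes built inline for demonstration):
>>> idx = Index(['a', 'b', 'c'])
>>> idx.get_loc('b')  # exact match -> 1
>>> Index([10, 20, 30]).get_loc(25, method='pad')  # previous label -> 1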
"""
if method is None:
if tolerance is not None:
raise ValueError('tolerance argument only valid if using pad, '
'backfill or nearest lookups')
key = _values_from_object(key)
try:
return self._engine.get_loc(key)
except KeyError:
return self._engine.get_loc(self._maybe_cast_indexer(key))
indexer = self.get_indexer([key], method=method, tolerance=tolerance)
if indexer.ndim > 1 or indexer.size > 1:
raise TypeError('get_loc requires scalar valued input')
loc = indexer.item()
if loc == -1:
raise KeyError(key)
return loc
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
# if we have something that is Index-like, then
# use this, e.g. DatetimeIndex
s = getattr(series, '_values', None)
if isinstance(s, Index) and is_scalar(key):
try:
return s[key]
except (IndexError, ValueError):
# invalid type as an indexer
pass
s = _values_from_object(series)
k = _values_from_object(key)
k = self._convert_scalar_indexer(k, kind='getitem')
try:
return self._engine.get_value(s, k,
tz=getattr(series.dtype, 'tz', None))
except KeyError as e1:
if len(self) > 0 and self.inferred_type in ['integer', 'boolean']:
raise
try:
return tslib.get_value_box(s, key)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# python 3
if is_scalar(key): # pragma: no cover
raise IndexError(key)
raise InvalidIndexError(key)
def set_value(self, arr, key, value):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
self._engine.set_value(_values_from_object(arr),
_values_from_object(key), value)
def get_level_values(self, level):
"""
Return vector of label values for requested level, equal to the length
of the index
Parameters
----------
level : int
Returns
-------
values : ndarray
"""
# checks that level number is actually just 1
self._validate_index_level(level)
return self
def get_indexer(self, target, method=None, limit=None, tolerance=None):
"""
Compute indexer and mask for new index given the current index. The
indexer should be then used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : Index
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
limit : int, optional
Maximum number of consecutive labels in ``target`` to match for
inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
.. versionadded:: 0.17.0
Examples
--------
>>> indexer = index.get_indexer(new_index)
>>> new_values = cur_values.take(indexer)
Returns
-------
indexer : ndarray of int
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
"""
method = missing.clean_reindex_fill_method(method)
target = _ensure_index(target)
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance)
pself, ptarget = self._possibly_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer(ptarget, method=method, limit=limit,
tolerance=tolerance)
if not is_dtype_equal(self.dtype, target.dtype):
this = self.astype(object)
target = target.astype(object)
return this.get_indexer(target, method=method, limit=limit,
tolerance=tolerance)
if not self.is_unique:
raise InvalidIndexError('Reindexing only valid with uniquely'
' valued Index objects')
if method == 'pad' or method == 'backfill':
indexer = self._get_fill_indexer(target, method, limit, tolerance)
elif method == 'nearest':
indexer = self._get_nearest_indexer(target, limit, tolerance)
else:
if tolerance is not None:
raise ValueError('tolerance argument only valid if doing pad, '
'backfill or nearest reindexing')
if limit is not None:
raise ValueError('limit argument only valid if doing pad, '
'backfill or nearest reindexing')
indexer = self._engine.get_indexer(target._values)
return _ensure_platform_int(indexer)
def _convert_tolerance(self, tolerance):
# override this method on subclasses
return tolerance
def _get_fill_indexer(self, target, method, limit=None, tolerance=None):
if self.is_monotonic_increasing and target.is_monotonic_increasing:
method = (self._engine.get_pad_indexer if method == 'pad' else
self._engine.get_backfill_indexer)
indexer = method(target._values, limit)
else:
indexer = self._get_fill_indexer_searchsorted(target, method,
limit)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target._values, indexer,
tolerance)
return indexer
def _get_fill_indexer_searchsorted(self, target, method, limit=None):
"""
Fallback pad/backfill get_indexer that works for monotonic decreasing
indexes and non-monotonic targets
"""
if limit is not None:
raise ValueError('limit argument for %r method only well-defined '
'if index and target are monotonic' % method)
side = 'left' if method == 'pad' else 'right'
target = np.asarray(target)
# find exact matches first (this simplifies the algorithm)
indexer = self.get_indexer(target)
nonexact = (indexer == -1)
indexer[nonexact] = self._searchsorted_monotonic(target[nonexact],
side)
if side == 'left':
# searchsorted returns "indices into a sorted array such that,
# if the corresponding elements in v were inserted before the
# indices, the order of a would be preserved".
# Thus, we need to subtract 1 to find values to the left.
indexer[nonexact] -= 1
# This also mapped not found values (values of 0 from
# np.searchsorted) to -1, which conveniently is also our
# sentinel for missing values
else:
# Mark indices to the right of the largest value as not found
indexer[indexer == len(self)] = -1
return indexer
def _get_nearest_indexer(self, target, limit, tolerance):
"""
Get the indexer for the nearest index labels; requires an index with
values that can be subtracted from each other (e.g., not strings or
tuples).
"""
left_indexer = self.get_indexer(target, 'pad', limit=limit)
right_indexer = self.get_indexer(target, 'backfill', limit=limit)
target = np.asarray(target)
left_distances = abs(self.values[left_indexer] - target)
right_distances = abs(self.values[right_indexer] - target)
op = operator.lt if self.is_monotonic_increasing else operator.le
indexer = np.where(op(left_distances, right_distances) |
(right_indexer == -1), left_indexer, right_indexer)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target, indexer,
tolerance)
return indexer
def _filter_indexer_tolerance(self, target, indexer, tolerance):
distance = abs(self.values[indexer] - target)
indexer = np.where(distance <= tolerance, indexer, -1)
return indexer
def get_indexer_non_unique(self, target):
""" return an indexer suitable for taking from a non unique index
return the labels in the same order as the target, and
return a missing indexer into the target (missing are marked as -1
in the indexer); target must be an iterable """
target = _ensure_index(target)
pself, ptarget = self._possibly_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer_non_unique(ptarget)
if self.is_all_dates:
self = Index(self.asi8)
tgt_values = target.asi8
else:
tgt_values = target._values
indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
return Index(indexer), missing
def get_indexer_for(self, target, **kwargs):
""" guaranteed return of an indexer even when non-unique """
if self.is_unique:
return self.get_indexer(target, **kwargs)
indexer, _ = self.get_indexer_non_unique(target, **kwargs)
return indexer
def _possibly_promote(self, other):
# A hack, but it works
from pandas.tseries.index import DatetimeIndex
if self.inferred_type == 'date' and isinstance(other, DatetimeIndex):
return DatetimeIndex(self), other
elif self.inferred_type == 'boolean':
if not is_object_dtype(self.dtype):
return self.astype('object'), other.astype('object')
return self, other
def groupby(self, values):
"""
Group the index labels by a given array of values.
Parameters
----------
values : array
Values used to determine the groups.
Returns
-------
groups : dict
{group name -> group labels}
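Examples
--------
Illustrative only (grouping values are arbitrary):
>>> idx = Index(['a', 'b', 'c', 'd'])
>>> idx.groupby(['x', 'y', 'x', 'y'])
>>> # -> {'x': Index(['a', 'c']), 'y': Index(['b', 'd'])}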
"""
# TODO: if we are a MultiIndex, we can do better
# than converting to tuples
from .multi import MultiIndex
if isinstance(values, MultiIndex):
values = values.values
values = _ensure_categorical(values)
result = values._reverse_indexer()
# map to the label
result = {k: self.take(v) for k, v in compat.iteritems(result)}
return result
def map(self, mapper):
"""Apply mapper function to an index.
Parameters
----------
mapper : callable
Function to be applied.
Returns
-------
applied : Union[Index, MultiIndex], inferred
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
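Examples
--------
A hedged sketch:
>>> Index([1, 2, 3]).map(lambda x: x * 10)  # -> an Index of [10, 20, 30]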
"""
from .multi import MultiIndex
mapped_values = self._arrmap(self.values, mapper)
attributes = self._get_attributes_dict()
if mapped_values.size and isinstance(mapped_values[0], tuple):
return MultiIndex.from_tuples(mapped_values,
names=attributes.get('name'))
attributes['copy'] = False
return Index(mapped_values, **attributes)
def isin(self, values, level=None):
"""
Compute boolean array of whether each index value is found in the
passed set of values.
Parameters
----------
values : set or list-like
Sought values.
.. versionadded:: 0.18.1
Support for values as a set
level : str or int, optional
Name or position of the index level to use (if the index is a
MultiIndex).
Notes
-----
If `level` is specified:
- if it is the name of one *and only one* index level, use that level;
- otherwise it should be a number indicating level position.
Returns
-------
is_contained : ndarray (boolean dtype)
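Examples
--------
Illustrative only:
>>> Index(['a', 'b', 'c']).isin(['b', 'z'])  # -> array([False, True, False])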
"""
if level is not None:
self._validate_index_level(level)
return algos.isin(np.array(self), values)
def _can_reindex(self, indexer):
"""
*this is an internal non-public method*
Check if we are allowing reindexing with this particular indexer
Parameters
----------
indexer : an integer indexer
Raises
------
ValueError if it is a duplicate axis
"""
# trying to reindex on an axis with duplicates
if not self.is_unique and len(indexer):
raise ValueError("cannot reindex from a duplicate axis")
def reindex(self, target, method=None, level=None, limit=None,
tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
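Examples
--------
A rough sketch (target labels chosen arbitrarily; 'd' is missing):
>>> idx = Index(['a', 'b', 'c'])
>>> new_idx, indexer = idx.reindex(['b', 'd'])
>>> # new_idx -> Index(['b', 'd']); indexer -> array([ 1, -1])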
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, 'name')
# GH7774: preserve dtype/tz if target is empty and not an Index.
target = _ensure_has_len(target) # target may be an iterator
if not isinstance(target, Index) and len(target) == 0:
attrs = self._get_attributes_dict()
attrs.pop('freq', None) # don't preserve freq
target = self._simple_new(None, dtype=self.dtype, **attrs)
else:
target = _ensure_index(target)
if level is not None:
if method is not None:
raise TypeError('Fill method not supported if level passed')
_, indexer, _ = self._join_level(target, level, how='right',
return_indexers=True)
else:
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(target, method=method,
limit=limit,
tolerance=tolerance)
else:
if method is not None or limit is not None:
raise ValueError("cannot reindex a non-unique index "
"with a method or limit")
indexer, missing = self.get_indexer_non_unique(target)
if preserve_names and target.nlevels == 1 and target.name != self.name:
target = target.copy()
target.name = self.name
return target, indexer
def _reindex_non_unique(self, target):
"""
*this is an internal non-public method*
Create a new index with target's values (move/add/delete values as
necessary); use with a non-unique Index and a possibly non-unique target
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
target = _ensure_index(target)
indexer, missing = self.get_indexer_non_unique(target)
check = indexer != -1
new_labels = self.take(indexer[check])
new_indexer = None
if len(missing):
l = np.arange(len(indexer))
missing = _ensure_platform_int(missing)
missing_labels = target.take(missing)
missing_indexer = _ensure_int64(l[~check])
cur_labels = self.take(indexer[check]).values
cur_indexer = _ensure_int64(l[check])
new_labels = np.empty(tuple([len(indexer)]), dtype=object)
new_labels[cur_indexer] = cur_labels
new_labels[missing_indexer] = missing_labels
# a unique indexer
if target.is_unique:
# see GH5553, make sure we use the right indexer
new_indexer = np.arange(len(indexer))
new_indexer[cur_indexer] = np.arange(len(cur_labels))
new_indexer[missing_indexer] = -1
# we have a non_unique selector, need to use the original
# indexer here
else:
# need to retake to have the same size as the indexer
indexer = indexer.values
indexer[~check] = 0
# reset the new indexer to account for the new size
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[~check] = -1
new_index = self._shallow_copy_with_infer(new_labels, freq=None)
return new_index, indexer, new_indexer
def join(self, other, how='left', level=None, return_indexers=False):
"""
*this is an internal non-public method*
Compute join_index and indexers to conform data
structures to the new index.
Parameters
----------
other : Index
how : {'left', 'right', 'inner', 'outer'}
level : int or level name, default None
return_indexers : boolean, default False
Returns
-------
join_index, (left_indexer, right_indexer)
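Examples
--------
Illustrative sketch of an outer join between two small indexes:
>>> left = Index([1, 2, 3])
>>> right = Index([2, 3, 4])
>>> left.join(right, how='outer')  # -> an Index of [1, 2, 3, 4]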
"""
from .multi import MultiIndex
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# try to figure out the join level
# GH3662
if level is None and (self_is_mi or other_is_mi):
# have the same levels/names so a simple join
if self.names == other.names:
pass
else:
return self._join_multi(other, how=how,
return_indexers=return_indexers)
# join on the level
if level is not None and (self_is_mi or other_is_mi):
return self._join_level(other, level, how=how,
return_indexers=return_indexers)
other = _ensure_index(other)
if len(other) == 0 and how in ('left', 'outer'):
join_index = self._shallow_copy()
if return_indexers:
rindexer = np.repeat(-1, len(join_index))
return join_index, None, rindexer
else:
return join_index
if len(self) == 0 and how in ('right', 'outer'):
join_index = other._shallow_copy()
if return_indexers:
lindexer = np.repeat(-1, len(join_index))
return join_index, lindexer, None
else:
return join_index
if self._join_precedence < other._join_precedence:
how = {'right': 'left', 'left': 'right'}.get(how, how)
result = other.join(self, how=how, level=level,
return_indexers=return_indexers)
if return_indexers:
x, y, z = result
result = x, z, y
return result
if not is_dtype_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.join(other, how=how, return_indexers=return_indexers)
_validate_join_method(how)
if not self.is_unique and not other.is_unique:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif not self.is_unique or not other.is_unique:
if self.is_monotonic and other.is_monotonic:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
else:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif self.is_monotonic and other.is_monotonic:
try:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
except TypeError:
pass
if how == 'left':
join_index = self
elif how == 'right':
join_index = other
elif how == 'inner':
join_index = self.intersection(other)
elif how == 'outer':
join_index = self.union(other)
if return_indexers:
if join_index is self:
lindexer = None
else:
lindexer = self.get_indexer(join_index)
if join_index is other:
rindexer = None
else:
rindexer = other.get_indexer(join_index)
return join_index, lindexer, rindexer
else:
return join_index
def _join_multi(self, other, how, return_indexers=True):
from .multi import MultiIndex
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# figure out join names
self_names = [n for n in self.names if n is not None]
other_names = [n for n in other.names if n is not None]
overlap = list(set(self_names) & set(other_names))
# need at least 1 in common, but not more than 1
if not len(overlap):
raise ValueError("cannot join with no level specified and no "
"overlapping names")
if len(overlap) > 1:
raise NotImplementedError("merging with more than one level "
"overlap on a multi-index is not "
"implemented")
jl = overlap[0]
# make the indices into mi's that match
if not (self_is_mi and other_is_mi):
flip_order = False
if self_is_mi:
self, other = other, self
flip_order = True
# flip if join method is right or left
how = {'right': 'left', 'left': 'right'}.get(how, how)
level = other.names.index(jl)
result = self._join_level(other, level, how=how,
return_indexers=return_indexers)
if flip_order:
if isinstance(result, tuple):
return result[0], result[2], result[1]
return result
# 2 multi-indexes
raise NotImplementedError("merging with both multi-indexes is not "
"implemented")
def _join_non_unique(self, other, how='left', return_indexers=False):
from pandas.tools.merge import _get_join_indexers
left_idx, right_idx = _get_join_indexers([self.values],
[other._values], how=how,
sort=True)
left_idx = _ensure_platform_int(left_idx)
right_idx = _ensure_platform_int(right_idx)
join_index = self.values.take(left_idx)
mask = left_idx == -1
np.putmask(join_index, mask, other._values.take(right_idx))
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
return join_index, left_idx, right_idx
else:
return join_index
def _join_level(self, other, level, how='left', return_indexers=False,
keep_order=True):
"""
The join method *only* affects the level of the resulting
MultiIndex. Otherwise it just exactly aligns the Index data to the
labels of the level in the MultiIndex. If `keep_order` == True, the
order of the data indexed by the MultiIndex will not be changed;
otherwise, it will tie out with `other`.
"""
from pandas.algos import groupsort_indexer
from .multi import MultiIndex
def _get_leaf_sorter(labels):
"""
returns sorter for the innermost level while preserving the
order of higher levels
"""
if labels[0].size == 0:
return np.empty(0, dtype='int64')
if len(labels) == 1:
lab = _ensure_int64(labels[0])
sorter, _ = groupsort_indexer(lab, 1 + lab.max())
return sorter
# find indexers of beginning of each set of
# same-key labels w.r.t all but last level
tic = labels[0][:-1] != labels[0][1:]
for lab in labels[1:-1]:
tic |= lab[:-1] != lab[1:]
starts = np.hstack(([True], tic, [True])).nonzero()[0]
lab = _ensure_int64(labels[-1])
return lib.get_level_sorter(lab, _ensure_int64(starts))
if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
raise TypeError('Join on level between two MultiIndex objects '
'is ambiguous')
left, right = self, other
flip_order = not isinstance(self, MultiIndex)
if flip_order:
left, right = right, left
how = {'right': 'left', 'left': 'right'}.get(how, how)
level = left._get_level_number(level)
old_level = left.levels[level]
if not right.is_unique:
raise NotImplementedError('Index._join_level on non-unique index '
'is not implemented')
new_level, left_lev_indexer, right_lev_indexer = \
old_level.join(right, how=how, return_indexers=True)
if left_lev_indexer is None:
if keep_order or len(left) == 0:
left_indexer = None
join_index = left
else: # sort the leaves
left_indexer = _get_leaf_sorter(left.labels[:level + 1])
join_index = left[left_indexer]
else:
left_lev_indexer = _ensure_int64(left_lev_indexer)
rev_indexer = lib.get_reverse_indexer(left_lev_indexer,
len(old_level))
new_lev_labels = algos.take_nd(rev_indexer, left.labels[level],
allow_fill=False)
new_labels = list(left.labels)
new_labels[level] = new_lev_labels
new_levels = list(left.levels)
new_levels[level] = new_level
if keep_order: # just drop missing values. o.w. keep order
left_indexer = np.arange(len(left), dtype=np.intp)
mask = new_lev_labels != -1
if not mask.all():
new_labels = [lab[mask] for lab in new_labels]
left_indexer = left_indexer[mask]
else: # tie out the order with other
if level == 0:  # outermost level, take the fast route
ngroups = 1 + new_lev_labels.max()
left_indexer, counts = groupsort_indexer(new_lev_labels,
ngroups)
# missing values are placed first; drop them!
left_indexer = left_indexer[counts[0]:]
new_labels = [lab[left_indexer] for lab in new_labels]
else: # sort the leaves
mask = new_lev_labels != -1
mask_all = mask.all()
if not mask_all:
new_labels = [lab[mask] for lab in new_labels]
left_indexer = _get_leaf_sorter(new_labels[:level + 1])
new_labels = [lab[left_indexer] for lab in new_labels]
# left_indexers are w.r.t masked frame.
# reverse to original frame!
if not mask_all:
left_indexer = mask.nonzero()[0][left_indexer]
join_index = MultiIndex(levels=new_levels, labels=new_labels,
names=left.names, verify_integrity=False)
if right_lev_indexer is not None:
right_indexer = algos.take_nd(right_lev_indexer,
join_index.labels[level],
allow_fill=False)
else:
right_indexer = join_index.labels[level]
if flip_order:
left_indexer, right_indexer = right_indexer, left_indexer
if return_indexers:
left_indexer = (None if left_indexer is None
else _ensure_platform_int(left_indexer))
right_indexer = (None if right_indexer is None
else _ensure_platform_int(right_indexer))
return join_index, left_indexer, right_indexer
else:
return join_index
def _join_monotonic(self, other, how='left', return_indexers=False):
if self.equals(other):
ret_index = other if how == 'right' else self
if return_indexers:
return ret_index, None, None
else:
return ret_index
sv = self._values
ov = other._values
if self.is_unique and other.is_unique:
# We can perform much better than the general case
if how == 'left':
join_index = self
lidx = None
ridx = self._left_indexer_unique(sv, ov)
elif how == 'right':
join_index = other
lidx = self._left_indexer_unique(ov, sv)
ridx = None
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
else:
if how == 'left':
join_index, lidx, ridx = self._left_indexer(sv, ov)
elif how == 'right':
join_index, ridx, lidx = self._left_indexer(ov, sv)
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
lidx = None if lidx is None else _ensure_platform_int(lidx)
ridx = None if ridx is None else _ensure_platform_int(ridx)
return join_index, lidx, ridx
else:
return join_index
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return Index(joined, name=name)
def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
# this is for partial string indexing,
# overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex
raise NotImplementedError
def slice_indexer(self, start=None, end=None, step=None, kind=None):
"""
For an ordered Index, compute the slice indexer for input labels and
step
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, default None
kind : string, default None
Returns
-------
indexer : ndarray or slice
Notes
-----
This function assumes that the data is sorted, so use at your own peril
"""
start_slice, end_slice = self.slice_locs(start, end, step=step,
kind=kind)
# return a slice
if not is_scalar(start_slice):
raise AssertionError("Start slice bound is non-scalar")
if not is_scalar(end_slice):
raise AssertionError("End slice bound is non-scalar")
return slice(start_slice, end_slice, step)
def _maybe_cast_indexer(self, key):
"""
If we have a float key and are not a floating index
then try to cast to an int if equivalent
"""
if is_float(key) and not self.is_floating():
try:
ckey = int(key)
if ckey == key:
key = ckey
except (ValueError, TypeError):
pass
return key
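    # Illustrative sketch (not from the original source): with an integer-based
    # index, an integral float key is cast down while a fractional one is left
    # untouched, e.g.
    #   Index([1, 2, 3])._maybe_cast_indexer(2.0)  ->  2
    #   Index([1, 2, 3])._maybe_cast_indexer(2.5)  ->  2.5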
def _validate_indexer(self, form, key, kind):
"""
        If we are a positional indexer ('iloc' or 'getitem'), validate that the
        key has the appropriate type: it must be an integer (or None).
"""
assert kind in ['ix', 'loc', 'getitem', 'iloc']
if key is None:
pass
elif is_integer(key):
pass
elif kind in ['iloc', 'getitem']:
self._invalid_indexer(form, key)
return key
def _maybe_cast_slice_bound(self, label, side, kind):
"""
This function should be overloaded in subclasses that allow non-trivial
casting on label-slice bounds, e.g. datetime-like indices allowing
strings containing formatted datetimes.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ['ix', 'loc', 'getitem', None]
        # We are a plain index here (sub-classes override this method if they
        # wish to have special treatment for floats/ints, e.g. Float64Index and
        # datetimelike Indexes)
# reject them
if is_float(label):
if not (kind in ['ix'] and (self.holds_integer() or
self.is_floating())):
self._invalid_indexer('slice', label)
# we are trying to find integer bounds on a non-integer based index
# this is rejected (generally .loc gets you here)
elif is_integer(label):
self._invalid_indexer('slice', label)
return label
def _searchsorted_monotonic(self, label, side='left'):
if self.is_monotonic_increasing:
return self.searchsorted(label, side=side)
elif self.is_monotonic_decreasing:
# np.searchsorted expects ascending sort order, have to reverse
# everything for it to work (element ordering, search side and
# resulting value).
pos = self[::-1].searchsorted(label, side='right' if side == 'left'
                                          else 'left')
return len(self) - pos
raise ValueError('index must be monotonic increasing or decreasing')
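    # Illustrative sketch (not from the original source): for a decreasing index
    # such as Index([30, 20, 10]), label 20 with side='left' resolves to
    # position 1, i.e. 3 - searchsorted([10, 20, 30], 20, side='right'),
    # mirroring np.searchsorted on the ascending reversal.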
def get_slice_bound(self, label, side, kind):
"""
Calculate slice bound that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
"""
assert kind in ['ix', 'loc', 'getitem', None]
if side not in ('left', 'right'):
raise ValueError("Invalid value for side kwarg,"
" must be either 'left' or 'right': %s" %
(side, ))
original_label = label
# For datetime indices label may be a string that has to be converted
# to datetime boundary according to its resolution.
label = self._maybe_cast_slice_bound(label, side, kind)
# we need to look up the label
try:
slc = self.get_loc(label)
except KeyError as err:
try:
return self._searchsorted_monotonic(label, side)
except ValueError:
# raise the original KeyError
raise err
if isinstance(slc, np.ndarray):
# get_loc may return a boolean array or an array of indices, which
# is OK as long as they are representable by a slice.
if is_bool_dtype(slc):
slc = lib.maybe_booleans_to_slice(slc.view('u1'))
else:
slc = lib.maybe_indices_to_slice(slc.astype('i8'), len(self))
if isinstance(slc, np.ndarray):
raise KeyError("Cannot get %s slice bound for non-unique "
"label: %r" % (side, original_label))
if isinstance(slc, slice):
if side == 'left':
return slc.start
else:
return slc.stop
else:
if side == 'right':
return slc + 1
else:
return slc
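    # Illustrative sketch (not from the original source): on Index(list('abcdef')),
    #   get_slice_bound('c', 'left', 'loc')   ->  2
    #   get_slice_bound('c', 'right', 'loc')  ->  3
    # i.e. the half-open positional bounds a label-based slice would use.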
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
Compute slice locations for input labels.
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
        step : int, default None
If None, defaults to 1
kind : {'ix', 'loc', 'getitem'} or None
Returns
-------
start, end : int
"""
inc = (step is None or step >= 0)
if not inc:
# If it's a reverse slice, temporarily swap bounds.
start, end = end, start
start_slice = None
if start is not None:
start_slice = self.get_slice_bound(start, 'left', kind)
if start_slice is None:
start_slice = 0
end_slice = None
if end is not None:
end_slice = self.get_slice_bound(end, 'right', kind)
if end_slice is None:
end_slice = len(self)
if not inc:
# Bounds at this moment are swapped, swap them back and shift by 1.
#
# slice_locs('B', 'A', step=-1): s='B', e='A'
#
# s='A' e='B'
# AFTER SWAP: | |
# v ------------------> V
# -----------------------------------
# | | |A|A|A|A| | | | | |B|B| | | | |
# -----------------------------------
# ^ <------------------ ^
# SHOULD BE: | |
# end=s-1 start=e-1
#
end_slice, start_slice = start_slice - 1, end_slice - 1
# i == -1 triggers ``len(self) + i`` selection that points to the
# last element, not before-the-first one, subtracting len(self)
# compensates that.
if end_slice == -1:
end_slice -= len(self)
if start_slice == -1:
start_slice -= len(self)
return start_slice, end_slice
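    # Illustrative sketch (not from the original source):
    #   Index(list('abcdef')).slice_locs('b', 'd')           ->  (1, 4)
    #   Index(list('abcdef')).slice_locs('d', 'b', step=-1)  ->  (3, 0)
    # matching the positions used by .loc['b':'d'] and .loc['d':'b':-1].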
def delete(self, loc):
"""
Make new Index with passed location(-s) deleted
Returns
-------
new_index : Index
"""
return self._shallow_copy(np.delete(self._data, loc))
def insert(self, loc, item):
"""
Make new Index inserting new item at location. Follows
Python list.append semantics for negative values
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
"""
_self = np.asarray(self)
item = self._coerce_scalar_to_index(item)._values
idx = np.concatenate((_self[:loc], item, _self[loc:]))
return self._shallow_copy_with_infer(idx)
def drop(self, labels, errors='raise'):
"""
Make new Index with passed list of labels deleted
Parameters
----------
labels : array-like
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
dropped : Index
"""
labels = com._index_labels_to_array(labels)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
if errors != 'ignore':
raise ValueError('labels %s not contained in axis' %
labels[mask])
indexer = indexer[~mask]
return self.delete(indexer)
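    # Illustrative sketch (not from the original source):
    #   Index(['a', 'b', 'c']).drop(['b'])                   ->  Index(['a', 'c'])
    #   Index(['a', 'b', 'c']).drop(['x'], errors='ignore')  ->  Index(['a', 'b', 'c'])
    # while the default errors='raise' turns a missing label into a ValueError.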
@Appender(base._shared_docs['unique'] % _index_doc_kwargs)
def unique(self):
result = super(Index, self).unique()
return self._shallow_copy(result)
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
False: 'first'})
@Appender(base._shared_docs['drop_duplicates'] % _index_doc_kwargs)
def drop_duplicates(self, keep='first'):
return super(Index, self).drop_duplicates(keep=keep)
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last',
False: 'first'})
@Appender(base._shared_docs['duplicated'] % _index_doc_kwargs)
def duplicated(self, keep='first'):
return super(Index, self).duplicated(keep=keep)
_index_shared_docs['fillna'] = """
Fill NA/NaN values with the specified value
Parameters
----------
value : scalar
Scalar value to use to fill holes (e.g. 0).
            This value cannot be a list-like.
downcast : dict, default is None
a dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible)
Returns
-------
filled : %(klass)s
"""
@Appender(_index_shared_docs['fillna'])
def fillna(self, value=None, downcast=None):
self._assert_can_do_op(value)
if self.hasnans:
result = self.putmask(self._isnan, value)
if downcast is None:
# no need to care metadata other than name
                # because it can't have freq if it has NaNs
return Index(result, name=self.name)
return self._shallow_copy()
_index_shared_docs['dropna'] = """
Return Index without NA/NaN values
Parameters
----------
how : {'any', 'all'}, default 'any'
If the Index is a MultiIndex, drop the value when any or all levels
are NaN.
Returns
-------
valid : Index
"""
@Appender(_index_shared_docs['dropna'])
def dropna(self, how='any'):
if how not in ('any', 'all'):
raise ValueError("invalid how option: {0}".format(how))
if self.hasnans:
return self._shallow_copy(self.values[~self._isnan])
return self._shallow_copy()
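    # Illustrative sketch (not from the original source):
    #   Index([1.0, np.nan, 3.0]).fillna(0)  ->  Index([1.0, 0.0, 3.0])
    #   Index([1.0, np.nan, 3.0]).dropna()   ->  Index([1.0, 3.0])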
def _evaluate_with_timedelta_like(self, other, op, opstr):
raise TypeError("can only perform ops with timedelta like values")
def _evaluate_with_datetime_like(self, other, op, opstr):
raise TypeError("can only perform ops with datetime like values")
    def _evaluate_compare(self, other, op):
raise base.AbstractMethodError(self)
@classmethod
def _add_comparison_methods(cls):
""" add in comparison methods """
def _make_compare(op):
def _evaluate_compare(self, other):
if isinstance(other, (np.ndarray, Index, ABCSeries)):
if other.ndim > 0 and len(self) != len(other):
raise ValueError('Lengths must match to compare')
# we may need to directly compare underlying
# representations
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
if is_object_dtype(self) and self.nlevels == 1:
# don't pass MultiIndex
with np.errstate(all='ignore'):
result = _comp_method_OBJECT_ARRAY(
op, self.values, other)
else:
with np.errstate(all='ignore'):
result = op(self.values, np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except TypeError:
return result
return _evaluate_compare
cls.__eq__ = _make_compare(operator.eq)
cls.__ne__ = _make_compare(operator.ne)
cls.__lt__ = _make_compare(operator.lt)
cls.__gt__ = _make_compare(operator.gt)
cls.__le__ = _make_compare(operator.le)
cls.__ge__ = _make_compare(operator.ge)
@classmethod
def _add_numeric_methods_add_sub_disabled(cls):
""" add in the numeric add/sub methods to disable """
def _make_invalid_op(name):
def invalid_op(self, other=None):
raise TypeError("cannot perform {name} with this index type: "
"{typ}".format(name=name, typ=type(self)))
invalid_op.__name__ = name
return invalid_op
        cls.__add__ = cls.__radd__ = cls.__iadd__ = _make_invalid_op('__add__')
        cls.__sub__ = cls.__isub__ = _make_invalid_op('__sub__')
@classmethod
def _add_numeric_methods_disabled(cls):
""" add in numeric methods to disable other than add/sub """
def _make_invalid_op(name):
def invalid_op(self, other=None):
raise TypeError("cannot perform {name} with this index type: "
"{typ}".format(name=name, typ=type(self)))
invalid_op.__name__ = name
return invalid_op
cls.__pow__ = cls.__rpow__ = _make_invalid_op('__pow__')
cls.__mul__ = cls.__rmul__ = _make_invalid_op('__mul__')
cls.__floordiv__ = cls.__rfloordiv__ = _make_invalid_op('__floordiv__')
cls.__truediv__ = cls.__rtruediv__ = _make_invalid_op('__truediv__')
if not compat.PY3:
cls.__div__ = cls.__rdiv__ = _make_invalid_op('__div__')
cls.__neg__ = _make_invalid_op('__neg__')
cls.__pos__ = _make_invalid_op('__pos__')
cls.__abs__ = _make_invalid_op('__abs__')
cls.__inv__ = _make_invalid_op('__inv__')
def _maybe_update_attributes(self, attrs):
""" Update Index attributes (e.g. freq) depending on op """
return attrs
def _validate_for_numeric_unaryop(self, op, opstr):
""" validate if we can perform a numeric unary operation """
if not self._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op "
"{opstr} for type: {typ}".format(
opstr=opstr,
typ=type(self))
)
def _validate_for_numeric_binop(self, other, op, opstr):
"""
return valid other, evaluate or raise TypeError
if we are not of the appropriate type
internal method called by ops
"""
from pandas.tseries.offsets import DateOffset
# if we are an inheritor of numeric,
        # but not actually numeric (e.g. DatetimeIndex/PeriodIndex)
if not self._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op {opstr} "
"for type: {typ}".format(
opstr=opstr,
typ=type(self))
)
if isinstance(other, Index):
if not other._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op "
"{opstr} with type: {typ}".format(
                                    opstr=opstr,
typ=type(other))
)
elif isinstance(other, np.ndarray) and not other.ndim:
other = other.item()
if isinstance(other, (Index, ABCSeries, np.ndarray)):
if len(self) != len(other):
raise ValueError("cannot evaluate a numeric op with "
"unequal lengths")
other = _values_from_object(other)
if other.dtype.kind not in ['f', 'i']:
raise TypeError("cannot evaluate a numeric op "
"with a non-numeric dtype")
elif isinstance(other, (DateOffset, np.timedelta64,
Timedelta, datetime.timedelta)):
# higher up to handle
pass
elif isinstance(other, (Timestamp, np.datetime64)):
# higher up to handle
pass
else:
if not (is_float(other) or is_integer(other)):
raise TypeError("can only perform ops with scalar values")
return other
@classmethod
def _add_numeric_methods_binary(cls):
""" add in numeric methods """
def _make_evaluate_binop(op, opstr, reversed=False, constructor=Index):
def _evaluate_numeric_binop(self, other):
from pandas.tseries.offsets import DateOffset
other = self._validate_for_numeric_binop(other, op, opstr)
# handle time-based others
if isinstance(other, (DateOffset, np.timedelta64,
Timedelta, datetime.timedelta)):
return self._evaluate_with_timedelta_like(other, op, opstr)
elif isinstance(other, (Timestamp, np.datetime64)):
return self._evaluate_with_datetime_like(other, op, opstr)
                # if we are a reversed non-commutative op
values = self.values
if reversed:
values, other = other, values
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
with np.errstate(all='ignore'):
result = op(values, other)
return constructor(result, **attrs)
return _evaluate_numeric_binop
cls.__add__ = cls.__radd__ = _make_evaluate_binop(
operator.add, '__add__')
cls.__sub__ = _make_evaluate_binop(
operator.sub, '__sub__')
cls.__rsub__ = _make_evaluate_binop(
operator.sub, '__sub__', reversed=True)
cls.__mul__ = cls.__rmul__ = _make_evaluate_binop(
operator.mul, '__mul__')
cls.__pow__ = cls.__rpow__ = _make_evaluate_binop(
operator.pow, '__pow__')
cls.__mod__ = _make_evaluate_binop(
operator.mod, '__mod__')
cls.__floordiv__ = _make_evaluate_binop(
operator.floordiv, '__floordiv__')
cls.__rfloordiv__ = _make_evaluate_binop(
operator.floordiv, '__floordiv__', reversed=True)
cls.__truediv__ = _make_evaluate_binop(
operator.truediv, '__truediv__')
cls.__rtruediv__ = _make_evaluate_binop(
operator.truediv, '__truediv__', reversed=True)
if not compat.PY3:
cls.__div__ = _make_evaluate_binop(
operator.div, '__div__')
cls.__rdiv__ = _make_evaluate_binop(
operator.div, '__div__', reversed=True)
cls.__divmod__ = _make_evaluate_binop(
divmod,
'__divmod__',
constructor=lambda result, **attrs: (
Index(result[0], **attrs),
Index(result[1], **attrs),
),
)
@classmethod
def _add_numeric_methods_unary(cls):
""" add in numeric unary methods """
def _make_evaluate_unary(op, opstr):
def _evaluate_numeric_unary(self):
self._validate_for_numeric_unaryop(op, opstr)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(op(self.values), **attrs)
return _evaluate_numeric_unary
cls.__neg__ = _make_evaluate_unary(lambda x: -x, '__neg__')
cls.__pos__ = _make_evaluate_unary(lambda x: x, '__pos__')
cls.__abs__ = _make_evaluate_unary(np.abs, '__abs__')
cls.__inv__ = _make_evaluate_unary(lambda x: -x, '__inv__')
@classmethod
def _add_numeric_methods(cls):
cls._add_numeric_methods_unary()
cls._add_numeric_methods_binary()
@classmethod
def _add_logical_methods(cls):
""" add in logical methods """
_doc = """
%(desc)s
Parameters
----------
All arguments to numpy.%(outname)s are accepted.
Returns
-------
%(outname)s : bool or array_like (if axis is specified)
A single element array_like may be converted to bool."""
def _make_logical_function(name, desc, f):
@Substitution(outname=name, desc=desc)
@Appender(_doc)
def logical_func(self, *args, **kwargs):
result = f(self.values)
if (isinstance(result, (np.ndarray, ABCSeries, Index)) and
result.ndim == 0):
# return NumPy type
return result.dtype.type(result.item())
else: # pragma: no cover
return result
logical_func.__name__ = name
return logical_func
cls.all = _make_logical_function('all', 'Return whether all elements '
'are True',
np.all)
cls.any = _make_logical_function('any',
'Return whether any element is True',
np.any)
@classmethod
def _add_logical_methods_disabled(cls):
""" add in logical methods to disable """
def _make_invalid_op(name):
def invalid_op(self, other=None):
raise TypeError("cannot perform {name} with this index type: "
"{typ}".format(name=name, typ=type(self)))
invalid_op.__name__ = name
return invalid_op
cls.all = _make_invalid_op('all')
cls.any = _make_invalid_op('any')
Index._add_numeric_methods_disabled()
Index._add_logical_methods()
Index._add_comparison_methods()
def _ensure_index(index_like, copy=False):
if isinstance(index_like, Index):
if copy:
index_like = index_like.copy()
return index_like
if hasattr(index_like, 'name'):
return Index(index_like, name=index_like.name, copy=copy)
# must check for exactly list here because of strict type
# check in clean_index_list
if isinstance(index_like, list):
if type(index_like) != list:
index_like = list(index_like)
# 2200 ?
converted, all_arrays = lib.clean_index_list(index_like)
if len(converted) > 0 and all_arrays:
from .multi import MultiIndex
return MultiIndex.from_arrays(converted)
else:
index_like = converted
else:
# clean_index_list does the equivalent of copying
# so only need to do this if not list instance
if copy:
from copy import copy
index_like = copy(index_like)
return Index(index_like)
def _get_na_value(dtype):
return {np.datetime64: tslib.NaT,
np.timedelta64: tslib.NaT}.get(dtype, np.nan)
def _ensure_frozen(array_like, categories, copy=False):
array_like = _coerce_indexer_dtype(array_like, categories)
array_like = array_like.view(FrozenNDArray)
if copy:
array_like = array_like.copy()
return array_like
def _ensure_has_len(seq):
"""If seq is an iterator, put its values into a list."""
try:
len(seq)
except TypeError:
return list(seq)
else:
return seq
def _trim_front(strings):
"""
    Trims the leading spaces common to all strings
"""
trimmed = strings
while len(strings) > 0 and all([x[0] == ' ' for x in trimmed]):
trimmed = [x[1:] for x in trimmed]
return trimmed
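# Illustrative sketch (not from the original source):
#   _trim_front(['  10', '   9'])  ->  ['10', ' 9']
# i.e. only the leading spaces shared by every string are removed.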
def _validate_join_method(method):
if method not in ['left', 'right', 'inner', 'outer']:
raise ValueError('do not recognize join method %s' % method)
| 34.937567
| 83
| 0.551068
|
03173d8098eebf2d45f338127756a5883be03511
| 1,040
|
py
|
Python
|
datadog_checks_base/datadog_checks/base/utils/prometheus/functions.py
|
mchelen-gov/integrations-core
|
81281600b3cc7025a7a32148c59620c9592a564f
|
[
"BSD-3-Clause"
] | 663
|
2016-08-23T05:23:45.000Z
|
2022-03-29T00:37:23.000Z
|
datadog_checks_base/datadog_checks/base/utils/prometheus/functions.py
|
mchelen-gov/integrations-core
|
81281600b3cc7025a7a32148c59620c9592a564f
|
[
"BSD-3-Clause"
] | 6,642
|
2016-06-09T16:29:20.000Z
|
2022-03-31T22:24:09.000Z
|
datadog_checks_base/datadog_checks/base/utils/prometheus/functions.py
|
mchelen-gov/integrations-core
|
81281600b3cc7025a7a32148c59620c9592a564f
|
[
"BSD-3-Clause"
] | 1,222
|
2017-01-27T15:51:38.000Z
|
2022-03-31T18:17:51.000Z
|
# (C) Datadog, Inc. 2016-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
from google.protobuf.internal.decoder import _DecodeVarint32 # pylint: disable=E0611,E0401
from . import metrics_pb2
# Deprecated, please use the PrometheusCheck class
def parse_metric_family(buf):
"""
Parse the binary buffer in input, searching for Prometheus messages
of type MetricFamily [0] delimited by a varint32 [1].
[0] https://github.com/prometheus/client_model/blob/086fe7ca28bde6cec2acd5223423c1475a362858/metrics.proto#L76-%20%20L81 # noqa: E501
[1] https://developers.google.com/protocol-buffers/docs/reference/java/com/google/protobuf/AbstractMessageLite#writeDelimitedTo(java.io.OutputStream) # noqa: E501
"""
n = 0
while n < len(buf):
msg_len, new_pos = _DecodeVarint32(buf, n)
n = new_pos
msg_buf = buf[n : n + msg_len]
n += msg_len
message = metrics_pb2.MetricFamily()
message.ParseFromString(msg_buf)
yield message
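# Illustrative usage sketch (not part of the original module); the URL and the
# use of `requests` below are assumptions, and the endpoint must serve the
# protobuf-delimited exposition format for this to parse:
#
#   import requests
#   payload = requests.get('http://localhost:9090/metrics').content
#   for family in parse_metric_family(payload):
#       print(family.name, len(family.metric))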
| 35.862069
| 167
| 0.714423
|
c6f9855ea24bab4a6fe1a0d931a21d7371e5fa5d
| 4,027
|
py
|
Python
|
src/evaluate.py
|
Fantoni0/RingCTPerformance
|
84fc27f052919625a0db4d905614b52f693e59a3
|
[
"MIT"
] | 2
|
2020-11-14T05:33:06.000Z
|
2021-01-15T10:31:21.000Z
|
src/evaluate.py
|
Fantoni0/RingCTPerformance
|
84fc27f052919625a0db4d905614b52f693e59a3
|
[
"MIT"
] | null | null | null |
src/evaluate.py
|
Fantoni0/RingCTPerformance
|
84fc27f052919625a0db4d905614b52f693e59a3
|
[
"MIT"
] | 1
|
2021-08-12T18:23:16.000Z
|
2021-08-12T18:23:16.000Z
|
# Standard library imports
import secrets
from timeit import default_timer as timer
import os
# Custom imports
from src.ringCT import sign, verify
from src.utils import utils
from src.plot import plot
# Third party library
from joblib import Parallel, delayed
def auxSign(index, parameters, signatures):
"""
Auxiliary function to parallelize the generation of ring signatures.
:param index: Index used to access signatures.
:param parameters: Parameters of the ring signature.
:param signatures: List of crafted signatures. Data structure used to store generated signatures.
:return:
"""
# Create and store signature
signatures[index] = sign(parameters[0], # List of public keys
parameters[1], # Signer index
parameters[2], # Signer's private key
parameters[3], # Message to sign
parameters[4]) # Curve used
return signatures
def auxVerify(index, parameters, signatures, used_keys):
"""
Auxiliary function to parallelize the verification of ring signatures.
:param index: Index used to access signatures.
:param parameters: Parameters of the ring signature.
:param signatures: List of signatures to verify. Data structure used to store generated signatures.
:param used_keys: List of already used keys.
:return: {True, False}
"""
return verify(signatures[index][0], # List of public keys
signatures[index][1], # Key image
signatures[index][2], # Seed
signatures[index][3], # List of random numbers
used_keys,
parameters[3], # Message
parameters[4]) # Curve used
def evaluate(args):
"""
Evaluates the performance of ring signatures (sign and verify algorithms) under
the different parameters provided in args.
:param args: Object containing the parameters such as ring size, curves to evaluate
:return:
"""
total_stimes = []
total_vtimes = []
num_cpus = os.cpu_count()
for c in args.curves:
# Define the used keys
max_size = max(args.ringsizes)
pair_keys = [utils.generateKeyPair(c) for _ in range(max_size)]
public_keys = [pair_keys[i][1] for i in range(len(pair_keys))]
private_keys = [pair_keys[i][0] for i in range(len(pair_keys))]
used_keys = []
stimes = []
vtimes = []
for rs in args.ringsizes:
keys = public_keys[:rs]
signer = secrets.randbelow(rs)
# Simulate signatures and verifications
it = 64 # Number of signatures crafted/verified in parallel
parameters = keys, signer, private_keys[signer], args.message, c
signatures = [None for _ in range(it)]
# Sign
t0 = timer()
signatures = Parallel(n_jobs=num_cpus)(delayed(auxSign)(i, parameters, signatures) for i in range(it))
sign_time = timer() - t0
stimes.append(sign_time / it)
# Each parallel job returns a different list.
# We get a matrix with elements in the diagonal.
# We apply list comprehension to get a single non-empty list.
signatures = [signatures[i][i] for i in range(it)]
# Verify
t0 = timer()
Parallel(n_jobs=num_cpus)(delayed(auxVerify)(i, parameters, signatures, used_keys) for i in range(it))
verify_time = timer() - t0
vtimes.append(verify_time / it)
total_stimes.append(stimes)
total_vtimes.append(vtimes)
# Plot signing times
plot(args.ringsizes, 'Ring size', total_stimes, 'Time in seconds',
args.curves, 'Time to craft a signature', 'graph', save_csv=True)
# Plot verification times
plot(args.ringsizes, 'Ring size', total_vtimes, 'Time in seconds',
args.curves, 'Time to verify a signature', 'graph', save_csv=True)
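# Illustrative usage sketch (not part of the original module); the curve name,
# ring sizes and message below are placeholders, not values taken from the project:
#
#   from argparse import Namespace
#   evaluate(Namespace(curves=['ed25519'], ringsizes=[2, 4, 8], message='benchmark'))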
| 38.721154
| 114
| 0.625528
|
a74f41b8c63e9716f46430fe18d6b543d0682cb3
| 8,258
|
py
|
Python
|
device/app.py
|
panjanek/IotCenter
|
e139617d14617c10a18c35515e2d3aaae797bcac
|
[
"MIT"
] | 2
|
2016-12-12T15:16:16.000Z
|
2018-10-30T02:35:36.000Z
|
device/app.py
|
panjanek/IotCenter
|
e139617d14617c10a18c35515e2d3aaae797bcac
|
[
"MIT"
] | null | null | null |
device/app.py
|
panjanek/IotCenter
|
e139617d14617c10a18c35515e2d3aaae797bcac
|
[
"MIT"
] | null | null | null |
import logging
import threading
import json
import base64
import os
from subprocess import Popen
import glob
import time
import urllib2
import re
import string
import datetime
class DeviceHandler:
logger = logging.getLogger()
def __init__(self, config):
self.service = None
self.tunnel = None
self.video = None
self.config = config
self.first = True
        self.counter = 1
self.uploadfile = '/tmp/upload.txt'
def start(self):
self.logger.info("starting device handler")
def getMessagePayload(self):
self.logger.debug("Preparing client->device message payload")
gputemp = os.popen("vcgencmd measure_temp").readline().replace("temp=","").replace("'C","")
cputemp = os.popen("cat /sys/class/thermal/thermal_zone0/temp").readline()
payloadDict = {"values":{}}
payloadDict["mid"] = self.counter
self.counter += 1
payloadDict["values"]["status"] = 1
payloadDict["values"]["gpu_temp"] = float(gputemp)
payloadDict["values"]["cpu_temp"] = float(cputemp) / 1000
log = self.getLogToUpload()
if log is not None:
payloadDict["log"] = log
payload = json.dumps(payloadDict)
return payload
def getLogToUpload(self):
log = None
if self.first:
self.first = False
with open(self.uploadfile, "a") as upfile:
                upfile.write("First message, communication started\n")
uploadfiletmp = self.uploadfile + ".tmp"
if os.path.exists(self.uploadfile) and os.path.getsize(self.uploadfile) > 0:
with open(self.uploadfile, 'r+') as upfile:
content = upfile.read()
upfile.truncate(0)
self.logger.info("found log data to upload: {0}, moving to {1}".format(content, uploadfiletmp))
with open(uploadfiletmp, "a") as tmpfile:
tmpfile.write(content)
if os.path.exists(uploadfiletmp) and os.path.getsize(uploadfiletmp) > 0:
with open(uploadfiletmp, 'r') as tmpfile:
toupload = tmpfile.read()
log = toupload
return log
def handleServerCall(self, payload):
self.logger.info("Handling server callback with payload {0}".format(payload))
payloadDict = json.loads(payload)
if "ack" in payloadDict:
mid = payloadDict["ack"]
self.logger.info("received ack for mid {0}".format(mid))
uploadfiletmp = self.uploadfile + ".tmp"
if mid == self.counter - 1 and os.path.exists(uploadfiletmp) and os.path.getsize(uploadfiletmp) > 0:
self.logger.info("Removing file {0}".format(uploadfiletmp))
os.remove(uploadfiletmp)
if "command" in payloadDict:
command = payloadDict["command"]
self.logger.info("Received command: {0}".format(command))
if command == "blink":
self.logger.info("Blinking status LED")
os.system("echo none | sudo tee /sys/class/leds/led0/trigger")
                # blink the status LED a few times and leave it switched on
                for i, brightness in enumerate((1, 0, 1, 0, 1, 0, 1)):
                    if i:
                        time.sleep(0.5)
                    os.system("echo {0} | sudo tee /sys/class/leds/led0/brightness".format(brightness))
elif command == "reboot":
self.logger.info("REBOOT!!!")
os.system("sudo reboot")
elif command == "photo":
quality = payloadDict.get("quality", "sd")
self.logger.info("Taking {0} photo".format(quality))
photoFile = "/tmp/snapshot_{0}.jpg".format(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
if quality == "hd":
os.system("raspistill -hf -t 1000 -o {0}".format(photoFile))
else:
os.system("raspistill -hf -t 1000 -w 640 -h 480 -o {0}".format(photoFile))
with open(photoFile, mode='rb') as file:
photoData = file.read()
base64data = base64.b64encode(photoData)
self.service.sendMessage(json.dumps({'image':base64data, 'type':'jpg'}))
elif command == "relay":
state = payloadDict.get("state", 1)
self.logger.info("Changing relay state to: {0}".format(state))
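                # NOTE: relay1_addr here (and led_rgb below) are not defined in
                # this module; they are presumably provided elsewhere in the
                # device code, so the calls are left as-is.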
os.system("curl {0}/?relay={1}".format(relay1_addr, state))
elif command == "light":
state = payloadDict.get("state", 1)
self.logger.info("Changing light state to: {0}".format(state))
if state == 0:
led_rgb(0,0,0)
else:
led_rgb(1,1,0)
elif command == "tunnel":
if self.tunnel:
                    self.logger.warning("Tunnel already active - ignoring command")
else:
remotePort = payloadDict.get("remotePort", 18888)
localPort = payloadDict.get("localPort", 22)
addr = payloadDict["addr"]
self.startTunnel(remotePort, localPort, addr)
elif command == "video":
if self.tunnel:
                    self.logger.warning("Tunnel already active - ignoring command")
else:
port = payloadDict.get("port", 8081)
addr = payloadDict["addr"]
self.startVideo(port, addr)
elif command == "tunnel-close":
if self.tunnel:
self.logger.info("terminating tunnel process")
self.tunnel.kill()
self.tunnel = None
else:
self.logger.warning("no tunnel process active, ignoring command")
if self.video:
self.logger.info("terminating video process")
self.video.kill()
self.video = None
else:
self.logger.info("Command '{0}' unknown".format(command))
def startTunnel(self, remotePort, localPort, addr):
sshPrivateKeyFile = self.config.get('client', 'sshPrivateKeyFile')
self.logger.info("Opening SSH tunneling session for remotePort={0}, localPort={1}, addr={2} using privateKey={3}".format(remotePort, localPort, addr, sshPrivateKeyFile))
cmd = "/usr/bin/ssh -o BatchMode=yes -o StrictHostKeyChecking=no -i {0} -N -R {1}:localhost:{2} {3}".format(sshPrivateKeyFile, remotePort, localPort, addr)
self.logger.info("Starting process: {0}".format(cmd))
self.tunnel = Popen(cmd.split())
self.logger.info("SSH tunneling process started")
def startVideo(self, port, addr):
sshPrivateKeyFile = self.config.get('client', 'sshPrivateKeyFile')
self.logger.info("Starting video streaming session")
self.logger.info("loading driver bcm2835-v4l2")
os.system("sudo modprobe bcm2835-v4l2")
time.sleep(0.5)
cmdVideo = "sudo motion"
self.logger.info("Starting processes: {0}".format(cmdVideo))
self.video = Popen(cmdVideo.split())
cmdTunnel = "sudo /usr/bin/ssh -o BatchMode=yes -o StrictHostKeyChecking=no -i {0} -N -R {1}:localhost:8081 {2}".format(sshPrivateKeyFile, port, addr)
self.logger.info("Starting processes: {0}".format(cmdTunnel))
self.tunnel = Popen(cmdTunnel.split())
self.logger.info("SSH video tunneling session started")
| 47.45977
| 177
| 0.552434
|
cba18e98a43145e22cc372bdce693139ef160ef3
| 6,333
|
py
|
Python
|
auto_ts/utils/etl.py
|
ahmedgu1/Auto_TS
|
fd40bb6c47e6079b8c9974662e0baea11edf09fa
|
[
"Apache-2.0"
] | 423
|
2020-05-11T10:47:49.000Z
|
2022-03-30T14:14:20.000Z
|
auto_ts/utils/etl.py
|
Moehrenbaum/Auto_TS
|
e0a6634a727e44b4d5bbf6fbfefde99b6b3e8f86
|
[
"Apache-2.0"
] | 70
|
2020-06-05T13:38:49.000Z
|
2022-03-17T11:42:25.000Z
|
auto_ts/utils/etl.py
|
Moehrenbaum/Auto_TS
|
e0a6634a727e44b4d5bbf6fbfefde99b6b3e8f86
|
[
"Apache-2.0"
] | 75
|
2020-02-16T00:55:20.000Z
|
2022-03-22T03:55:09.000Z
|
from typing import List
import numpy as np
import pandas as pd # type: ignore
import copy
import pdb
from sklearn.model_selection import TimeSeriesSplit # type: ignore
import dask
import dask.dataframe as dd
##### This function loads a time series data and sets the index as a time series
def load_ts_data(filename, ts_column, sep, target):
"""
This function loads a given filename into a pandas dataframe and sets the
ts_column as a Time Series index. Note that filename should contain the full
path to the file.
"""
if isinstance(filename, str):
print('First loading %s and then setting %s as date time index...' % (filename, ts_column))
try:
dft = pd.read_csv(filename,index_col=ts_column, parse_dates=True)
print(' Loaded %s into pandas dataframe. Dask dataframe type not working for this file...' % filename)
except:
dft = dd.read_csv(filename, blocksize=100e6)
print(' Too big to fit into pandas. Hence loaded file %s into a Dask dataframe ...' % filename)
else:
### If filename is not a string, it must be a dataframe and can be loaded
if filename.shape[0] < 100000:
dft = copy.deepcopy(filename)
print(' Loaded pandas dataframe...')
else:
dft = dd.from_pandas(filename, npartitions=1)
print(' Converted pandas dataframe into a Dask dataframe ...' )
#### Now check if DFT has an index. If not, set one ############
if type(dft.index) == pd.DatetimeIndex:
return dft
elif dft.index.dtype == '<M8[ns]':
return dft
else:
try:
if type(dft) == dask.dataframe.core.DataFrame:
dft.index = dd.to_datetime(dft[ts_column])
dft = dft.drop(ts_column, axis=1)
else:
dft.index = pd.to_datetime(dft.pop(ts_column))
preds = [x for x in list(dft) if x not in [target]]
dft = dft[[target]+preds]
except Exception as e:
print(e)
print('Error: Could not convert Time Series column to an index. Please check your input and try again')
return ''
return dft
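# Illustrative usage sketch (not part of the original module); the file name and
# column names below are made up:
#
#   dft = load_ts_data('sales.csv', ts_column='date', sep=',', target='sales')
#   # -> a pandas (or Dask) dataframe indexed by the parsed 'date' column with
#   #    the target column first, or '' if the conversion fails.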
def time_series_split(ts_df):
"""
This utility splits any dataframe sent as a time series split using the sklearn function.
"""
tscv = TimeSeriesSplit(n_splits=2)
train_index, test_index = list(tscv.split(ts_df))[1][0], list(tscv.split(ts_df))[1][1]
ts_train, ts_test = ts_df[ts_df.index.isin(train_index)], ts_df[
ts_df.index.isin(test_index)]
print(ts_train.shape, ts_test.shape)
return ts_train, ts_test
def convert_timeseries_dataframe_to_supervised(df: pd.DataFrame, namevars, target, n_in=1, n_out=0, dropT=True):
"""
Transform a time series in dataframe format into a supervised learning dataset while
keeping dataframe intact.
Returns the transformed pandas DataFrame, the name of the target column and the names of the predictor columns
Arguments:
df: A timeseries dataframe that you want to convert to Supervised dataset.
namevars: columns that you want to lag in the data frame. Other columns will be untouched.
target: this is the target variable you intend to use in supervised learning
n_in: Number of lag periods as input (X).
n_out: Number of future periods (optional) as output for the taget variable (y).
dropT: Boolean - whether or not to drop columns at time 't'.
Returns:
        df: This is the transformed data frame with the time series columns lagged.
Note that the original columns are dropped if you set the 'dropT' argument to True.
If not, they are preserved.
This Pandas DataFrame of lagged time series data is immediately available for supervised learning.
rtype: pd.DataFrame, str, List[str]
"""
df = copy.deepcopy(df)
int_vars = df.select_dtypes(include='integer').columns.tolist()
# Notice that we will create a sequence of columns from name vars with suffix (t-n,... t-1), etc.
drops = []
int_changes = []
for i in range(n_in, -1, -1):
if i == 0:
for var in namevars:
addname = var + '(t)'
df = df.rename(columns={var:addname})
drops.append(addname)
if var in int_vars:
int_changes.append(addname)
else:
for var in namevars:
addname = var + '(t-' + str(i) + ')'
df[addname] = df[var].shift(i)
if var in int_vars:
int_changes.append(addname)
## forecast sequence (t, t+1,... t+n)
if n_out == 0:
n_out = False
for i in range(1, n_out):
for var in namevars:
addname = var + '(t+' + str(i) + ')'
df[addname] = df[var].shift(-i)
# drop rows with NaN values
df = df.dropna()
### Make sure that whatever vars came in as integers return back as integers!
df[int_changes] = df[int_changes].astype(np.int64)
# put it all together
df = df.rename(columns={target+'(t)':target})
if dropT:
### If dropT is true, all the "t" series of the target column (in case it is in the namevars)
### will be removed if you don't want the target to learn from its "t" values.
        ### Similarly, we will also drop all the "t" series of name_vars if you set dropT to True.
try:
drops.remove(target)
except:
pass
df.drop(drops, axis=1, inplace=True)
preds = [x for x in list(df) if x not in [target]]
return df, target, preds
############
def find_max_min_value_in_a_dataframe(df, max_min='min'):
"""
This returns the lowest or highest value in a df and its row value where it can be found.
Unfortunately, it does not return the column where it is found. So not used much.
"""
if max_min == 'min':
return df.loc[:, list(df)].min(axis=1).min(), df.loc[:, list(df)].min(axis=1).idxmin()
else:
        return df.loc[:, list(df)].max(axis=1).max(), df.loc[:, list(df)].max(axis=1).idxmax()
| 43.979167
| 118
| 0.605558
|
cf3ee8fdc1038f141c16ab31743278cb4c2b9637
| 21,326
|
py
|
Python
|
lib/lib-python/2.7/site.py
|
ojii/sandlib
|
f822eb308a86e413076c185724bd28a450c59187
|
[
"BSD-3-Clause"
] | 1
|
2019-04-11T22:53:51.000Z
|
2019-04-11T22:53:51.000Z
|
lib/lib-python/2.7/site.py
|
ojii/sandlib
|
f822eb308a86e413076c185724bd28a450c59187
|
[
"BSD-3-Clause"
] | null | null | null |
lib/lib-python/2.7/site.py
|
ojii/sandlib
|
f822eb308a86e413076c185724bd28a450c59187
|
[
"BSD-3-Clause"
] | null | null | null |
"""Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
In earlier versions of Python (up to 1.5a3), scripts or modules that
needed to use site-specific modules would place ``import site''
somewhere near the top of their code. Because of the automatic
import, this is no longer necessary (but code that does it still
works).
This will append site-specific paths to the module search path. On
Unix (including Mac OSX), it starts with sys.prefix and
sys.exec_prefix (if different) and appends
lib/python<version>/site-packages as well as lib/site-python.
On other platforms (such as Windows), it tries each of the
prefixes directly, as well as with lib/site-packages appended. The
resulting directories, if they exist, are appended to sys.path, and
also inspected for path configuration files.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.5/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.5/site-packages/bar
/usr/local/lib/python2.5/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
After these path manipulations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys
import os
import __builtin__
import traceback
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
ENABLE_USER_SITE = None
# for distutils.commands.install
# These values are initialized by the getuserbase() and getusersitepackages()
# functions, through the main() function when Python starts.
USER_SITE = None
USER_BASE = None
def makepath(*paths):
dir = os.path.join(*paths)
try:
dir = os.path.abspath(dir)
except OSError:
pass
return dir, os.path.normcase(dir)
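# Illustrative sketch (not from the original file), on a POSIX system:
#   makepath('/usr/lib', 'python2.7')  ->  ('/usr/lib/python2.7', '/usr/lib/python2.7')
# (the second, normcase()d form is what the duplicate checks below compare).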
def abs__file__():
    """Set all modules' __file__ attributes to an absolute path"""
for m in sys.modules.values():
if hasattr(m, '__loader__'):
continue # don't mess with a PEP 302-supplied __file__
try:
prev = m.__file__
new = os.path.abspath(m.__file__)
if prev != new:
m.__file__ = new
except (AttributeError, OSError):
pass
def removeduppaths():
""" Remove duplicate entries from sys.path along with making them
absolute"""
# This ensures that the initial path provided by the interpreter contains
# only absolute pathnames, even if we're running from the build directory.
L = []
known_paths = set()
for dir in sys.path:
# Filter out duplicate paths (on case-insensitive file systems also
# if they only differ in case); turn relative paths into absolute
# paths.
dir, dircase = makepath(dir)
if not dircase in known_paths:
L.append(dir)
known_paths.add(dircase)
sys.path[:] = L
return known_paths
# XXX This should not be part of site.py, since it is needed even when
# using the -S option for Python. See http://www.python.org/sf/586680
def addbuilddir():
"""Append ./build/lib.<platform> in case we're running in the build dir
(especially for Guido :-)"""
from sysconfig import get_platform
s = "build/lib.%s-%.3s" % (get_platform(), sys.version)
if hasattr(sys, 'gettotalrefcount'):
s += '-pydebug'
s = os.path.join(os.path.dirname(sys.path.pop()), s)
sys.path.append(s)
def _init_pathinfo():
"""Return a set containing all existing directory entries from sys.path"""
d = set()
for dir in sys.path:
try:
if os.path.isdir(dir):
dir, dircase = makepath(dir)
d.add(dircase)
except TypeError:
continue
return d
def addpackage(sitedir, name, known_paths):
"""Process a .pth file within the site-packages directory:
For each line in the file, either combine it with sitedir to a path
and add that to known_paths, or execute it if it starts with 'import '.
"""
if known_paths is None:
_init_pathinfo()
reset = 1
else:
reset = 0
fullname = os.path.join(sitedir, name)
try:
f = open(fullname, "rU")
except IOError:
return
with f:
for n, line in enumerate(f):
if line.startswith("#"):
continue
try:
if line.startswith(("import ", "import\t")):
exec line
continue
line = line.rstrip()
dir, dircase = makepath(sitedir, line)
if not dircase in known_paths and os.path.exists(dir):
sys.path.append(dir)
known_paths.add(dircase)
except Exception as err:
print >>sys.stderr, "Error processing line {:d} of {}:\n".format(
n+1, fullname)
for record in traceback.format_exception(*sys.exc_info()):
for line in record.splitlines():
print >>sys.stderr, ' '+line
print >>sys.stderr, "\nRemainder of file ignored"
break
if reset:
known_paths = None
return known_paths
def addsitedir(sitedir, known_paths=None):
"""Add 'sitedir' argument to sys.path if missing and handle .pth files in
'sitedir'"""
if known_paths is None:
known_paths = _init_pathinfo()
reset = 1
else:
reset = 0
sitedir, sitedircase = makepath(sitedir)
if not sitedircase in known_paths:
sys.path.append(sitedir) # Add path component
try:
names = os.listdir(sitedir)
except os.error:
return
dotpth = os.extsep + "pth"
names = [name for name in names if name.endswith(dotpth)]
for name in sorted(names):
addpackage(sitedir, name, known_paths)
if reset:
known_paths = None
return known_paths
def check_enableusersite():
"""Check if user site directory is safe for inclusion
The function tests for the command line flag (including environment var),
process uid/gid equal to effective uid/gid.
None: Disabled for security reasons
False: Disabled by user (command line option)
True: Safe and enabled
"""
if sys.flags.no_user_site:
return False
if hasattr(os, "getuid") and hasattr(os, "geteuid"):
# check process uid == effective uid
if os.geteuid() != os.getuid():
return None
if hasattr(os, "getgid") and hasattr(os, "getegid"):
# check process gid == effective gid
if os.getegid() != os.getgid():
return None
return True
def getuserbase():
"""Returns the `user base` directory path.
The `user base` directory can be used to store data. If the global
variable ``USER_BASE`` is not initialized yet, this function will also set
it.
"""
global USER_BASE
if USER_BASE is not None:
return USER_BASE
from sysconfig import get_config_var
USER_BASE = get_config_var('userbase')
return USER_BASE
def getusersitepackages():
"""Returns the user-specific site-packages directory path.
If the global variable ``USER_SITE`` is not initialized yet, this
function will also set it.
"""
global USER_SITE
user_base = getuserbase() # this will also set USER_BASE
if USER_SITE is not None:
return USER_SITE
from sysconfig import get_path
import os
if sys.platform == 'darwin':
from sysconfig import get_config_var
if get_config_var('PYTHONFRAMEWORK'):
USER_SITE = get_path('purelib', 'osx_framework_user')
return USER_SITE
USER_SITE = get_path('purelib', '%s_user' % os.name)
return USER_SITE
def addusersitepackages(known_paths):
"""Add a per user site-package to sys.path
Each user has its own python directory with site-packages in the
home directory.
"""
# get the per user site-package path
# this call will also make sure USER_BASE and USER_SITE are set
user_site = getusersitepackages()
if ENABLE_USER_SITE and os.path.isdir(user_site):
addsitedir(user_site, known_paths)
return known_paths
def getsitepackages():
"""Returns a list containing all global site-packages directories
(and possibly site-python).
For each directory present in the global ``PREFIXES``, this function
will find its `site-packages` subdirectory depending on the system
environment, and will return a list of full paths.
"""
is_pypy = '__pypy__' in sys.builtin_module_names
sitepackages = []
seen = set()
for prefix in PREFIXES:
if not prefix or prefix in seen:
continue
seen.add(prefix)
if sys.platform in ('os2emx', 'riscos'):
sitepackages.append(os.path.join(prefix, "Lib", "site-packages"))
elif is_pypy:
from distutils.sysconfig import get_python_lib
sitedir = get_python_lib(standard_lib=False, prefix=prefix)
sitepackages.append(sitedir)
elif os.sep == '/':
sitepackages.append(os.path.join(prefix, "lib",
"python" + sys.version[:3],
"site-packages"))
sitepackages.append(os.path.join(prefix, "lib", "site-python"))
else:
sitepackages.append(prefix)
sitepackages.append(os.path.join(prefix, "lib", "site-packages"))
if sys.platform == "darwin":
# for framework builds *only* we add the standard Apple
# locations.
from sysconfig import get_config_var
framework = get_config_var("PYTHONFRAMEWORK")
if framework:
sitepackages.append(
os.path.join("/Library", framework,
sys.version[:3], "site-packages"))
return sitepackages
def addsitepackages(known_paths):
"""Add site-packages (and possibly site-python) to sys.path"""
for sitedir in getsitepackages():
if os.path.isdir(sitedir):
addsitedir(sitedir, known_paths)
return known_paths
def setBEGINLIBPATH():
"""The OS/2 EMX port has optional extension modules that do double duty
as DLLs (and must use the .DLL file extension) for other extensions.
The library search path needs to be amended so these will be found
during module import. Use BEGINLIBPATH so that these are at the start
of the library search path.
"""
dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload")
libpath = os.environ['BEGINLIBPATH'].split(';')
if libpath[-1]:
libpath.append(dllpath)
else:
libpath[-1] = dllpath
os.environ['BEGINLIBPATH'] = ';'.join(libpath)
def setquit():
"""Define new builtins 'quit' and 'exit'.
These are objects which make the interpreter exit when called.
The repr of each object contains a hint at how it works.
"""
if os.sep == ':':
eof = 'Cmd-Q'
elif os.sep == '\\':
eof = 'Ctrl-Z plus Return'
else:
eof = 'Ctrl-D (i.e. EOF)'
class Quitter(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return 'Use %s() or %s to exit' % (self.name, eof)
def __call__(self, code=None):
# Shells like IDLE catch the SystemExit, but listen when their
# stdin wrapper is closed.
try:
sys.stdin.close()
except:
pass
raise SystemExit(code)
__builtin__.quit = Quitter('quit')
__builtin__.exit = Quitter('exit')
class _Printer(object):
"""interactive prompt objects for printing the license text, a list of
contributors and the copyright notice."""
MAXLINES = 23
def __init__(self, name, data, files=(), dirs=()):
self.__name = name
self.__data = data
self.__files = files
self.__dirs = dirs
self.__lines = None
def __setup(self):
if self.__lines:
return
data = None
for dir in self.__dirs:
for filename in self.__files:
filename = os.path.join(dir, filename)
try:
fp = file(filename, "rU")
data = fp.read()
fp.close()
break
except IOError:
pass
if data:
break
if not data:
data = self.__data
self.__lines = data.split('\n')
self.__linecnt = len(self.__lines)
def __repr__(self):
self.__setup()
if len(self.__lines) <= self.MAXLINES:
return "\n".join(self.__lines)
else:
return "Type %s() to see the full %s text" % ((self.__name,)*2)
def __call__(self):
self.__setup()
prompt = 'Hit Return for more, or q (and Return) to quit: '
lineno = 0
while 1:
try:
for i in range(lineno, lineno + self.MAXLINES):
print self.__lines[i]
except IndexError:
break
else:
lineno += self.MAXLINES
key = None
while key is None:
key = raw_input(prompt)
if key not in ('', 'q'):
key = None
if key == 'q':
break
##def setcopyright():
## """Set 'copyright' and 'credits' in __builtin__"""
## __builtin__.copyright = _Printer("copyright", sys.copyright)
## if sys.platform[:4] == 'java':
## __builtin__.credits = _Printer(
## "credits",
## "Jython is maintained by the Jython developers (www.jython.org).")
## else:
## __builtin__.credits = _Printer("credits", """\
## Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
## for supporting Python development. See www.python.org for more information.""")
## here = os.path.dirname(os.__file__)
## __builtin__.license = _Printer(
## "license", "See http://www.python.org/%.3s/license.html" % sys.version,
## ["LICENSE.txt", "LICENSE"],
## [os.path.join(here, os.pardir), here, os.curdir])
def setcopyright():
# XXX this is the PyPy-specific version. Should be unified with the above.
__builtin__.copyright = _Printer("copyright", sys.copyright)
__builtin__.credits = _Printer(
"credits",
"PyPy is maintained by the PyPy developers: http://pypy.org/")
__builtin__.license = _Printer(
"license",
"See https://bitbucket.org/pypy/pypy/src/default/LICENSE")
class _Helper(object):
"""Define the builtin 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
__builtin__.help = _Helper()
def aliasmbcs():
"""On Windows, some default encodings are not provided by Python,
while they are always available as "mbcs" in each locale. Make
them usable by aliasing to "mbcs" in such a case."""
if sys.platform == 'win32':
import locale, codecs
enc = locale.getdefaultlocale()[1]
if enc is not None and enc.startswith('cp'): # "cp***" ?
try:
codecs.lookup(enc)
except LookupError:
import encodings
encodings._cache[enc] = encodings._unknown
encodings.aliases.aliases[enc] = 'mbcs'
def setencoding():
"""Set the string encoding used by the Unicode implementation. The
default is 'ascii', but if you're willing to experiment, you can
change this."""
encoding = "ascii" # Default value set by _PyUnicode_Init()
if 0:
# Enable to support locale aware default string encodings.
import locale
loc = locale.getdefaultlocale()
if loc[1]:
encoding = loc[1]
if 0:
# Enable to switch off string to Unicode coercion and implicit
# Unicode to string conversion.
encoding = "undefined"
if encoding != "ascii":
# On Non-Unicode builds this will raise an AttributeError...
sys.setdefaultencoding(encoding) # Needs Python Unicode build !
def execsitecustomize():
"""Run custom site specific code, if available."""
try:
import sitecustomize
except ImportError:
pass
except Exception:
if sys.flags.verbose:
sys.excepthook(*sys.exc_info())
else:
print >>sys.stderr, \
"'import sitecustomize' failed; use -v for traceback"
def execusercustomize():
"""Run custom user specific code, if available."""
try:
import usercustomize
except ImportError:
pass
except Exception:
if sys.flags.verbose:
sys.excepthook(*sys.exc_info())
else:
print>>sys.stderr, \
"'import usercustomize' failed; use -v for traceback"
def import_builtin_stuff():
"""PyPy specific: pre-import a few built-in modules, because
some programs actually rely on them to be in sys.modules :-("""
import exceptions
if 'zipimport' in sys.builtin_module_names:
import zipimport
def main():
global ENABLE_USER_SITE
import_builtin_stuff()
abs__file__()
known_paths = removeduppaths()
if (os.name == "posix" and sys.path and
os.path.basename(sys.path[-1]) == "Modules"):
addbuilddir()
if ENABLE_USER_SITE is None:
ENABLE_USER_SITE = check_enableusersite()
known_paths = addusersitepackages(known_paths)
known_paths = addsitepackages(known_paths)
if sys.platform == 'os2emx':
setBEGINLIBPATH()
setquit()
setcopyright()
sethelper()
aliasmbcs()
setencoding()
execsitecustomize()
if ENABLE_USER_SITE:
execusercustomize()
# Remove sys.setdefaultencoding() so that users cannot change the
# encoding after initialization. The test for presence is needed when
# this module is run as a script, because this code is executed twice.
if hasattr(sys, "setdefaultencoding"):
del sys.setdefaultencoding
main()
def _script():
help = """\
%s [--user-base] [--user-site]
Without arguments print some useful information
With arguments print the value of USER_BASE and/or USER_SITE separated
by '%s'.
Exit codes with --user-base or --user-site:
0 - user site directory is enabled
1 - user site directory is disabled by user
      2 - user site directory is disabled by super user
or for security reasons
>2 - unknown error
"""
args = sys.argv[1:]
if not args:
print "sys.path = ["
for dir in sys.path:
print " %r," % (dir,)
print "]"
print "USER_BASE: %r (%s)" % (USER_BASE,
"exists" if os.path.isdir(USER_BASE) else "doesn't exist")
print "USER_SITE: %r (%s)" % (USER_SITE,
"exists" if os.path.isdir(USER_SITE) else "doesn't exist")
print "ENABLE_USER_SITE: %r" % ENABLE_USER_SITE
sys.exit(0)
buffer = []
if '--user-base' in args:
buffer.append(USER_BASE)
if '--user-site' in args:
buffer.append(USER_SITE)
if buffer:
print os.pathsep.join(buffer)
if ENABLE_USER_SITE:
sys.exit(0)
elif ENABLE_USER_SITE is False:
sys.exit(1)
elif ENABLE_USER_SITE is None:
sys.exit(2)
else:
sys.exit(3)
else:
import textwrap
print textwrap.dedent(help % (sys.argv[0], os.pathsep))
sys.exit(10)
if __name__ == '__main__':
_script()
| 33.166407
| 86
| 0.615352
|
311a0ae5e08f83bc782f3290b4d360ef06216e5c
| 679
|
py
|
Python
|
scout/commands/export/exon.py
|
szilvajuhos/scout
|
2f4a03fb3192a57c99fd62be626e8c22051e81af
|
[
"BSD-3-Clause"
] | null | null | null |
scout/commands/export/exon.py
|
szilvajuhos/scout
|
2f4a03fb3192a57c99fd62be626e8c22051e81af
|
[
"BSD-3-Clause"
] | null | null | null |
scout/commands/export/exon.py
|
szilvajuhos/scout
|
2f4a03fb3192a57c99fd62be626e8c22051e81af
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import click
from flask.cli import with_appcontext
from scout.commands.utils import builds_option
from scout.export.exon import export_exons
from scout.server.extensions import store
LOG = logging.getLogger(__name__)
@click.command('exons', short_help='Export exons')
@builds_option
@with_appcontext
def exons(build):
"""Export all exons to chanjo compatible .bed like format"""
LOG.info("Running scout export exons")
adapter = store
header = ["#Chrom\tStart\tEnd\tExonId\tTranscripts\tHgncIDs\tHgncSymbols"]
for line in header:
click.echo(line)
for exon_line in export_exons(adapter, build):
click.echo(exon_line)
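# Illustrative usage sketch (not part of the original module); the exact CLI
# entry point and option name come from the surrounding Scout project and are
# assumptions here:
#
#   scout export exons --build 37 > exons.bed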
| 23.413793
| 78
| 0.748159
|
e8d067db9ebb42e3a7ca556fc0cc24a9131fbb36
| 8,938
|
py
|
Python
|
cogs/background_tasks.py
|
Dakskihedron/snakebot
|
5770aed8663df47a3182bdf56c4202b2874c056f
|
[
"MIT"
] | null | null | null |
cogs/background_tasks.py
|
Dakskihedron/snakebot
|
5770aed8663df47a3182bdf56c4202b2874c056f
|
[
"MIT"
] | null | null | null |
cogs/background_tasks.py
|
Dakskihedron/snakebot
|
5770aed8663df47a3182bdf56c4202b2874c056f
|
[
"MIT"
] | null | null | null |
import aiohttp
import os
import asyncio
import subprocess
import orjson
from discord.ext import commands, tasks
import discord
import cogs.utils.database as DB
class background_tasks(commands.Cog):
"""Commands related to the background tasks of the bot."""
def __init__(self, bot: commands.Bot) -> None:
self.bot = bot
self.start_tasks()
def cog_unload(self):
"""When the cog is unloaded stop all running tasks."""
for task in self.tasks:
self.tasks[task].cancel()
def start_tasks(self):
"""Finds all the tasks in the cog and starts them."""
task_dict = {}
for task in dir(background_tasks):
task_obj = getattr(self, task)
if isinstance(task_obj, tasks.Loop):
task_obj.start()
task_dict[task] = task_obj
self.tasks = task_dict
@commands.group(hidden=True)
@commands.is_owner()
async def task(self, ctx):
"""The task command group."""
if ctx.invoked_subcommand is None:
await ctx.send("```No subcommand passed```")
@task.command()
async def restart(self, ctx, task):
"""Restarts a background task.
task: str
The name of the task to restart.
"""
try:
getattr(self, task).restart()
await ctx.send(f"{task} restarted")
except AttributeError:
return await ctx.send("```Task not found```")
@task.command()
async def start(self, ctx, task):
"""Starts a background task.
task: str
The name of the task to start.
"""
try:
getattr(self, task).start()
await ctx.send(f"{task} started")
except AttributeError:
return await ctx.send("```Task not found```")
@task.command()
async def stop(self, ctx, task):
"""Stops a background task.
task: str
The name of the task to stop.
"""
try:
getattr(self, task).stop()
await ctx.send(f"{task} stopped")
except AttributeError:
return await ctx.send("```Task not found```")
@task.command()
async def list(self, ctx):
"""Lists background tasks.
Example
Name: Interval: Running/Failed/Count
backup_bot 2h 0m 0s True/False/10
check_end_dates 0h 0m 10s True/False/7200
update_bot 0h 5m 0s True/False/240
update_languages 0h 0m 0s False/False/1
update_stocks 0h 30m 0s True/False/40
"""
embed = discord.Embed(color=discord.Color.blurple())
msg = "Name: Interval: Running/Failed/Count:\n\n"
for task in self.tasks:
task_obj = self.tasks[task]
msg += "{:<20}{:<15}{}/{}/{}\n".format(
task,
f"{task_obj.hours}h {task_obj.minutes}m {task_obj.seconds}s",
task_obj.is_running(),
task_obj.failed(),
task_obj.current_loop,
)
embed.description = f"```\n{msg}```"
await ctx.send(embed=embed)
@tasks.loop(minutes=10)
async def update_stocks(self):
"""Updates stock data every 10 minutes."""
url = "https://api.nasdaq.com/api/screener/stocks?limit=50000"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.67 Safari/537.36",
"accept-language": "en-US,en;q=0.9",
}
async with aiohttp.ClientSession(headers=headers) as session, session.get(
url
) as response:
stocks = await response.json()
with DB.stocks.write_batch() as wb:
for stock in stocks["data"]["table"]["rows"]:
stock_data = {
"name": stock["name"],
"price": stock["lastsale"][1:],
"change": stock["netchange"],
"%change": stock["pctchange"][:-1]
if stock["pctchange"] != "--"
else 0,
"cap": stock["marketCap"],
}
wb.put(
stock["symbol"].encode(),
orjson.dumps(stock_data),
)
async def run_process(self, command):
"""Runs a shell command and returns the output.
command: str
The command to run.
"""
try:
process = await asyncio.create_subprocess_shell(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
result = await process.communicate()
except NotImplementedError:
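            # Some event loops don't implement asyncio subprocesses (the selector event loop
            # on Windows raises NotImplementedError), so fall back to a blocking Popen run
            # inside the default executor.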
process = subprocess.Popen(
command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
result = await self.bot.loop.run_in_executor(None, process.communicate)
return "".join([output.decode() for output in result]).split()
@tasks.loop(minutes=5)
async def update_bot(self):
"""Tries to update every 5 minutes and then reloads if needed."""
pull = await self.run_process("git pull")
if pull[:4] == ["Already", "up", "to", "date."]:
return
diff = await self.run_process("git diff --name-only HEAD@{0} HEAD@{1}")
if "poetry.lock" in diff:
await self.run_process("poetry install")
for ext in [f[:-3] for f in os.listdir("cogs") if f.endswith(".py")]:
try:
self.bot.reload_extension(f"cogs.{ext}")
except Exception as e:
if isinstance(e, commands.errors.ExtensionNotLoaded):
self.bot.load_extension(f"cogs.{ext}")
@tasks.loop(hours=6)
async def backup_bot(self):
"""Makes a backup of the db every 6 hours."""
if DB.db.get(b"restart") == b"1":
DB.db.delete(b"restart")
return
number = DB.db.get(b"backup_number")
if not number:
number = -1
else:
number = int(number.decode())
number += 1
if number == 11:
number = 0
DB.db.put(b"backup_number", str(number).encode())
os.makedirs("backup/", exist_ok=True)
with open(f"backup/{number}backup.json", "w", encoding="utf-8") as file:
            # Serialise the db to JSON by hand: values that already contain a double quote
            # are assumed to be JSON themselves and are written raw, everything else is
            # written as a quoted string.
json = "".join(
[
f'"{key.decode()}": "{value.decode()}", '
if '"' not in value.decode()
else f'"{key.decode()}": {value.decode()}, '
for key, value in DB.db
if not key.startswith(b"crypto-") and not key.startswith(b"stocks-")
]
)
file.write(f"{{{json[:-3]}}}")
@tasks.loop(count=1)
async def update_languages(self):
"""Updates pistons supported languages for the run command."""
url = "https://emkc.org/api/v1/piston/versions"
async with aiohttp.ClientSession() as session, session.get(url) as page:
data = await page.json()
languages = set()
for language in data:
languages.update(set(language["aliases"]))
languages.add(language["name"])
DB.db.put(b"languages", orjson.dumps(list(languages)))
@tasks.loop(minutes=10)
async def crypto_update(self):
"""Updates crypto currency data every 10 minutes."""
url = "https://api.coinmarketcap.com/data-api/v3/cryptocurrency/listing?limit=50000&convert=NZD&cryptoType=coins"
async with aiohttp.ClientSession() as session, session.get(url) as response:
crypto = await response.json()
with DB.crypto.write_batch() as wb:
for coin in crypto["data"]["cryptoCurrencyList"]:
if "price" not in coin["quotes"][0]:
continue
wb.put(
coin["symbol"].encode(),
orjson.dumps(
{
"name": coin["name"],
"id": coin["id"],
"price": coin["quotes"][0]["price"],
"circulating_supply": coin["circulatingSupply"],
"max_supply": coin.get("maxSupply", 0),
"market_cap": coin["quotes"][0].get("marketCap", 0),
"change_24h": coin["quotes"][0]["percentChange24h"],
"volume_24h": coin["quotes"][0].get("volume24h", 0),
}
),
)
def setup(bot):
bot.add_cog(background_tasks(bot))
| 34.114504
| 143
| 0.522264
|
9fce33945ff2f01bd2e396a3b4626104ee578462
| 18,645
|
py
|
Python
|
tests/test_process.py
|
ducdk90/tilequeue
|
c664b5c89a9f0e6743405ab266aa9ca80b57806e
|
[
"MIT"
] | 29
|
2016-11-03T18:39:21.000Z
|
2022-02-27T17:42:37.000Z
|
tests/test_process.py
|
ducdk90/tilequeue
|
c664b5c89a9f0e6743405ab266aa9ca80b57806e
|
[
"MIT"
] | 146
|
2016-07-07T16:41:07.000Z
|
2021-12-11T00:27:20.000Z
|
tests/test_process.py
|
ducdk90/tilequeue
|
c664b5c89a9f0e6743405ab266aa9ca80b57806e
|
[
"MIT"
] | 28
|
2016-08-19T16:08:52.000Z
|
2021-07-26T10:16:29.000Z
|
from ModestMaps.Core import Coordinate
import unittest
class TestProcess(unittest.TestCase):
def _make_json_tiles(
self, coord, post_process_data={}, db_features=[], cut_coords=[],
buffer_cfg={}):
from tilequeue.process import process_coord
from tilequeue.tile import coord_to_mercator_bounds
from tilequeue.format import json_format
unpadded_bounds = coord_to_mercator_bounds(coord)
feature_layers = [dict(
layer_datum=dict(
name='fake_layer',
geometry_types=['Point'],
transform_fn_names=[],
sort_fn_name=None,
is_clipped=False
),
padded_bounds=dict(point=unpadded_bounds),
features=db_features
)]
formats = [json_format]
def _test_output_fn(*args):
return dict(foo='bar', min_zoom=0)
output_calc_mapping = dict(fake_layer=_test_output_fn)
all_coords = [coord] + cut_coords
tiles, extra = process_coord(
coord, coord.zoom, feature_layers, post_process_data, formats,
unpadded_bounds, all_coords, buffer_cfg, output_calc_mapping)
return tiles
def _make_json_tile(self, coord, **kwargs):
from tilequeue.format import json_format
import json
tiles = self._make_json_tiles(coord, **kwargs)
self.assertEqual(1, len(tiles))
tile = tiles[0]
self.assertEqual(coord, tile['coord'])
self.assertEqual(json_format, tile['format'])
self.assertEqual('all', tile['layer'])
return json.loads(tile['tile'])
def test_process_coord_empty(self):
from tilequeue.process import process_coord
from tilequeue.tile import coord_to_mercator_bounds
coord = Coordinate(0, 0, 0)
feature_layers = []
post_process_data = {}
formats = []
unpadded_bounds = coord_to_mercator_bounds(coord)
cut_coords = [coord]
buffer_cfg = {}
def _test_output_fn(*args):
return dict(foo='bar')
output_calc_mapping = dict(fake_layer=_test_output_fn)
tiles, extra = process_coord(
coord, coord.zoom, feature_layers, post_process_data, formats,
unpadded_bounds, cut_coords, buffer_cfg, output_calc_mapping)
self.assertEqual([], tiles)
self.assertEqual({'size': {}}, extra)
def test_process_coord_single_layer(self):
self.maxDiff = 10000
def _check(coord, post_process_name, should_have_point):
features = [dict(
__id__=1,
# this is a point at (90, 40) in mercator
__geometry__='\x01\x01\x00\x00\x00\xd7\xa3pE\xf8\x1b' + \
'cA\x1f\x85\xeb\x91\xe5\x8fRA',
__properties__=dict(foo='bar'),
)]
post_process_data = [
dict(
fn_name=('tests.test_process.%s' % post_process_name),
params={},
resources={}
)
]
json_data = {
'type': 'FeatureCollection',
'features': []
}
if should_have_point:
json_data['features'] = [{
'geometry': {
'type': 'Point',
'coordinates': [90.0, 40.0]
},
'type': 'Feature',
'properties': {
'foo': 'bar',
'min_zoom': 0,
'tags': dict(foo='bar'),
},
'id': 1
}]
tile = self._make_json_tile(
coord, post_process_data=post_process_data,
db_features=features)
self.assertEqual(json_data, tile)
_check(Coordinate(0, 0, 0), '_only_zoom_zero', True)
_check(Coordinate(0, 0, 0), '_only_zoom_one', False)
_check(Coordinate(0, 1, 1), '_only_zoom_one', True)
_check(Coordinate(0, 1, 1), '_only_zoom_zero', False)
def test_process_coord_cut_coords(self):
import json
self.maxDiff = 10000
coord = Coordinate(0, 0, 0)
cut_coord = Coordinate(0, 1, 1)
features = [dict(
__id__=1,
# this is a point at (90, 40) in mercator
__geometry__='\x01\x01\x00\x00\x00\xd7\xa3pE\xf8\x1b' + \
'cA\x1f\x85\xeb\x91\xe5\x8fRA',
__properties__=dict(foo='bar'),
)]
post_process_data = [
dict(
fn_name='tests.test_process._only_zoom_zero',
params={},
resources={}
)
]
tiles = self._make_json_tiles(
coord, post_process_data=post_process_data,
db_features=features, cut_coords=[cut_coord])
tiles_0 = [t for t in tiles if t['coord'] == coord]
self.assertEqual(1, len(tiles_0))
tile_0 = json.loads(tiles_0[0]['tile'])
self.assertEqual(1, len(tile_0['features']))
self.assertEqual([90.0, 40.0],
tile_0['features'][0]['geometry']['coordinates'])
# cut coord at zoom 1 is currently implemented as being re-processed
# from the original feature data, so will run the post-processor stuff
# at a different zoom level, and drop the point.
tiles_1 = [t for t in tiles if t['coord'] == cut_coord]
self.assertEqual(1, len(tiles_1))
tile_1 = json.loads(tiles_1[0]['tile'])
self.assertEqual(1, len(tile_1['features']))
self.assertEqual([90.0, 40.0],
tile_1['features'][0]['geometry']['coordinates'])
def test_cut_coord_exclusive(self):
# test that cut coords are the only ones in the response, and that
# the coordinate itself can be omitted.
from tilequeue.process import process_coord
from tilequeue.tile import coord_to_mercator_bounds
from tilequeue.format import json_format
coord = Coordinate(0, 0, 0)
db_features = []
cut_coords = [
Coordinate(zoom=1, column=0, row=0),
Coordinate(zoom=1, column=1, row=0),
Coordinate(zoom=1, column=0, row=1),
]
buffer_cfg = {}
post_process_data = {}
unpadded_bounds = coord_to_mercator_bounds(coord)
feature_layers = [dict(
layer_datum=dict(
name='fake_layer',
geometry_types=['Point'],
transform_fn_names=[],
sort_fn_name=None,
is_clipped=False
),
padded_bounds=dict(point=unpadded_bounds),
features=db_features
)]
formats = [json_format]
def _test_output_fn(*args):
return dict(foo='bar', min_zoom=0)
output_calc_mapping = dict(fake_layer=_test_output_fn)
tiles, extra = process_coord(
coord, coord.zoom, feature_layers, post_process_data, formats,
unpadded_bounds, cut_coords, buffer_cfg, output_calc_mapping)
self.assertEqual(len(cut_coords), len(tiles))
self.assertNotIn(coord, [t['coord'] for t in tiles])
class TestCalculateCutZooms(unittest.TestCase):
def test_max_zoom(self):
from tilequeue.process import calculate_sizes_by_zoom
from tilequeue.tile import metatile_zoom_from_size
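        # Assumption used to read these expectations: metatile_zoom_from_size returns the
        # number of zoom levels spanned by the metatile, i.e. log2 of its size
        # (1 -> 0, 4 -> 2, 8 -> 3), so an 8x8 metatile at coordinate zoom z has
        # nominal zoom z + 3.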
def _calc(metatile_size, tile_sizes, max_zoom):
metatile_zoom = metatile_zoom_from_size(metatile_size)
coord = Coordinate(zoom=max_zoom - metatile_zoom, row=0, column=0)
return calculate_sizes_by_zoom(
coord, metatile_zoom, tile_sizes, max_zoom - metatile_zoom)
# sweep max zoom to check the output is the same nominal max zoom.
self.assertEqual({16: [256]}, _calc(8, [256], 16))
self.assertEqual({15: [256]}, _calc(8, [256], 15))
self.assertEqual({14: [256]}, _calc(8, [256], 14))
# check we get 256 tiles as well as 512 at max zoom, even when the
# configured tile size is only 512.
self.assertEqual({16: [512, 256]}, _calc(8, [512], 16))
# we should get _both_ 512 and 256 tiles if we've configured to only
# have 1024 tiles at mid zooms.
self.assertEqual({16: [1024, 512, 256]}, _calc(8, [1024], 16))
def test_only_overzoom_at_max_zoom(self):
from tilequeue.process import calculate_sizes_by_zoom
# constants
metatile_zoom = 3
cfg_tile_sizes = [512]
max_zoom = 13
# zoom 13 (nominal 16) tile should contain everything
sizes = calculate_sizes_by_zoom(
Coordinate(zoom=13, column=0, row=0),
metatile_zoom, cfg_tile_sizes, max_zoom)
self.assertEquals(sizes, {16: [512, 256]})
# zoom 12 (nominal 15) should be 512 only
sizes = calculate_sizes_by_zoom(
Coordinate(zoom=12, column=0, row=0),
metatile_zoom, cfg_tile_sizes, max_zoom)
self.assertEquals(sizes, {15: [512]})
def test_mid_zoom(self):
from tilequeue.process import calculate_sizes_by_zoom
from tilequeue.tile import metatile_zoom_from_size
tile_sizes = [512]
metatile_size = 8
metatile_zoom = metatile_zoom_from_size(metatile_size)
max_zoom = 16 - metatile_zoom
for zoom in range(1, max_zoom - metatile_zoom):
coord = Coordinate(zoom=zoom, row=0, column=0)
sizes_by_zoom = calculate_sizes_by_zoom(
coord, metatile_zoom, tile_sizes, max_zoom)
nominal_zoom = zoom + metatile_zoom
self.assertEqual({nominal_zoom: tile_sizes}, sizes_by_zoom)
def test_zoom_zero(self):
from tilequeue.process import calculate_sizes_by_zoom
from tilequeue.tile import metatile_zoom_from_size
def _calc(metatile_size, tile_sizes):
coord = Coordinate(zoom=0, row=0, column=0)
metatile_zoom = metatile_zoom_from_size(metatile_size)
max_zoom = 16 - metatile_zoom
return calculate_sizes_by_zoom(
coord, metatile_zoom, tile_sizes, max_zoom)
# for an 8x8 metatile configured for 512 tiles, then by default we
# would get a 0/0/0 metatile with 4x4 nominal zoom 3 512px tiles. we
# want to extend that "upwards" towards nominal zoom 0, so we should
# also get: 2x2 nominal zoom 2 512px tiles plus 1x1 nominal zoom 1
# 512px tile.
self.assertEqual({
1: [512],
2: [512],
3: [512],
}, _calc(8, [512]))
# when we do the same with 256px tiles, we should get a nominal zoom
# zero tile.
self.assertEqual({
0: [256],
1: [256],
2: [256],
3: [256],
}, _calc(8, [256]))
# when we configure both 256 and 512px tiles, we should only get the
# 256 ones at the largest nominal zoom.
self.assertEqual({
1: [512],
2: [512],
3: [512, 256],
}, _calc(8, [512, 256]))
self.assertEqual({
2: [1024],
3: [1024, 512, 256],
}, _calc(8, [1024, 512, 256]))
# with a smaller metatile, we just get fewer nominal zooms in the range
# inside the metatile.
self.assertEqual({
1: [512],
2: [512, 256],
}, _calc(4, [512, 256]))
# with a 1x1 metatile (i.e: not really a metatile) then we just get
# the configured size.
for z in xrange(0, 3):
meta_sz = 1 << z
tile_sz = 256 * meta_sz
self.assertEqual({z: [tile_sz]}, _calc(meta_sz, [tile_sz]))
class TestMetatileChildrenWithSize(unittest.TestCase):
def test_single_tile(self):
from tilequeue.process import metatile_children_with_size
coord = Coordinate(zoom=0, column=0, row=0)
result = metatile_children_with_size(coord, 0, 0, 256)
self.assertEqual([coord], result)
def test_2x2_tile(self):
from tilequeue.process import metatile_children_with_size
coord = Coordinate(zoom=0, column=0, row=0)
result = metatile_children_with_size(coord, 1, 1, 256)
self.assertEqual(set([
Coordinate(zoom=1, column=0, row=0),
Coordinate(zoom=1, column=1, row=0),
Coordinate(zoom=1, column=0, row=1),
Coordinate(zoom=1, column=1, row=1),
]), set(result))
def test_8x8_512_tile(self):
from tilequeue.process import metatile_children_with_size
coord = Coordinate(zoom=0, column=0, row=0)
result = metatile_children_with_size(coord, 3, 3, 512)
self.assertEqual(set([
Coordinate(zoom=2, column=0, row=0),
Coordinate(zoom=2, column=1, row=0),
Coordinate(zoom=2, column=2, row=0),
Coordinate(zoom=2, column=3, row=0),
Coordinate(zoom=2, column=0, row=1),
Coordinate(zoom=2, column=1, row=1),
Coordinate(zoom=2, column=2, row=1),
Coordinate(zoom=2, column=3, row=1),
Coordinate(zoom=2, column=0, row=2),
Coordinate(zoom=2, column=1, row=2),
Coordinate(zoom=2, column=2, row=2),
Coordinate(zoom=2, column=3, row=2),
Coordinate(zoom=2, column=0, row=3),
Coordinate(zoom=2, column=1, row=3),
Coordinate(zoom=2, column=2, row=3),
Coordinate(zoom=2, column=3, row=3),
]), set(result))
def test_2x2_tile_nominal_1(self):
from tilequeue.process import metatile_children_with_size
coord = Coordinate(zoom=0, column=0, row=0)
result = metatile_children_with_size(coord, 1, 0, 256)
self.assertEqual(set([
Coordinate(zoom=0, column=0, row=0),
]), set(result))
class TestCalculateCutCoords(unittest.TestCase):
def test_1x1(self):
from tilequeue.process import calculate_cut_coords_by_zoom
# note! not using zoom level 0 because that has special properties!
coord = Coordinate(zoom=1, column=0, row=0)
cut_coords = calculate_cut_coords_by_zoom(
coord, 0, [256], 16)
self.assertEqual({1: [coord]}, cut_coords)
def test_2x2_256(self):
from tilequeue.process import calculate_cut_coords_by_zoom
def _c(z, x, y):
return Coordinate(zoom=z, column=x, row=y)
# note! not using zoom level 0 because that has special properties!
cut_coords = calculate_cut_coords_by_zoom(
_c(1, 0, 0), 1, [256], 16)
self.assertEqual({
2: [
_c(2, 0, 0),
_c(2, 0, 1),
_c(2, 1, 0),
_c(2, 1, 1),
]
}, cut_coords)
def test_4x4_512(self):
from tilequeue.process import calculate_cut_coords_by_zoom
def _c(z, x, y):
return Coordinate(zoom=z, column=x, row=y)
# note! not using zoom level 0 because that has special properties!
cut_coords = calculate_cut_coords_by_zoom(
_c(1, 0, 0), 2, [512], 16)
self.assertEqual({
3: [ # <- note nominal zoom is _3_ here.
_c(2, 0, 0),
_c(2, 0, 1),
_c(2, 1, 0),
_c(2, 1, 1),
]
}, cut_coords)
def test_4x4_512_max(self):
from tilequeue.process import calculate_cut_coords_by_zoom
def _c(z, x, y):
return Coordinate(zoom=z, column=x, row=y)
# even though we only configured 512 tiles, we get 256 ones as well at
# max zoom.
max_zoom = 16
metatile_zoom = 2
cut_coords = calculate_cut_coords_by_zoom(
_c(max_zoom - metatile_zoom, 0, 0), metatile_zoom, [512],
max_zoom - metatile_zoom)
self.assertEqual([max_zoom], cut_coords.keys())
self.assertEqual(set([
# some 512 tiles
_c(max_zoom - 1, 0, 0),
_c(max_zoom - 1, 0, 1),
_c(max_zoom - 1, 1, 0),
_c(max_zoom - 1, 1, 1),
# some 256 tiles
_c(max_zoom, 0, 0),
_c(max_zoom, 1, 0),
_c(max_zoom, 2, 0),
_c(max_zoom, 3, 0),
_c(max_zoom, 0, 1),
_c(max_zoom, 1, 1),
_c(max_zoom, 2, 1),
_c(max_zoom, 3, 1),
_c(max_zoom, 0, 2),
_c(max_zoom, 1, 2),
_c(max_zoom, 2, 2),
_c(max_zoom, 3, 2),
_c(max_zoom, 0, 3),
_c(max_zoom, 1, 3),
_c(max_zoom, 2, 3),
_c(max_zoom, 3, 3),
]), set(cut_coords[max_zoom]))
def test_8x8_512_min(self):
from tilequeue.process import calculate_cut_coords_by_zoom
def _c(z, x, y):
return Coordinate(zoom=z, column=x, row=y)
# we get the 512px tiles at nominal zoom 3, plus additional ones at 2
# & 1.
metatile_zoom = 3
cut_coords = calculate_cut_coords_by_zoom(
_c(0, 0, 0), metatile_zoom, [512], 16 - metatile_zoom)
self.assertEqual([1, 2, 3], cut_coords.keys())
# we get 1x1 nominal zoom 1 tile
self.assertEqual(set([
_c(0, 0, 0),
]), set(cut_coords[1]))
# we get 2x2 nominal zoom 2 tiles
self.assertEqual(set([
_c(1, 0, 0),
_c(1, 0, 1),
_c(1, 1, 0),
_c(1, 1, 1),
]), set(cut_coords[2]))
# we get 4x4 nominal zoom 3 tiles
self.assertEqual(set([
_c(2, 0, 0),
_c(2, 0, 1),
_c(2, 0, 2),
_c(2, 0, 3),
_c(2, 1, 0),
_c(2, 1, 1),
_c(2, 1, 2),
_c(2, 1, 3),
_c(2, 2, 0),
_c(2, 2, 1),
_c(2, 2, 2),
_c(2, 2, 3),
_c(2, 3, 0),
_c(2, 3, 1),
_c(2, 3, 2),
_c(2, 3, 3),
]), set(cut_coords[3]))
def _only_zoom(ctx, zoom):
layer = ctx.feature_layers[0]
if ctx.nominal_zoom != zoom:
layer['features'] = []
return layer
# a "post process" function which deletes all data except at zoom zero. this
# is used to check that the nominal zoom passed in the context is the same as
# what we expect.
def _only_zoom_zero(ctx):
return _only_zoom(ctx, 0)
def _only_zoom_one(ctx):
return _only_zoom(ctx, 1)
| 34.91573
| 79
| 0.560472
|
26f69dab763dc0ae696381120f40417ab76f59dc
| 1,608
|
py
|
Python
|
expert.py
|
bbrighttaer/guided-irl
|
470302c272af1226aa268ffe81737fc14c5a1a50
|
[
"MIT"
] | 9
|
2020-02-19T10:11:40.000Z
|
2021-07-21T03:16:24.000Z
|
expert.py
|
bbrighttaer/guided-irl
|
470302c272af1226aa268ffe81737fc14c5a1a50
|
[
"MIT"
] | null | null | null |
expert.py
|
bbrighttaer/guided-irl
|
470302c272af1226aa268ffe81737fc14c5a1a50
|
[
"MIT"
] | 3
|
2020-05-08T04:50:04.000Z
|
2021-07-12T21:58:23.000Z
|
from collections import namedtuple
import pickle
import gym
import ptan
from ptan.agent import float32_preprocessor
import torch
import numpy as np
from util import PGN
GAMMA = 0.99
NUM_TRAJS = 100
EpisodeStep = namedtuple('EpisodeStep', field_names=['state', 'action', 'reward', 'next_state'])
Trajectory = namedtuple('Trajectory', field_names=['prob', 'episode_steps'])
if __name__ == '__main__':
env = gym.make('CartPole-v1')
net = PGN(env.observation_space.shape[0], env.action_space.n)
net.load_state_dict(torch.load('cartpole_expert.mod'))
net.eval()
agent = ptan.agent.PolicyAgent(net, apply_softmax=True, preprocessor=float32_preprocessor)
exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, gamma=GAMMA)
trajectories = []
for ep in range(NUM_TRAJS):
episode = []
qt = 1.0
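        # qt accumulates the product of the expert policy's per-step action probabilities,
        # i.e. the probability of the sampled trajectory under the expert policy.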
for step_idx, exp in enumerate(exp_source):
probs = torch.softmax(net(float32_preprocessor(exp.state).view(1, -1)), dim=1)
probs = probs.squeeze()[int(exp.action)].item()
qt *= probs
episode.append(EpisodeStep(state=exp.state, action=int(exp.action), reward=exp.reward,
next_state=exp.last_state))
if exp.last_state is None:
break
trajectories.append(Trajectory(prob=qt, episode_steps=episode))
print(f'Number of trajectories: {len(trajectories)}')
with open('demonstrations.list.pkl', 'wb') as f:
pickle.dump(trajectories, f)
env.close()
| 38.285714
| 99
| 0.647388
|
b5714b01b740d4103ad31394410f195fc60ced0c
| 9,273
|
py
|
Python
|
radiobear/brightness.py
|
david-deboer/radiobear
|
aedc716c7acf0c69d278988842407545f1043c17
|
[
"BSD-2-Clause"
] | 3
|
2019-05-13T21:03:57.000Z
|
2021-04-22T05:33:33.000Z
|
radiobear/brightness.py
|
david-deboer/radiobear
|
aedc716c7acf0c69d278988842407545f1043c17
|
[
"BSD-2-Clause"
] | 1
|
2020-02-11T20:34:49.000Z
|
2020-02-11T20:34:49.000Z
|
radiobear/brightness.py
|
david-deboer/radiobear
|
aedc716c7acf0c69d278988842407545f1043c17
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- mode: python; coding: utf-8 -*-
# Copyright 2018 David DeBoer
# Licensed under the 2-clause BSD license.
import numpy as np
from scipy.special import expn
import os.path
from . import utils
from . import raypath as ray
from . import logging
class Brightness():
def __init__(self, config=None, log=None, verbose=True, **kwargs):
"""This calculates the brightness temperature of the planets.
It must be used with atmosphere and alpha"""
self.verbose = verbose
self.log = logging.setup(log)
if config is None or isinstance(config, str):
from . import config as pcfg
config = pcfg.planetConfig('x', configFile=config)
config.update_config(**kwargs)
self.config = config
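    # Rough usage sketch (not from the source): build an atmosphere and an alpha
    # (absorption) object from the companion radiobear modules, then call
    #   Tb = Brightness(config=cfg).single(b, freqs, atm, alpha)
    # which returns one brightness temperature per entry in freqs.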
def single(self, b, freqs, atm, alpha, orientation=None, taulimit=20.0):
"""This computes the brightness temperature along one ray path"""
disc_average = utils.b_type(b).startswith('dis')
if disc_average:
b = [0.0, 0.0]
self.alpha = alpha
self.freqs = freqs
self.b = b
if self.alpha.layers is None:
self.alpha.get_layers(freqs, atm)
# get path lengths (ds_layer) vs layer number (num_layer)
# currently frequency independent refractivity
print_meta = (self.verbose == 'loud')
travel = ray.compute_ds(atm, b, orientation, gtype=None, verbose=print_meta)
self.travel = travel
if travel.ds is None:
print('Off planet')
self.Tb = []
for j in range(len(freqs)):
self.Tb.append(utils.T_cmb)
return self.Tb
# set and initialize arrays
integrated_W = [0.0 for f in freqs]
self.tau = [[0.0 for f in freqs]]
self.Tb_lyr = [[0.0 for f in freqs]]
self.W = [[0.0 for f in freqs]]
P_layers = atm.gas[atm.config.C['P']]
T_layers = atm.gas[atm.config.C['T']]
z_layers = atm.gas[atm.config.C['Z']]
self.P = [P_layers[travel.layer4ds[0]]]
self.z = [z_layers[travel.layer4ds[0]]]
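        # March along the ray path: for each segment ds, accumulate optical depth with a
        # trapezoid rule, dtau = (a0 + a1) * ds / 2, form the weighting function
        # W = alpha * exp(-tau) (or 2 * alpha * E2(tau) for a disc average), and add the
        # segment contribution dTb = (T1 * W1 + T0 * W0) * ds / 2 to the layer Tb.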
for i in range(len(travel.ds) - 1):
ds = travel.ds[i] * utils.Units[utils.atmLayerUnit] / utils.Units['cm']
taus = []
Ws = []
Tbs = []
ii = travel.layer4ds[i]
ii1 = travel.layer4ds[i + 1]
T1 = T_layers[ii1]
T0 = T_layers[ii]
self.P.append((P_layers[ii] + P_layers[ii1]) / 2.0)
self.z.append((z_layers[ii] + z_layers[ii1]) / 2.0)
if self.alpha.layers is None:
print("is None at ", i)
for j, f in enumerate(freqs):
if not alpha.config.Doppler:
a1 = self.alpha.layers[j][ii1]
a0 = self.alpha.layers[j][ii]
else:
print("\n\nDoppler currently broken since the get_alpha call is different.")
fshifted = [[f / travel.doppler[i]], [f / travel.doppler[i + 1]]]
print('\rdoppler corrected frequency at layer', i, end='')
a1 = alpha.get_alpha(fshifted[0], T_layers[ii1], P_layers[ii1], atm.gas[:, ii1],
atm.config.C, atm.cloud[:, ii1],
atm.config.Cl, units=utils.alphaUnit)
a0 = alpha.get_alpha(fshifted[1], T_layers[ii], P_layers[ii], atm.gas[:, ii],
atm.config.C, atm.cloud[:, ii],
atm.config.Cl, units=utils.alphaUnit)
dtau = (a0 + a1) * ds / 2.0
taus.append(self.tau[i][j] + dtau) # this is tau_(i+1)
if disc_average:
Ws.append(2.0 * a1 * expn(2, taus[j])) # this is W_(i+1) for disc average
else:
Ws.append(a1 * np.exp(-taus[j])) # this is W_(i+1) for non disc average
integrated_W[j] += (Ws[j] + self.W[i][j]) * ds / 2.0
dTb = (T1 * Ws[j] + T0 * self.W[i][j]) * ds / 2.0
Tbs.append(self.Tb_lyr[i][j] + dTb)
self.tau.append(taus)
self.W.append(Ws)
self.Tb_lyr.append(Tbs)
# final spectrum
self.Tb = []
for j in range(len(freqs)):
top_Tb_lyr = self.Tb_lyr[-1][j]
if top_Tb_lyr < utils.T_cmb:
top_Tb_lyr = utils.T_cmb
else:
top_Tb_lyr /= integrated_W[j] # Normalize by integrated weights (makes assumptions)
if integrated_W[j] < 0.96 and self.verbose:
print("Weight correction at {:.2f} is {:.4f} (showing below 0.96)"
.format(freqs[j], integrated_W[j]))
self.Tb.append(top_Tb_lyr)
self.tau = np.array(self.tau).transpose()
self.W = np.array(self.W).transpose()
self.Tb_lyr = np.array(self.Tb_lyr).transpose()
self.P = np.array(self.P)
self.z = np.array(self.z)
self.integrated_W = np.array(integrated_W)
del taus, Tbs, Ws, travel
return self.Tb
    def savertm(self, tag=None, path='.'):
if tag is None:
filename = None
else:
filename = 'alpha_' + tag + '.out'
self.saveAlpha(filename, self.config.output_directory)
if tag is None:
filename = None
else:
filename = 'wgt_' + tag + '.out'
        self.saveWeight(filename=filename, path=path)
if tag is None:
filename = None
else:
filename = 'tau_' + tag + '.out'
self.saveTau(filename, path)
if tag is None:
filename = None
else:
filename = 'tblayer_' + tag + '.out'
self.saveTblayer(filename, path)
def saveit(self):
for i, f in enumerate(self.freqs):
filename = 'pawtt_{:.3f}.out'.format(f)
fp = open(filename, 'w')
print("{}: Pressure, alpha, weight, tau, Tb".format(filename))
for j in range(len(self.P)):
s = '{}\t'.format(repr(self.P[j]))
s += '{}\t'.format(repr(self.alpha.layers[i][j]))
s += '{}\t'.format(repr(self.W[i][j]))
s += '{}\t'.format(repr(self.tau[i][j]))
s += '{}\n'.format(repr(self.Tb_lyr[i][j]))
fp.write(s)
fp.close()
def saveAlpha(self, filename=None, path='.'):
if filename is None:
filename = 'alpha.out'
filename = os.path.join(path, filename)
fp = open(filename, 'w')
s = '#P \tz \t'
for f in self.freqs:
s += '{:.2f}\t'.format(f)
s += 'GHz\n'
fp.write(s)
for j in range(len(self.P)):
s = ('{}\t{:.2f}\t').format(repr(self.P[j]), self.z[j])
for i in range(len(self.freqs)):
s += '{}\t'.format(repr(self.alpha.layers[i][j]))
s += '\n'
fp.write(s)
        s = ('{} ({} x {})').format(filename, i + 1, j + 1)
        return s
def saveWeight(self, norm=False, filename=None, path='.'):
if filename is None:
            filename = 'wgt.out'
        filename = os.path.join(path, filename)
        fp = open(filename, 'w')
s = '#P \tz \t'
for f in self.freqs:
s += ('{:.2f}\t').format(f)
s = s.strip() + 'GHz\n'
fp.write(s)
scale = []
for i in range(len(self.freqs)):
if norm:
scale.append(np.max(self.W[i]))
else:
scale.append(1.0)
for j in range(len(self.P)):
s = ('{}\t{:.2f}\t').format(repr(self.P[j]), self.z[j])
for i in range(len(self.freqs)):
s += ('{}\t').format(repr(self.W[i][j] / scale[i]))
s = s.strip() + '\n'
fp.write(s)
s = ('{} ({} x {})').format(filename, i + 1, j + 1)
return s
def saveTau(self, filename=None, path='.'):
if filename is None:
filename = 'tau.out'
        filename = os.path.join(path, filename)
fp = open(filename, 'w')
s = '#P \tz \t'
for f in self.freqs:
s += '{:.2f}\t'.format(f)
s += 'GHz\n'
fp.write(s)
for j in range(len(self.P)):
s = ('{}\t{:.2f}\t').format(repr(self.P[j]), self.z[j])
for i in range(len(self.freqs)):
s += ('{}\t').format(repr(self.tau[i][j]))
s += '\n'
fp.write(s)
s = ('{} ({} x {})').format(filename, i + 1, j + 1)
return s
def saveTblayer(self, filename=None, path='.'):
if filename is None:
filename = 'tblayer.out'
        filename = os.path.join(path, filename)
fp = open(filename, 'w')
s = '#P \tz \t'
for f in self.freqs:
s += ('{:.2f}\t').format(f)
s += 'GHz\n'
fp.write(s)
for j in range(len(self.P)):
s = ('{}\t{:.2f}\t').format(repr(self.P[j]), self.z[j])
for i in range(len(self.freqs)):
s += ('{}\t').format(repr(self.Tb_lyr[i][j]))
s += '\n'
fp.write(s)
s = ('{} ({} x {})').format(filename, i + 1, j + 1)
return s
| 38.799163
| 100
| 0.485496
|
7055d4bfb9e72fa7d3b37c9c57004539fe049216
| 10,050
|
py
|
Python
|
examples/example.py
|
godchen0212/pymilvus
|
09848e14206d956e3728131e73da1cc870f3c19b
|
[
"Apache-2.0"
] | null | null | null |
examples/example.py
|
godchen0212/pymilvus
|
09848e14206d956e3728131e73da1cc870f3c19b
|
[
"Apache-2.0"
] | null | null | null |
examples/example.py
|
godchen0212/pymilvus
|
09848e14206d956e3728131e73da1cc870f3c19b
|
[
"Apache-2.0"
] | null | null | null |
import random
from pprint import pprint
from milvus import Milvus, DataType
# ------
# Setup:
# First of all, you need a running Milvus (0.11.x). By default, Milvus runs on localhost on port 19530.
# Then, you can use pymilvus (0.3.x) to connect to the server. You can change _HOST and _PORT accordingly.
# ------
_HOST = '127.0.0.1'
_PORT = '19530'
client = Milvus(_HOST, _PORT)
# ------
# Basic create collection:
# You already have a Milvus instance running, and pymilvus connecting to Milvus.
# The first thing we will do is create a collection `demo_films`. In case we already have a collection
# named `demo_films`, we drop it before creating it.
# ------
collection_name = 'demo_films'
if collection_name in client.list_collections():
client.drop_collection(collection_name)
# ------
# Basic create collection:
# For a specific field, you can provide extra infos by a dictionary with `key = "params"`. If the field
# has a type of `FLOAT_VECTOR` and `BINARY_VECTOR`, "dim" must be provided in extra infos. Otherwise
# you can provide customed infos like `{"unit": "minutes"}` for you own need.
#
# In our case, the extra infos in "duration" field means the unit of "duration" field is "minutes".
# And `auto_id` in the parameter is set to `False` so that we can provide our own unique ids.
# For more information you can refer to the pymilvus
# documentation (https://milvus-io.github.io/milvus-sdk-python/pythondoc/v0.3.0/index.html).
# ------
collection_param = {
"fields": [
# Milvus doesn't support string type now, but we are considering supporting it soon.
# {"name": "title", "type": DataType.STRING},
{"name": "duration", "type": DataType.INT32, "params": {"unit": "minute"}},
{"name": "release_year", "type": DataType.INT32},
{"name": "embedding", "type": DataType.FLOAT_VECTOR, "params": {"dim": 8}},
],
"segment_row_limit": 4096,
"auto_id": False
}
# ------
# Basic create collection:
# After creating the collection `demo_films`, we create a partition tagged "American", meaning the films
# we insert into it are American films.
# ------
client.create_collection(collection_name, collection_param)
client.create_partition(collection_name, "American")
# ------
# Basic create collection:
# You can check the collection info and partitions we've created by `get_collection_info` and
# `list_partitions`
# ------
print("--------get collection info--------")
collection = client.get_collection_info(collection_name)
pprint(collection)
partitions = client.list_partitions(collection_name)
print("\n----------list partitions----------")
pprint(partitions)
# ------
# Basic insert entities:
# We have three films of The_Lord_of_the_Rings series here with their id, duration, release_year
# and fake embeddings to be inserted. They are listed below to give you an overview of the structure.
# ------
The_Lord_of_the_Rings = [
{
"title": "The_Fellowship_of_the_Ring",
"id": 1,
"duration": 208,
"release_year": 2001,
"embedding": [random.random() for _ in range(8)]
},
{
"title": "The_Two_Towers",
"id": 2,
"duration": 226,
"release_year": 2002,
"embedding": [random.random() for _ in range(8)]
},
{
"title": "The_Return_of_the_King",
"id": 3,
"duration": 252,
"release_year": 2003,
"embedding": [random.random() for _ in range(8)]
}
]
# ------
# Basic insert entities:
# To insert these films into Milvus, we have to group values from the same field together like below.
# Then these grouped data are used to create `hybrid_entities`.
# ------
ids = [k.get("id") for k in The_Lord_of_the_Rings]
durations = [k.get("duration") for k in The_Lord_of_the_Rings]
release_years = [k.get("release_year") for k in The_Lord_of_the_Rings]
embeddings = [k.get("embedding") for k in The_Lord_of_the_Rings]
hybrid_entities = [
# Milvus doesn't support string type yet, so we cannot insert "title".
{"name": "duration", "values": durations, "type": DataType.INT32},
{"name": "release_year", "values": release_years, "type": DataType.INT32},
{"name": "embedding", "values": embeddings, "type": DataType.FLOAT_VECTOR},
]
# ------
# Basic insert entities:
# We insert the `hybrid_entities` into our collection, into partition `American`, with ids we provide.
# If the insert succeeds, the ids we provide will be returned.
# ------
ids = client.insert(collection_name, hybrid_entities, ids, partition_tag="American")
print("\n----------insert----------")
print("Films are inserted and the ids are: {}".format(ids))
# ------
# Basic insert entities:
# After inserting entities into the collection, we need to flush the collection to make sure it is on disk,
# so that we are able to retrieve it.
# ------
before_flush_counts = client.count_entities(collection_name)
client.flush([collection_name])
after_flush_counts = client.count_entities(collection_name)
print("\n----------flush----------")
print("There are {} films in collection `{}` before flush".format(before_flush_counts, collection_name))
print("There are {} films in collection `{}` after flush".format(after_flush_counts, collection_name))
# ------
# Basic insert entities:
# We can get detailed collection statistics with `get_collection_stats`
# ------
info = client.get_collection_stats(collection_name)
print("\n----------get collection stats----------")
pprint(info)
# ------
# Basic search entities:
# Now that we have 3 films inserted into our collection, it's time to obtain them.
# We can get films by their ids; if Milvus can't find an entity for a given id, `None` will be returned.
# In the case below, we will only get 1 film (with id=1) and the other result will be `None`.
# ------
films = client.get_entity_by_id(collection_name, ids=[1, 200])
print("\n----------get entity by id = 1, id = 200----------")
for film in films:
if film is not None:
print(" > id: {},\n > duration: {}m,\n > release_years: {},\n > embedding: {}"
.format(film.id, film.duration, film.release_year, film.embedding))
# ------
# Basic hybrid search entities:
# Getting films by id is not enough; we are also going to get films based on vector similarity.
# Let's say we have a film with its `embedding` and we want to find the `top3` films that are most similar
# to it by L2 distance.
# Other than vector similarities, we also want to obtain films that:
# `released year` term in 2002 or 2003,
# `duration` larger than 250 minutes.
#
# Milvus provides a Query DSL (Domain Specific Language) to support structured data filtering in queries.
# For now Milvus supports TermQuery and RangeQuery; they are structured as below.
# For more information about the meaning and other options about "must" and "bool",
# please refer to DSL chapter of our pymilvus documentation
# (https://milvus-io.github.io/milvus-sdk-python/pythondoc/v0.3.0/index.html).
# ------
query_embedding = [random.random() for _ in range(8)]
query_hybrid = {
"bool": {
"must": [
{
"term": {"release_year": [2002, 2003]}
},
{
# "GT" for greater than
"range": {"duration": {"GT": 250}}
},
{
"vector": {
"embedding": {"topk": 3, "query": [query_embedding], "metric_type": "L2"}
}
}
]
}
}
# ------
# Basic hybrid search entities:
# And we want to get all the fields back in the results, so fields = ["duration", "release_year", "embedding"].
# If the search succeeds, results will be returned.
# `results` has `nq` (number of queries) separate result sets; since we only query for 1 film, the length of
# `results` is 1.
# We ask for the top 3 in return, but our condition is strict while the database is small, so we can
# only get 1 film, which means the length of `entities` below is also 1.
#
# Now that we have the results and know it's a 1 x 1 structure, how can we get the ids, distances and fields?
# It's very simple: every `topk_film` has three properties: `id`, `distance` and `entity`.
# All fields are stored in `entity`, so you can obtain the data as shown below.
# The result should be the film with id = 3.
# ------
results = client.search(collection_name, query_hybrid, fields=["duration", "release_year", "embedding"])
print("\n----------search----------")
for entities in results:
for topk_film in entities:
current_entity = topk_film.entity
print("- id: {}".format(topk_film.id))
print("- distance: {}".format(topk_film.distance))
print("- release_year: {}".format(current_entity.release_year))
print("- duration: {}".format(current_entity.duration))
print("- embedding: {}".format(current_entity.embedding))
# ------
# Basic delete:
# Now let's see how to delete things in Milvus.
# You can simply delete entities by their ids.
# ------
client.delete_entity_by_id(collection_name, ids=[1, 2])
client.flush() # flush is important
result = client.get_entity_by_id(collection_name, ids=[1, 2])
counts_delete = sum([1 for entity in result if entity is not None])
counts_in_collection = client.count_entities(collection_name)
print("\n----------delete id = 1, id = 2----------")
print("Get {} entities by id 1, 2".format(counts_delete))
print("There are {} entities after delete films with 1, 2".format(counts_in_collection))
# ------
# Basic delete:
# You can drop the partitions we created, and drop the collection we created.
# ------
client.drop_partition(collection_name, partition_tag='American')
if collection_name in client.list_collections():
client.drop_collection(collection_name)
# ------
# Summary:
# Now we've gone through all the basic operations pymilvus can perform with the Milvus server, hope it's helpful!
# ------
| 41.020408
| 112
| 0.657313
|
b78abe04e41b87829e6b9954ba648e9ca6d697d2
| 1,688
|
py
|
Python
|
pdf_to_scan/make_pdfs_look_scanned.py
|
apurvmishra99/pdf-to-scan
|
20ae5423224174cc2800f62318e2313e649957ab
|
[
"MIT"
] | 42
|
2020-05-13T22:02:34.000Z
|
2022-02-27T18:35:14.000Z
|
pdf_to_scan/make_pdfs_look_scanned.py
|
apurvmishra99/pdf-to-scan
|
20ae5423224174cc2800f62318e2313e649957ab
|
[
"MIT"
] | 1
|
2020-12-11T16:29:01.000Z
|
2020-12-11T16:29:01.000Z
|
pdf_to_scan/make_pdfs_look_scanned.py
|
apurvmishra99/pdf-to-scan
|
20ae5423224174cc2800f62318e2313e649957ab
|
[
"MIT"
] | 2
|
2021-06-08T11:49:02.000Z
|
2021-08-03T14:18:31.000Z
|
#! /usr/bin/env python
# Copyright (c) 2020 apurv
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import os
import sys
import subprocess
from pathlib import Path
import click
import locale
import ghostscript
from wand.image import Image
@click.command()
@click.argument(
"file_name",
type=click.Path(exists=True)
)
def convert(file_name):
try:
orig_file = Path(file_name).resolve()
output_path = Path(f"{file_name.split('.')[0]}_.pdf").resolve()
output_path_temp = Path(f"{file_name.split('.')[0]}__.pdf").resolve()
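        # Scan-look pipeline: rasterise the PDF at 150 dpi, convert to grayscale, stretch
        # the contrast, add a slight blur, gaussian noise and a 0.5 degree rotation to mimic
        # a scanned page, then rewrite the result with Ghostscript (downsampling images).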
with Image(filename=str(orig_file), resolution=150) as img:
img.transform_colorspace('gray')
img.linear_stretch(black_point=0.035, white_point=0.1)
img.blur(radius=0, sigma=0.5)
img.noise(noise_type='gaussian', attenuate=0.25)
img.rotate(0.5)
img.save(filename=str(output_path))
cmd_gs = ['gs', '-dSAFER', '-dBATCH', '-dNOPAUSE', '-dNOCACHE', '-sDEVICE=pdfwrite', '-sColorConversionStrategy=LeaveColorUnchanged', '-dAutoFilterColorImages=true',
'-dAutoFilterGrayImages=true', '-dDownsampleMonoImages=true', '-dDownsampleGrayImages=true', '-dDownsampleColorImages=true', f'-sOutputFile={str(output_path_temp)}', str(output_path)]
encoding = locale.getpreferredencoding()
cmd_gs = [a.encode(encoding) for a in cmd_gs]
ghostscript.Ghostscript(*cmd_gs)
os.remove(str(output_path_temp))
click.secho("File processed and saved", fg="green")
except Exception as e:
print(e)
if __name__ == "__main__":
convert()
| 35.914894
| 199
| 0.658768
|
1c8101f3524f904dfb19c5f6e85c42d23132ed8a
| 570
|
py
|
Python
|
ontask/migrations/0015_auto_20180530_0914.py
|
pinheiroo27/ontask_b
|
23fee8caf4e1c5694a710a77f3004ca5d9effeac
|
[
"MIT"
] | 33
|
2017-12-02T04:09:24.000Z
|
2021-11-07T08:41:57.000Z
|
ontask/migrations/0015_auto_20180530_0914.py
|
pinheiroo27/ontask_b
|
23fee8caf4e1c5694a710a77f3004ca5d9effeac
|
[
"MIT"
] | 189
|
2017-11-16T04:06:29.000Z
|
2022-03-11T23:35:59.000Z
|
ontask/migrations/0015_auto_20180530_0914.py
|
pinheiroo27/ontask_b
|
23fee8caf4e1c5694a710a77f3004ca5d9effeac
|
[
"MIT"
] | 30
|
2017-11-30T03:35:44.000Z
|
2022-01-31T03:08:08.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-29 23:44
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ontask', '0014_auto_20180530_0754'),
]
operations = [
migrations.AlterModelOptions(
name='sqlconnection',
options={'ordering': ('name',)},
),
migrations.RenameField(
model_name='sqlconnection',
old_name='Connection name',
new_name='name',
),
]
| 22.8
| 49
| 0.587719
|
0c2610f8d10e9f0be45e18eb51e2edd56804faa8
| 1,317
|
py
|
Python
|
mindspore/ops/_op_impl/tbe/sqrt.py
|
GuoSuiming/mindspore
|
48afc4cfa53d970c0b20eedfb46e039db2a133d5
|
[
"Apache-2.0"
] | 3,200
|
2020-02-17T12:45:41.000Z
|
2022-03-31T20:21:16.000Z
|
mindspore/ops/_op_impl/tbe/sqrt.py
|
forwhat461/mindspore
|
59a277756eb4faad9ac9afcc7fd526e8277d4994
|
[
"Apache-2.0"
] | 176
|
2020-02-12T02:52:11.000Z
|
2022-03-28T22:15:55.000Z
|
mindspore/ops/_op_impl/tbe/sqrt.py
|
forwhat461/mindspore
|
59a277756eb4faad9ac9afcc7fd526e8277d4994
|
[
"Apache-2.0"
] | 621
|
2020-03-09T01:31:41.000Z
|
2022-03-30T03:43:19.000Z
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sqrt op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
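# The registration below declares the Ascend TBE kernel for Sqrt: the op name, the compiled
# kernel binary (sqrt.so) and kernel entry point (sqrt), elementwise fusion, and the
# supported dtype/format pairs (float16 and float32, format agnostic).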
sqrt_op_info = TBERegOp("Sqrt") \
.fusion_type("ELEMWISE") \
.async_flag(False) \
.binfile_name("sqrt.so") \
.compute_cost(10) \
.kernel_name("sqrt") \
.partial_flag(True) \
.input(0, "x", False, "required", "all") \
.output(0, "y", False, "required", "all") \
.op_pattern("formatAgnostic") \
.dtype_format(DataType.F16_None, DataType.F16_None) \
.dtype_format(DataType.F32_None, DataType.F32_None) \
.get_op_info()
@op_info_register(sqrt_op_info)
def _sqrt_tbe():
"""Sqrt TBE register"""
return
| 34.657895
| 79
| 0.668185
|
e186226e0895d9787f179f8f29faabc35d7d8a8f
| 6,415
|
py
|
Python
|
tfx/extensions/google_cloud_big_query/example_gen/executor_test.py
|
Anon-Artist/tfx
|
2692c9ab437d76b5d9517996bfe2596862e0791d
|
[
"Apache-2.0"
] | 2
|
2021-05-10T21:39:48.000Z
|
2021-11-17T11:24:29.000Z
|
tfx/extensions/google_cloud_big_query/example_gen/executor_test.py
|
Anon-Artist/tfx
|
2692c9ab437d76b5d9517996bfe2596862e0791d
|
[
"Apache-2.0"
] | 1
|
2021-01-28T13:44:51.000Z
|
2021-04-28T16:15:47.000Z
|
tfx/extensions/google_cloud_big_query/example_gen/executor_test.py
|
Anon-Artist/tfx
|
2692c9ab437d76b5d9517996bfe2596862e0791d
|
[
"Apache-2.0"
] | 1
|
2021-01-28T13:41:51.000Z
|
2021-01-28T13:41:51.000Z
|
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.extensions.google_cloud_big_query.example_gen.executor."""
import os
import random
import apache_beam as beam
from apache_beam.testing import util
from google.cloud import bigquery
import mock
import tensorflow as tf
from tfx.dsl.components.base import base_executor
from tfx.dsl.io import fileio
from tfx.extensions.google_cloud_big_query import utils
from tfx.extensions.google_cloud_big_query.example_gen import executor
from tfx.proto import example_gen_pb2
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.utils import proto_utils
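# Note: these tests never reach BigQuery. utils.ReadFromBigQuery is patched with the
# in-memory PTransforms defined below, and bigquery.Client is mocked so the executor's
# schema lookup returns the fake schema built in setUp().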
@beam.ptransform_fn
def _MockReadFromBigQuery(pipeline, query):
del query # Unused arg
mock_query_results = []
for i in range(10000):
mock_query_result = {
'i': None if random.randrange(10) == 0 else i,
'f': None if random.randrange(10) == 0 else float(i),
's': None if random.randrange(10) == 0 else str(i)
}
mock_query_results.append(mock_query_result)
return pipeline | beam.Create(mock_query_results)
@beam.ptransform_fn
def _MockReadFromBigQuery2(pipeline, query):
del query # Unused arg
mock_query_results = [{
'i': 1,
'i2': [2, 3],
'b': True,
'f': 2.0,
'f2': [2.7, 3.8],
's': 'abc',
's2': ['abc', 'def']
}]
return pipeline | beam.Create(mock_query_results)
class ExecutorTest(tf.test.TestCase):
def setUp(self):
# Mock BigQuery result schema.
self._schema = [
bigquery.SchemaField('i', 'INTEGER', mode='REQUIRED'),
bigquery.SchemaField('i2', 'INTEGER', mode='REPEATED'),
bigquery.SchemaField('b', 'BOOLEAN', mode='REQUIRED'),
bigquery.SchemaField('f', 'FLOAT', mode='REQUIRED'),
bigquery.SchemaField('f2', 'FLOAT', mode='REPEATED'),
bigquery.SchemaField('s', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('s2', 'STRING', mode='REPEATED'),
]
super(ExecutorTest, self).setUp()
@mock.patch.multiple(
utils,
ReadFromBigQuery=_MockReadFromBigQuery2,
)
@mock.patch.object(bigquery, 'Client')
def testBigQueryToExample(self, mock_client):
# Mock query result schema for _BigQueryConverter.
mock_client.return_value.query.return_value.result.return_value.schema = self._schema
with beam.Pipeline() as pipeline:
examples = (
pipeline | 'ToTFExample' >> executor._BigQueryToExample(
exec_properties={'_beam_pipeline_args': []},
split_pattern='SELECT i, i2, b, f, f2, s, s2 FROM `fake`'))
feature = {}
feature['i'] = tf.train.Feature(int64_list=tf.train.Int64List(value=[1]))
feature['i2'] = tf.train.Feature(
int64_list=tf.train.Int64List(value=[2, 3]))
feature['b'] = tf.train.Feature(int64_list=tf.train.Int64List(value=[1]))
feature['f'] = tf.train.Feature(
float_list=tf.train.FloatList(value=[2.0]))
feature['f2'] = tf.train.Feature(
float_list=tf.train.FloatList(value=[2.7, 3.8]))
feature['s'] = tf.train.Feature(
bytes_list=tf.train.BytesList(value=[tf.compat.as_bytes('abc')]))
feature['s2'] = tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[tf.compat.as_bytes('abc'),
tf.compat.as_bytes('def')]))
example_proto = tf.train.Example(
features=tf.train.Features(feature=feature))
util.assert_that(examples, util.equal_to([example_proto]))
@mock.patch.multiple(
utils,
ReadFromBigQuery=_MockReadFromBigQuery,
)
@mock.patch.object(bigquery, 'Client')
def testDo(self, mock_client):
# Mock query result schema for _BigQueryConverter.
mock_client.return_value.query.return_value.result.return_value.schema = self._schema
output_data_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
# Create output dict.
examples = standard_artifacts.Examples()
examples.uri = output_data_dir
output_dict = {'examples': [examples]}
    # Create exec properties.
exec_properties = {
'input_config':
proto_utils.proto_to_json(
example_gen_pb2.Input(splits=[
example_gen_pb2.Input.Split(
name='bq', pattern='SELECT i, b, f, s FROM `fake`'),
])),
'output_config':
proto_utils.proto_to_json(
example_gen_pb2.Output(
split_config=example_gen_pb2.SplitConfig(splits=[
example_gen_pb2.SplitConfig.Split(
name='train', hash_buckets=2),
example_gen_pb2.SplitConfig.Split(
name='eval', hash_buckets=1)
])))
}
# Run executor.
big_query_example_gen = executor.Executor(
base_executor.BaseExecutor.Context(
beam_pipeline_args=['--project=test-project']))
big_query_example_gen.Do({}, output_dict, exec_properties)
mock_client.assert_called_with(project='test-project')
self.assertEqual(
artifact_utils.encode_split_names(['train', 'eval']),
examples.split_names)
# Check BigQuery example gen outputs.
train_output_file = os.path.join(examples.uri, 'train',
'data_tfrecord-00000-of-00001.gz')
eval_output_file = os.path.join(examples.uri, 'eval',
'data_tfrecord-00000-of-00001.gz')
self.assertTrue(fileio.exists(train_output_file))
self.assertTrue(fileio.exists(eval_output_file))
self.assertGreater(
fileio.open(train_output_file).size(),
fileio.open(eval_output_file).size())
if __name__ == '__main__':
tf.test.main()
| 36.448864
| 89
| 0.656898
|
e0449d2d865ed7916fb9278262d7ae47ff780f25
| 105,614
|
py
|
Python
|
sympy/solvers/tests/test_solveset.py
|
MaqOwais/sympy
|
c14ff3308aa416b4e9412af6f6682bff7a24e376
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/solvers/tests/test_solveset.py
|
MaqOwais/sympy
|
c14ff3308aa416b4e9412af6f6682bff7a24e376
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/solvers/tests/test_solveset.py
|
MaqOwais/sympy
|
c14ff3308aa416b4e9412af6f6682bff7a24e376
|
[
"BSD-3-Clause"
] | null | null | null |
from sympy.core.containers import Tuple
from sympy.core.function import (Function, Lambda, nfloat, diff)
from sympy.core.mod import Mod
from sympy.core.numbers import (E, I, Rational, oo, pi)
from sympy.core.relational import (Eq, Gt,
Ne)
from sympy.core.singleton import S
from sympy.core.symbol import (Dummy, Symbol, symbols)
from sympy.functions.elementary.complexes import (Abs, arg, im, re, sign)
from sympy.functions.elementary.exponential import (LambertW, exp, log)
from sympy.functions.elementary.hyperbolic import (HyperbolicFunction,
sinh, tanh, cosh, sech, coth)
from sympy.functions.elementary.miscellaneous import sqrt, Min, Max
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.elementary.trigonometric import (
TrigonometricFunction, acos, acot, acsc, asec, asin, atan, atan2,
cos, cot, csc, sec, sin, tan)
from sympy.functions.special.error_functions import (erf, erfc,
erfcinv, erfinv)
from sympy.logic.boolalg import And
from sympy.matrices.dense import MutableDenseMatrix as Matrix
from sympy.matrices.immutable import ImmutableDenseMatrix
from sympy.polys.polytools import Poly
from sympy.polys.rootoftools import CRootOf
from sympy.sets.contains import Contains
from sympy.sets.conditionset import ConditionSet
from sympy.sets.fancysets import ImageSet
from sympy.sets.sets import (Complement, EmptySet, FiniteSet,
Intersection, Interval, Union, imageset, ProductSet)
from sympy.simplify import simplify
from sympy.tensor.indexed import Indexed
from sympy.utilities.iterables import numbered_symbols
from sympy.testing.pytest import (XFAIL, raises, skip, slow, SKIP)
from sympy.testing.randtest import verify_numerically as tn
from sympy.physics.units import cm
from sympy.solvers.solveset import (
solveset_real, domain_check, solveset_complex, linear_eq_to_matrix,
linsolve, _is_function_class_equation, invert_real, invert_complex,
solveset, solve_decomposition, substitution, nonlinsolve, solvify,
_is_finite_with_finite_vars, _transolve, _is_exponential,
_solve_exponential, _is_logarithmic,
_solve_logarithm, _term_factors, _is_modular, _is_lambert, NonlinearError)
from sympy.abc import (a, b, c, d, e, f, g, h, i, j, k, l, m, n, q, r,
t, w, x, y, z)
def dumeq(i, j):
if type(i) in (list, tuple):
return all(dumeq(i, j) for i, j in zip(i, j))
return i == j or i.dummy_eq(j)
def test_invert_real():
x = Symbol('x', real=True)
def ireal(x, s=S.Reals):
return Intersection(s, x)
# issue 14223
assert invert_real(x, 0, x, Interval(1, 2)) == (x, S.EmptySet)
assert invert_real(exp(x), z, x) == (x, ireal(FiniteSet(log(z))))
y = Symbol('y', positive=True)
n = Symbol('n', real=True)
assert invert_real(x + 3, y, x) == (x, FiniteSet(y - 3))
assert invert_real(x*3, y, x) == (x, FiniteSet(y / 3))
assert invert_real(exp(x), y, x) == (x, FiniteSet(log(y)))
assert invert_real(exp(3*x), y, x) == (x, FiniteSet(log(y) / 3))
assert invert_real(exp(x + 3), y, x) == (x, FiniteSet(log(y) - 3))
assert invert_real(exp(x) + 3, y, x) == (x, ireal(FiniteSet(log(y - 3))))
assert invert_real(exp(x)*3, y, x) == (x, FiniteSet(log(y / 3)))
assert invert_real(log(x), y, x) == (x, FiniteSet(exp(y)))
assert invert_real(log(3*x), y, x) == (x, FiniteSet(exp(y) / 3))
assert invert_real(log(x + 3), y, x) == (x, FiniteSet(exp(y) - 3))
assert invert_real(Abs(x), y, x) == (x, FiniteSet(y, -y))
assert invert_real(2**x, y, x) == (x, FiniteSet(log(y)/log(2)))
assert invert_real(2**exp(x), y, x) == (x, ireal(FiniteSet(log(log(y)/log(2)))))
assert invert_real(x**2, y, x) == (x, FiniteSet(sqrt(y), -sqrt(y)))
assert invert_real(x**S.Half, y, x) == (x, FiniteSet(y**2))
raises(ValueError, lambda: invert_real(x, x, x))
raises(ValueError, lambda: invert_real(x**pi, y, x))
raises(ValueError, lambda: invert_real(S.One, y, x))
assert invert_real(x**31 + x, y, x) == (x**31 + x, FiniteSet(y))
lhs = x**31 + x
base_values = FiniteSet(y - 1, -y - 1)
assert invert_real(Abs(x**31 + x + 1), y, x) == (lhs, base_values)
assert dumeq(invert_real(sin(x), y, x),
(x, imageset(Lambda(n, n*pi + (-1)**n*asin(y)), S.Integers)))
assert dumeq(invert_real(sin(exp(x)), y, x),
(x, imageset(Lambda(n, log((-1)**n*asin(y) + n*pi)), S.Integers)))
assert dumeq(invert_real(csc(x), y, x),
(x, imageset(Lambda(n, n*pi + (-1)**n*acsc(y)), S.Integers)))
assert dumeq(invert_real(csc(exp(x)), y, x),
(x, imageset(Lambda(n, log((-1)**n*acsc(y) + n*pi)), S.Integers)))
assert dumeq(invert_real(cos(x), y, x),
(x, Union(imageset(Lambda(n, 2*n*pi + acos(y)), S.Integers), \
imageset(Lambda(n, 2*n*pi - acos(y)), S.Integers))))
assert dumeq(invert_real(cos(exp(x)), y, x),
(x, Union(imageset(Lambda(n, log(2*n*pi + acos(y))), S.Integers), \
imageset(Lambda(n, log(2*n*pi - acos(y))), S.Integers))))
assert dumeq(invert_real(sec(x), y, x),
(x, Union(imageset(Lambda(n, 2*n*pi + asec(y)), S.Integers), \
imageset(Lambda(n, 2*n*pi - asec(y)), S.Integers))))
assert dumeq(invert_real(sec(exp(x)), y, x),
(x, Union(imageset(Lambda(n, log(2*n*pi + asec(y))), S.Integers), \
imageset(Lambda(n, log(2*n*pi - asec(y))), S.Integers))))
assert dumeq(invert_real(tan(x), y, x),
(x, imageset(Lambda(n, n*pi + atan(y)), S.Integers)))
assert dumeq(invert_real(tan(exp(x)), y, x),
(x, imageset(Lambda(n, log(n*pi + atan(y))), S.Integers)))
assert dumeq(invert_real(cot(x), y, x),
(x, imageset(Lambda(n, n*pi + acot(y)), S.Integers)))
assert dumeq(invert_real(cot(exp(x)), y, x),
(x, imageset(Lambda(n, log(n*pi + acot(y))), S.Integers)))
assert dumeq(invert_real(tan(tan(x)), y, x),
(tan(x), imageset(Lambda(n, n*pi + atan(y)), S.Integers)))
x = Symbol('x', positive=True)
assert invert_real(x**pi, y, x) == (x, FiniteSet(y**(1/pi)))
def test_invert_complex():
assert invert_complex(x + 3, y, x) == (x, FiniteSet(y - 3))
assert invert_complex(x*3, y, x) == (x, FiniteSet(y / 3))
assert dumeq(invert_complex(exp(x), y, x),
(x, imageset(Lambda(n, I*(2*pi*n + arg(y)) + log(Abs(y))), S.Integers)))
assert invert_complex(log(x), y, x) == (x, FiniteSet(exp(y)))
raises(ValueError, lambda: invert_real(1, y, x))
raises(ValueError, lambda: invert_complex(x, x, x))
raises(ValueError, lambda: invert_complex(x, x, 1))
# https://github.com/skirpichev/omg/issues/16
assert invert_complex(sinh(x), 0, x) != (x, FiniteSet(0))
def test_domain_check():
assert domain_check(1/(1 + (1/(x+1))**2), x, -1) is False
assert domain_check(x**2, x, 0) is True
assert domain_check(x, x, oo) is False
assert domain_check(0, x, oo) is False
def test_issue_11536():
assert solveset(0**x - 100, x, S.Reals) == S.EmptySet
assert solveset(0**x - 1, x, S.Reals) == FiniteSet(0)
def test_issue_17479():
from sympy.solvers.solveset import nonlinsolve
f = (x**2 + y**2)**2 + (x**2 + z**2)**2 - 2*(2*x**2 + y**2 + z**2)
fx = f.diff(x)
fy = f.diff(y)
fz = f.diff(z)
sol = nonlinsolve([fx, fy, fz], [x, y, z])
assert len(sol) >= 4 and len(sol) <= 20
# nonlinsolve has been giving a varying number of solutions
# (originally 18, then 20, now 19) due to various internal changes.
# Unfortunately not all the solutions are actually valid and some are
# redundant. Since the original issue was that an exception was raised,
# this first test only checks that nonlinsolve returns a "plausible"
# solution set. The next test checks the result for correctness.
@XFAIL
def test_issue_18449():
x, y, z = symbols("x, y, z")
f = (x**2 + y**2)**2 + (x**2 + z**2)**2 - 2*(2*x**2 + y**2 + z**2)
fx = diff(f, x)
fy = diff(f, y)
fz = diff(f, z)
sol = nonlinsolve([fx, fy, fz], [x, y, z])
for (xs, ys, zs) in sol:
d = {x: xs, y: ys, z: zs}
assert tuple(_.subs(d).simplify() for _ in (fx, fy, fz)) == (0, 0, 0)
# After simplification and removal of duplicate elements, there should
# only be 4 parametric solutions left:
# simplifiedsolutions = FiniteSet((sqrt(1 - z**2), z, z),
# (-sqrt(1 - z**2), z, z),
# (sqrt(1 - z**2), -z, z),
# (-sqrt(1 - z**2), -z, z))
# TODO: Is the above solution set definitely complete?
def test_is_function_class_equation():
from sympy.abc import x, a
assert _is_function_class_equation(TrigonometricFunction,
tan(x), x) is True
assert _is_function_class_equation(TrigonometricFunction,
tan(x) - 1, x) is True
assert _is_function_class_equation(TrigonometricFunction,
tan(x) + sin(x), x) is True
assert _is_function_class_equation(TrigonometricFunction,
tan(x) + sin(x) - a, x) is True
assert _is_function_class_equation(TrigonometricFunction,
sin(x)*tan(x) + sin(x), x) is True
assert _is_function_class_equation(TrigonometricFunction,
sin(x)*tan(x + a) + sin(x), x) is True
assert _is_function_class_equation(TrigonometricFunction,
sin(x)*tan(x*a) + sin(x), x) is True
assert _is_function_class_equation(TrigonometricFunction,
a*tan(x) - 1, x) is True
assert _is_function_class_equation(TrigonometricFunction,
tan(x)**2 + sin(x) - 1, x) is True
assert _is_function_class_equation(TrigonometricFunction,
tan(x) + x, x) is False
assert _is_function_class_equation(TrigonometricFunction,
tan(x**2), x) is False
assert _is_function_class_equation(TrigonometricFunction,
tan(x**2) + sin(x), x) is False
assert _is_function_class_equation(TrigonometricFunction,
tan(x)**sin(x), x) is False
assert _is_function_class_equation(TrigonometricFunction,
tan(sin(x)) + sin(x), x) is False
assert _is_function_class_equation(HyperbolicFunction,
tanh(x), x) is True
assert _is_function_class_equation(HyperbolicFunction,
tanh(x) - 1, x) is True
assert _is_function_class_equation(HyperbolicFunction,
tanh(x) + sinh(x), x) is True
assert _is_function_class_equation(HyperbolicFunction,
tanh(x) + sinh(x) - a, x) is True
assert _is_function_class_equation(HyperbolicFunction,
sinh(x)*tanh(x) + sinh(x), x) is True
assert _is_function_class_equation(HyperbolicFunction,
sinh(x)*tanh(x + a) + sinh(x), x) is True
assert _is_function_class_equation(HyperbolicFunction,
sinh(x)*tanh(x*a) + sinh(x), x) is True
assert _is_function_class_equation(HyperbolicFunction,
a*tanh(x) - 1, x) is True
assert _is_function_class_equation(HyperbolicFunction,
tanh(x)**2 + sinh(x) - 1, x) is True
assert _is_function_class_equation(HyperbolicFunction,
tanh(x) + x, x) is False
assert _is_function_class_equation(HyperbolicFunction,
tanh(x**2), x) is False
assert _is_function_class_equation(HyperbolicFunction,
tanh(x**2) + sinh(x), x) is False
assert _is_function_class_equation(HyperbolicFunction,
tanh(x)**sinh(x), x) is False
assert _is_function_class_equation(HyperbolicFunction,
tanh(sinh(x)) + sinh(x), x) is False
def test_garbage_input():
raises(ValueError, lambda: solveset_real([y], y))
x = Symbol('x', real=True)
assert solveset_real(x, 1) == S.EmptySet
assert solveset_real(x - 1, 1) == FiniteSet(x)
assert solveset_real(x, pi) == S.EmptySet
assert solveset_real(x, x**2) == S.EmptySet
raises(ValueError, lambda: solveset_complex([x], x))
assert solveset_complex(x, pi) == S.EmptySet
raises(ValueError, lambda: solveset((x, y), x))
raises(ValueError, lambda: solveset(x + 1, S.Reals))
raises(ValueError, lambda: solveset(x + 1, x, 2))
def test_solve_mul():
assert solveset_real((a*x + b)*(exp(x) - 3), x) == \
Union({log(3)}, Intersection({-b/a}, S.Reals))
anz = Symbol('anz', nonzero=True)
bb = Symbol('bb', real=True)
assert solveset_real((anz*x + bb)*(exp(x) - 3), x) == \
FiniteSet(-bb/anz, log(3))
assert solveset_real((2*x + 8)*(8 + exp(x)), x) == FiniteSet(S(-4))
assert solveset_real(x/log(x), x) == EmptySet()
def test_solve_invert():
assert solveset_real(exp(x) - 3, x) == FiniteSet(log(3))
assert solveset_real(log(x) - 3, x) == FiniteSet(exp(3))
assert solveset_real(3**(x + 2), x) == FiniteSet()
assert solveset_real(3**(2 - x), x) == FiniteSet()
assert solveset_real(y - b*exp(a/x), x) == Intersection(
S.Reals, FiniteSet(a/log(y/b)))
# issue 4504
assert solveset_real(2**x - 10, x) == FiniteSet(1 + log(5)/log(2))
def test_errorinverses():
assert solveset_real(erf(x) - S.Half, x) == \
FiniteSet(erfinv(S.Half))
assert solveset_real(erfinv(x) - 2, x) == \
FiniteSet(erf(2))
assert solveset_real(erfc(x) - S.One, x) == \
FiniteSet(erfcinv(S.One))
assert solveset_real(erfcinv(x) - 2, x) == FiniteSet(erfc(2))
def test_solve_polynomial():
x = Symbol('x', real=True)
y = Symbol('y', real=True)
assert solveset_real(3*x - 2, x) == FiniteSet(Rational(2, 3))
assert solveset_real(x**2 - 1, x) == FiniteSet(-S.One, S.One)
assert solveset_real(x - y**3, x) == FiniteSet(y ** 3)
a11, a12, a21, a22, b1, b2 = symbols('a11, a12, a21, a22, b1, b2')
assert solveset_real(x**3 - 15*x - 4, x) == FiniteSet(
-2 + 3 ** S.Half,
S(4),
-2 - 3 ** S.Half)
assert solveset_real(sqrt(x) - 1, x) == FiniteSet(1)
assert solveset_real(sqrt(x) - 2, x) == FiniteSet(4)
assert solveset_real(x**Rational(1, 4) - 2, x) == FiniteSet(16)
assert solveset_real(x**Rational(1, 3) - 3, x) == FiniteSet(27)
assert len(solveset_real(x**5 + x**3 + 1, x)) == 1
assert len(solveset_real(-2*x**3 + 4*x**2 - 2*x + 6, x)) > 0
assert solveset_real(x**6 + x**4 + I, x) is S.EmptySet
def test_return_root_of():
f = x**5 - 15*x**3 - 5*x**2 + 10*x + 20
s = list(solveset_complex(f, x))
for root in s:
assert root.func == CRootOf
# if one uses solve to get the roots of a polynomial that has a CRootOf
# solution, make sure that the use of nfloat during the solve process
# doesn't fail. Note: if you want numerical solutions to a polynomial
# it is *much* faster to use nroots to get them than to solve the
# equation only to get CRootOf solutions which are then numerically
# evaluated. So for eq = x**5 + 3*x + 7 do Poly(eq).nroots() rather
# than [i.n() for i in solve(eq)] to get the numerical roots of eq.
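    # quick illustration of the note above (a sketch): nroots returns all
    # five numerical roots of the quintic directly, without going through
    # CRootOf
    assert len(Poly(x**5 + 3*x + 7).nroots()) == 5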
assert nfloat(list(solveset_complex(x**5 + 3*x**3 + 7, x))[0],
exponent=False) == CRootOf(x**5 + 3*x**3 + 7, 0).n()
sol = list(solveset_complex(x**6 - 2*x + 2, x))
assert all(isinstance(i, CRootOf) for i in sol) and len(sol) == 6
f = x**5 - 15*x**3 - 5*x**2 + 10*x + 20
s = list(solveset_complex(f, x))
for root in s:
assert root.func == CRootOf
s = x**5 + 4*x**3 + 3*x**2 + Rational(7, 4)
assert solveset_complex(s, x) == \
FiniteSet(*Poly(s*4, domain='ZZ').all_roots())
# Refer issue #7876
eq = x*(x - 1)**2*(x + 1)*(x**6 - x + 1)
assert solveset_complex(eq, x) == \
FiniteSet(-1, 0, 1, CRootOf(x**6 - x + 1, 0),
CRootOf(x**6 - x + 1, 1),
CRootOf(x**6 - x + 1, 2),
CRootOf(x**6 - x + 1, 3),
CRootOf(x**6 - x + 1, 4),
CRootOf(x**6 - x + 1, 5))
def test__has_rational_power():
from sympy.solvers.solveset import _has_rational_power
assert _has_rational_power(sqrt(2), x)[0] is False
assert _has_rational_power(x*sqrt(2), x)[0] is False
assert _has_rational_power(x**2*sqrt(x), x) == (True, 2)
assert _has_rational_power(sqrt(2)*x**Rational(1, 3), x) == (True, 3)
assert _has_rational_power(sqrt(x)*x**Rational(1, 3), x) == (True, 6)
def test_solveset_sqrt_1():
assert solveset_real(sqrt(5*x + 6) - 2 - x, x) == \
FiniteSet(-S.One, S(2))
assert solveset_real(sqrt(x - 1) - x + 7, x) == FiniteSet(10)
assert solveset_real(sqrt(x - 2) - 5, x) == FiniteSet(27)
assert solveset_real(sqrt(x) - 2 - 5, x) == FiniteSet(49)
assert solveset_real(sqrt(x**3), x) == FiniteSet(0)
assert solveset_real(sqrt(x - 1), x) == FiniteSet(1)
def test_solveset_sqrt_2():
x = Symbol('x', real=True)
y = Symbol('y', real=True)
# http://tutorial.math.lamar.edu/Classes/Alg/SolveRadicalEqns.aspx#Solve_Rad_Ex2_a
assert solveset_real(sqrt(2*x - 1) - sqrt(x - 4) - 2, x) == \
FiniteSet(S(5), S(13))
assert solveset_real(sqrt(x + 7) + 2 - sqrt(3 - x), x) == \
FiniteSet(-6)
# http://www.purplemath.com/modules/solverad.htm
assert solveset_real(sqrt(17*x - sqrt(x**2 - 5)) - 7, x) == \
FiniteSet(3)
eq = x + 1 - (x**4 + 4*x**3 - x)**Rational(1, 4)
assert solveset_real(eq, x) == FiniteSet(Rational(-1, 2), Rational(-1, 3))
eq = sqrt(2*x + 9) - sqrt(x + 1) - sqrt(x + 4)
assert solveset_real(eq, x) == FiniteSet(0)
eq = sqrt(x + 4) + sqrt(2*x - 1) - 3*sqrt(x - 1)
assert solveset_real(eq, x) == FiniteSet(5)
eq = sqrt(x)*sqrt(x - 7) - 12
assert solveset_real(eq, x) == FiniteSet(16)
eq = sqrt(x - 3) + sqrt(x) - 3
assert solveset_real(eq, x) == FiniteSet(4)
eq = sqrt(2*x**2 - 7) - (3 - x)
assert solveset_real(eq, x) == FiniteSet(-S(8), S(2))
# others
eq = sqrt(9*x**2 + 4) - (3*x + 2)
assert solveset_real(eq, x) == FiniteSet(0)
assert solveset_real(sqrt(x - 3) - sqrt(x) - 3, x) == FiniteSet()
eq = (2*x - 5)**Rational(1, 3) - 3
assert solveset_real(eq, x) == FiniteSet(16)
assert solveset_real(sqrt(x) + sqrt(sqrt(x)) - 4, x) == \
FiniteSet((Rational(-1, 2) + sqrt(17)/2)**4)
eq = sqrt(x) - sqrt(x - 1) + sqrt(sqrt(x))
assert solveset_real(eq, x) == FiniteSet()
eq = (sqrt(x) + sqrt(x + 1) + sqrt(1 - x) - 6*sqrt(5)/5)
ans = solveset_real(eq, x)
ra = S('''-1484/375 - 4*(-1/2 + sqrt(3)*I/2)*(-12459439/52734375 +
114*sqrt(12657)/78125)**(1/3) - 172564/(140625*(-1/2 +
sqrt(3)*I/2)*(-12459439/52734375 + 114*sqrt(12657)/78125)**(1/3))''')
rb = Rational(4, 5)
assert all(abs(eq.subs(x, i).n()) < 1e-10 for i in (ra, rb)) and \
len(ans) == 2 and \
{i.n(chop=True) for i in ans} == \
{i.n(chop=True) for i in (ra, rb)}
assert solveset_real(sqrt(x) + x**Rational(1, 3) +
x**Rational(1, 4), x) == FiniteSet(0)
assert solveset_real(x/sqrt(x**2 + 1), x) == FiniteSet(0)
eq = (x - y**3)/((y**2)*sqrt(1 - y**2))
assert solveset_real(eq, x) == FiniteSet(y**3)
# issue 4497
assert solveset_real(1/(5 + x)**Rational(1, 5) - 9, x) == \
FiniteSet(Rational(-295244, 59049))
@XFAIL
def test_solve_sqrt_fail():
# this only works if we check real_root(eq.subs(x, Rational(1, 3)))
# but checksol doesn't work like that
eq = (x**3 - 3*x**2)**Rational(1, 3) + 1 - x
assert solveset_real(eq, x) == FiniteSet(Rational(1, 3))
@slow
def test_solve_sqrt_3():
R = Symbol('R')
eq = sqrt(2)*R*sqrt(1/(R + 1)) + (R + 1)*(sqrt(2)*sqrt(1/(R + 1)) - 1)
sol = solveset_complex(eq, R)
fset = [Rational(5, 3) + 4*sqrt(10)*cos(atan(3*sqrt(111)/251)/3)/3,
-sqrt(10)*cos(atan(3*sqrt(111)/251)/3)/3 +
40*re(1/((Rational(-1, 2) - sqrt(3)*I/2)*(Rational(251, 27) + sqrt(111)*I/9)**Rational(1, 3)))/9 +
sqrt(30)*sin(atan(3*sqrt(111)/251)/3)/3 + Rational(5, 3) +
I*(-sqrt(30)*cos(atan(3*sqrt(111)/251)/3)/3 -
sqrt(10)*sin(atan(3*sqrt(111)/251)/3)/3 +
40*im(1/((Rational(-1, 2) - sqrt(3)*I/2)*(Rational(251, 27) + sqrt(111)*I/9)**Rational(1, 3)))/9)]
cset = [40*re(1/((Rational(-1, 2) + sqrt(3)*I/2)*(Rational(251, 27) + sqrt(111)*I/9)**Rational(1, 3)))/9 -
sqrt(10)*cos(atan(3*sqrt(111)/251)/3)/3 - sqrt(30)*sin(atan(3*sqrt(111)/251)/3)/3 +
Rational(5, 3) +
I*(40*im(1/((Rational(-1, 2) + sqrt(3)*I/2)*(Rational(251, 27) + sqrt(111)*I/9)**Rational(1, 3)))/9 -
sqrt(10)*sin(atan(3*sqrt(111)/251)/3)/3 +
sqrt(30)*cos(atan(3*sqrt(111)/251)/3)/3)]
assert sol._args[0] == FiniteSet(*fset)
assert sol._args[1] == ConditionSet(
R,
Eq(sqrt(2)*R*sqrt(1/(R + 1)) + (R + 1)*(sqrt(2)*sqrt(1/(R + 1)) - 1), 0),
FiniteSet(*cset))
# the number of real roots will depend on the value of m: for m=1 there are 4
# and for m=-1 there are none.
eq = -sqrt((m - q)**2 + (-m/(2*q) + S.Half)**2) + sqrt((-m**2/2 - sqrt(
4*m**4 - 4*m**2 + 8*m + 1)/4 - Rational(1, 4))**2 + (m**2/2 - m - sqrt(
4*m**4 - 4*m**2 + 8*m + 1)/4 - Rational(1, 4))**2)
unsolved_object = ConditionSet(q, Eq(sqrt((m - q)**2 + (-m/(2*q) + S.Half)**2) -
sqrt((-m**2/2 - sqrt(4*m**4 - 4*m**2 + 8*m + 1)/4 - Rational(1, 4))**2 + (m**2/2 - m -
sqrt(4*m**4 - 4*m**2 + 8*m + 1)/4 - Rational(1, 4))**2), 0), S.Reals)
assert solveset_real(eq, q) == unsolved_object
def test_solve_polynomial_symbolic_param():
assert solveset_complex((x**2 - 1)**2 - a, x) == \
FiniteSet(sqrt(1 + sqrt(a)), -sqrt(1 + sqrt(a)),
sqrt(1 - sqrt(a)), -sqrt(1 - sqrt(a)))
# issue 4507
assert solveset_complex(y - b/(1 + a*x), x) == \
FiniteSet((b/y - 1)/a) - FiniteSet(-1/a)
# issue 4508
assert solveset_complex(y - b*x/(a + x), x) == \
FiniteSet(-a*y/(y - b)) - FiniteSet(-a)
def test_solve_rational():
assert solveset_real(1/x + 1, x) == FiniteSet(-S.One)
assert solveset_real(1/exp(x) - 1, x) == FiniteSet(0)
assert solveset_real(x*(1 - 5/x), x) == FiniteSet(5)
assert solveset_real(2*x/(x + 2) - 1, x) == FiniteSet(2)
assert solveset_real((x**2/(7 - x)).diff(x), x) == \
FiniteSet(S.Zero, S(14))
def test_solveset_real_gen_is_pow():
assert solveset_real(sqrt(1) + 1, x) == EmptySet()
def test_no_sol():
assert solveset(1 - oo*x) == EmptySet()
assert solveset(oo*x, x) == EmptySet()
assert solveset(oo*x - oo, x) == EmptySet()
assert solveset_real(4, x) == EmptySet()
assert solveset_real(exp(x), x) == EmptySet()
assert solveset_real(x**2 + 1, x) == EmptySet()
assert solveset_real(-3*a/sqrt(x), x) == EmptySet()
assert solveset_real(1/x, x) == EmptySet()
assert solveset_real(-(1 + x)/(2 + x)**2 + 1/(2 + x), x) == \
EmptySet()
def test_sol_zero_real():
assert solveset_real(0, x) == S.Reals
assert solveset(0, x, Interval(1, 2)) == Interval(1, 2)
assert solveset_real(-x**2 - 2*x + (x + 1)**2 - 1, x) == S.Reals
def test_no_sol_rational_extragenous():
assert solveset_real((x/(x + 1) + 3)**(-2), x) == EmptySet()
assert solveset_real((x - 1)/(1 + 1/(x - 1)), x) == EmptySet()
def test_solve_polynomial_cv_1a():
"""
    Test solving equations that can be converted to a polynomial
    equation using the change of variable y -> x**Rational(p, q)
"""
assert solveset_real(sqrt(x) - 1, x) == FiniteSet(1)
assert solveset_real(sqrt(x) - 2, x) == FiniteSet(4)
assert solveset_real(x**Rational(1, 4) - 2, x) == FiniteSet(16)
assert solveset_real(x**Rational(1, 3) - 3, x) == FiniteSet(27)
assert solveset_real(x*(x**(S.One / 3) - 3), x) == \
FiniteSet(S.Zero, S(27))
def test_solveset_real_rational():
"""Test solveset_real for rational functions"""
x = Symbol('x', real=True)
y = Symbol('y', real=True)
assert solveset_real((x - y**3) / ((y**2)*sqrt(1 - y**2)), x) \
== FiniteSet(y**3)
# issue 4486
assert solveset_real(2*x/(x + 2) - 1, x) == FiniteSet(2)
def test_solveset_real_log():
assert solveset_real(log((x-1)*(x+1)), x) == \
FiniteSet(sqrt(2), -sqrt(2))
def test_poly_gens():
assert solveset_real(4**(2*(x**2) + 2*x) - 8, x) == \
FiniteSet(Rational(-3, 2), S.Half)
def test_solve_abs():
n = Dummy('n')
raises(ValueError, lambda: solveset(Abs(x) - 1, x))
assert solveset(Abs(x) - n, x, S.Reals).dummy_eq(
ConditionSet(x, Contains(n, Interval(0, oo)), {-n, n}))
assert solveset_real(Abs(x) - 2, x) == FiniteSet(-2, 2)
assert solveset_real(Abs(x) + 2, x) is S.EmptySet
assert solveset_real(Abs(x + 3) - 2*Abs(x - 3), x) == \
FiniteSet(1, 9)
assert solveset_real(2*Abs(x) - Abs(x - 1), x) == \
FiniteSet(-1, Rational(1, 3))
sol = ConditionSet(
x,
And(
Contains(b, Interval(0, oo)),
Contains(a + b, Interval(0, oo)),
Contains(a - b, Interval(0, oo))),
FiniteSet(-a - b - 3, -a + b - 3, a - b - 3, a + b - 3))
eq = Abs(Abs(x + 3) - a) - b
assert invert_real(eq, 0, x)[1] == sol
reps = {a: 3, b: 1}
eqab = eq.subs(reps)
for si in sol.subs(reps):
assert not eqab.subs(x, si)
assert dumeq(solveset(Eq(sin(Abs(x)), 1), x, domain=S.Reals), Union(
Intersection(Interval(0, oo),
ImageSet(Lambda(n, (-1)**n*pi/2 + n*pi), S.Integers)),
Intersection(Interval(-oo, 0),
ImageSet(Lambda(n, n*pi - (-1)**(-n)*pi/2), S.Integers))))
def test_issue_9824():
assert dumeq(solveset(sin(x)**2 - 2*sin(x) + 1, x), ImageSet(Lambda(n, 2*n*pi + pi/2), S.Integers))
assert dumeq(solveset(cos(x)**2 - 2*cos(x) + 1, x), ImageSet(Lambda(n, 2*n*pi), S.Integers))
def test_issue_9565():
assert solveset_real(Abs((x - 1)/(x - 5)) <= Rational(1, 3), x) == Interval(-1, 2)
def test_issue_10069():
eq = abs(1/(x - 1)) - 1 > 0
assert solveset_real(eq, x) == Union(
Interval.open(0, 1), Interval.open(1, 2))
def test_real_imag_splitting():
a, b = symbols('a b', real=True)
assert solveset_real(sqrt(a**2 - b**2) - 3, a) == \
FiniteSet(-sqrt(b**2 + 9), sqrt(b**2 + 9))
assert solveset_real(sqrt(a**2 + b**2) - 3, a) != \
S.EmptySet
def test_units():
assert solveset_real(1/x - 1/(2*cm), x) == FiniteSet(2*cm)
def test_solve_only_exp_1():
y = Symbol('y', positive=True)
assert solveset_real(exp(x) - y, x) == FiniteSet(log(y))
assert solveset_real(exp(x) + exp(-x) - 4, x) == \
FiniteSet(log(-sqrt(3) + 2), log(sqrt(3) + 2))
assert solveset_real(exp(x) + exp(-x) - y, x) != S.EmptySet
def test_atan2():
# The .inverse() method on atan2 works only if x.is_real is True and the
# second argument is a real constant
assert solveset_real(atan2(x, 2) - pi/3, x) == FiniteSet(2*sqrt(3))
def test_piecewise_solveset():
eq = Piecewise((x - 2, Gt(x, 2)), (2 - x, True)) - 3
assert set(solveset_real(eq, x)) == set(FiniteSet(-1, 5))
absxm3 = Piecewise(
(x - 3, 0 <= x - 3),
(3 - x, 0 > x - 3))
y = Symbol('y', positive=True)
assert solveset_real(absxm3 - y, x) == FiniteSet(-y + 3, y + 3)
f = Piecewise(((x - 2)**2, x >= 0), (0, True))
assert solveset(f, x, domain=S.Reals) == Union(FiniteSet(2), Interval(-oo, 0, True, True))
assert solveset(
Piecewise((x + 1, x > 0), (I, True)) - I, x, S.Reals
) == Interval(-oo, 0)
assert solveset(Piecewise((x - 1, Ne(x, I)), (x, True)), x) == FiniteSet(1)
# issue 19718
g = Piecewise((1, x > 10), (0, True))
assert solveset(g > 0, x, S.Reals) == Interval.open(10, oo)
from sympy.logic.boolalg import BooleanTrue
f = BooleanTrue()
assert solveset(f, x, domain=Interval(-3, 10)) == Interval(-3, 10)
# issue 20552
f = Piecewise((0, Eq(x, 0)), (x**2/Abs(x), True))
g = Piecewise((0, Eq(x, pi)), ((x - pi)/sin(x), True))
assert solveset(f, x, domain=S.Reals) == FiniteSet(0)
assert solveset(g) == FiniteSet(pi)
def test_solveset_complex_polynomial():
assert solveset_complex(a*x**2 + b*x + c, x) == \
FiniteSet(-b/(2*a) - sqrt(-4*a*c + b**2)/(2*a),
-b/(2*a) + sqrt(-4*a*c + b**2)/(2*a))
assert solveset_complex(x - y**3, y) == FiniteSet(
(-x**Rational(1, 3))/2 + I*sqrt(3)*x**Rational(1, 3)/2,
x**Rational(1, 3),
(-x**Rational(1, 3))/2 - I*sqrt(3)*x**Rational(1, 3)/2)
assert solveset_complex(x + 1/x - 1, x) == \
FiniteSet(S.Half + I*sqrt(3)/2, S.Half - I*sqrt(3)/2)
def test_sol_zero_complex():
assert solveset_complex(0, x) == S.Complexes
def test_solveset_complex_rational():
assert solveset_complex((x - 1)*(x - I)/(x - 3), x) == \
FiniteSet(1, I)
assert solveset_complex((x - y**3)/((y**2)*sqrt(1 - y**2)), x) == \
FiniteSet(y**3)
assert solveset_complex(-x**2 - I, x) == \
FiniteSet(-sqrt(2)/2 + sqrt(2)*I/2, sqrt(2)/2 - sqrt(2)*I/2)
def test_solve_quintics():
skip("This test is too slow")
f = x**5 - 110*x**3 - 55*x**2 + 2310*x + 979
s = solveset_complex(f, x)
for root in s:
res = f.subs(x, root.n()).n()
assert tn(res, 0)
f = x**5 + 15*x + 12
s = solveset_complex(f, x)
for root in s:
res = f.subs(x, root.n()).n()
assert tn(res, 0)
def test_solveset_complex_exp():
from sympy.abc import x, n
assert dumeq(solveset_complex(exp(x) - 1, x),
imageset(Lambda(n, I*2*n*pi), S.Integers))
assert dumeq(solveset_complex(exp(x) - I, x),
imageset(Lambda(n, I*(2*n*pi + pi/2)), S.Integers))
assert solveset_complex(1/exp(x), x) == S.EmptySet
assert dumeq(solveset_complex(sinh(x).rewrite(exp), x),
imageset(Lambda(n, n*pi*I), S.Integers))
def test_solveset_real_exp():
from sympy.abc import x, y
assert solveset(Eq((-2)**x, 4), x, S.Reals) == FiniteSet(2)
assert solveset(Eq(-2**x, 4), x, S.Reals) == S.EmptySet
assert solveset(Eq((-3)**x, 27), x, S.Reals) == S.EmptySet
assert solveset(Eq((-5)**(x+1), 625), x, S.Reals) == FiniteSet(3)
assert solveset(Eq(2**(x-3), -16), x, S.Reals) == S.EmptySet
assert solveset(Eq((-3)**(x - 3), -3**39), x, S.Reals) == FiniteSet(42)
assert solveset(Eq(2**x, y), x, S.Reals) == Intersection(S.Reals, FiniteSet(log(y)/log(2)))
assert invert_real((-2)**(2*x) - 16, 0, x) == (x, FiniteSet(2))
def test_solve_complex_log():
assert solveset_complex(log(x), x) == FiniteSet(1)
assert solveset_complex(1 - log(a + 4*x**2), x) == \
FiniteSet(-sqrt(-a + E)/2, sqrt(-a + E)/2)
def test_solve_complex_sqrt():
assert solveset_complex(sqrt(5*x + 6) - 2 - x, x) == \
FiniteSet(-S.One, S(2))
assert solveset_complex(sqrt(5*x + 6) - (2 + 2*I) - x, x) == \
FiniteSet(-S(2), 3 - 4*I)
assert solveset_complex(4*x*(1 - a * sqrt(x)), x) == \
FiniteSet(S.Zero, 1 / a ** 2)
def test_solveset_complex_tan():
s = solveset_complex(tan(x).rewrite(exp), x)
assert dumeq(s, imageset(Lambda(n, pi*n), S.Integers) - \
imageset(Lambda(n, pi*n + pi/2), S.Integers))
def test_solve_trig():
from sympy.abc import n
assert dumeq(solveset_real(sin(x), x),
Union(imageset(Lambda(n, 2*pi*n), S.Integers),
imageset(Lambda(n, 2*pi*n + pi), S.Integers)))
assert dumeq(solveset_real(sin(x) - 1, x),
imageset(Lambda(n, 2*pi*n + pi/2), S.Integers))
assert dumeq(solveset_real(cos(x), x),
Union(imageset(Lambda(n, 2*pi*n + pi/2), S.Integers),
imageset(Lambda(n, 2*pi*n + pi*Rational(3, 2)), S.Integers)))
assert dumeq(solveset_real(sin(x) + cos(x), x),
Union(imageset(Lambda(n, 2*n*pi + pi*Rational(3, 4)), S.Integers),
imageset(Lambda(n, 2*n*pi + pi*Rational(7, 4)), S.Integers)))
assert solveset_real(sin(x)**2 + cos(x)**2, x) == S.EmptySet
assert dumeq(solveset_complex(cos(x) - S.Half, x),
Union(imageset(Lambda(n, 2*n*pi + pi*Rational(5, 3)), S.Integers),
imageset(Lambda(n, 2*n*pi + pi/3), S.Integers)))
assert dumeq(solveset(sin(y + a) - sin(y), a, domain=S.Reals),
Union(ImageSet(Lambda(n, 2*n*pi), S.Integers),
Intersection(ImageSet(Lambda(n, -I*(I*(
2*n*pi + arg(-exp(-2*I*y))) +
2*im(y))), S.Integers), S.Reals)))
assert dumeq(solveset_real(sin(2*x)*cos(x) + cos(2*x)*sin(x)-1, x),
ImageSet(Lambda(n, n*pi*Rational(2, 3) + pi/6), S.Integers))
assert dumeq(solveset_real(2*tan(x)*sin(x) + 1, x), Union(
ImageSet(Lambda(n, 2*n*pi + atan(sqrt(2)*sqrt(-1 + sqrt(17))/
(1 - sqrt(17))) + pi), S.Integers),
ImageSet(Lambda(n, 2*n*pi - atan(sqrt(2)*sqrt(-1 + sqrt(17))/
(1 - sqrt(17))) + pi), S.Integers)))
assert dumeq(solveset_real(cos(2*x)*cos(4*x) - 1, x),
ImageSet(Lambda(n, n*pi), S.Integers))
assert dumeq(solveset(sin(x/10) + Rational(3, 4)), Union(
ImageSet(Lambda(n, 20*n*pi + 10*atan(3*sqrt(7)/7) + 10*pi), S.Integers),
ImageSet(Lambda(n, 20*n*pi - 10*atan(3*sqrt(7)/7) + 20*pi), S.Integers)))
assert dumeq(solveset(cos(x/15) + cos(x/5)), Union(
ImageSet(Lambda(n, 30*n*pi + 15*pi/2), S.Integers),
ImageSet(Lambda(n, 30*n*pi + 45*pi/2), S.Integers),
ImageSet(Lambda(n, 30*n*pi + 75*pi/4), S.Integers),
ImageSet(Lambda(n, 30*n*pi + 45*pi/4), S.Integers),
ImageSet(Lambda(n, 30*n*pi + 105*pi/4), S.Integers),
ImageSet(Lambda(n, 30*n*pi + 15*pi/4), S.Integers)))
assert dumeq(solveset(sec(sqrt(2)*x/3) + 5), Union(
ImageSet(Lambda(n, 3*sqrt(2)*(2*n*pi - pi + atan(2*sqrt(6)))/2), S.Integers),
ImageSet(Lambda(n, 3*sqrt(2)*(2*n*pi - atan(2*sqrt(6)) + pi)/2), S.Integers)))
assert dumeq(simplify(solveset(tan(pi*x) - cot(pi/2*x))), Union(
ImageSet(Lambda(n, 4*n + 1), S.Integers),
ImageSet(Lambda(n, 4*n + 3), S.Integers),
ImageSet(Lambda(n, 4*n + Rational(7, 3)), S.Integers),
ImageSet(Lambda(n, 4*n + Rational(5, 3)), S.Integers),
ImageSet(Lambda(n, 4*n + Rational(11, 3)), S.Integers),
ImageSet(Lambda(n, 4*n + Rational(1, 3)), S.Integers)))
assert dumeq(solveset(cos(9*x)), Union(
ImageSet(Lambda(n, 2*n*pi/9 + pi/18), S.Integers),
ImageSet(Lambda(n, 2*n*pi/9 + pi/6), S.Integers)))
assert dumeq(solveset(sin(8*x) + cot(12*x), x, S.Reals), Union(
ImageSet(Lambda(n, n*pi/2 + pi/8), S.Integers),
ImageSet(Lambda(n, n*pi/2 + 3*pi/8), S.Integers),
ImageSet(Lambda(n, n*pi/2 + 5*pi/16), S.Integers),
ImageSet(Lambda(n, n*pi/2 + 3*pi/16), S.Integers),
ImageSet(Lambda(n, n*pi/2 + 7*pi/16), S.Integers),
ImageSet(Lambda(n, n*pi/2 + pi/16), S.Integers)))
# This is the only remaining solveset test that actually ends up being solved
# by _solve_trig2(). All others are handled by the improved _solve_trig1.
assert dumeq(solveset_real(2*cos(x)*cos(2*x) - 1, x),
Union(ImageSet(Lambda(n, 2*n*pi + 2*atan(sqrt(-2*2**Rational(1, 3)*(67 +
9*sqrt(57))**Rational(2, 3) + 8*2**Rational(2, 3) + 11*(67 +
9*sqrt(57))**Rational(1, 3))/(3*(67 + 9*sqrt(57))**Rational(1, 6)))), S.Integers),
ImageSet(Lambda(n, 2*n*pi - 2*atan(sqrt(-2*2**Rational(1, 3)*(67 +
9*sqrt(57))**Rational(2, 3) + 8*2**Rational(2, 3) + 11*(67 +
9*sqrt(57))**Rational(1, 3))/(3*(67 + 9*sqrt(57))**Rational(1, 6))) +
2*pi), S.Integers)))
# issue #16870
assert dumeq(simplify(solveset(sin(x/180*pi) - S.Half, x, S.Reals)), Union(
ImageSet(Lambda(n, 360*n + 150), S.Integers),
ImageSet(Lambda(n, 360*n + 30), S.Integers)))
def test_solve_hyperbolic():
# actual solver: _solve_trig1
n = Dummy('n')
assert solveset(sinh(x) + cosh(x), x) == S.EmptySet
assert solveset(sinh(x) + cos(x), x) == ConditionSet(x,
Eq(cos(x) + sinh(x), 0), S.Complexes)
assert solveset_real(sinh(x) + sech(x), x) == FiniteSet(
log(sqrt(sqrt(5) - 2)))
assert solveset_real(3*cosh(2*x) - 5, x) == FiniteSet(
-log(3)/2, log(3)/2)
assert solveset_real(sinh(x - 3) - 2, x) == FiniteSet(
log((2 + sqrt(5))*exp(3)))
assert solveset_real(cosh(2*x) + 2*sinh(x) - 5, x) == FiniteSet(
log(-2 + sqrt(5)), log(1 + sqrt(2)))
assert solveset_real((coth(x) + sinh(2*x))/cosh(x) - 3, x) == FiniteSet(
log(S.Half + sqrt(5)/2), log(1 + sqrt(2)))
assert solveset_real(cosh(x)*sinh(x) - 2, x) == FiniteSet(
log(4 + sqrt(17))/2)
assert solveset_real(sinh(x) + tanh(x) - 1, x) == FiniteSet(
log(sqrt(2)/2 + sqrt(-S(1)/2 + sqrt(2))))
assert dumeq(solveset_complex(sinh(x) - I/2, x), Union(
ImageSet(Lambda(n, I*(2*n*pi + 5*pi/6)), S.Integers),
ImageSet(Lambda(n, I*(2*n*pi + pi/6)), S.Integers)))
assert dumeq(solveset_complex(sinh(x) + sech(x), x), Union(
ImageSet(Lambda(n, 2*n*I*pi + log(sqrt(-2 + sqrt(5)))), S.Integers),
ImageSet(Lambda(n, I*(2*n*pi + pi/2) + log(sqrt(2 + sqrt(5)))), S.Integers),
ImageSet(Lambda(n, I*(2*n*pi + pi) + log(sqrt(-2 + sqrt(5)))), S.Integers),
ImageSet(Lambda(n, I*(2*n*pi - pi/2) + log(sqrt(2 + sqrt(5)))), S.Integers)))
assert dumeq(solveset(sinh(x/10) + Rational(3, 4)), Union(
ImageSet(Lambda(n, 10*I*(2*n*pi + pi) + 10*log(2)), S.Integers),
ImageSet(Lambda(n, 20*n*I*pi - 10*log(2)), S.Integers)))
assert dumeq(solveset(cosh(x/15) + cosh(x/5)), Union(
ImageSet(Lambda(n, 15*I*(2*n*pi + pi/2)), S.Integers),
ImageSet(Lambda(n, 15*I*(2*n*pi - pi/2)), S.Integers),
ImageSet(Lambda(n, 15*I*(2*n*pi - 3*pi/4)), S.Integers),
ImageSet(Lambda(n, 15*I*(2*n*pi + 3*pi/4)), S.Integers),
ImageSet(Lambda(n, 15*I*(2*n*pi - pi/4)), S.Integers),
ImageSet(Lambda(n, 15*I*(2*n*pi + pi/4)), S.Integers)))
assert dumeq(solveset(sech(sqrt(2)*x/3) + 5), Union(
ImageSet(Lambda(n, 3*sqrt(2)*I*(2*n*pi - pi + atan(2*sqrt(6)))/2), S.Integers),
ImageSet(Lambda(n, 3*sqrt(2)*I*(2*n*pi - atan(2*sqrt(6)) + pi)/2), S.Integers)))
assert dumeq(solveset(tanh(pi*x) - coth(pi/2*x)), Union(
ImageSet(Lambda(n, 2*I*(2*n*pi + pi/2)/pi), S.Integers),
ImageSet(Lambda(n, 2*I*(2*n*pi - pi/2)/pi), S.Integers)))
assert dumeq(solveset(cosh(9*x)), Union(
ImageSet(Lambda(n, I*(2*n*pi + pi/2)/9), S.Integers),
ImageSet(Lambda(n, I*(2*n*pi - pi/2)/9), S.Integers)))
# issues #9606 / #9531:
assert solveset(sinh(x), x, S.Reals) == FiniteSet(0)
assert dumeq(solveset(sinh(x), x, S.Complexes), Union(
ImageSet(Lambda(n, I*(2*n*pi + pi)), S.Integers),
ImageSet(Lambda(n, 2*n*I*pi), S.Integers)))
# issues #11218 / #18427
assert dumeq(solveset(sin(pi*x), x, S.Reals), Union(
ImageSet(Lambda(n, (2*n*pi + pi)/pi), S.Integers),
ImageSet(Lambda(n, 2*n), S.Integers)))
assert dumeq(solveset(sin(pi*x), x), Union(
ImageSet(Lambda(n, (2*n*pi + pi)/pi), S.Integers),
ImageSet(Lambda(n, 2*n), S.Integers)))
# issue #17543
assert dumeq(simplify(solveset(I*cot(8*x - 8*E), x)), Union(
ImageSet(Lambda(n, n*pi/4 - 13*pi/16 + E), S.Integers),
ImageSet(Lambda(n, n*pi/4 - 11*pi/16 + E), S.Integers)))
# issues #18490 / #19489
assert solveset(cosh(x) + cosh(3*x) - cosh(5*x), x, S.Reals
).dummy_eq(ConditionSet(x,
Eq(cosh(x) + cosh(3*x) - cosh(5*x), 0), S.Reals))
assert solveset(sinh(8*x) + coth(12*x)).dummy_eq(
ConditionSet(x, Eq(sinh(8*x) + coth(12*x), 0), S.Complexes))
def test_solve_trig_hyp_symbolic():
# actual solver: _solve_trig1
assert dumeq(solveset(sin(a*x), x), ConditionSet(x, Ne(a, 0), Union(
ImageSet(Lambda(n, (2*n*pi + pi)/a), S.Integers),
ImageSet(Lambda(n, 2*n*pi/a), S.Integers))))
assert dumeq(solveset(cosh(x/a), x), ConditionSet(x, Ne(a, 0), Union(
ImageSet(Lambda(n, I*a*(2*n*pi + pi/2)), S.Integers),
ImageSet(Lambda(n, I*a*(2*n*pi - pi/2)), S.Integers))))
assert dumeq(solveset(sin(2*sqrt(3)/3*a**2/(b*pi)*x)
+ cos(4*sqrt(3)/3*a**2/(b*pi)*x), x),
ConditionSet(x, Ne(b, 0) & Ne(a**2, 0), Union(
ImageSet(Lambda(n, sqrt(3)*pi*b*(2*n*pi + pi/2)/(2*a**2)), S.Integers),
ImageSet(Lambda(n, sqrt(3)*pi*b*(2*n*pi - 5*pi/6)/(2*a**2)), S.Integers),
ImageSet(Lambda(n, sqrt(3)*pi*b*(2*n*pi - pi/6)/(2*a**2)), S.Integers))))
assert dumeq(simplify(solveset(cot((1 + I)*x) - cot((3 + 3*I)*x), x)), Union(
ImageSet(Lambda(n, pi*(1 - I)*(4*n + 1)/4), S.Integers),
ImageSet(Lambda(n, pi*(1 - I)*(4*n - 1)/4), S.Integers)))
assert dumeq(solveset(cosh((a**2 + 1)*x) - 3, x),
ConditionSet(x, Ne(a**2 + 1, 0), Union(
ImageSet(Lambda(n, (2*n*I*pi + log(3 - 2*sqrt(2)))/(a**2 + 1)), S.Integers),
ImageSet(Lambda(n, (2*n*I*pi + log(2*sqrt(2) + 3))/(a**2 + 1)), S.Integers))))
ar = Symbol('ar', real=True)
assert solveset(cosh((ar**2 + 1)*x) - 2, x, S.Reals) == FiniteSet(
log(sqrt(3) + 2)/(ar**2 + 1), log(2 - sqrt(3))/(ar**2 + 1))
def test_issue_9616():
assert dumeq(solveset(sinh(x) + tanh(x) - 1, x), Union(
ImageSet(Lambda(n, 2*n*I*pi + log(sqrt(2)/2 + sqrt(-S.Half + sqrt(2)))), S.Integers),
ImageSet(Lambda(n, I*(2*n*pi - atan(sqrt(2)*sqrt(S.Half + sqrt(2))) + pi)
+ log(sqrt(1 + sqrt(2)))), S.Integers),
ImageSet(Lambda(n, I*(2*n*pi + pi) + log(-sqrt(2)/2 + sqrt(-S.Half + sqrt(2)))), S.Integers),
ImageSet(Lambda(n, I*(2*n*pi - pi + atan(sqrt(2)*sqrt(S.Half + sqrt(2))))
+ log(sqrt(1 + sqrt(2)))), S.Integers)))
f1 = (sinh(x)).rewrite(exp)
f2 = (tanh(x)).rewrite(exp)
assert dumeq(solveset(f1 + f2 - 1, x), Union(
Complement(ImageSet(
Lambda(n, I*(2*n*pi + pi) + log(-sqrt(2)/2 + sqrt(-S.Half + sqrt(2)))), S.Integers),
ImageSet(Lambda(n, I*(2*n*pi + pi)/2), S.Integers)),
Complement(ImageSet(Lambda(n, I*(2*n*pi - pi + atan(sqrt(2)*sqrt(S.Half + sqrt(2))))
+ log(sqrt(1 + sqrt(2)))), S.Integers),
ImageSet(Lambda(n, I*(2*n*pi + pi)/2), S.Integers)),
Complement(ImageSet(Lambda(n, I*(2*n*pi - atan(sqrt(2)*sqrt(S.Half + sqrt(2))) + pi)
+ log(sqrt(1 + sqrt(2)))), S.Integers),
ImageSet(Lambda(n, I*(2*n*pi + pi)/2), S.Integers)),
Complement(
ImageSet(Lambda(n, 2*n*I*pi + log(sqrt(2)/2 + sqrt(-S.Half + sqrt(2)))), S.Integers),
ImageSet(Lambda(n, I*(2*n*pi + pi)/2), S.Integers))))
def test_solve_invalid_sol():
assert 0 not in solveset_real(sin(x)/x, x)
assert 0 not in solveset_complex((exp(x) - 1)/x, x)
@XFAIL
def test_solve_trig_simplified():
from sympy.abc import n
assert dumeq(solveset_real(sin(x), x),
imageset(Lambda(n, n*pi), S.Integers))
assert dumeq(solveset_real(cos(x), x),
imageset(Lambda(n, n*pi + pi/2), S.Integers))
assert dumeq(solveset_real(cos(x) + sin(x), x),
imageset(Lambda(n, n*pi - pi/4), S.Integers))
def test_solveset():
f = Function('f')
raises(ValueError, lambda: solveset(x + y))
assert solveset(x, 1) == S.EmptySet
assert solveset(f(1)**2 + y + 1, f(1)
) == FiniteSet(-sqrt(-y - 1), sqrt(-y - 1))
assert solveset(f(1)**2 - 1, f(1), S.Reals) == FiniteSet(-1, 1)
assert solveset(f(1)**2 + 1, f(1)) == FiniteSet(-I, I)
assert solveset(x - 1, 1) == FiniteSet(x)
assert solveset(sin(x) - cos(x), sin(x)) == FiniteSet(cos(x))
assert solveset(0, domain=S.Reals) == S.Reals
assert solveset(1) == S.EmptySet
assert solveset(True, domain=S.Reals) == S.Reals # issue 10197
assert solveset(False, domain=S.Reals) == S.EmptySet
assert solveset(exp(x) - 1, domain=S.Reals) == FiniteSet(0)
assert solveset(exp(x) - 1, x, S.Reals) == FiniteSet(0)
assert solveset(Eq(exp(x), 1), x, S.Reals) == FiniteSet(0)
assert solveset(exp(x) - 1, exp(x), S.Reals) == FiniteSet(1)
A = Indexed('A', x)
assert solveset(A - 1, A, S.Reals) == FiniteSet(1)
assert solveset(x - 1 >= 0, x, S.Reals) == Interval(1, oo)
assert solveset(exp(x) - 1 >= 0, x, S.Reals) == Interval(0, oo)
assert dumeq(solveset(exp(x) - 1, x), imageset(Lambda(n, 2*I*pi*n), S.Integers))
assert dumeq(solveset(Eq(exp(x), 1), x), imageset(Lambda(n, 2*I*pi*n),
S.Integers))
# issue 13825
assert solveset(x**2 + f(0) + 1, x) == {-sqrt(-f(0) - 1), sqrt(-f(0) - 1)}
# issue 19977
assert solveset(atan(log(x)) > 0, x, domain=Interval.open(0, oo)) == Interval.open(1, oo)
def test__solveset_multi():
from sympy.solvers.solveset import _solveset_multi
from sympy import Reals
# Basic univariate case:
from sympy.abc import x
assert _solveset_multi([x**2-1], [x], [S.Reals]) == FiniteSet((1,), (-1,))
# Linear systems of two equations
from sympy.abc import x, y
assert _solveset_multi([x+y, x+1], [x, y], [Reals, Reals]) == FiniteSet((-1, 1))
assert _solveset_multi([x+y, x+1], [y, x], [Reals, Reals]) == FiniteSet((1, -1))
assert _solveset_multi([x+y, x-y-1], [x, y], [Reals, Reals]) == FiniteSet((S(1)/2, -S(1)/2))
assert _solveset_multi([x-1, y-2], [x, y], [Reals, Reals]) == FiniteSet((1, 2))
# assert dumeq(_solveset_multi([x+y], [x, y], [Reals, Reals]), ImageSet(Lambda(x, (x, -x)), Reals))
assert dumeq(_solveset_multi([x+y], [x, y], [Reals, Reals]), Union(
ImageSet(Lambda(((x,),), (x, -x)), ProductSet(Reals)),
ImageSet(Lambda(((y,),), (-y, y)), ProductSet(Reals))))
assert _solveset_multi([x+y, x+y+1], [x, y], [Reals, Reals]) == S.EmptySet
assert _solveset_multi([x+y, x-y, x-1], [x, y], [Reals, Reals]) == S.EmptySet
assert _solveset_multi([x+y, x-y, x-1], [y, x], [Reals, Reals]) == S.EmptySet
# Systems of three equations:
from sympy.abc import x, y, z
assert _solveset_multi([x+y+z-1, x+y-z-2, x-y-z-3], [x, y, z], [Reals,
Reals, Reals]) == FiniteSet((2, -S.Half, -S.Half))
# Nonlinear systems:
from sympy.abc import r, theta, z, x, y
assert _solveset_multi([x**2+y**2-2, x+y], [x, y], [Reals, Reals]) == FiniteSet((-1, 1), (1, -1))
assert _solveset_multi([x**2-1, y], [x, y], [Reals, Reals]) == FiniteSet((1, 0), (-1, 0))
#assert _solveset_multi([x**2-y**2], [x, y], [Reals, Reals]) == Union(
# ImageSet(Lambda(x, (x, -x)), Reals), ImageSet(Lambda(x, (x, x)), Reals))
assert dumeq(_solveset_multi([x**2-y**2], [x, y], [Reals, Reals]), Union(
ImageSet(Lambda(((x,),), (x, -Abs(x))), ProductSet(Reals)),
ImageSet(Lambda(((x,),), (x, Abs(x))), ProductSet(Reals)),
ImageSet(Lambda(((y,),), (-Abs(y), y)), ProductSet(Reals)),
ImageSet(Lambda(((y,),), (Abs(y), y)), ProductSet(Reals))))
assert _solveset_multi([r*cos(theta)-1, r*sin(theta)], [theta, r],
[Interval(0, pi), Interval(-1, 1)]) == FiniteSet((0, 1), (pi, -1))
assert _solveset_multi([r*cos(theta)-1, r*sin(theta)], [r, theta],
[Interval(0, 1), Interval(0, pi)]) == FiniteSet((1, 0))
#assert _solveset_multi([r*cos(theta)-r, r*sin(theta)], [r, theta],
# [Interval(0, 1), Interval(0, pi)]) == ?
assert dumeq(_solveset_multi([r*cos(theta)-r, r*sin(theta)], [r, theta],
[Interval(0, 1), Interval(0, pi)]), Union(
ImageSet(Lambda(((r,),), (r, 0)), ImageSet(Lambda(r, (r,)), Interval(0, 1))),
ImageSet(Lambda(((theta,),), (0, theta)), ImageSet(Lambda(theta, (theta,)), Interval(0, pi)))))
def test_conditionset():
assert solveset(Eq(sin(x)**2 + cos(x)**2, 1), x, domain=S.Reals
) is S.Reals
assert solveset(Eq(x**2 + x*sin(x), 1), x, domain=S.Reals
).dummy_eq(ConditionSet(x, Eq(x**2 + x*sin(x) - 1, 0), S.Reals))
assert dumeq(solveset(Eq(-I*(exp(I*x) - exp(-I*x))/2, 1), x
), imageset(Lambda(n, 2*n*pi + pi/2), S.Integers))
assert solveset(x + sin(x) > 1, x, domain=S.Reals
).dummy_eq(ConditionSet(x, x + sin(x) > 1, S.Reals))
assert solveset(Eq(sin(Abs(x)), x), x, domain=S.Reals
).dummy_eq(ConditionSet(x, Eq(-x + sin(Abs(x)), 0), S.Reals))
assert solveset(y**x-z, x, S.Reals
).dummy_eq(FiniteSet(log(z)/log(y)))
@XFAIL
def test_conditionset_equality():
''' Checking equality of different representations of ConditionSet'''
assert solveset(Eq(tan(x), y), x) == ConditionSet(x, Eq(tan(x), y), S.Complexes)
def test_solveset_domain():
assert solveset(x**2 - x - 6, x, Interval(0, oo)) == FiniteSet(3)
assert solveset(x**2 - 1, x, Interval(0, oo)) == FiniteSet(1)
assert solveset(x**4 - 16, x, Interval(0, 10)) == FiniteSet(2)
def test_improve_coverage():
from sympy.solvers.solveset import _has_rational_power
solution = solveset(exp(x) + sin(x), x, S.Reals)
unsolved_object = ConditionSet(x, Eq(exp(x) + sin(x), 0), S.Reals)
assert solution.dummy_eq(unsolved_object)
assert _has_rational_power(sin(x)*exp(x) + 1, x) == (False, S.One)
assert _has_rational_power((sin(x)**2)*(exp(x) + 1)**3, x) == (False, S.One)
def test_issue_9522():
expr1 = Eq(1/(x**2 - 4) + x, 1/(x**2 - 4) + 2)
expr2 = Eq(1/x + x, 1/x)
assert solveset(expr1, x, S.Reals) == EmptySet()
assert solveset(expr2, x, S.Reals) == EmptySet()
def test_solvify():
assert solvify(x**2 + 10, x, S.Reals) == []
assert solvify(x**3 + 1, x, S.Complexes) == [-1, S.Half - sqrt(3)*I/2,
S.Half + sqrt(3)*I/2]
assert solvify(log(x), x, S.Reals) == [1]
assert solvify(cos(x), x, S.Reals) == [pi/2, pi*Rational(3, 2)]
assert solvify(sin(x) + 1, x, S.Reals) == [pi*Rational(3, 2)]
raises(NotImplementedError, lambda: solvify(sin(exp(x)), x, S.Complexes))
def test_abs_invert_solvify():
assert solvify(sin(Abs(x)), x, S.Reals) is None
def test_linear_eq_to_matrix():
eqns1 = [2*x + y - 2*z - 3, x - y - z, x + y + 3*z - 12]
eqns2 = [Eq(3*x + 2*y - z, 1), Eq(2*x - 2*y + 4*z, -2), -2*x + y - 2*z]
A, B = linear_eq_to_matrix(eqns1, x, y, z)
assert A == Matrix([[2, 1, -2], [1, -1, -1], [1, 1, 3]])
assert B == Matrix([[3], [0], [12]])
A, B = linear_eq_to_matrix(eqns2, x, y, z)
assert A == Matrix([[3, 2, -1], [2, -2, 4], [-2, 1, -2]])
assert B == Matrix([[1], [-2], [0]])
# Pure symbolic coefficients
eqns3 = [a*b*x + b*y + c*z - d, e*x + d*x + f*y + g*z - h, i*x + j*y + k*z - l]
A, B = linear_eq_to_matrix(eqns3, x, y, z)
assert A == Matrix([[a*b, b, c], [d + e, f, g], [i, j, k]])
assert B == Matrix([[d], [h], [l]])
# raise ValueError if
# 1) no symbols are given
raises(ValueError, lambda: linear_eq_to_matrix(eqns3))
# 2) there are duplicates
raises(ValueError, lambda: linear_eq_to_matrix(eqns3, [x, x, y]))
# 3) there are non-symbols
raises(ValueError, lambda: linear_eq_to_matrix(eqns3, [x, 1/a, y]))
    # 4) a nonlinear term is detected in the original expression
    #    (this case raises NonlinearError rather than ValueError)
raises(NonlinearError, lambda: linear_eq_to_matrix(Eq(1/x + x, 1/x), [x]))
assert linear_eq_to_matrix(1, x) == (Matrix([[0]]), Matrix([[-1]]))
# issue 15195
assert linear_eq_to_matrix(x + y*(z*(3*x + 2) + 3), x) == (
Matrix([[3*y*z + 1]]), Matrix([[-y*(2*z + 3)]]))
assert linear_eq_to_matrix(Matrix(
[[a*x + b*y - 7], [5*x + 6*y - c]]), x, y) == (
Matrix([[a, b], [5, 6]]), Matrix([[7], [c]]))
# issue 15312
assert linear_eq_to_matrix(Eq(x + 2, 1), x) == (
Matrix([[1]]), Matrix([[-1]]))
def test_issue_16577():
assert linear_eq_to_matrix(Eq(a*(2*x + 3*y) + 4*y, 5), x, y) == (
Matrix([[2*a, 3*a + 4]]), Matrix([[5]]))
def test_linsolve():
x1, x2, x3, x4 = symbols('x1, x2, x3, x4')
# Test for different input forms
M = Matrix([[1, 2, 1, 1, 7], [1, 2, 2, -1, 12], [2, 4, 0, 6, 4]])
system1 = A, B = M[:, :-1], M[:, -1]
Eqns = [x1 + 2*x2 + x3 + x4 - 7, x1 + 2*x2 + 2*x3 - x4 - 12,
2*x1 + 4*x2 + 6*x4 - 4]
sol = FiniteSet((-2*x2 - 3*x4 + 2, x2, 2*x4 + 5, x4))
assert linsolve(Eqns, (x1, x2, x3, x4)) == sol
assert linsolve(Eqns, *(x1, x2, x3, x4)) == sol
assert linsolve(system1, (x1, x2, x3, x4)) == sol
assert linsolve(system1, *(x1, x2, x3, x4)) == sol
# issue 9667 - symbols can be Dummy symbols
x1, x2, x3, x4 = symbols('x:4', cls=Dummy)
assert linsolve(system1, x1, x2, x3, x4) == FiniteSet(
(-2*x2 - 3*x4 + 2, x2, 2*x4 + 5, x4))
# raise ValueError for garbage value
raises(ValueError, lambda: linsolve(Eqns))
raises(ValueError, lambda: linsolve(x1))
raises(ValueError, lambda: linsolve(x1, x2))
raises(ValueError, lambda: linsolve((A,), x1, x2))
raises(ValueError, lambda: linsolve(A, B, x1, x2))
    # raise NonlinearError if the equations are nonlinear in the given variables
raises(NonlinearError, lambda: linsolve([x + y - 1, x ** 2 + y - 3], [x, y]))
raises(NonlinearError, lambda: linsolve([cos(x) + y, x + y], [x, y]))
assert linsolve([x + z - 1, x ** 2 + y - 3], [z, y]) == {(-x + 1, -x**2 + 3)}
# Fully symbolic test
A = Matrix([[a, b], [c, d]])
B = Matrix([[e], [g]])
system2 = (A, B)
sol = FiniteSet(((-b*g + d*e)/(a*d - b*c), (a*g - c*e)/(a*d - b*c)))
assert linsolve(system2, [x, y]) == sol
# No solution
A = Matrix([[1, 2, 3], [2, 4, 6], [3, 6, 9]])
B = Matrix([0, 0, 1])
assert linsolve((A, B), (x, y, z)) == EmptySet()
# Issue #10056
A, B, J1, J2 = symbols('A B J1 J2')
Augmatrix = Matrix([
[2*I*J1, 2*I*J2, -2/J1],
[-2*I*J2, -2*I*J1, 2/J2],
[0, 2, 2*I/(J1*J2)],
[2, 0, 0],
])
assert linsolve(Augmatrix, A, B) == FiniteSet((0, I/(J1*J2)))
# Issue #10121 - Assignment of free variables
Augmatrix = Matrix([[0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]])
assert linsolve(Augmatrix, a, b, c, d, e) == FiniteSet((a, 0, c, 0, e))
#raises(IndexError, lambda: linsolve(Augmatrix, a, b, c))
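    # when no symbols are passed, linsolve names the free parameters tau0,
    # tau1, ... and renumbers them (tau00, tau01, ...) to avoid clashing with
    # tau-like symbols already present in the matrix, as checked below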
x0, x1, x2, _x0 = symbols('tau0 tau1 tau2 _tau0')
assert linsolve(Matrix([[0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, _x0]])
) == FiniteSet((x0, 0, x1, _x0, x2))
x0, x1, x2, _x0 = symbols('tau00 tau01 tau02 tau0')
assert linsolve(Matrix([[0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, _x0]])
) == FiniteSet((x0, 0, x1, _x0, x2))
x0, x1, x2, _x0 = symbols('tau00 tau01 tau02 tau1')
assert linsolve(Matrix([[0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, _x0]])
) == FiniteSet((x0, 0, x1, _x0, x2))
# symbols can be given as generators
x0, x2, x4 = symbols('x0, x2, x4')
assert linsolve(Augmatrix, numbered_symbols('x')
) == FiniteSet((x0, 0, x2, 0, x4))
Augmatrix[-1, -1] = x0
# use Dummy to avoid clash; the names may clash but the symbols
# will not
Augmatrix[-1, -1] = symbols('_x0')
assert len(linsolve(
Augmatrix, numbered_symbols('x', cls=Dummy)).free_symbols) == 4
# Issue #12604
f = Function('f')
assert linsolve([f(x) - 5], f(x)) == FiniteSet((5,))
# Issue #14860
from sympy.physics.units import meter, newton, kilo
kN = kilo*newton
Eqns = [8*kN + x + y, 28*kN*meter + 3*x*meter]
assert linsolve(Eqns, x, y) == {
(kilo*newton*Rational(-28, 3), kN*Rational(4, 3))}
    # linsolve fully expands expressions, so removable singularities
    # and other nonlinearities do not raise an error
assert linsolve([Eq(x, x + y)], [x, y]) == {(x, 0)}
assert linsolve([Eq(1/x, 1/x + y)], [x, y]) == {(x, 0)}
assert linsolve([Eq(y/x, y/x + y)], [x, y]) == {(x, 0)}
assert linsolve([Eq(x*(x + 1), x**2 + y)], [x, y]) == {(y, y)}
def test_linsolve_large_sparse():
#
# This is mainly a performance test
#
def _mk_eqs_sol(n):
xs = symbols('x:{}'.format(n))
ys = symbols('y:{}'.format(n))
syms = xs + ys
eqs = []
sol = (-S.Half,) * n + (S.Half,) * n
for xi, yi in zip(xs, ys):
eqs.extend([xi + yi, xi - yi + 1])
return eqs, syms, FiniteSet(sol)
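    # e.g. _mk_eqs_sol(1) builds eqs = [x0 + y0, x0 - y0 + 1] with the unique
    # solution (x0, y0) = (-1/2, 1/2)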
n = 500
eqs, syms, sol = _mk_eqs_sol(n)
assert linsolve(eqs, syms) == sol
def test_linsolve_immutable():
A = ImmutableDenseMatrix([[1, 1, 2], [0, 1, 2], [0, 0, 1]])
B = ImmutableDenseMatrix([2, 1, -1])
assert linsolve([A, B], (x, y, z)) == FiniteSet((1, 3, -1))
A = ImmutableDenseMatrix([[1, 1, 7], [1, -1, 3]])
assert linsolve(A) == FiniteSet((5, 2))
def test_solve_decomposition():
n = Dummy('n')
f1 = exp(3*x) - 6*exp(2*x) + 11*exp(x) - 6
f2 = sin(x)**2 - 2*sin(x) + 1
f3 = sin(x)**2 - sin(x)
f4 = sin(x + 1)
f5 = exp(x + 2) - 1
f6 = 1/log(x)
f7 = 1/x
s1 = ImageSet(Lambda(n, 2*n*pi), S.Integers)
s2 = ImageSet(Lambda(n, 2*n*pi + pi), S.Integers)
s3 = ImageSet(Lambda(n, 2*n*pi + pi/2), S.Integers)
s4 = ImageSet(Lambda(n, 2*n*pi - 1), S.Integers)
s5 = ImageSet(Lambda(n, 2*n*pi - 1 + pi), S.Integers)
assert solve_decomposition(f1, x, S.Reals) == FiniteSet(0, log(2), log(3))
assert dumeq(solve_decomposition(f2, x, S.Reals), s3)
assert dumeq(solve_decomposition(f3, x, S.Reals), Union(s1, s2, s3))
assert dumeq(solve_decomposition(f4, x, S.Reals), Union(s4, s5))
assert solve_decomposition(f5, x, S.Reals) == FiniteSet(-2)
assert solve_decomposition(f6, x, S.Reals) == S.EmptySet
assert solve_decomposition(f7, x, S.Reals) == S.EmptySet
assert solve_decomposition(x, x, Interval(1, 2)) == S.EmptySet
# nonlinsolve testcases
def test_nonlinsolve_basic():
assert nonlinsolve([],[]) == S.EmptySet
assert nonlinsolve([],[x, y]) == S.EmptySet
system = [x, y - x - 5]
assert nonlinsolve([x],[x, y]) == FiniteSet((0, y))
assert nonlinsolve(system, [y]) == FiniteSet((x + 5,))
soln = (ImageSet(Lambda(n, 2*n*pi + pi/2), S.Integers),)
assert dumeq(nonlinsolve([sin(x) - 1], [x]), FiniteSet(tuple(soln)))
assert nonlinsolve([x**2 - 1], [x]) == FiniteSet((-1,), (1,))
soln = FiniteSet((y, y))
assert nonlinsolve([x - y, 0], x, y) == soln
assert nonlinsolve([0, x - y], x, y) == soln
assert nonlinsolve([x - y, x - y], x, y) == soln
assert nonlinsolve([x, 0], x, y) == FiniteSet((0, y))
f = Function('f')
assert nonlinsolve([f(x), 0], f(x), y) == FiniteSet((0, y))
assert nonlinsolve([f(x), 0], f(x), f(y)) == FiniteSet((0, f(y)))
A = Indexed('A', x)
assert nonlinsolve([A, 0], A, y) == FiniteSet((0, y))
assert nonlinsolve([x**2 -1], [sin(x)]) == FiniteSet((S.EmptySet,))
assert nonlinsolve([x**2 -1], sin(x)) == FiniteSet((S.EmptySet,))
assert nonlinsolve([x**2 -1], 1) == FiniteSet((x**2,))
assert nonlinsolve([x**2 -1], x + y) == FiniteSet((S.EmptySet,))
def test_nonlinsolve_abs():
soln = FiniteSet((x, Abs(x)))
assert nonlinsolve([Abs(x) - y], x, y) == soln
def test_raise_exception_nonlinsolve():
raises(IndexError, lambda: nonlinsolve([x**2 -1], []))
raises(ValueError, lambda: nonlinsolve([x**2 -1]))
raises(NotImplementedError, lambda: nonlinsolve([(x+y)**2 - 9, x**2 - y**2 - 0.75], (x, y)))
def test_trig_system():
    # TODO: add simpler test cases once solveset returns
    # simplified solutions for trig equations
    assert nonlinsolve([sin(x) - 1, cos(x) - 1], x) == S.EmptySet
soln1 = (ImageSet(Lambda(n, 2*n*pi + pi/2), S.Integers),)
soln = FiniteSet(soln1)
assert dumeq(nonlinsolve([sin(x) - 1, cos(x)], x), soln)
@XFAIL
def test_trig_system_fail():
    # fails because the solveset trig solver is not smart enough.
sys = [x + y - pi/2, sin(x) + sin(y) - 1]
# solveset returns conditionset for sin(x) + sin(y) - 1
soln_1 = (ImageSet(Lambda(n, n*pi + pi/2), S.Integers),
        ImageSet(Lambda(n, n*pi), S.Integers))
soln_1 = FiniteSet(soln_1)
soln_2 = (ImageSet(Lambda(n, n*pi), S.Integers),
        ImageSet(Lambda(n, n*pi + pi/2), S.Integers))
soln_2 = FiniteSet(soln_2)
soln = soln_1 + soln_2
assert dumeq(nonlinsolve(sys, [x, y]), soln)
# Add more cases from here
# http://www.vitutor.com/geometry/trigonometry/equations_systems.html#uno
sys = [sin(x) + sin(y) - (sqrt(3)+1)/2, sin(x) - sin(y) - (sqrt(3) - 1)/2]
soln_x = Union(ImageSet(Lambda(n, 2*n*pi + pi/3), S.Integers),
ImageSet(Lambda(n, 2*n*pi + pi*Rational(2, 3)), S.Integers))
soln_y = Union(ImageSet(Lambda(n, 2*n*pi + pi/6), S.Integers),
ImageSet(Lambda(n, 2*n*pi + pi*Rational(5, 6)), S.Integers))
assert dumeq(nonlinsolve(sys, [x, y]), FiniteSet((soln_x, soln_y)))
def test_nonlinsolve_positive_dimensional():
x, y, z, a, b, c, d = symbols('x, y, z, a, b, c, d', extended_real=True)
assert nonlinsolve([x*y, x*y - x], [x, y]) == FiniteSet((0, y))
system = [a**2 + a*c, a - b]
assert nonlinsolve(system, [a, b]) == FiniteSet((0, 0), (-c, -c))
    # here (a = 0, b = 0) is an independent solution, so both are returned;
    # if the symbols were [a, b, c], only {a: -c, b: -c} would be returned
eq1 = a + b + c + d
eq2 = a*b + b*c + c*d + d*a
eq3 = a*b*c + b*c*d + c*d*a + d*a*b
eq4 = a*b*c*d - 1
system = [eq1, eq2, eq3, eq4]
sol1 = (-1/d, -d, 1/d, FiniteSet(d) - FiniteSet(0))
sol2 = (1/d, -d, -1/d, FiniteSet(d) - FiniteSet(0))
soln = FiniteSet(sol1, sol2)
assert nonlinsolve(system, [a, b, c, d]) == soln
def test_nonlinsolve_polysys():
x, y, z = symbols('x, y, z', real=True)
assert nonlinsolve([x**2 + y - 2, x**2 + y], [x, y]) == S.EmptySet
s = (-y + 2, y)
assert nonlinsolve([(x + y)**2 - 4, x + y - 2], [x, y]) == FiniteSet(s)
system = [x**2 - y**2]
soln_real = FiniteSet((-y, y), (y, y))
soln_complex = FiniteSet((-Abs(y), y), (Abs(y), y))
    soln = soln_real + soln_complex
assert nonlinsolve(system, [x, y]) == soln
system = [x**2 - y**2]
    soln_real = FiniteSet((y, -y), (y, y))
soln_complex = FiniteSet((y, -Abs(y)), (y, Abs(y)))
soln = soln_real + soln_complex
assert nonlinsolve(system, [y, x]) == soln
system = [x**2 + y - 3, x - y - 4]
assert nonlinsolve(system, (x, y)) != nonlinsolve(system, (y, x))
def test_nonlinsolve_using_substitution():
    x, y, z, n = symbols('x, y, z, n', real=True)
system = [(x + y)*n - y**2 + 2]
s_x = (n*y - y**2 + 2)/n
soln = (-s_x, y)
assert nonlinsolve(system, [x, y]) == FiniteSet(soln)
# def test_nonlinsolve_using_substitution1():
# n = Dummy('n')
# system = [z**2*x**2 - z**2*y**2/exp(x)]
# syms = [y, x, z]
#     lam1 = Lambda(n, 2*LambertW(-y/2, n))
# soln1 = (ImageSet(lam1, S.Integers), y, z)
# lam2 = Lambda(n, 2*LambertW(y/2, n))
# soln2 = (ImageSet(lam2, S.Integers), y, z)
# assert dumeq(nonlinsolve(system,syms) , { (x, y, 0), soln1, soln2})
def test_nonlinsolve_complex():
n = Dummy('n')
assert dumeq(nonlinsolve([exp(x) - sin(y), 1/y - 3], [x, y]), {
(ImageSet(Lambda(n, 2*n*I*pi + log(sin(Rational(1, 3)))), S.Integers), Rational(1, 3))})
system = [exp(x) - sin(y), 1/exp(y) - 3]
assert dumeq(nonlinsolve(system, [x, y]), {
(ImageSet(Lambda(n, I*(2*n*pi + pi)
+ log(sin(log(3)))), S.Integers), -log(3)),
(ImageSet(Lambda(n, I*(2*n*pi + arg(sin(2*n*I*pi - log(3))))
+ log(Abs(sin(2*n*I*pi - log(3))))), S.Integers),
ImageSet(Lambda(n, 2*n*I*pi - log(3)), S.Integers))})
system = [exp(x) - sin(y), y**2 - 4]
assert dumeq(nonlinsolve(system, [x, y]), {
(ImageSet(Lambda(n, I*(2*n*pi + pi) + log(sin(2))), S.Integers), -2),
(ImageSet(Lambda(n, 2*n*I*pi + log(sin(2))), S.Integers), 2)})
@XFAIL
def test_solve_nonlinear_trans():
# After the transcendental equation solver these will work
x, y, z = symbols('x, y, z', real=True)
soln1 = FiniteSet((2*LambertW(y/2), y))
soln2 = FiniteSet((-x*sqrt(exp(x)), y), (x*sqrt(exp(x)), y))
soln3 = FiniteSet((x*exp(x/2), x))
soln4 = FiniteSet(2*LambertW(y/2), y)
assert nonlinsolve([x**2 - y**2/exp(x)], [x, y]) == soln1
assert nonlinsolve([x**2 - y**2/exp(x)], [y, x]) == soln2
assert nonlinsolve([x**2 - y**2/exp(x)], [y, x]) == soln3
assert nonlinsolve([x**2 - y**2/exp(x)], [x, y]) == soln4
def test_issue_5132_1():
system = [sqrt(x**2 + y**2) - sqrt(10), x + y - 4]
assert nonlinsolve(system, [x, y]) == FiniteSet((1, 3), (3, 1))
n = Dummy('n')
eqs = [exp(x)**2 - sin(y) + z**2, 1/exp(y) - 3]
s_real_y = -log(3)
s_real_z = sqrt(-exp(2*x) - sin(log(3)))
soln_real = FiniteSet((s_real_y, s_real_z), (s_real_y, -s_real_z))
lam = Lambda(n, 2*n*I*pi + -log(3))
s_complex_y = ImageSet(lam, S.Integers)
lam = Lambda(n, sqrt(-exp(2*x) + sin(2*n*I*pi + -log(3))))
s_complex_z_1 = ImageSet(lam, S.Integers)
lam = Lambda(n, -sqrt(-exp(2*x) + sin(2*n*I*pi + -log(3))))
s_complex_z_2 = ImageSet(lam, S.Integers)
soln_complex = FiniteSet(
(s_complex_y, s_complex_z_1),
(s_complex_y, s_complex_z_2)
)
soln = soln_real + soln_complex
assert dumeq(nonlinsolve(eqs, [y, z]), soln)
def test_issue_5132_2():
x, y = symbols('x, y', real=True)
eqs = [exp(x)**2 - sin(y) + z**2, 1/exp(y) - 3]
n = Dummy('n')
soln_real = (log(-z**2 + sin(y))/2, z)
lam = Lambda( n, I*(2*n*pi + arg(-z**2 + sin(y)))/2 + log(Abs(z**2 - sin(y)))/2)
img = ImageSet(lam, S.Integers)
    # not sure about the complex solution, but it looks correct.
soln_complex = (img, z)
soln = FiniteSet(soln_real, soln_complex)
assert dumeq(nonlinsolve(eqs, [x, z]), soln)
system = [r - x**2 - y**2, tan(t) - y/x]
s_x = sqrt(r/(tan(t)**2 + 1))
s_y = sqrt(r/(tan(t)**2 + 1))*tan(t)
soln = FiniteSet((s_x, s_y), (-s_x, -s_y))
assert nonlinsolve(system, [x, y]) == soln
def test_issue_6752():
    a, b, c, d = symbols('a, b, c, d', real=True)
assert nonlinsolve([a**2 + a, a - b], [a, b]) == {(-1, -1), (0, 0)}
@SKIP("slow")
def test_issue_5114_solveset():
# slow testcase
from sympy.abc import d, e, f, g, h, i, j, k, l, o, p, q, r
# there is no 'a' in the equation set but this is how the
# problem was originally posed
syms = [a, b, c, f, h, k, n]
eqs = [b + r/d - c/d,
c*(1/d + 1/e + 1/g) - f/g - r/d,
f*(1/g + 1/i + 1/j) - c/g - h/i,
h*(1/i + 1/l + 1/m) - f/i - k/m,
k*(1/m + 1/o + 1/p) - h/m - n/p,
n*(1/p + 1/q) - k/p]
assert len(nonlinsolve(eqs, syms)) == 1
@SKIP("Hangs")
def _test_issue_5335():
    # Unable to check the zero-dimensional system:
    # is_zero_dimensional hangs.
lam, a0, conc = symbols('lam a0 conc')
eqs = [lam + 2*y - a0*(1 - x/2)*x - 0.005*x/2*x,
a0*(1 - x/2)*x - 1*y - 0.743436700916726*y,
x + y - conc]
sym = [x, y, a0]
# there are 4 solutions but only two are valid
assert len(nonlinsolve(eqs, sym)) == 2
# float
eqs = [lam + 2*y - a0*(1 - x/2)*x - 0.005*x/2*x,
a0*(1 - x/2)*x - 1*y - 0.743436700916726*y,
x + y - conc]
sym = [x, y, a0]
assert len(nonlinsolve(eqs, sym)) == 2
def test_issue_2777():
# the equations represent two circles
x, y = symbols('x y', real=True)
e1, e2 = sqrt(x**2 + y**2) - 10, sqrt(y**2 + (-x + 10)**2) - 3
a, b = Rational(191, 20), 3*sqrt(391)/20
ans = {(a, -b), (a, b)}
assert nonlinsolve((e1, e2), (x, y)) == ans
assert nonlinsolve((e1, e2/(x - a)), (x, y)) == S.EmptySet
# make the 2nd circle's radius be -3
e2 += 6
assert nonlinsolve((e1, e2), (x, y)) == S.EmptySet
def test_issue_8828():
x1 = 0
y1 = -620
r1 = 920
x2 = 126
y2 = 276
x3 = 51
y3 = 205
r3 = 104
v = [x, y, z]
f1 = (x - x1)**2 + (y - y1)**2 - (r1 - z)**2
f2 = (x2 - x)**2 + (y2 - y)**2 - z**2
f3 = (x - x3)**2 + (y - y3)**2 - (r3 - z)**2
F = [f1, f2, f3]
g1 = sqrt((x - x1)**2 + (y - y1)**2) + z - r1
g2 = f2
g3 = sqrt((x - x3)**2 + (y - y3)**2) + z - r3
G = [g1, g2, g3]
    # both systems have the same solutions
A = nonlinsolve(F, v)
B = nonlinsolve(G, v)
assert A == B
def test_nonlinsolve_conditionset():
    # when solveset fails to solve all of the equations,
    # a ConditionSet is returned
f = Function('f')
f1 = f(x) - pi/2
f2 = f(y) - pi*Rational(3, 2)
intermediate_system = Eq(2*f(x) - pi, 0) & Eq(2*f(y) - 3*pi, 0)
symbols = Tuple(x, y)
soln = ConditionSet(
symbols,
intermediate_system,
S.Complexes**2)
assert nonlinsolve([f1, f2], [x, y]) == soln
def test_substitution_basic():
assert substitution([], [x, y]) == S.EmptySet
assert substitution([], []) == S.EmptySet
system = [2*x**2 + 3*y**2 - 30, 3*x**2 - 2*y**2 - 19]
soln = FiniteSet((-3, -2), (-3, 2), (3, -2), (3, 2))
assert substitution(system, [x, y]) == soln
soln = FiniteSet((-1, 1))
assert substitution([x + y], [x], [{y: 1}], [y], set(), [x, y]) == soln
assert substitution(
[x + y], [x], [{y: 1}], [y],
{x + 1}, [y, x]) == S.EmptySet
def test_issue_5132_substitution():
x, y, z, r, t = symbols('x, y, z, r, t', real=True)
system = [r - x**2 - y**2, tan(t) - y/x]
s_x_1 = Complement(FiniteSet(-sqrt(r/(tan(t)**2 + 1))), FiniteSet(0))
s_x_2 = Complement(FiniteSet(sqrt(r/(tan(t)**2 + 1))), FiniteSet(0))
s_y = sqrt(r/(tan(t)**2 + 1))*tan(t)
soln = FiniteSet((s_x_2, s_y)) + FiniteSet((s_x_1, -s_y))
assert substitution(system, [x, y]) == soln
n = Dummy('n')
eqs = [exp(x)**2 - sin(y) + z**2, 1/exp(y) - 3]
s_real_y = -log(3)
s_real_z = sqrt(-exp(2*x) - sin(log(3)))
soln_real = FiniteSet((s_real_y, s_real_z), (s_real_y, -s_real_z))
lam = Lambda(n, 2*n*I*pi + -log(3))
s_complex_y = ImageSet(lam, S.Integers)
lam = Lambda(n, sqrt(-exp(2*x) + sin(2*n*I*pi + -log(3))))
s_complex_z_1 = ImageSet(lam, S.Integers)
lam = Lambda(n, -sqrt(-exp(2*x) + sin(2*n*I*pi + -log(3))))
s_complex_z_2 = ImageSet(lam, S.Integers)
soln_complex = FiniteSet(
(s_complex_y, s_complex_z_1),
(s_complex_y, s_complex_z_2))
soln = soln_real + soln_complex
assert dumeq(substitution(eqs, [y, z]), soln)
def test_raises_substitution():
raises(ValueError, lambda: substitution([x**2 -1], []))
raises(TypeError, lambda: substitution([x**2 -1]))
raises(ValueError, lambda: substitution([x**2 -1], [sin(x)]))
raises(TypeError, lambda: substitution([x**2 -1], x))
raises(TypeError, lambda: substitution([x**2 -1], 1))
# end of tests for nonlinsolve
def test_issue_9556():
b = Symbol('b', positive=True)
assert solveset(Abs(x) + 1, x, S.Reals) == EmptySet()
assert solveset(Abs(x) + b, x, S.Reals) == EmptySet()
assert solveset(Eq(b, -1), b, S.Reals) == EmptySet()
def test_issue_9611():
assert solveset(Eq(x - x + a, a), x, S.Reals) == S.Reals
assert solveset(Eq(y - y + a, a), y) == S.Complexes
def test_issue_9557():
assert solveset(x**2 + a, x, S.Reals) == Intersection(S.Reals,
FiniteSet(-sqrt(-a), sqrt(-a)))
def test_issue_9778():
x = Symbol('x', real=True)
y = Symbol('y', real=True)
assert solveset(x**3 + 1, x, S.Reals) == FiniteSet(-1)
assert solveset(x**Rational(3, 5) + 1, x, S.Reals) == S.EmptySet
assert solveset(x**3 + y, x, S.Reals) == \
FiniteSet(-Abs(y)**Rational(1, 3)*sign(y))
def test_issue_10214():
assert solveset(x**Rational(3, 2) + 4, x, S.Reals) == S.EmptySet
assert solveset(x**(Rational(-3, 2)) + 4, x, S.Reals) == S.EmptySet
ans = FiniteSet(-2**Rational(2, 3))
assert solveset(x**(S(3)) + 4, x, S.Reals) == ans
assert (x**(S(3)) + 4).subs(x,list(ans)[0]) == 0 # substituting ans and verifying the result.
assert (x**(S(3)) + 4).subs(x,-(-2)**Rational(2, 3)) == 0
def test_issue_9849():
assert solveset(Abs(sin(x)) + 1, x, S.Reals) == S.EmptySet
def test_issue_9953():
assert linsolve([ ], x) == S.EmptySet
def test_issue_9913():
assert solveset(2*x + 1/(x - 10)**2, x, S.Reals) == \
FiniteSet(-(3*sqrt(24081)/4 + Rational(4027, 4))**Rational(1, 3)/3 - 100/
(3*(3*sqrt(24081)/4 + Rational(4027, 4))**Rational(1, 3)) + Rational(20, 3))
def test_issue_10397():
assert solveset(sqrt(x), x, S.Complexes) == FiniteSet(0)
def test_issue_14987():
raises(ValueError, lambda: linear_eq_to_matrix(
[x**2], x))
raises(ValueError, lambda: linear_eq_to_matrix(
[x*(-3/x + 1) + 2*y - a], [x, y]))
raises(ValueError, lambda: linear_eq_to_matrix(
[(x**2 - 3*x)/(x - 3) - 3], x))
raises(ValueError, lambda: linear_eq_to_matrix(
[(x + 1)**3 - x**3 - 3*x**2 + 7], x))
raises(ValueError, lambda: linear_eq_to_matrix(
[x*(1/x + 1) + y], [x, y]))
raises(ValueError, lambda: linear_eq_to_matrix(
[(x + 1)*y], [x, y]))
raises(ValueError, lambda: linear_eq_to_matrix(
[Eq(1/x, 1/x + y)], [x, y]))
raises(ValueError, lambda: linear_eq_to_matrix(
[Eq(y/x, y/x + y)], [x, y]))
raises(ValueError, lambda: linear_eq_to_matrix(
[Eq(x*(x + 1), x**2 + y)], [x, y]))
def test_simplification():
eq = x + (a - b)/(-2*a + 2*b)
assert solveset(eq, x) == FiniteSet(S.Half)
assert solveset(eq, x, S.Reals) == Intersection({-((a - b)/(-2*a + 2*b))}, S.Reals)
# So that ap - bn is not zero:
ap = Symbol('ap', positive=True)
bn = Symbol('bn', negative=True)
eq = x + (ap - bn)/(-2*ap + 2*bn)
assert solveset(eq, x) == FiniteSet(S.Half)
assert solveset(eq, x, S.Reals) == FiniteSet(S.Half)
def test_issue_10555():
f = Function('f')
g = Function('g')
assert solveset(f(x) - pi/2, x, S.Reals).dummy_eq(
ConditionSet(x, Eq(f(x) - pi/2, 0), S.Reals))
assert solveset(f(g(x)) - pi/2, g(x), S.Reals).dummy_eq(
ConditionSet(g(x), Eq(f(g(x)) - pi/2, 0), S.Reals))
def test_issue_8715():
eq = x + 1/x > -2 + 1/x
assert solveset(eq, x, S.Reals) == \
(Interval.open(-2, oo) - FiniteSet(0))
assert solveset(eq.subs(x,log(x)), x, S.Reals) == \
Interval.open(exp(-2), oo) - FiniteSet(1)
def test_issue_11174():
eq = z**2 + exp(2*x) - sin(y)
soln = Intersection(S.Reals, FiniteSet(log(-z**2 + sin(y))/2))
assert solveset(eq, x, S.Reals) == soln
eq = sqrt(r)*Abs(tan(t))/sqrt(tan(t)**2 + 1) + x*tan(t)
s = -sqrt(r)*Abs(tan(t))/(sqrt(tan(t)**2 + 1)*tan(t))
soln = Intersection(S.Reals, FiniteSet(s))
assert solveset(eq, x, S.Reals) == soln
def test_issue_11534():
# eq and eq2 should give the same solution as a Complement
x = Symbol('x', real=True)
y = Symbol('y', real=True)
eq = -y + x/sqrt(-x**2 + 1)
eq2 = -y**2 + x**2/(-x**2 + 1)
soln = Complement(FiniteSet(-y/sqrt(y**2 + 1), y/sqrt(y**2 + 1)), FiniteSet(-1, 1))
assert solveset(eq, x, S.Reals) == soln
assert solveset(eq2, x, S.Reals) == soln
def test_issue_10477():
assert solveset((x**2 + 4*x - 3)/x < 2, x, S.Reals) == \
Union(Interval.open(-oo, -3), Interval.open(0, 1))
def test_issue_10671():
assert solveset(sin(y), y, Interval(0, pi)) == FiniteSet(0, pi)
i = Interval(1, 10)
assert solveset((1/x).diff(x) < 0, x, i) == i
def test_issue_11064():
eq = x + sqrt(x**2 - 5)
assert solveset(eq > 0, x, S.Reals) == \
Interval(sqrt(5), oo)
assert solveset(eq < 0, x, S.Reals) == \
Interval(-oo, -sqrt(5))
assert solveset(eq > sqrt(5), x, S.Reals) == \
Interval.Lopen(sqrt(5), oo)
def test_issue_12478():
eq = sqrt(x - 2) + 2
soln = solveset_real(eq, x)
assert soln is S.EmptySet
assert solveset(eq < 0, x, S.Reals) is S.EmptySet
assert solveset(eq > 0, x, S.Reals) == Interval(2, oo)
def test_issue_12429():
eq = solveset(log(x)/x <= 0, x, S.Reals)
sol = Interval.Lopen(0, 1)
assert eq == sol
def test_solveset_arg():
assert solveset(arg(x), x, S.Reals) == Interval.open(0, oo)
assert solveset(arg(4*x -3), x) == Interval.open(Rational(3, 4), oo)
def test__is_finite_with_finite_vars():
f = _is_finite_with_finite_vars
# issue 12482
assert all(f(1/x) is None for x in (
Dummy(), Dummy(real=True), Dummy(complex=True)))
assert f(1/Dummy(real=False)) is True # b/c it's finite but not 0
def test_issue_13550():
assert solveset(x**2 - 2*x - 15, symbol = x, domain = Interval(-oo, 0)) == FiniteSet(-3)
def test_issue_13849():
assert nonlinsolve((t*(sqrt(5) + sqrt(2)) - sqrt(2), t), t) == EmptySet()
def test_issue_14223():
assert solveset((Abs(x + Min(x, 2)) - 2).rewrite(Piecewise), x,
S.Reals) == FiniteSet(-1, 1)
assert solveset((Abs(x + Min(x, 2)) - 2).rewrite(Piecewise), x,
Interval(0, 2)) == FiniteSet(1)
def test_issue_10158():
dom = S.Reals
assert solveset(x*Max(x, 15) - 10, x, dom) == FiniteSet(Rational(2, 3))
assert solveset(x*Min(x, 15) - 10, x, dom) == FiniteSet(-sqrt(10), sqrt(10))
assert solveset(Max(Abs(x - 3) - 1, x + 2) - 3, x, dom) == FiniteSet(-1, 1)
assert solveset(Abs(x - 1) - Abs(y), x, dom) == FiniteSet(-Abs(y) + 1, Abs(y) + 1)
assert solveset(Abs(x + 4*Abs(x + 1)), x, dom) == FiniteSet(Rational(-4, 3), Rational(-4, 5))
assert solveset(2*Abs(x + Abs(x + Max(3, x))) - 2, x, S.Reals) == FiniteSet(-1, -2)
dom = S.Complexes
raises(ValueError, lambda: solveset(x*Max(x, 15) - 10, x, dom))
raises(ValueError, lambda: solveset(x*Min(x, 15) - 10, x, dom))
raises(ValueError, lambda: solveset(Max(Abs(x - 3) - 1, x + 2) - 3, x, dom))
raises(ValueError, lambda: solveset(Abs(x - 1) - Abs(y), x, dom))
raises(ValueError, lambda: solveset(Abs(x + 4*Abs(x + 1)), x, dom))
def test_issue_14300():
f = 1 - exp(-18000000*x) - y
a1 = FiniteSet(-log(-y + 1)/18000000)
assert solveset(f, x, S.Reals) == \
Intersection(S.Reals, a1)
assert dumeq(solveset(f, x),
ImageSet(Lambda(n, -I*(2*n*pi + arg(-y + 1))/18000000 -
log(Abs(y - 1))/18000000), S.Integers))
def test_issue_14454():
number = CRootOf(x**4 + x - 1, 2)
raises(ValueError, lambda: invert_real(number, 0, x, S.Reals))
assert invert_real(x**2, number, x, S.Reals) # no error
def test_issue_17882():
assert solveset(-8*x**2/(9*(x**2 - 1)**(S(4)/3)) + 4/(3*(x**2 - 1)**(S(1)/3)), x, S.Complexes) == \
FiniteSet(sqrt(3), -sqrt(3))
def test_term_factors():
assert list(_term_factors(3**x - 2)) == [-2, 3**x]
expr = 4**(x + 1) + 4**(x + 2) + 4**(x - 1) - 3**(x + 2) - 3**(x + 3)
assert set(_term_factors(expr)) == {
3**(x + 2), 4**(x + 2), 3**(x + 3), 4**(x - 1), -1, 4**(x + 1)}
#################### tests for transolve and its helpers ###############
def test_transolve():
assert _transolve(3**x, x, S.Reals) == S.EmptySet
assert _transolve(3**x - 9**(x + 5), x, S.Reals) == FiniteSet(-10)
# exponential tests
def test_exponential_real():
from sympy.abc import x, y, z
e1 = 3**(2*x) - 2**(x + 3)
e2 = 4**(5 - 9*x) - 8**(2 - x)
e3 = 2**x + 4**x
e4 = exp(log(5)*x) - 2**x
e5 = exp(x/y)*exp(-z/y) - 2
e6 = 5**(x/2) - 2**(x/3)
e7 = 4**(x + 1) + 4**(x + 2) + 4**(x - 1) - 3**(x + 2) - 3**(x + 3)
e8 = -9*exp(-2*x + 5) + 4*exp(3*x + 1)
e9 = 2**x + 4**x + 8**x - 84
assert solveset(e1, x, S.Reals) == FiniteSet(
-3*log(2)/(-2*log(3) + log(2)))
assert solveset(e2, x, S.Reals) == FiniteSet(Rational(4, 15))
assert solveset(e3, x, S.Reals) == S.EmptySet
assert solveset(e4, x, S.Reals) == FiniteSet(0)
assert solveset(e5, x, S.Reals) == Intersection(
S.Reals, FiniteSet(y*log(2*exp(z/y))))
assert solveset(e6, x, S.Reals) == FiniteSet(0)
assert solveset(e7, x, S.Reals) == FiniteSet(2)
assert solveset(e8, x, S.Reals) == FiniteSet(-2*log(2)/5 + 2*log(3)/5 + Rational(4, 5))
assert solveset(e9, x, S.Reals) == FiniteSet(2)
assert solveset_real(-9*exp(-2*x + 5) + 2**(x + 1), x) == FiniteSet(
-((-5 - 2*log(3) + log(2))/(log(2) + 2)))
assert solveset_real(4**(x/2) - 2**(x/3), x) == FiniteSet(0)
b = sqrt(6)*sqrt(log(2))/sqrt(log(5))
assert solveset_real(5**(x/2) - 2**(3/x), x) == FiniteSet(-b, b)
# coverage test
C1, C2 = symbols('C1 C2')
f = Function('f')
assert solveset_real(C1 + C2/x**2 - exp(-f(x)), f(x)) == Intersection(
S.Reals, FiniteSet(-log(C1 + C2/x**2)))
y = symbols('y', positive=True)
assert solveset_real(x**2 - y**2/exp(x), y) == Intersection(
S.Reals, FiniteSet(-sqrt(x**2*exp(x)), sqrt(x**2*exp(x))))
p = Symbol('p', positive=True)
assert solveset_real((1/p + 1)**(p + 1), p) == EmptySet()
@XFAIL
def test_exponential_complex():
from sympy.abc import x
from sympy import Dummy
n = Dummy('n')
assert dumeq(solveset_complex(2**x + 4**x, x),imageset(
Lambda(n, I*(2*n*pi + pi)/log(2)), S.Integers))
assert solveset_complex(x**z*y**z - 2, z) == FiniteSet(
log(2)/(log(x) + log(y)))
assert dumeq(solveset_complex(4**(x/2) - 2**(x/3), x), imageset(
Lambda(n, 3*n*I*pi/log(2)), S.Integers))
assert dumeq(solveset(2**x + 32, x), imageset(
Lambda(n, (I*(2*n*pi + pi) + 5*log(2))/log(2)), S.Integers))
eq = (2**exp(y**2/x) + 2)/(x**2 + 15)
a = sqrt(x)*sqrt(-log(log(2)) + log(log(2) + 2*n*I*pi))
assert solveset_complex(eq, y) == FiniteSet(-a, a)
union1 = imageset(Lambda(n, I*(2*n*pi - pi*Rational(2, 3))/log(2)), S.Integers)
union2 = imageset(Lambda(n, I*(2*n*pi + pi*Rational(2, 3))/log(2)), S.Integers)
assert dumeq(solveset(2**x + 4**x + 8**x, x), Union(union1, union2))
eq = 4**(x + 1) + 4**(x + 2) + 4**(x - 1) - 3**(x + 2) - 3**(x + 3)
res = solveset(eq, x)
num = 2*n*I*pi - 4*log(2) + 2*log(3)
den = -2*log(2) + log(3)
ans = imageset(Lambda(n, num/den), S.Integers)
assert dumeq(res, ans)
def test_expo_conditionset():
f1 = (exp(x) + 1)**x - 2
f2 = (x + 2)**y*x - 3
f3 = 2**x - exp(x) - 3
f4 = log(x) - exp(x)
f5 = 2**x + 3**x - 5**x
assert solveset(f1, x, S.Reals).dummy_eq(ConditionSet(
x,Eq(x*log(exp(x) + 1) - log(2), 0), S.Reals))
assert solveset(f2, x, S.Reals).dummy_eq(ConditionSet(
x, Eq(x*(x + 2)**y - 3, 0), S.Reals))
assert solveset(f3, x, S.Reals).dummy_eq(ConditionSet(
x, Eq(2**x - exp(x) - 3, 0), S.Reals))
assert solveset(f4, x, S.Reals).dummy_eq(ConditionSet(
x, Eq(-exp(x) + log(x), 0), S.Reals))
assert solveset(f5, x, S.Reals).dummy_eq(ConditionSet(
x, Eq(2**x + 3**x - 5**x, 0), S.Reals))
def test_exponential_symbols():
x, y, z = symbols('x y z', positive=True)
assert solveset(z**x - y, x, S.Reals) == Intersection(
S.Reals, FiniteSet(log(y)/log(z)))
f1 = 2*x**w - 4*y**w
f2 = (x/y)**w - 2
sol1 = Intersection({log(2)/(log(x) - log(y))}, S.Reals)
sol2 = Intersection({log(2)/log(x/y)}, S.Reals)
assert solveset(f1, w, S.Reals) == sol1, solveset(f1, w, S.Reals)
assert solveset(f2, w, S.Reals) == sol2, solveset(f2, w, S.Reals)
assert solveset(x**y - 1, y, S.Reals) == FiniteSet(0)
assert solveset(exp(x/y)*exp(-z/y) - 2, y, S.Reals) == FiniteSet(
(x - z)/log(2)) - FiniteSet(0)
assert solveset(a**x - b**x, x).dummy_eq(ConditionSet(
w, Ne(a, 0) & Ne(b, 0), FiniteSet(0)))
assert solveset(x**x, x, Interval.Lopen(0,oo)) == EmptySet()
def test_ignore_assumptions():
# make sure assumptions are ignored
xpos = symbols('x', positive=True)
x = symbols('x')
assert solveset_complex(xpos**2 - 4, xpos
) == solveset_complex(x**2 - 4, x)
@XFAIL
def test_issue_10864():
assert solveset(x**(y*z) - x, x, S.Reals) == FiniteSet(1)
def test_solve_only_exp_2():
assert solveset_real(sqrt(exp(x)) + sqrt(exp(-x)) - 4, x) == \
FiniteSet(log(7 - 4*sqrt(3)), log(4*sqrt(3) + 7))
def test_is_exponential():
assert _is_exponential(y, x) is False
assert _is_exponential(3**x - 2, x) is True
assert _is_exponential(5**x - 7**(2 - x), x) is True
assert _is_exponential(sin(2**x) - 4*x, x) is False
assert _is_exponential(x**y - z, y) is True
assert _is_exponential(x**y - z, x) is False
assert _is_exponential(2**x + 4**x - 1, x) is True
assert _is_exponential(x**(y*z) - x, x) is False
assert _is_exponential(x**(2*x) - 3**x, x) is False
assert _is_exponential(x**y - y*z, y) is False
assert _is_exponential(x**y - x*z, y) is True
def test_solve_exponential():
assert _solve_exponential(3**(2*x) - 2**(x + 3), 0, x, S.Reals) == \
FiniteSet(-3*log(2)/(-2*log(3) + log(2)))
assert _solve_exponential(2**y + 4**y, 1, y, S.Reals) == \
FiniteSet(log(Rational(-1, 2) + sqrt(5)/2)/log(2))
assert _solve_exponential(2**y + 4**y, 0, y, S.Reals) == \
S.EmptySet
assert _solve_exponential(2**x + 3**x - 5**x, 0, x, S.Reals) == \
ConditionSet(x, Eq(2**x + 3**x - 5**x, 0), S.Reals)
# end of exponential tests
# logarithmic tests
def test_logarithmic():
assert solveset_real(log(x - 3) + log(x + 3), x) == FiniteSet(
-sqrt(10), sqrt(10))
assert solveset_real(log(x + 1) - log(2*x - 1), x) == FiniteSet(2)
assert solveset_real(log(x + 3) + log(1 + 3/x) - 3, x) == FiniteSet(
-3 + sqrt(-12 + exp(3))*exp(Rational(3, 2))/2 + exp(3)/2,
-sqrt(-12 + exp(3))*exp(Rational(3, 2))/2 - 3 + exp(3)/2)
eq = z - log(x) + log(y/(x*(-1 + y**2/x**2)))
assert solveset_real(eq, x) == \
Intersection(S.Reals, FiniteSet(-sqrt(y**2 - y*exp(z)),
sqrt(y**2 - y*exp(z)))) - \
Intersection(S.Reals, FiniteSet(-sqrt(y**2), sqrt(y**2)))
assert solveset_real(
log(3*x) - log(-x + 1) - log(4*x + 1), x) == FiniteSet(Rational(-1, 2), S.Half)
assert solveset(log(x**y) - y*log(x), x, S.Reals) == S.Reals
@XFAIL
def test_uselogcombine_2():
eq = log(exp(2*x) + 1) + log(-tanh(x) + 1) - log(2)
assert solveset_real(eq, x) == EmptySet()
eq = log(8*x) - log(sqrt(x) + 1) - 2
assert solveset_real(eq, x) == EmptySet()
def test_is_logarithmic():
assert _is_logarithmic(y, x) is False
assert _is_logarithmic(log(x), x) is True
assert _is_logarithmic(log(x) - 3, x) is True
assert _is_logarithmic(log(x)*log(y), x) is True
assert _is_logarithmic(log(x)**2, x) is False
assert _is_logarithmic(log(x - 3) + log(x + 3), x) is True
assert _is_logarithmic(log(x**y) - y*log(x), x) is True
assert _is_logarithmic(sin(log(x)), x) is False
assert _is_logarithmic(x + y, x) is False
assert _is_logarithmic(log(3*x) - log(1 - x) + 4, x) is True
assert _is_logarithmic(log(x) + log(y) + x, x) is False
assert _is_logarithmic(log(log(x - 3)) + log(x - 3), x) is True
assert _is_logarithmic(log(log(3) + x) + log(x), x) is True
assert _is_logarithmic(log(x)*(y + 3) + log(x), y) is False
def test_solve_logarithm():
y = Symbol('y')
assert _solve_logarithm(log(x**y) - y*log(x), 0, x, S.Reals) == S.Reals
y = Symbol('y', positive=True)
assert _solve_logarithm(log(x)*log(y), 0, x, S.Reals) == FiniteSet(1)
# end of logarithmic tests
# lambert tests
def test_solve_lambert():
a = Symbol('a', real=True)
assert solveset_real(3*log(x) - x*log(3), x) == FiniteSet(
-3*LambertW(-log(3)/3)/log(3),
-3*LambertW(-log(3)/3, -1)/log(3))
assert solveset_real(exp(x) - 10*x, x) == FiniteSet(-LambertW(Rational(-1,10)), -LambertW(Rational(-1, 10), -1))
assert solveset(exp(x) - 10*x, x) == FiniteSet(-LambertW(Rational(-1, 10)), -LambertW(Rational(-1, 10), -1))
assert solveset_real(exp(x) + x, x) == FiniteSet(-LambertW(1))
assert solveset(exp(x) + x, x) == FiniteSet(-LambertW(1))
assert solveset_real(x + 2**x, x) == FiniteSet(-LambertW(log(2))/log(2))
assert solveset(x + 2**x, x) == FiniteSet(-LambertW(log(2))/log(2))
assert solveset_real(3*x + log(4*x), x) == FiniteSet(LambertW(Rational(3, 4))/3)
assert solveset(3*x + log(4*x), x) == FiniteSet(LambertW(Rational(3, 4))/3)
assert solveset_real(x*exp(x) - 1, x) == FiniteSet(LambertW(1))
assert solveset(x*exp(x) - 1, x) == FiniteSet(LambertW(1))
assert solveset_real(x**2 - 2**x, x) == solveset_real(-x**2 + 2**x, x)
assert solveset_real(x**x - 2, x) == FiniteSet(exp(LambertW(log(2))))
    assert solveset(x**x - 2, x) == FiniteSet(exp(LambertW(log(2))))  # --> takes solve() too much time
assert solveset_real(x**3 - 3**x, x) == FiniteSet(
-3*LambertW(-log(3)/3)/log(3), -3*LambertW(-log(3)/3, -1)/log(3))
assert solveset_real(log(log(x - 3)) + log(x-3), x) == FiniteSet(exp(LambertW(1)) + 3)
assert solveset(log(log(x - 3)) + log(x-3), x) == FiniteSet(exp(LambertW(1)) + 3)
assert solveset_real(2*x + 5 + log(3*x - 2), x) == \
FiniteSet(Rational(2, 3) + LambertW(2*exp(-Rational(19, 3))/3)/2)
assert dumeq(solveset(2*x + 5 + log(3*x - 2), x) ,
Union(FiniteSet(LambertW(2*exp(Rational(-19,3))/3)/2 + Rational(2,3)),\
ImageSet(Lambda(n, LambertW(-exp(Rational(-19,3))/3 - sqrt(3)*I*exp(Rational(-19,3))/3, n)/2 + Rational(2,3)), S.Integers), \
ImageSet(Lambda(n, LambertW(-exp(Rational(-19,3))/3 + sqrt(3)*I*exp(Rational(-19,3))/3, n)/2 + Rational(2,3)), S.Integers)))
assert solveset_real(x*log(x) + 3*x + 1, x) == S.EmptySet
assert dumeq(solveset(x*log(x) + 3*x + 1, x),ImageSet(Lambda(n, exp(LambertW(-exp(3), n) - 3)), S.Integers))
eq = (x*exp(x) - 3).subs(x, x*exp(x))
assert solveset_real(eq, x) == FiniteSet(LambertW(3*exp(-LambertW(3))))
assert solveset_real(tanh(x + 3)*tanh(x - 3) - 1, x) == EmptySet()
assert solveset_real(3**cos(x) - cos(x)**3,x) == S.EmptySet
assert solveset_real(LambertW(2*x) - y, x) == Intersection(FiniteSet(y*exp(y)/2), S.Reals)
a = Symbol('a',positive=True)
assert solveset_real(x*exp(x)- a, x) == \
Intersection(FiniteSet(LambertW(a)), Interval(0, oo))
a = Symbol('a',positive=True,real=True)
assert solveset_real(x*exp(x)- a, x) == \
Intersection(FiniteSet(LambertW(a)), Interval(0, oo))
a = Symbol('a',positive=True,complex=True)
assert solveset_real(x*exp(x)- a, x) == Intersection(FiniteSet(LambertW(a)), Interval(0, oo))
a = Symbol('a',negative=True)
assert solveset_real(x*exp(x)- a, x) == Union(FiniteSet(LambertW(a), LambertW(a, -1)), Interval(-exp(-1), 0))
a = Symbol('a')
assert solveset_real(x*exp(x)- a, x) == Union(Intersection(FiniteSet(LambertW(a,-1)), Interval(-exp(-1), 0)), \
Intersection(FiniteSet(LambertW(a)), Interval(-exp(-1), oo)))
assert dumeq(solveset(x*exp(x)- a, x), ImageSet(Lambda(n, LambertW(a, n)), S.Integers))
assert solveset_real(a/x + exp(x/2), x) == Union(\
Complement(Intersection(FiniteSet(2*LambertW(-a/2, -1)), Interval(-exp(-1), 0)), FiniteSet(0)), \
Complement(Intersection(FiniteSet(2*LambertW(-a/2)), Interval(-exp(-1), oo)), FiniteSet(0)))
assert dumeq(solveset(a/x + exp(x/2), x),\
Complement(ImageSet(Lambda(n, 2*LambertW(-a/2, n)), S.Integers), FiniteSet(0)))
# check collection
b = Symbol('b')
eq = 3*log(a**(3*x + 5)) + b*log(a**(3*x + 5)) + a**(3*x + 5)
assert solveset_real(eq, x) == Union(Intersection(FiniteSet((-log(a**5) - LambertW(1/(b + 3), -1))/(3*log(a))), Interval(-exp(-1), 0)),\
Intersection(FiniteSet((-log(a**5) - LambertW(1/(b + 3)))/(3*log(a))), Interval(-exp(-1), oo)))
assert dumeq(solveset(eq, x), ImageSet(Lambda(n, (-log(a**5) - LambertW(1/(b + 3), n))/(3*log(a))), S.Integers))
p = symbols('p', positive=True)
eq = 3*log(p**(3*x + 5)) + p**(3*x + 5)
assert solveset(eq, x) == FiniteSet(-S(5)/3 - LambertW(S(1)/3)/(3*log(p)))
assert solveset_real(eq, x) == Intersection(FiniteSet(Rational(-5, 3) - LambertW(Rational(1, 3))/(3*log(p))), Interval(0, oo))
assert solveset_real((a/x + exp(x/2)).diff(x), x) == \
Union(Complement(Intersection(FiniteSet(4*LambertW(-sqrt(2)*sqrt(a)/4, -1), 4*LambertW(sqrt(2)*sqrt(a)/4, -1)), Interval(-exp(-1), 0)), FiniteSet(0)), \
Complement(Intersection(FiniteSet(4*LambertW(-sqrt(2)*sqrt(a)/4), 4*LambertW(sqrt(2)*sqrt(a)/4)), Interval(-exp(-1), S.Infinity)), FiniteSet(0)))
assert dumeq(solveset((a/x + exp(x/2)).diff(x), x),
Union(Complement(ImageSet(Lambda(n, 4*LambertW(-sqrt(2)*sqrt(a)/4, n)), S.Integers), FiniteSet(0)),
Complement(ImageSet(Lambda(n, 4*LambertW(sqrt(2)*sqrt(a)/4, n)), S.Integers), FiniteSet(0))))
a = -1
assert solveset_real((a/x + exp(x/2)).diff(x), x) == S.EmptySet
assert dumeq(solveset((a/x + exp(x/2)).diff(x), x), \
Union(Complement(ImageSet(Lambda(n, 4*LambertW(-sqrt(2)*I/4, n)), S.Integers), FiniteSet(0)),
Complement(ImageSet(Lambda(n, 4*LambertW(sqrt(2)*I/4, n)), S.Integers), FiniteSet(0))))
a = 1
assert solveset_real((a/x + exp(x/2)).diff(x), x) == FiniteSet(
4*LambertW(sqrt(2)/4),4*LambertW(-sqrt(2)/4, -1),4*LambertW(-sqrt(2)/4))
assert solveset((a/x + exp(x/2)).diff(x), x) == FiniteSet(4*LambertW(-sqrt(2)/4), 4*LambertW(sqrt(2)/4),\
4*LambertW(-sqrt(2)/4, -1))
assert solveset_real(3*x + 5 + 2**(-5*x + 3), x) is S.EmptySet
def test_is_lambert():
a, b, c = symbols('a,b,c')
assert _is_lambert(x**2,x) is False
assert _is_lambert(a**x**2+b*x+c,x) is True
assert _is_lambert(E**2,x) is False
assert _is_lambert(x*E**2,x) is False
assert _is_lambert(3*log(x) - x*log(3),x) is True
assert _is_lambert(log(log(x - 3)) + log(x-3),x) is True
assert _is_lambert(5*x - 1 + 3*exp(2 - 7*x),x) is True
assert _is_lambert((a/x + exp(x/2)).diff(x, 2),x) is True
assert _is_lambert((x**2 - 2*x + 1).subs(x, log(x) + 3*x), x) is True
def test_solve_bivariate():
assert solveset_real((x**2 - 2*x + 1).subs(x, log(x) + 3*x), x) == \
FiniteSet(LambertW(3*S.Exp1)/3)
assert solveset_real((x**2 - 2*x + 1).subs(x, (log(x) + 3*x)**2 - 1), x) == \
FiniteSet(LambertW(3*exp(sqrt(2)))/3, LambertW(3*exp(-sqrt(2)))/3)
assert solveset_real((x**2 - 2*x - 2).subs(x, log(x) + 3*x), x) == \
FiniteSet(LambertW(3*exp(1 + sqrt(3)))/3, LambertW(3*exp(1 - sqrt(3)))/3)
eq = 2*(3*x + 4)**5 - 6*7**(3*x + 9)
result = solveset_real(eq, x)
ans = S.EmptySet
assert result == ans
    assert solveset_real(eq.expand(), x) == result  # it takes solve() too much time to complete
assert solveset_real(-a*x + 2*x*log(x), x) == FiniteSet(exp(a/2))
@XFAIL
def test_other_solve_lambert():
assert solveset_real(x**a - a**x, x) == \
FiniteSet(a, -a*LambertW(-log(a)/a)/log(a))
# end of transolve's tests
def test_linear_coeffs():
from sympy.solvers.solveset import linear_coeffs
assert linear_coeffs(0, x) == [0, 0]
assert all(i is S.Zero for i in linear_coeffs(0, x))
assert linear_coeffs(x + 2*y + 3, x, y) == [1, 2, 3]
assert linear_coeffs(x + 2*y + 3, y, x) == [2, 1, 3]
assert linear_coeffs(x + 2*x**2 + 3, x, x**2) == [1, 2, 3]
raises(ValueError, lambda:
linear_coeffs(x + 2*x**2 + x**3, x, x**2))
raises(ValueError, lambda:
linear_coeffs(1/x*(x - 1) + 1/x, x))
assert linear_coeffs(a*(x + y), x, y) == [a, a, 0]
assert linear_coeffs(1.0, x, y) == [0, 0, 1.0]
# modular tests
def test_is_modular():
assert _is_modular(y, x) is False
assert _is_modular(Mod(x, 3) - 1, x) is True
assert _is_modular(Mod(x**3 - 3*x**2 - x + 1, 3) - 1, x) is True
assert _is_modular(Mod(exp(x + y), 3) - 2, x) is True
assert _is_modular(Mod(exp(x + y), 3) - log(x), x) is True
assert _is_modular(Mod(x, 3) - 1, y) is False
assert _is_modular(Mod(x, 3)**2 - 5, x) is False
assert _is_modular(Mod(x, 3)**2 - y, x) is False
assert _is_modular(exp(Mod(x, 3)) - 1, x) is False
assert _is_modular(Mod(3, y) - 1, y) is False
def test_invert_modular():
n = Dummy('n', integer=True)
from sympy.solvers.solveset import _invert_modular as invert_modular
# non invertible cases
assert invert_modular(Mod(sin(x), 7), S(5), n, x) == (Mod(sin(x), 7), 5)
assert invert_modular(Mod(exp(x), 7), S(5), n, x) == (Mod(exp(x), 7), 5)
assert invert_modular(Mod(log(x), 7), S(5), n, x) == (Mod(log(x), 7), 5)
# a is symbol
assert dumeq(invert_modular(Mod(x, 7), S(5), n, x),
(x, ImageSet(Lambda(n, 7*n + 5), S.Integers)))
# a.is_Add
assert dumeq(invert_modular(Mod(x + 8, 7), S(5), n, x),
(x, ImageSet(Lambda(n, 7*n + 4), S.Integers)))
assert invert_modular(Mod(x**2 + x, 7), S(5), n, x) == \
(Mod(x**2 + x, 7), 5)
# a.is_Mul
assert dumeq(invert_modular(Mod(3*x, 7), S(5), n, x),
(x, ImageSet(Lambda(n, 7*n + 4), S.Integers)))
assert invert_modular(Mod((x + 1)*(x + 2), 7), S(5), n, x) == \
(Mod((x + 1)*(x + 2), 7), 5)
# a.is_Pow
assert invert_modular(Mod(x**4, 7), S(5), n, x) == \
(x, EmptySet())
assert dumeq(invert_modular(Mod(3**x, 4), S(3), n, x),
(x, ImageSet(Lambda(n, 2*n + 1), S.Naturals0)))
assert dumeq(invert_modular(Mod(2**(x**2 + x + 1), 7), S(2), n, x),
(x**2 + x + 1, ImageSet(Lambda(n, 3*n + 1), S.Naturals0)))
assert invert_modular(Mod(sin(x)**4, 7), S(5), n, x) == (x, EmptySet())
def test_solve_modular():
n = Dummy('n', integer=True)
# if rhs has symbol (need to be implemented in future).
assert solveset(Mod(x, 4) - x, x, S.Integers
).dummy_eq(
ConditionSet(x, Eq(-x + Mod(x, 4), 0),
S.Integers))
# when _invert_modular fails to invert
assert solveset(3 - Mod(sin(x), 7), x, S.Integers
).dummy_eq(
ConditionSet(x, Eq(Mod(sin(x), 7) - 3, 0), S.Integers))
assert solveset(3 - Mod(log(x), 7), x, S.Integers
).dummy_eq(
ConditionSet(x, Eq(Mod(log(x), 7) - 3, 0), S.Integers))
assert solveset(3 - Mod(exp(x), 7), x, S.Integers
).dummy_eq(ConditionSet(x, Eq(Mod(exp(x), 7) - 3, 0),
S.Integers))
# EmptySet solution definitely
assert solveset(7 - Mod(x, 5), x, S.Integers) == EmptySet()
assert solveset(5 - Mod(x, 5), x, S.Integers) == EmptySet()
# Negative m
assert dumeq(solveset(2 + Mod(x, -3), x, S.Integers),
ImageSet(Lambda(n, -3*n - 2), S.Integers))
assert solveset(4 + Mod(x, -3), x, S.Integers) == EmptySet()
# linear expression in Mod
assert dumeq(solveset(3 - Mod(x, 5), x, S.Integers),
ImageSet(Lambda(n, 5*n + 3), S.Integers))
assert dumeq(solveset(3 - Mod(5*x - 8, 7), x, S.Integers),
ImageSet(Lambda(n, 7*n + 5), S.Integers))
assert dumeq(solveset(3 - Mod(5*x, 7), x, S.Integers),
ImageSet(Lambda(n, 7*n + 2), S.Integers))
# higher degree expression in Mod
assert dumeq(solveset(Mod(x**2, 160) - 9, x, S.Integers),
Union(ImageSet(Lambda(n, 160*n + 3), S.Integers),
ImageSet(Lambda(n, 160*n + 13), S.Integers),
ImageSet(Lambda(n, 160*n + 67), S.Integers),
ImageSet(Lambda(n, 160*n + 77), S.Integers),
ImageSet(Lambda(n, 160*n + 83), S.Integers),
ImageSet(Lambda(n, 160*n + 93), S.Integers),
ImageSet(Lambda(n, 160*n + 147), S.Integers),
ImageSet(Lambda(n, 160*n + 157), S.Integers)))
assert solveset(3 - Mod(x**4, 7), x, S.Integers) == EmptySet()
assert dumeq(solveset(Mod(x**4, 17) - 13, x, S.Integers),
Union(ImageSet(Lambda(n, 17*n + 3), S.Integers),
ImageSet(Lambda(n, 17*n + 5), S.Integers),
ImageSet(Lambda(n, 17*n + 12), S.Integers),
ImageSet(Lambda(n, 17*n + 14), S.Integers)))
# a.is_Pow tests
assert dumeq(solveset(Mod(7**x, 41) - 15, x, S.Integers),
ImageSet(Lambda(n, 40*n + 3), S.Naturals0))
assert dumeq(solveset(Mod(12**x, 21) - 18, x, S.Integers),
ImageSet(Lambda(n, 6*n + 2), S.Naturals0))
assert dumeq(solveset(Mod(3**x, 4) - 3, x, S.Integers),
ImageSet(Lambda(n, 2*n + 1), S.Naturals0))
assert dumeq(solveset(Mod(2**x, 7) - 2 , x, S.Integers),
ImageSet(Lambda(n, 3*n + 1), S.Naturals0))
assert dumeq(solveset(Mod(3**(3**x), 4) - 3, x, S.Integers),
Intersection(ImageSet(Lambda(n, Intersection({log(2*n + 1)/log(3)},
S.Integers)), S.Naturals0), S.Integers))
# Implemented for m without primitive root
assert solveset(Mod(x**3, 7) - 2, x, S.Integers) == EmptySet()
assert dumeq(solveset(Mod(x**3, 8) - 1, x, S.Integers),
ImageSet(Lambda(n, 8*n + 1), S.Integers))
assert dumeq(solveset(Mod(x**4, 9) - 4, x, S.Integers),
Union(ImageSet(Lambda(n, 9*n + 4), S.Integers),
ImageSet(Lambda(n, 9*n + 5), S.Integers)))
# domain intersection
assert dumeq(solveset(3 - Mod(5*x - 8, 7), x, S.Naturals0),
Intersection(ImageSet(Lambda(n, 7*n + 5), S.Integers), S.Naturals0))
# Complex args
assert solveset(Mod(x, 3) - I, x, S.Integers) == \
EmptySet()
assert solveset(Mod(I*x, 3) - 2, x, S.Integers
).dummy_eq(
ConditionSet(x, Eq(Mod(I*x, 3) - 2, 0), S.Integers))
assert solveset(Mod(I + x, 3) - 2, x, S.Integers
).dummy_eq(
ConditionSet(x, Eq(Mod(x + I, 3) - 2, 0), S.Integers))
# issue 17373 (https://github.com/sympy/sympy/issues/17373)
assert dumeq(solveset(Mod(x**4, 14) - 11, x, S.Integers),
Union(ImageSet(Lambda(n, 14*n + 3), S.Integers),
ImageSet(Lambda(n, 14*n + 11), S.Integers)))
assert dumeq(solveset(Mod(x**31, 74) - 43, x, S.Integers),
ImageSet(Lambda(n, 74*n + 31), S.Integers))
# issue 13178
n = symbols('n', integer=True)
a = 742938285
b = 1898888478
m = 2**31 - 1
c = 20170816
assert dumeq(solveset(c - Mod(a**n*b, m), n, S.Integers),
ImageSet(Lambda(n, 2147483646*n + 100), S.Naturals0))
assert dumeq(solveset(c - Mod(a**n*b, m), n, S.Naturals0),
Intersection(ImageSet(Lambda(n, 2147483646*n + 100), S.Naturals0),
S.Naturals0))
assert dumeq(solveset(c - Mod(a**(2*n)*b, m), n, S.Integers),
Intersection(ImageSet(Lambda(n, 1073741823*n + 50), S.Naturals0),
S.Integers))
assert solveset(c - Mod(a**(2*n + 7)*b, m), n, S.Integers) == EmptySet()
assert dumeq(solveset(c - Mod(a**(n - 4)*b, m), n, S.Integers),
Intersection(ImageSet(Lambda(n, 2147483646*n + 104), S.Naturals0),
S.Integers))
# end of modular tests
def test_issue_17276():
assert nonlinsolve([Eq(x, 5**(S(1)/5)), Eq(x*y, 25*sqrt(5))], x, y) == \
FiniteSet((5**(S(1)/5), 25*5**(S(3)/10)))
def test_issue_10426():
x=Dummy('x')
a=Symbol('a')
n=Dummy('n')
assert (solveset(sin(x + a) - sin(x), a)).dummy_eq(Dummy('x')) == (Union(
ImageSet(Lambda(n, 2*n*pi), S.Integers),
Intersection(S.Complexes, ImageSet(Lambda(n, -I*(I*(2*n*pi + arg(-exp(-2*I*x))) + 2*im(x))),
S.Integers)))).dummy_eq(Dummy('x,n'))
@XFAIL
def test_substitution_with_infeasible_solution():
a00, a01, a10, a11, l0, l1, l2, l3, m0, m1, m2, m3, m4, m5, m6, m7, c00, c01, c10, c11, p00, p01, p10, p11 = symbols(
'a00, a01, a10, a11, l0, l1, l2, l3, m0, m1, m2, m3, m4, m5, m6, m7, c00, c01, c10, c11, p00, p01, p10, p11'
)
solvefor = [p00, p01, p10, p11, c00, c01, c10, c11, m0, m1, m3, l0, l1, l2, l3]
system = [
-l0 * c00 - l1 * c01 + m0 + c00 + c01,
-l0 * c10 - l1 * c11 + m1,
-l2 * c00 - l3 * c01 + c00 + c01,
-l2 * c10 - l3 * c11 + m3,
-l0 * p00 - l2 * p10 + p00 + p10,
-l1 * p00 - l3 * p10 + p00 + p10,
-l0 * p01 - l2 * p11,
-l1 * p01 - l3 * p11,
-a00 + c00 * p00 + c10 * p01,
-a01 + c01 * p00 + c11 * p01,
-a10 + c00 * p10 + c10 * p11,
-a11 + c01 * p10 + c11 * p11,
-m0 * p00,
-m1 * p01,
-m2 * p10,
-m3 * p11,
-m4 * c00,
-m5 * c01,
-m6 * c10,
-m7 * c11,
m2,
m4,
m5,
m6,
m7
]
sol = FiniteSet(
(0, Complement(FiniteSet(p01), FiniteSet(0)), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, l2, l3),
(p00, Complement(FiniteSet(p01), FiniteSet(0)), 0, p11, 0, 0, 0, 0, 0, 0, 0, 1, 1, -p01/p11, -p01/p11),
(0, Complement(FiniteSet(p01), FiniteSet(0)), 0, p11, 0, 0, 0, 0, 0, 0, 0, 1, -l3*p11/p01, -p01/p11, l3),
(0, Complement(FiniteSet(p01), FiniteSet(0)), 0, p11, 0, 0, 0, 0, 0, 0, 0, -l2*p11/p01, -l3*p11/p01, l2, l3),
)
assert sol != nonlinsolve(system, solvefor)
| 40.935659
| 160
| 0.563155
|
c320c3f3b7a034f8a18ee62226c2749a4fd174af
| 1,427
|
py
|
Python
|
rllib/environment/vectorized/pendulum.py
|
SamueleMeta/optimal_IS
|
7d8e0041825acfa003874cd1ad2aec0581f6a9e1
|
[
"MIT"
] | null | null | null |
rllib/environment/vectorized/pendulum.py
|
SamueleMeta/optimal_IS
|
7d8e0041825acfa003874cd1ad2aec0581f6a9e1
|
[
"MIT"
] | null | null | null |
rllib/environment/vectorized/pendulum.py
|
SamueleMeta/optimal_IS
|
7d8e0041825acfa003874cd1ad2aec0581f6a9e1
|
[
"MIT"
] | null | null | null |
"""Vectorized Gym Pendulum Environment."""
import numpy as np
from gym.envs.classic_control.pendulum import PendulumEnv, angle_normalize
from rllib.environment.vectorized.util import VectorizedEnv
class VectorizedPendulumEnv(PendulumEnv, VectorizedEnv):
"""Vectorized implementation of Pendulum."""
def step(self, action):
"""See `PendulumEnv.step()'."""
g = self.g
m = self.m
length = self.l
inertia = m * length ** 2
bk = self.bk
dt = self.dt
theta, theta_dot = self.state[..., 0], self.state[..., 1]
u = self.clip(action, -self.max_torque, self.max_torque)[..., 0]
if not u.shape:
self.last_u = u # for rendering
costs = angle_normalize(theta) ** 2 + 0.1 * theta_dot ** 2 + 0.001 * (u ** 2)
theta_d_dot = -3 * g / (2 * length) * bk.sin(theta + np.pi) + 3.0 / inertia * u
new_theta_dot = theta_dot + theta_d_dot * dt
new_theta = theta + new_theta_dot * dt
new_theta_dot = self.clip(new_theta_dot, -self.max_speed, self.max_speed)
self.state = self.bk.stack((new_theta, new_theta_dot), -1)
done = bk.zeros_like(costs, dtype=bk.bool)
return self._get_obs(), -costs, done, {}
def _get_obs(self):
theta, theta_dot = self.state[..., 0], self.state[..., 1]
return self.bk.stack((self.bk.cos(theta), self.bk.sin(theta), theta_dot), -1)
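# --- Hedged usage sketch (not part of the original module) ---------------
# Minimal batched-stepping example. It assumes the default backend
# (`self.bk`) resolves to numpy and that `reset()` behaves like the stock
# gym PendulumEnv; both are assumptions about this repo's API, not facts.
if __name__ == "__main__":
    env = VectorizedPendulumEnv()
    env.reset()
    # Broadcast the single (theta, theta_dot) state to a batch of 8 pendulums.
    env.state = np.tile(env.state, (8, 1))
    actions = np.zeros((8, 1))  # zero torque for every environment in the batch
    obs, reward, done, info = env.step(actions)
    print(obs.shape, reward.shape)  # expected: (8, 3) and (8,)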
| 34.804878
| 87
| 0.60897
|
d750caaf7b3d26c7f24346867e370302ed4a76d1
| 1,308
|
py
|
Python
|
dataloader/CustomDataSetLoader.py
|
BadlyDrunkScotsman/PSMNet
|
2bed5282daccb7eba2ef1f454e1e9f34e0f9aed3
|
[
"MIT"
] | null | null | null |
dataloader/CustomDataSetLoader.py
|
BadlyDrunkScotsman/PSMNet
|
2bed5282daccb7eba2ef1f454e1e9f34e0f9aed3
|
[
"MIT"
] | null | null | null |
dataloader/CustomDataSetLoader.py
|
BadlyDrunkScotsman/PSMNet
|
2bed5282daccb7eba2ef1f454e1e9f34e0f9aed3
|
[
"MIT"
] | null | null | null |
import torch.utils.data as data
from PIL import Image
import os
import os.path
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def dataloader(filepath):
all_left_img = []
all_right_img = []
all_left_disp = []
eval_left_img = []
eval_right_img = []
eval_left_disp = []
dir = filepath
dir_disp = filepath + '/cam_dep_60_Bl0/'
subdir = ['/cam_60_BL0/', '/cam_60_BL30/']
train_file = open(os.path.join(dir, 'train.txt'), 'r')
valid_file = open(os.path.join(dir, 'valid.txt'), 'r')
train_lines = train_file.readlines()
valid_lines = valid_file.readlines()
for line in train_lines:
line = line.strip()
all_left_img.append(dir + subdir[0] + line)
all_left_disp.append(dir_disp + line)
all_right_img.append(dir + subdir[1] + line)
for line in valid_lines:
line = line.strip()
eval_left_img.append(dir + subdir[0] + line)
eval_left_disp.append(dir_disp + line)
eval_right_img.append(dir + subdir[1] + line)
return all_left_img, all_right_img, all_left_disp, eval_left_img, eval_right_img, eval_left_disp
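# --- Hedged usage sketch (not part of the original module) ---------------
# Illustrative call only: the root path is hypothetical and must contain
# 'train.txt', 'valid.txt' and the 'cam_60_BL0/', 'cam_60_BL30/',
# 'cam_dep_60_Bl0/' folders referenced above.
if __name__ == "__main__":
    root = "/path/to/dataset"  # hypothetical location
    train_L, train_R, train_disp, eval_L, eval_R, eval_disp = dataloader(root)
    print(len(train_L), "training pairs,", len(eval_L), "validation pairs")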
| 25.153846
| 100
| 0.643731
|
ac1e30cd9512b0892980ed668f38d0302521e61c
| 2,686
|
py
|
Python
|
IMLearn/learners/regressors/linear_regression.py
|
LidarAb/IML.HUJI
|
798c99f9b1c29a701c1e06e923a429cae639937f
|
[
"MIT"
] | null | null | null |
IMLearn/learners/regressors/linear_regression.py
|
LidarAb/IML.HUJI
|
798c99f9b1c29a701c1e06e923a429cae639937f
|
[
"MIT"
] | null | null | null |
IMLearn/learners/regressors/linear_regression.py
|
LidarAb/IML.HUJI
|
798c99f9b1c29a701c1e06e923a429cae639937f
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import NoReturn
from ...base import BaseEstimator
import numpy as np
from numpy.linalg import pinv
import IMLearn.metrics.loss_functions as loss_functions
class LinearRegression(BaseEstimator):
"""
Linear Regression Estimator
Solving Ordinary Least Squares optimization problem
"""
def __init__(self, include_intercept: bool = True) -> LinearRegression:
"""
Instantiate a linear regression estimator
Parameters
----------
include_intercept: bool, default=True
Should fitted model include an intercept or not
Attributes
----------
include_intercept_: bool
Should fitted model include an intercept or not
coefs_: ndarray of shape (n_features,) or (n_features+1,)
Coefficients vector fitted by linear regression. To be set in
`LinearRegression.fit` function.
"""
super().__init__()
self.include_intercept_, self.coefs_ = include_intercept, None
def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
"""
Fit Least Squares model to given samples
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to fit an estimator for
y : ndarray of shape (n_samples, )
Responses of input data to fit to
Notes
-----
Fits model with or without an intercept depending on value of `self.include_intercept_`
"""
if not self.include_intercept_:
new_x = np.delete(X, 0, 1)
else:
new_x = X
self.coefs_ = pinv(new_x) @ y
def _predict(self, X: np.ndarray) -> np.ndarray:
"""
Predict responses for given samples using fitted estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to predict responses for
Returns
-------
responses : ndarray of shape (n_samples, )
Predicted responses of given samples
"""
return X @ self.coefs_
def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
"""
Evaluate performance under MSE loss function
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples
y : ndarray of shape (n_samples, )
True labels of test samples
Returns
-------
loss : float
Performance under MSE loss function
"""
y_pred = self._predict(X)
return loss_functions.mean_square_error(y, y_pred)
| 28.574468
| 95
| 0.593075
|
e7898c08bc90f34ff16a94f0674b31474b60a3f2
| 11,215
|
py
|
Python
|
flask_dance/consumer/storage/sqla.py
|
timgates42/flask-dance
|
ebe3ea48d3263136e18ccea37e50292b7c503c67
|
[
"MIT"
] | null | null | null |
flask_dance/consumer/storage/sqla.py
|
timgates42/flask-dance
|
ebe3ea48d3263136e18ccea37e50292b7c503c67
|
[
"MIT"
] | null | null | null |
flask_dance/consumer/storage/sqla.py
|
timgates42/flask-dance
|
ebe3ea48d3263136e18ccea37e50292b7c503c67
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from sqlalchemy import Column, Integer, String, DateTime
from sqlalchemy.ext.mutable import MutableDict
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy_utils import JSONType
from sqlalchemy.orm.exc import NoResultFound
from flask_dance.utils import FakeCache, first
from flask_dance.consumer.storage import BaseStorage
try:
from flask_login import AnonymousUserMixin
except ImportError:
AnonymousUserMixin = None
class OAuthConsumerMixin(object):
"""
A :ref:`SQLAlchemy declarative mixin <sqlalchemy:declarative_mixins>` with
some suggested columns for a model to store OAuth tokens:
``id``
an integer primary key
``provider``
a short name to indicate which OAuth provider issued
this token
``created_at``
an automatically generated datetime that indicates when
the OAuth provider issued this token
``token``
a :class:`JSON <sqlalchemy_utils.types.json.JSONType>` field to store
the actual token received from the OAuth provider
"""
@declared_attr
def __tablename__(cls):
return "flask_dance_{}".format(cls.__name__.lower())
id = Column(Integer, primary_key=True)
provider = Column(String(50), nullable=False)
created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
token = Column(MutableDict.as_mutable(JSONType), nullable=False)
def __repr__(self):
parts = []
parts.append(self.__class__.__name__)
if self.id:
parts.append("id={}".format(self.id))
if self.provider:
parts.append('provider="{}"'.format(self.provider))
return "<{}>".format(" ".join(parts))
class SQLAlchemyStorage(BaseStorage):
"""
Stores and retrieves OAuth tokens using a relational database through
the `SQLAlchemy`_ ORM.
.. _SQLAlchemy: http://www.sqlalchemy.org/
"""
def __init__(
self,
model,
session,
user=None,
user_id=None,
user_required=None,
anon_user=None,
cache=None,
):
"""
Args:
model: The SQLAlchemy model class that represents the OAuth token
table in the database. At a minimum, it must have a
``provider`` column and a ``token`` column. If tokens are to be
associated with individual users in the application, it must
also have a ``user`` relationship to your User model.
It is recommended, though not required, that your model class
inherit from
:class:`~flask_dance.consumer.storage.sqla.OAuthConsumerMixin`.
session:
The :class:`SQLAlchemy session <sqlalchemy.orm.session.Session>`
for the database. If you're using `Flask-SQLAlchemy`_, this is
``db.session``.
user:
If you want OAuth tokens to be associated with individual users
in your application, this is a reference to the user that you
want to use for the current request. It can be an actual User
object, a function that returns a User object, or a proxy to the
User object. If you're using `Flask-Login`_, this is
:attr:`~flask.ext.login.current_user`.
user_id:
If you want to pass an identifier for a user instead of an actual
User object, use this argument instead. Sometimes it can save
a database query or two. If both ``user`` and ``user_id`` are
                provided, ``user_id`` will take precedence.
user_required:
If set to ``True``, an exception will be raised if you try to
set or retrieve an OAuth token without an associated user.
If set to ``False``, OAuth tokens can be set with or without
an associated user. The default is auto-detection: it will
be ``True`` if you pass a ``user`` or ``user_id`` parameter,
``False`` otherwise.
anon_user:
If anonymous users are represented by a class in your application,
provide that class here. If you are using `Flask-Login`_,
anonymous users are represented by the
:class:`flask_login.AnonymousUserMixin` class, but you don't have
to provide that -- Flask-Dance treats it as the default.
cache:
An instance of `Flask-Caching`_. Providing a caching system is
highly recommended, but not required.
.. _Flask-SQLAlchemy: http://pythonhosted.org/Flask-SQLAlchemy/
.. _Flask-Login: https://flask-login.readthedocs.io/
.. _Flask-Caching: https://flask-caching.readthedocs.io/en/latest/
"""
self.model = model
self.session = session
self.user = user
self.user_id = user_id
if user_required is None:
self.user_required = user is not None or user_id is not None
else:
self.user_required = user_required
self.anon_user = anon_user or AnonymousUserMixin
self.cache = cache or FakeCache()
def make_cache_key(self, blueprint, user=None, user_id=None):
uid = first([user_id, self.user_id, blueprint.config.get("user_id")])
if not uid:
u = first(
_get_real_user(ref, self.anon_user)
for ref in (user, self.user, blueprint.config.get("user"))
)
uid = getattr(u, "id", u)
return "flask_dance_token|{name}|{user_id}".format(
name=blueprint.name, user_id=uid
)
def get(self, blueprint, user=None, user_id=None):
""" When you have a statement in your code that says
"if <provider>.authorized:" (for example "if twitter.authorized:"),
        a long string of function calls results in this function being used to
check the Flask server's cache and database for any records associated
with the current_user. The `user` and `user_id` parameters are actually
not set in that case (see base.py:token(), that's what calls this
function), so the user information is instead loaded from the
current_user (if that's what you specified when you created the
blueprint) with blueprint.config.get('user_id').
:param blueprint:
:param user:
:param user_id:
:return:
"""
# check cache
cache_key = self.make_cache_key(blueprint=blueprint, user=user, user_id=user_id)
token = self.cache.get(cache_key)
if token:
return token
# if not cached, make database queries
query = self.session.query(self.model).filter_by(provider=blueprint.name)
uid = first([user_id, self.user_id, blueprint.config.get("user_id")])
u = first(
_get_real_user(ref, self.anon_user)
for ref in (user, self.user, blueprint.config.get("user"))
)
if self.user_required and not u and not uid:
raise ValueError("Cannot get OAuth token without an associated user")
# check for user ID
if hasattr(self.model, "user_id") and uid:
query = query.filter_by(user_id=uid)
# check for user (relationship property)
elif hasattr(self.model, "user") and u:
query = query.filter_by(user=u)
# if we have the property, but not value, filter by None
elif hasattr(self.model, "user_id"):
query = query.filter_by(user_id=None)
# run query
try:
token = query.one().token
except NoResultFound:
token = None
# cache the result
self.cache.set(cache_key, token)
return token
def set(self, blueprint, token, user=None, user_id=None):
uid = first([user_id, self.user_id, blueprint.config.get("user_id")])
u = first(
_get_real_user(ref, self.anon_user)
for ref in (user, self.user, blueprint.config.get("user"))
)
if self.user_required and not u and not uid:
raise ValueError("Cannot set OAuth token without an associated user")
# if there was an existing model, delete it
existing_query = self.session.query(self.model).filter_by(
provider=blueprint.name
)
# check for user ID
has_user_id = hasattr(self.model, "user_id")
if has_user_id and uid:
existing_query = existing_query.filter_by(user_id=uid)
# check for user (relationship property)
has_user = hasattr(self.model, "user")
if has_user and u:
existing_query = existing_query.filter_by(user=u)
# queue up delete query -- won't be run until commit()
existing_query.delete()
# create a new model for this token
kwargs = {"provider": blueprint.name, "token": token}
if has_user_id and uid:
kwargs["user_id"] = uid
if has_user and u:
kwargs["user"] = u
self.session.add(self.model(**kwargs))
# commit to delete and add simultaneously
self.session.commit()
# invalidate cache
self.cache.delete(
self.make_cache_key(blueprint=blueprint, user=user, user_id=user_id)
)
def delete(self, blueprint, user=None, user_id=None):
query = self.session.query(self.model).filter_by(provider=blueprint.name)
uid = first([user_id, self.user_id, blueprint.config.get("user_id")])
u = first(
_get_real_user(ref, self.anon_user)
for ref in (user, self.user, blueprint.config.get("user"))
)
if self.user_required and not u and not uid:
raise ValueError("Cannot delete OAuth token without an associated user")
# check for user ID
if hasattr(self.model, "user_id") and uid:
query = query.filter_by(user_id=uid)
# check for user (relationship property)
elif hasattr(self.model, "user") and u:
query = query.filter_by(user=u)
# if we have the property, but not value, filter by None
elif hasattr(self.model, "user_id"):
query = query.filter_by(user_id=None)
# run query
query.delete()
self.session.commit()
# invalidate cache
self.cache.delete(
self.make_cache_key(blueprint=blueprint, user=user, user_id=user_id)
)
def _get_real_user(user, anon_user=None):
"""
Given a "user" that could be:
* a real user object
* a function that returns a real user object
* a LocalProxy to a real user object (like Flask-Login's ``current_user``)
This function returns the real user object, regardless of which we have.
"""
if hasattr(user, "_get_current_object"):
# this is a proxy
user = user._get_current_object()
if callable(user):
# this is a function
user = user()
if anon_user and isinstance(user, anon_user):
return None
return user
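# --- Hedged usage sketch (not part of the original module) ---------------
# Wiring the storage into a provider blueprint, assuming the `OAuth` model
# and `db` object sketched after OAuthConsumerMixin above, plus Flask-Login's
# `current_user`. GitHub is used only as an example provider.
#
#     from flask_dance.contrib.github import make_github_blueprint
#     from flask_login import current_user
#
#     blueprint = make_github_blueprint(
#         storage=SQLAlchemyStorage(OAuth, db.session, user=current_user)
#     )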
| 39.911032
| 88
| 0.618457
|
086fcdea8dda34e51db9417f80d5f372f1251f5f
| 2,798
|
py
|
Python
|
commons/c2cgeoportal_commons/alembic/main/338b57593823_remove_trigger_on_role_name_change.py
|
rbovard/c2cgeoportal
|
61b7a4fc98f686f9b7d4c5fda7bb4c5cc09f8de8
|
[
"BSD-2-Clause-FreeBSD"
] | 43
|
2015-02-16T06:56:25.000Z
|
2021-09-12T17:49:16.000Z
|
commons/c2cgeoportal_commons/alembic/main/338b57593823_remove_trigger_on_role_name_change.py
|
rbovard/c2cgeoportal
|
61b7a4fc98f686f9b7d4c5fda7bb4c5cc09f8de8
|
[
"BSD-2-Clause-FreeBSD"
] | 3,227
|
2015-01-05T10:30:59.000Z
|
2022-03-31T03:25:39.000Z
|
commons/c2cgeoportal_commons/alembic/main/338b57593823_remove_trigger_on_role_name_change.py
|
rbovard/c2cgeoportal
|
61b7a4fc98f686f9b7d4c5fda7bb4c5cc09f8de8
|
[
"BSD-2-Clause-FreeBSD"
] | 57
|
2015-01-29T08:32:12.000Z
|
2022-03-16T07:07:33.000Z
|
# Copyright (c) 2018-2019, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
# pylint: disable=no-member
"""
Remove trigger on_role_name_change.
Revision ID: 338b57593823
Revises: dba87f2647f9
Create Date: 2018-12-05 09:13:21.191424
"""
from alembic import op
from c2c.template.config import config
# revision identifiers, used by Alembic.
revision = "338b57593823"
down_revision = "dba87f2647f9"
branch_labels = None
depends_on = None
def upgrade() -> None:
"""Upgrade."""
schema = config["schema"]
op.execute(f"DROP TRIGGER on_role_name_change ON {schema}.role")
op.execute(f"DROP FUNCTION {schema}.on_role_name_change()")
def downgrade() -> None:
"""Downgrade."""
schema = config["schema"]
staticschema = config["schema_static"]
op.execute(
"""
CREATE OR REPLACE FUNCTION {schema}.on_role_name_change()
RETURNS trigger AS
$$
BEGIN
IF NEW.name <> OLD.name THEN
UPDATE {staticschema}."user" SET role_name = NEW.name WHERE role_name = OLD.name;
END IF;
RETURN NEW;
END;
$$
LANGUAGE plpgsql""".format(
schema=schema, staticschema=staticschema
)
)
op.execute(
"CREATE TRIGGER on_role_name_change AFTER UPDATE ON {schema}.role FOR EACH ROW "
"EXECUTE PROCEDURE {schema}.on_role_name_change()".format(schema=schema)
)
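# --- Hedged usage note (not part of the original migration) ---------------
# Like any Alembic revision, this one would typically be applied or rolled
# back from the command line (c2cgeoportal may wrap these commands in its
# own tooling):
#
#     alembic upgrade 338b57593823      # runs upgrade() above
#     alembic downgrade dba87f2647f9    # restores the trigger via downgrade()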
| 34.54321
| 88
| 0.750179
|
4261d5c311787ac0dda83085eb784eabf66362a9
| 2,498
|
py
|
Python
|
snippets.py
|
dmidlo/histdata.com-tools
|
dd1f17134711e5588ba019d6575287937936afb7
|
[
"MIT"
] | null | null | null |
snippets.py
|
dmidlo/histdata.com-tools
|
dd1f17134711e5588ba019d6575287937936afb7
|
[
"MIT"
] | null | null | null |
snippets.py
|
dmidlo/histdata.com-tools
|
dd1f17134711e5588ba019d6575287937936afb7
|
[
"MIT"
] | null | null | null |
# import datatable as dt
# from datatable import f
# Try something like this
# DT = dt.Frame(["20220401 001612839"])
# print(DT)
# | C0
# | str32
# -- + ------------------
# 0 | 20220401 000012839
# year, month, day, hour
# DT = DT[:, dt.time.ymdt(f[:][0:4].as_type(int), \
# f[:][4:6].as_type(int), \
# f[:][6:8].as_type(int), \
# f[:][9:11].as_type(int), \
# f[:][11:13].as_type(int), \
# f[:][13:15].as_type(int), \
# 10**6 * f[:][15:18].as_type(int))]
# # > | C0
# # > | time64
# # > -- + -----------------------
# # > 0 | 2022-04-01T00:00:12.839
# # > [1 row x 1 column]
# print(DT)
# DT = DT[:, (f[:].as_type(int)//10**6)]
# print(DT)
# DT = DT[:, f[:].as_type(dt.Type.time64)**6]
# print(DT)
# print(DT)
# > | C0
# > | int64
# > -- + -------------
# > 0 | 1648771212839
# > [1 row x 1 column]
# DT = dt.Frame(["20220401 000012839"])
# DT = DT[:, f[:][0:4]+"-"+f[:][4:6]+"-"+f[:][6:8]+" "+f[:][9:11]+":"+f[:][11:13]+":"+f[:][13:15]+"."+f[:][15:18]]
# DT[0] = dt.Type.time64
# print(DT[:, f[:].as_type(int)//10**6])
# | C0
# | int64
# -- + -------------
# 0 | 1648771212839
# [1 row x 1 column]
# histdatacom -I -p eurusd usdjpy gbpusd usdcad usdchf audusd nzdusd -f ascii -t tick-data-quotes -s start -e now
# histdatacom -I -p eurgbp euraud gbpchf audnzd audcad audchf gbpaud usdmxn -f ascii -t tick-data-quotes -s start -e now -c low
# histdatacom -I -p eurchf eurcad eurnzd eurjpy gbpjpy chfjpy cadjpy -f ascii -t tick-data-quotes -s start -e now -c low
# histdatacom -I -p audjpy nzdjpy gbpcad nzdcad sgdjpy gbpnzd cadchf -f ascii -t tick-data-quotes -s start -e now -c low
# histdatacom -I -p eurtry usdtry usdsek usdnok usddkk usdzar usdhkd -f ascii -t tick-data-quotes -s start -e now -c low
# histdatacom -I -p usdsgd eurpln eurhuf nzdchf usdhuf usdpln eurczk -f ascii -t tick-data-quotes -s start -e now -c low
# histdatacom -I -p eursek usdczk zarjpy eurdkk eurnok usddkk -f ascii -t tick-data-quotes -s start -e now -c low
# histdatacom -I -p xauusd xauaud xauchf bcousd wtiusd xaueur xagusd xaugbp -f ascii -t tick-data-quotes -s start -e now -c low
# histdatacom -I -p grxeur auxaud frxeur hkxhkd spxusd jpxjpy udxusd -f ascii -t tick-data-quotes -s start -e now -c low
# histdatacom -I -p nsxusd ukxgbp etxeur -f ascii -t tick-data-quotes -s start -e now -c low
| 41.633333
| 127
| 0.558847
|
e1714ff66e00b41a99eeb0443e2f72c1d4917702
| 22,478
|
py
|
Python
|
file/importer.py
|
stylekilla/syncmrt
|
816bb57d80d6595719b8b9d7f027f4f17d0a6c0a
|
[
"Apache-2.0"
] | null | null | null |
file/importer.py
|
stylekilla/syncmrt
|
816bb57d80d6595719b8b9d7f027f4f17d0a6c0a
|
[
"Apache-2.0"
] | 25
|
2019-03-05T05:56:35.000Z
|
2019-07-24T13:11:57.000Z
|
file/importer.py
|
stylekilla/syncmrt
|
816bb57d80d6595719b8b9d7f027f4f17d0a6c0a
|
[
"Apache-2.0"
] | 1
|
2019-11-27T05:10:47.000Z
|
2019-11-27T05:10:47.000Z
|
import os
import pydicom as dicom
import numpy as np
from file.image import Image2d
from file import hdf5
from tools.opencl import gpu as gpuInterface
from tools.math import wcs2wcs
from natsort import natsorted
from PyQt5 import QtCore, QtWidgets
import csv
import logging
np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})
'''
The importer classes take DICOM/HDF5 images and turn them into a
class (Image2d or Image3d) for plotting in QsWidgets.QPlot().
This is where the DICOM information is disconnected from the rest of
the program: only what the internals of SyncMRT needs to operate is
kept. In the future such integrations could use DICOM throughout, but
the rest of the code would then have to be rewritten to understand
DICOM, so for now this module acts as a custom interface.
Think of this module as the interface to QPlot. As such it should
probably be packaged with it.
'''
class sync_dx:
def __init__(self,dataset,new=False):
# Read in hdf5 dataset.
if new:
self.file = hdf5.new(dataset)
else:
self.file = hdf5.load(dataset)
def getImageList(self):
""" Reads the image names in the HDF5 file. Return as list. """
return list(self.file['Image'].keys())
def getImageSet(self,idx):
logging.debug("Reading image set {}.".format(idx))
_set = self.file.getImageSet(idx)
imageSet = []
for i in range(len(_set)):
# Get the image and its attributes.
image = Image2d()
image.pixelArray = _set[str(i+1)][()]
image.extent = _set[str(i+1)].attrs.get('Extent',default=None)
image.patientIsocenter = _set[str(i+1)].attrs.get('Image Isocenter',default=None)
image.patientPosition = list(_set[str(i+1)].attrs.get('Patient Support Position',default=None)) + list(_set[str(i+1)].attrs.get('Patient Support Angle',default=None))
image.view['title'] = str(_set[str(i+1)].attrs.get('Image Angle',default="None"))+"\u00B0"
image.imagingAngle = _set[str(i+1)].attrs.get('Image Angle',default=None)
image.M = _set[str(i+1)].attrs.get('M',default=None)
image.Mi = _set[str(i+1)].attrs.get('Mi',default=None)
image.comment = _set[str(i+1)].attrs.get('Comment',default=None)
# Append the image.
imageSet.append(image)
return imageSet
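# --- Hedged usage sketch (not part of the original module) ---------------
# Reading image sets out of an existing HDF5 file. The path and the set key
# are illustrative; the exact key type expected by `hdf5.load` is an
# assumption here.
#
#     dataset = sync_dx('/path/to/patient.hdf5')
#     print(dataset.getImageList())       # e.g. ['1', '2', ...]
#     images = dataset.getImageSet('1')   # list of Image2d objects
#     print(images[0].extent, images[0].imagingAngle)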
class csvPlan(QtCore.QObject):
newSequence = QtCore.pyqtSignal()
def __init__(self,file=None):
"""
Create a customised treatment plan that can be delivered on the beamline.
"""
super().__init__()
# Create an empty sequence.
self.sequence = []
if type(file) != type(None):
self.loadPlan(file)
def addSequence(self,position,speed,contour):
""" Add a new delivery sequence to the plan. """
kwargs = {}
kwargs['position'] = position
kwargs['speed'] = speed
kwargs['contour'] = contour
kwargs['treated'] = False
self.sequence.append(kwargs)
def insertSequence(self,index,position,speed,contour):
""" Insert a new delivery sequence in the plan. """
kwargs = {}
kwargs['position'] = position
kwargs['speed'] = speed
kwargs['contour'] = contour
kwargs['treated'] = False
self.sequence.insert(index,kwargs)
def removeSequence(self,index):
""" Remove a beam delivery sequence. """
del self.sequence[index]
def getSequence(self,index):
""" Get a specified delivery sequence. """
return self.sequence[index]
def numberOfBeams(self):
""" Return the number of beam delivery sequences present in the plan. """
return len(self.sequence)
def loadPlan(self,file):
""" Load a csv file containing the plan. """
import csv
with open(file) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
row['Sequence'] = int(row['Sequence'])
# row['Position'] = list(map(float,row['Position'][1:-1].split(',')))
row['Angle'] = float(row['Angle'])
row['Speed'] = float(row['Speed'])
self.sequence.append(row)
self.newSequence.emit()
def reset(self):
""" Reset the plan. This removes all sequences. """
self.sequence = []
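# Example sketch of a CSV layout that csvPlan.loadPlan() above could parse. The
# column names come from the reader loop ('Sequence', 'Angle', 'Speed'); the
# values below are made up for illustration only.
#
#   Sequence,Angle,Speed
#   1,0.0,2.5
#   2,90.0,2.5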
def checkDicomModality(dataset,modality):
""" Check the modality of each dicom file and return only the files that match the desired modality. """
# Start with empty list of files.
files = {}
for i in range(len(dataset)):
# Read the file in.
testFile = dicom.dcmread(dataset[i])
if testFile.Modality == modality:
# Save in dict where the key is the slice position.
# files[int(testFile.SliceLocation)] = dataset[i]
files[list(map(float,testFile.ImagePositionPatient))[2]] = dataset[i]
else:
pass
# Sort the files based on slice location.
sortedFiles = []
for key in sorted(files.keys()):
sortedFiles.append(files[key])
# Return the sorted file list.
return sortedFiles
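# Example sketch of feeding a folder of DICOM files through checkDicomModality()
# before building a ct() object. The folder path is an assumption for
# illustration only.
def _example_collect_ct_slices(folder="./dicom"):
    files = [os.path.join(folder, f) for f in natsorted(os.listdir(folder))]
    return checkDicomModality(files, 'CT')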
class ct(QtCore.QObject):
newCtView = QtCore.pyqtSignal()
def __init__(self,dataset,gpu):
super().__init__()
# Hold a reference to the gpu instance.
self.gpu = gpu
# Check that the dataset is indeed a DICOM CT dataset.
dataset = checkDicomModality(dataset,'CT')
if len(dataset) == 0:
# If the dataset has no CT files, then exit this function.
return
else:
# Else, read the first one as a reference point.
ref = dicom.dcmread(dataset[0])
# Get the 3D CT array shape.
shape = np.array([int(ref.Rows), int(ref.Columns), len(dataset)])
# Create an empty python array to dump the CT data into.
self.pixelArray = np.zeros(shape, dtype=np.int32)
# Read array in one slice at a time.
for index,fn in enumerate(dataset):
ctSlice = dicom.dcmread(fn)
self.pixelArray[:,:,dataset.index(fn)] = ctSlice.pixel_array
# Rescale the Hounsfield Units.
self.pixelArray = (self.pixelArray*ref.RescaleSlope) + ref.RescaleIntercept
# Get current CT orientation.
self.patientPosition = ref.PatientPosition
# Python coordinate system.
self.PCS = np.array([[0,1,0],[1,0,0],[0,0,1]])
# Patient reference coordinate system (RCS).
dcmAxes = np.array(list(map(float,ref.ImageOrientationPatient)))
x = dcmAxes[:3]
y = dcmAxes[3:6]
z = np.cross(x,y)
self.RCS = np.vstack((x,y,z))
# Calculate spacing between slices as it isn't always provided.
z1 = list(map(float,ref.ImagePositionPatient))[2]
z2 = list(map(float,dicom.dcmread(dataset[-1]).ImagePositionPatient))[2]
spacingBetweenSlices = (z2-z1)/(len(dataset)-1)
# Get the pixel size.
self.pixelSize = np.append(np.array(list(map(float,ref.PixelSpacing))),spacingBetweenSlices)
# Get the top left front pixel position in the RCS (set as the centre of the voxel).
self.TLF = np.array(list(map(float,ref.ImagePositionPatient)))
# Adjust the TLF to sit on the outside corner of the voxel (to align with the expected inputs for matplotlib's extent).
self.TLF += np.sign(self.TLF)*(self.pixelSize/2)
# Construct the transformation matrix, M.
self.M = np.zeros((4,4))
self.M[:3,0] = self.pixelSize[0]*x
self.M[:3,1] = self.pixelSize[1]*y
self.M[:3,2] = self.pixelSize[2]*z
self.M[:3,3] = self.TLF
self.M[3,3] = 1
# Get the top left front and bottom right back indices for calculating the extent.
voxelIndex1 = np.array([0,0,0,1]).reshape((4,1))
voxelIndex2 = np.array([shape[0],shape[1],shape[2],1]).reshape((4,1))
# Compute the voxel indices in mm.
voxelPosition1 = self.M@voxelIndex1
voxelPosition2 = self.M@voxelIndex2
# Extent is [Left,Right,Bottom,Top,Front,Back]
_x = [voxelPosition1[0],voxelPosition2[0]]
_y = [voxelPosition2[1],voxelPosition1[1]]
_z = [voxelPosition1[2],voxelPosition2[2]]
self.extent = np.array(_x+_y+_z).reshape((6,))
# Placeholder for a view extent.
self.viewExtent = np.zeros(self.extent.shape)
# Calculate the base extent.
self.baseExtent = np.array(sorted(_x)+sorted(_y)+sorted(_z)).reshape((6,))
# Find the (0,0,0) mm as an 'index' (float).
# self.zeroIndex = np.linalg.inv(self.M)@np.array([0,0,0,1])
# Load array onto GPU for future reference.
self.gpu.loadData(self.pixelArray)
# Create a 2d image list for plotting.
self.image = [Image2d(),Image2d()]
# Create an isocenter for treatment if desired. This must be in DICOM XYZ.
self.isocenter = None
# Set the default.
self.calculateView('AP')
def calculateView(self,view,roi=None,flatteningMethod='sum'):
""" Rotate the CT array for a new view of the dataset. """
# Make the RCS for each view.
default = np.array([[1,0,0],[0,1,0],[0,0,1]])
si = np.array([[-1,0,0],[0,1,0],[0,0,-1]])
lr = np.array([[0,0,1],[0,1,0],[-1,0,0]])
rl = np.array([[0,0,-1],[0,1,0],[1,0,0]])
ap = np.array([[1,0,0],[0,0,1],[0,-1,0]])
pa = np.array([[-1,0,0],[0,0,-1],[0,-1,0]])
# Assign matrix, m, to the view matrix and axis titles.
if view == 'SI':
RCS = si
t1 = 'SI'
t2 = 'RL'
elif view == 'IS':
RCS = default
t1 = 'IS'
t2 = 'LR'
elif view == 'LR':
RCS = lr
t1 = 'LR'
t2 = 'SI'
elif view == 'RL':
RCS = rl
t1 = 'RL'
t2 = 'IS'
elif view == 'AP':
RCS = ap
t1 = 'AP'
t2 = 'LR'
elif view == 'PA':
RCS = pa
t1 = 'PA'
t2 = 'RL'
# Calculate a transform, W, that takes us from the original CT RCS to the new RCS.
W = wcs2wcs(self.RCS,RCS)
# Rotate the CT if required.
if np.array_equal(W,np.identity(3)):
pixelArray = self.pixelArray
else:
pixelArray = self.gpu.rotate(W)
# Calculate the new extent.
# Find Origin
origin = (np.linalg.inv(self.M)@np.array([0,0,0,1]))[:3]
# Rotate the Origin
origin_rot = W@origin
# Rotate the pixel size.
pixelSize_rot = np.absolute(W@self.pixelSize)
# Find bounding box of output array.
basicBox = np.array([
[0,0,0],
[1,0,0],
[0,1,0],
[1,1,0],
[0,0,1],
[1,0,1],
[0,1,1],
[1,1,1]
])
inputShape = basicBox * self.pixelArray.shape
outputShape = np.zeros(basicBox.shape)
for index in range(8):
outputShape[index,:] = W@inputShape[index,:]
mins = np.absolute(np.amin(outputShape,axis=0))
outputShape += mins
# Calculate new origin situated in output array.
origin_new = origin_rot + mins
# Calculate new extent.
extent = np.zeros(self.extent.shape)
TLF = -origin_new * np.sum(RCS,axis=0) * pixelSize_rot
extent[::2] = TLF
extent[1::2] = TLF + np.amax(outputShape,axis=0) * np.sum(RCS,axis=0) * pixelSize_rot
# Extent is calculated as: [left, right, BOTTOM, TOP, front, back]. Swap top/bot values.
extent[2], extent[3] = extent[3], extent[2]
self.viewExtent = extent
# Calculate the view matrix.
self.viewM = np.zeros((4,4))
self.viewM[0,:3] = pixelSize_rot[0] * (np.sign(np.sum(RCS[:,0]))*np.array([1,0,0]))
self.viewM[1,:3] = pixelSize_rot[1] * (np.sign(np.sum(RCS[:,1]))*np.array([0,1,0]))
self.viewM[2,:3] = pixelSize_rot[2] * (np.sign(np.sum(RCS[:,2]))*np.array([0,0,1]))
self.viewM[:3,3] = TLF
self.viewM[3,3] = 1
if np.array_equal(roi,self.viewExtent):
# This does not work...
temporary_extent = self.viewExtent
elif type(roi) is not type(None):
# Set the view extent to the ROI.
temporary_extent = roi
# Get the array indices that match the roi.
indices = self.calculateIndices(temporary_extent)
x1,x2,y1,y2,z1,z2 = indices
# Calculate new extent based of approximate indices of input ROI.
p1 = self.viewM@np.array([x1,y1,z1,1])
p2 = self.viewM@np.array([x2,y2,z2,1])
temporary_extent = np.zeros(extent.shape)
temporary_extent[::2] = p1[:3]
temporary_extent[1::2] = p2[:3]
# Order the indices
x1,x2 = sorted([x1,x2])
y1,y2 = sorted([y1,y2])
z1,z2 = sorted([z1,z2])
# Slice the array.
pixelArray = pixelArray[y1:y2,x1:x2,z1:z2]
else:
temporary_extent = self.viewExtent
# Split up into x, y and z extents for 2D image.
x,y,z = [temporary_extent[i:i+2] for i in range(0,len(temporary_extent),2)]
# Get the first flattened image.
if flatteningMethod == 'sum': self.image[0].pixelArray = np.sum(pixelArray,axis=2)
elif flatteningMethod == 'max': self.image[0].pixelArray = np.amax(pixelArray,axis=2)
self.image[0].extent = np.array(list(x)+list(y))
self.image[0].view = { 'title':t1 }
# Get the second flattened image.
if flatteningMethod == 'sum': self.image[1].pixelArray = np.sum(pixelArray,axis=1)
elif flatteningMethod == 'max': self.image[1].pixelArray = np.amax(pixelArray,axis=1)
self.image[1].extent = np.array(list(z)+list(y))
self.image[1].view = { 'title':t2 }
# Emit a signal to say a new view has been loaded.
self.newCtView.emit()
def calculateIndices(self,extent):
""" Calculate the indices of the CT array for a given ROI. """
p1 = np.insert(extent[::2],3,1)
p2 = np.insert(extent[1::2],3,1)
i1 = (np.linalg.inv(self.viewM)@p1)[:3]
i2 = (np.linalg.inv(self.viewM)@p2)[:3]
indices = np.zeros(np.array(self.extent.shape))
indices[::2] = i1
indices[1::2] = i2
indices = list(map(int,indices))
return indices
class beamClass:
def __init__(self):
self.image = None
self.mask = None
self.maskThickness = None
self.gantry = None
self.patientSupport = None
self.collimator = None
self.pitch = None
self.roll = None
self.isocenter = None
self.BCS = None
self._arr2bcs = None
self._dcm2bcs = None
class rtplan:
def __init__(self,rtplan,ct,gpu):
"""
RCS: Reference Coordinate System (Patient)
BCS: Beam Coordinate System (Linac)
PCS: Python Coordinate System (DICOM to Python)
"""
self.PCS = np.array([[0,1,0],[1,0,0],[0,0,1]])
# Firstly, read in DICOM rtplan file.
ref = dicom.dcmread(rtplan[0])
# Construct an object array of the amount of beams to be delivered.
self.beam = np.empty(ref.FractionGroupSequence[0].NumberOfBeams,dtype=object)
# Get the isocenter. Current only supports a single isocenter.
self.isocenter = np.array(list(map(float,ref.BeamSequence[0].ControlPointSequence[0].IsocenterPosition)))
# logging.info("Isocenter (DICOM) {}".format(self.isocenter))
for i in range(len(self.beam)):
# Get the appropriate data for each beam.
self.beam[i] = beamClass()
# Extract confromal mask data.
# If a block is specified for the MLC then get it.
if ref.BeamSequence[0].NumberOfBlocks > 0:
temp = np.array(list(map(float,ref.BeamSequence[i].BlockSequence[0].BlockData)))
class Mask:
x = np.append(temp[0::2],temp[0])
y = np.append(temp[1::2],temp[1])
self.beam[i].mask = Mask
self.beam[i].maskThickness = ref.BeamSequence[i].BlockSequence[0].BlockThickness
# Get the jaws position for backup.
# Get the machine positions.
self.beam[i].gantry = float(ref.BeamSequence[i].ControlPointSequence[0].GantryAngle)
self.beam[i].patientSupport = float(ref.BeamSequence[i].ControlPointSequence[0].PatientSupportAngle)
self.beam[i].collimator = float(ref.BeamSequence[i].ControlPointSequence[0].BeamLimitingDeviceAngle)
# Currently... these aren't available in treatment planning. Sad face.
self.beam[i].pitch = float(ref.BeamSequence[i].ControlPointSequence[0].TableTopPitchAngle)
self.beam[i].roll = float(ref.BeamSequence[i].ControlPointSequence[0].TableTopRollAngle)
logging.info("Gantry Rotation: {}".format(self.beam[i].gantry))
logging.info("Patient Support: {}".format(self.beam[i].patientSupport))
logging.info("Collimator Rotation: {}".format(self.beam[i].collimator))
# Linac Coordinate System w.r.t WCS.
LCS = np.array([[1,0,0],[0,0,1],[0,-1,0]])
# Beam Port Coordinate system w.r.t WCS.
BCS = np.array([[1,0,0],[0,-1,0],[0,0,-1]])
# Calculate the rotation of the bed in the LCS.
# rotations = [-self.beam[i].patientSupport,self.beam[i].roll,self.beam[i].pitch]
# axes = ['y','z','x']
# cs_bed = (LCS@activeRotation(np.identity(3),rotations,axes))@np.linalg.inv(LCS)
rotations = [self.beam[i].patientSupport]
axes = ['z']
cs_bed = (LCS@activeRotation(np.identity(3),rotations,axes))@np.linalg.inv(LCS)
# Rotate the WCS to the beam port view w.r.t the WCS.
rotations = [90]
axes = ['x']
cs_beamport = activeRotation(np.identity(3),rotations,axes)
# Rotate the gantry and collimator w.r.t to the BCS.
rotations = [self.beam[i].gantry,self.beam[i].collimator]
axes = ['y','z']
cs_linac = (BCS@activeRotation(np.identity(3),rotations,axes))@np.linalg.inv(BCS)
# Calculate the new patient coordinate system.
# A passive rotation of the patient position w.r.t to the LCS.
temp = ct.RCS@cs_bed
# A passive rotation of the inverse beam port takes the WCS into the view of the BCS w.r.t the WCS.
temp = temp@np.linalg.inv(cs_beamport)
# A passive rotation of the BEV w.r.t the BCS.
self.beam[i].RCS = temp@cs_linac
# Calculate a transform, W, that takes anything from the ct RCS to the beam RCS.
self.beam[i].W = wcs2wcs(ct.RCS, self.beam[i].RCS)
logging.info("\nBED R:\n {}".format(cs_bed))
logging.info("\nBEAM PORT R:\n {}".format(cs_beamport))
logging.info("\nLINAC R:\n {}".format(cs_linac))
# logging.info("\nCT RCS:\n {}".format(ct.RCS))
logging.info("\nBEV RCS:\n {}".format(self.beam[i].RCS))
logging.info("\nW:\n {}".format(self.beam[i].W))
# Rotate the CT.
self.beam[i].pixelArray = gpu.rotate(self.beam[i].W)
# Calculate the new pixel size.
self.beam[i].pixelSize = np.absolute(self.beam[i].W@ct.pixelSize)
logging.info("\nPixelSize: {}".format(self.beam[i].pixelSize))
# Create the 2d projection images.
self.beam[i].image = [Image2d(),Image2d()]
# testAxes = np.absolute(self.beam[i].W)
# Find the RCS of the beam view.
testAxes = np.absolute(self.beam[i].RCS)
# Axes (x is fixed, so whichever argument is maximised tells us which axis is mapped onto our fixed x axis).
x = np.argmax(testAxes[:,0])
y = np.argmax(testAxes[:,1])
z = np.argmax(testAxes[:,2])
# Directions. Add +1 to axis identifiers since you can't have -0 but you can have -1...
xd = (x+1)*np.sign(self.beam[i].RCS[x,0])
yd = (y+1)*np.sign(self.beam[i].RCS[y,1])
zd = (z+1)*np.sign(self.beam[i].RCS[z,2])
# Extent.
# Axis tells us which extent modifer to take and in what order.
xe = ct.baseExtent[x*2:x*2+2][::np.sign(xd).astype(int)]
ye = ct.baseExtent[y*2:y*2+2][::np.sign(yd).astype(int)]
ze = ct.baseExtent[z*2:z*2+2][::np.sign(zd).astype(int)]
self.beam[i].extent = np.hstack((xe,ye,ze)).reshape((6,))
# Top left front.
self.beam[i].TLF = self.beam[i].extent[::2]
# Get each axis for transform M.
x = self.beam[i].RCS[0,:]
y = self.beam[i].RCS[1,:]
z = self.beam[i].RCS[2,:]
# Construct the transformation matrix, M.
self.beam[i].M = np.zeros((4,4))
self.beam[i].M[:3,0] = self.beam[i].pixelSize[0]*x
self.beam[i].M[:3,1] = self.beam[i].pixelSize[1]*y
self.beam[i].M[:3,2] = self.beam[i].pixelSize[2]*z
self.beam[i].M[:3,3] = self.beam[i].TLF
self.beam[i].M[3,3] = 1
# Calculate new isocenter position.
self.beam[i].isocenter = np.absolute(wcs2wcs(np.identity(3),self.beam[i].RCS))@self.isocenter
logging.info("\nIsocenter: {}".format(self.beam[i].isocenter))
# Flatten the 3d image to the two 2d images.
self.beam[i].image[0].pixelArray = np.sum(self.beam[i].pixelArray,axis=2)
self.beam[i].image[0].extent = np.array([self.beam[i].extent[0],self.beam[i].extent[1],self.beam[i].extent[3],self.beam[i].extent[2]])
self.beam[i].image[1].pixelArray = np.sum(self.beam[i].pixelArray,axis=1)
self.beam[i].image[1].extent = np.array([self.beam[i].extent[4],self.beam[i].extent[5],self.beam[i].extent[3],self.beam[i].extent[2]])
def getIsocenter(self,beamIndex):
return self.PCS@self.beam[beamIndex].isocenter
def activeRotation(cs,theta,axis):
"""
Active rotation of 'cs' by 'theta' about 'axis' for a Right Handed Coordinate System.
When viewed from the end of an axis, a positive rotation appears anticlockwise.
When looking down the axis, a positive rotation appears clockwise.
If theta = T:
T = T3 x T2 x T1 ...
If cs = P:
P' = T x P
"""
# Put angles into radians.
rotations = []
for i, _ in enumerate(theta):
t = np.deg2rad(theta[i])
if axis[i] == 'x': r = np.array([[1,0,0],[0,np.cos(t),-np.sin(t)],[0,np.sin(t),np.cos(t)]])
elif axis[i] == 'y': r = np.array([[np.cos(t),0,np.sin(t)],[0,1,0],[-np.sin(t),0,np.cos(t)]])
elif axis[i] == 'z': r = np.array([[np.cos(t),-np.sin(t),0],[np.sin(t),np.cos(t),0],[0,0,1]])
rotations.append(r)
# Calculate out the combined rotations.
m = np.identity(3)
for i, _ in enumerate(rotations):
m = m@rotations[i]
# Rotate coordinate system.
rotated_cs = m@cs
return rotated_cs
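# Quick illustrative sanity check of activeRotation(): a +90 degree active
# rotation about z should carry the x axis onto the y axis (anticlockwise when
# viewed from the end of the +z axis). The helper below is for illustration only.
def _example_active_rotation():
    frame = activeRotation(np.identity(3), [90], ['z'])
    # The rotated x axis (first column) should be approximately (0, 1, 0).
    return np.allclose(frame[:, 0], [0, 1, 0])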
def calculateNewImageInformation(patientPosition,cs,arraySize,pixelSize,leftTopFront):
# Find which python axes the dicom axes are maximised in.
magnitudes = np.argmax(np.absolute(cs),axis=0)
sx = np.sign(cs[:,0][magnitudes[0]])
sy = np.sign(cs[:,1][magnitudes[1]])
sz = np.sign(cs[:,2][magnitudes[2]])
signs = np.array([sx,sy,sz])
# Set the labels for the patient position.
rcsLabels = np.array(['?','?','?','?','?','?'])
if patientPosition == 'HFS': rcsLabels = np.array(['P','A','R','L','I','S'])
elif patientPosition == 'HFP': rcsLabels = np.array(['A','P','R','L','I','S'])
elif patientPosition == 'FFS': rcsLabels = np.array(['P','A','L','R','S','I'])
elif patientPosition == 'FFP': rcsLabels = np.array(['A','P','L','R','S','I'])
# If magnitudes[0] = 0, then this is the DCM X axis mapped onto the python X axis.
# DCM X Axis = Right to Left (- to +).
# DCM Input for TLF corner is always assigned to (-x,-y,-z), otherwise described as (-0,-1,-2).
# The extent is then that corner + the pixelsize * arraysize * direction (from R to L, T to B, F to B).
for i in range(len(magnitudes)):
if magnitudes[i] == 0:
if signs[i] == +1:
xAxis = str(rcsLabels[0]+rcsLabels[1])
top = leftTopFront[0]
bottom = top + (pixelSize[0]*arraySize[0]*signs[i])
elif signs[i] == -1:
xAxis = str(rcsLabels[1]+rcsLabels[0])
bottom = leftTopFront[0]
top = bottom + (pixelSize[0]*arraySize[0]*signs[i])
elif magnitudes[i] == 1:
if signs[i] == +1:
yAxis = str(rcsLabels[2]+rcsLabels[3])
left = leftTopFront[1]
right = left + (pixelSize[1]*arraySize[1]*signs[i])
elif signs[i] == -1:
yAxis = str(rcsLabels[3]+rcsLabels[2])
right = leftTopFront[1]
left = right + (pixelSize[1]*arraySize[1]*signs[i])
elif magnitudes[i] == 2:
if signs[i] == +1:
zAxis = str(rcsLabels[4]+rcsLabels[5])
front = leftTopFront[2]
back = front + (pixelSize[2]*arraySize[2]*signs[i])
elif signs[i] == -1:
zAxis = str(rcsLabels[5]+rcsLabels[4])
back = leftTopFront[2]
front = back + (pixelSize[2]*arraySize[2]*signs[i])
extent = np.array([left,right,bottom,top,front,back])
labels = np.array([xAxis,yAxis,zAxis])
return extent, labels
| 36.788871
| 169
| 0.666162
|
eee36d733f56b35ea257a19a7406ae9a31da74f7
| 6,197
|
py
|
Python
|
SEIR_ImpVac.py
|
malenetxeberria/TFG-IngElec
|
1a60be3d767540e9254aa3ae0348ae0dbc669758
|
[
"MIT"
] | null | null | null |
SEIR_ImpVac.py
|
malenetxeberria/TFG-IngElec
|
1a60be3d767540e9254aa3ae0348ae0dbc669758
|
[
"MIT"
] | null | null | null |
SEIR_ImpVac.py
|
malenetxeberria/TFG-IngElec
|
1a60be3d767540e9254aa3ae0348ae0dbc669758
|
[
"MIT"
] | 1
|
2021-01-27T19:27:59.000Z
|
2021-01-27T19:27:59.000Z
|
# -*- coding: utf-8 -*-
"""
Date: 18/02/2020
Description: The SEIR epidemic model with pulse vaccination.
"""
import numpy as np
from scipy.integrate import odeint
import matplotlib
import matplotlib.pyplot as plt
import sys
# -----------------------------------------------------------------------------
# LaTex
# -----------------------------------------------------------------------------
matplotlib.rcParams['text.usetex'] = True
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
font = {'weight': 'normal', 'size': 12} # Graph number's fontsize
plt.rc('font', **font)
plt.rc('legend', fontsize=11) # Legend's fontsize
# -----------------------------------------------------------------------------
# Parameter Declaration
# -----------------------------------------------------------------------------
# Initial number of population
N0 = 4e7
# Initial proportion of susceptible, exposed, infectious and recovered ind.
S0, E0, I0, R0 = 1.0 - (1.0/N0), 0, 1.0/N0, 0
# Demographic parameters
A, mu = 0.005, 0.005
# Epidemiologic parameters
alpha, beta, gamma, sigma = 0.01, 0.57, 0.067, 0.13
# Vaccination proportion
p = 0.60
# Pulse vaccination period
T = 30
# Time interval limits
t0, tmax = 0, 1000
# Time subintervals' step number
ss = 350
# -----------------------------------------------------------------------------
# Differential equations
# -----------------------------------------------------------------------------
def deriv(y, t, A, alpha, beta, gamma, mu, sigma):
"""
Implements the differential equations in which the SEIR model is based.
Args:
y (tuple): tuple containing S, E, I and R variables
t (Numpy.ndarray): grid of time points in [t0, tmax] interval
A (float): proportion of new individuals per unit time
mu (float): natural death rate
alpha (float): disease-related death rate
beta (float): contact rate
gamma (float): recovery rate
sigma (float): inverse latent period
Returns:
dSdt (float): derivative of S in time t
dEdt (float): derivative of E in time t
dIdt (float): derivative of I in time t
dRdt (float): derivative of R in time t
"""
S, E, I, R = y
dSdt = A - (beta*I+mu)*S
dEdt = beta*S*I - (mu+sigma)*E
dIdt = sigma*E - (mu+gamma+alpha)*I
dRdt = gamma*I - mu*R
return dSdt, dEdt, dIdt, dRdt
def integrate(y0, t):
"""
Function that integrates the SEIR equations over the given time interval.
Args:
y0 (tuple): tuple containing the initial values of the S, E, I and R variables
t (Numpy.ndarray): grid of time points in [t0, tmax] interval
Returns:
S (Numpy.ndarray): solution of S in [t0, tmax] interval
E (Numpy.ndarray): solution of E in [t0, tmax] interval
I (Numpy.ndarray): solution of I in [t0, tmax] interval
R (Numpy.ndarray): solution of R in [t0, tmax] interval
N (Numpy.ndarray): solution of N in [t0, tmax] interval
"""
ret = odeint(deriv, y0, t, args=(A, alpha, beta, gamma, mu, sigma))
S, E, I, R = ret.T
N = S + E + I + R
return [S, E, I, R, N]
# -----------------------------------------------------------------------------
# Integrate over the different time subintervals
# -----------------------------------------------------------------------------
# First time interval
y0 = S0, E0, I0, R0
t = np.linspace(t0, T, ss)
[S, E, I, R, N] = integrate(y0, t)
# Number of time intervals: IT MUST BE GREATER THAN OR EQUAL TO 1
nf = (tmax-t0)/T
n = int(np.floor(nf))
# Middle time intervals
for i in range(1,n):
length = len(N)
S0 = (1-p)*S[length-1]
R0 = R[length-1] + p*S[length-1]
y0 = S0, E[length-1], I[length-1], R0
t = np.linspace(i*T, (i+1)*T, ss)
[subS, subE, subI, subR, subN] = integrate(y0, t)
S = np.append(S, subS)
E = np.append(E, subE)
I = np.append(I, subI)
R = np.append(R, subR)
N = np.append(N, subN)
# Last time interval
if nf!=n:
S0 = (1-p)*S[length+ss-1]
R0 = R[length+ss-1] + p*S[length+ss-1]
y0 = S0, E[length+ss-1], I[length+ss-1], R0
t = np.linspace(n*T, tmax, ss)
[subS, subE, subI, subR, subN] = integrate(y0, t)
S = np.append(S, subS)
E = np.append(E, subE)
I = np.append(I, subI)
R = np.append(R, subR)
N = np.append(N, subN)
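# Note on the pulse scheme implemented above: at every vaccination instant
# t = kT a proportion p of the susceptibles is moved directly into the
# recovered class,
#     S(kT+) = (1 - p) * S(kT-),   R(kT+) = R(kT-) + p * S(kT-),
# while E and I are carried over unchanged and the ODEs are re-integrated on
# the next subinterval.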
# Whole time interval (for future plots)
length = len(N)
t = np.linspace(t0, tmax, length)
print("Infectious prop:" , I[len(I)-1])
# Critical susceptible proportion
Rep = A*beta*sigma / ( (mu)*(mu+sigma)*(mu+gamma+alpha) )
print("Reprodutive number:", Rep)
print("Inverse reproductive numer:", 1.0/Rep)
line=[]
for inst in t:
line.append(1.0/Rep)
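# The dash-dotted line built above is the critical susceptible proportion
# S_c = 1/R0 (plotted with the '$S_c$' label below): while S(t) stays under
# this threshold the effective reproduction number R0*S(t) is below 1 and the
# infection cannot sustain itself.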
# -----------------------------------------------------------------------------
# Plot the data
# -----------------------------------------------------------------------------
fig = plt.figure(facecolor='w',figsize=(7,4))
ax = fig.add_subplot(111, axisbelow=True)
ax.plot(t, S, 'tomato', alpha=0.6, lw=1.2, label='Susceptible')
#ax.plot(t, E, 'tomato', alpha=0.7, lw=1.2, label='Exposed')
ax.plot(t, I, 'r', alpha=0.8, lw=1.2, label='Infectious')
#ax.plot(t, R, 'grey', alpha=0.6, lw=1.2, label='Recovered')
ax.plot(t, line, "black", alpha=0.8, linestyle="dashdot", lw=1.0, label='$S_c$')
ax.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
ax.set_xlabel('Time (days)', fontsize=13.5, labelpad=6)
ax.set_ylabel('Proportion of individuals', fontsize=13.5, labelpad=10)
ax.yaxis.set_tick_params(length=0)
ax.xaxis.set_tick_params(length=0)
ax.grid(b=True, which='major', c='silver', lw=0.5, ls='-')
legend = ax.legend(loc=1)
legend.get_frame().set_alpha(0.9)
for spine in ('top', 'right', 'bottom', 'left'):
ax.spines[spine].set_visible(False)
plt.savefig('422.png', dpi=600)
plt.show()
| 32.615789
| 81
| 0.512345
|
cf427f1d9c02a665d96cef051d10239cb77038b0
| 6,416
|
py
|
Python
|
lib/core/loss.py
|
lsrock1/human-pose-estimation-polar-coordinate
|
378ed6fb8ac37a90758381fdeabcc5f936ce0f60
|
[
"MIT"
] | null | null | null |
lib/core/loss.py
|
lsrock1/human-pose-estimation-polar-coordinate
|
378ed6fb8ac37a90758381fdeabcc5f936ce0f60
|
[
"MIT"
] | null | null | null |
lib/core/loss.py
|
lsrock1/human-pose-estimation-polar-coordinate
|
378ed6fb8ac37a90758381fdeabcc5f936ce0f60
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
class KeypointIOULoss(nn.Module):
def __init__(self, use_target_weight):
super().__init__()
self.use_target_weight = use_target_weight
# def forward(self, output, target, target_weight):
# # output [bs, 14, 2]
# # target same
# # target_weight [bs, 14, 1]
# target = target.float()
# inter = torch.min(output, target)
# union = output + target - inter
# losses = -torch.log((inter + 1.0) / (union + 1.0))
# # print('angle loss : ', losses[:, :, 0].sum() * 0.5, " length loss : ", losses[:, :, 1].sum() * 0.5)
# losses = losses[:, :, 0] * 0.5 + losses[:, :, 1] * 0.5
# losses.unsqueeze_(2)
# # print('pred : ', output[0])
# # print('target : ', target[0])
# if self.use_target_weight and target_weight.sum() > 0:
# return (losses * target_weight).sum() / (target_weight.sum())
# else:
# losses.mean()
# def forward(self, output, target, target_weight):
# # output [bs, 16, 361]
# index = target_weight > 0
# # output [bs * 16]
# index = index.view(-1)
# degree = F.cross_entropy(output[:, :, :361].view(-1, 361)[index, :], target[:, :, 0].view(-1).long()[index])
# length_target = target[:, :, 1].float().view(-1)[index]
# length_pred = output[:, :, 361].view(-1)[index]
# inter = torch.min(length_pred, length_target)
# union = length_pred + length_target - inter
# length = -torch.log((inter + 1.) / (union + 1.))
# length = length.sum() / index.sum()
# return degree + 0.5 + length * 0.5
def forward(self, output, target, target_weight):
# index (bs * joints)
# print('output: ', output[0])
# print('target: ', target[0])
index = target_weight > 0
index = index.view(-1)
output = output.view(-1, 3)[index, :]
target = target.view(-1, 3)[index, :].float()
angle_loss = F.mse_loss(output[:, :2], target[:, :2])
inter = torch.min(output[:, 2], target[:, 2])
union = output[:, 2] + target[:, 2] - inter
length_loss = -torch.log((inter + 1.) / (union + 1.))
length_loss = length_loss.sum() / index.sum()
# length_loss = F.mse_loss(output[:, 9], target[:, 9])
# print('angle : ', angle_loss, " length : ", length_loss)
return 0.5 * angle_loss + 0.5 * length_loss
# def forward(self, output, target, target_weight):
# # index (bs * joints)
# index = target_weight > 0
# index = index.view(-1)
# output = output.view(-1, 10)[index, :]
# target = target.view(-1, 10)[index, :].float()
# angle_loss = F.mse_loss(output[:, :9].view(-1, 3, 3)[:, :2, :2], target[:, :9].view(-1, 3, 3)[:, :2, :2])
# length_loss = F.mse_loss(output[:, 9], target[:, 9])
# return 0.5 * angle_loss + 0.5 * length_loss
# # length_loss = F.mse_loss(output[:, 9], target[:, 9])
# # print('angle : ', angle_loss, " length : ", length_loss)
# # return 0.5 * angle_loss + 0.5 * length_loss
# # length = -torch.log((inter + 1.0) / ( + 1.0))-torch.log(output[:, :, 361], target[:, :, 1].float())
class JointsMSELoss(nn.Module):
def __init__(self, use_target_weight):
super(JointsMSELoss, self).__init__()
self.criterion = nn.MSELoss(reduction='mean')
self.use_target_weight = use_target_weight
def forward(self, output, target, target_weight):
target = target.float()
batch_size = output.size(0)
num_joints = output.size(1)
heatmaps_pred = output.reshape((batch_size, num_joints, -1)).split(1, 1)
heatmaps_gt = target.reshape((batch_size, num_joints, -1)).split(1, 1)
loss = 0
for idx in range(num_joints):
heatmap_pred = heatmaps_pred[idx].squeeze()
heatmap_gt = heatmaps_gt[idx].squeeze()
if self.use_target_weight:
loss += 0.5 * self.criterion(
heatmap_pred.mul(target_weight[:, idx]),
heatmap_gt.mul(target_weight[:, idx])
)
else:
loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt)
return loss / num_joints
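# Example sketch of the tensor shapes JointsMSELoss expects. The sizes below
# (batch of 2, 16 joints, 64x48 heatmaps) are assumptions for illustration only.
def _example_joints_mse():
    criterion = JointsMSELoss(use_target_weight=True)
    output = torch.rand(2, 16, 64, 48)        # predicted heatmaps
    target = torch.rand(2, 16, 64, 48)        # ground-truth heatmaps
    target_weight = torch.ones(2, 16, 1)      # per-joint visibility weights
    return criterion(output, target, target_weight)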
class JointsOHKMMSELoss(nn.Module):
def __init__(self, use_target_weight, topk=8):
super(JointsOHKMMSELoss, self).__init__()
self.criterion = nn.MSELoss(reduction='none')
self.use_target_weight = use_target_weight
self.topk = topk
def ohkm(self, loss):
ohkm_loss = 0.
for i in range(loss.size()[0]):
sub_loss = loss[i]
topk_val, topk_idx = torch.topk(
sub_loss, k=self.topk, dim=0, sorted=False
)
tmp_loss = torch.gather(sub_loss, 0, topk_idx)
ohkm_loss += torch.sum(tmp_loss) / self.topk
ohkm_loss /= loss.size()[0]
return ohkm_loss
def forward(self, output, target, target_weight):
batch_size = output.size(0)
num_joints = output.size(1)
heatmaps_pred = output.reshape((batch_size, num_joints, -1)).split(1, 1)
heatmaps_gt = target.reshape((batch_size, num_joints, -1)).split(1, 1)
loss = []
for idx in range(num_joints):
heatmap_pred = heatmaps_pred[idx].squeeze()
heatmap_gt = heatmaps_gt[idx].squeeze()
if self.use_target_weight:
loss.append(0.5 * self.criterion(
heatmap_pred.mul(target_weight[:, idx]),
heatmap_gt.mul(target_weight[:, idx])
))
else:
loss.append(
0.5 * self.criterion(heatmap_pred, heatmap_gt)
)
loss = [l.mean(dim=1).unsqueeze(dim=1) for l in loss]
loss = torch.cat(loss, dim=1)
return self.ohkm(loss)
| 38.884848
| 118
| 0.541771
|
0d8cad0980295b6751e3723add416e505790795a
| 6,058
|
py
|
Python
|
test/integration/component/test_asa1000v_fw.py
|
ksowmya/cloudstack-1
|
f8f779158da056be7da669884ae4ddd109cec044
|
[
"Apache-2.0"
] | 1
|
2020-03-27T22:21:20.000Z
|
2020-03-27T22:21:20.000Z
|
test/integration/component/test_asa1000v_fw.py
|
ksowmya/cloudstack-1
|
f8f779158da056be7da669884ae4ddd109cec044
|
[
"Apache-2.0"
] | null | null | null |
test/integration/component/test_asa1000v_fw.py
|
ksowmya/cloudstack-1
|
f8f779158da056be7da669884ae4ddd109cec044
|
[
"Apache-2.0"
] | 1
|
2019-12-26T07:16:06.000Z
|
2019-12-26T07:16:06.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Cisco ASA1000v external firewall
"""
#Import Local Modules
import marvin
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.integration.lib.utils import *
from marvin.integration.lib.base import *
from marvin.integration.lib.common import *
from marvin.remoteSSHClient import remoteSSHClient
import datetime
class Services:
"""Test Cisco ASA1000v services
"""
def __init__(self):
self.services = {
"vnmc": {
"ipaddress": '10.147.28.236',
"username": 'admin',
"password": 'Password_123',
},
"asa": {
"ipaddress": '10.147.28.238',
"insideportprofile": 'asa-in123',
},
"network_offering": {
"name": 'CiscoVnmc',
"displaytext": 'CiscoVnmc',
"guestiptype": 'Isolated',
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Firewall,UserData,StaticNat',
"traffictype": 'GUEST',
"availability": 'Optional',
"serviceProviderList": {
"Dhcp": 'VirtualRouter',
"Dns": 'VirtualRouter',
"SourceNat": 'CiscoVnmc',
"PortForwarding": 'CiscoVnmc',
"Firewall": 'CiscoVnmc',
"UserData": 'VirtualRouter',
"StaticNat": 'CiscoVnmc',
},
},
"network": {
"name": "CiscoVnmc",
"displaytext": "CiscoVnmc",
},
}
class TestASASetup(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.apiclient = super(
TestASASetup,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
cls.network_offering = NetworkOffering.create(
cls.apiclient,
cls.services["network_offering"],
conservemode=True)
# Enable network offering
cls.network_offering.update(cls.apiclient, state='Enabled')
cls._cleanup = [
cls.network_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
# Cleanup
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.zone = get_zone(self.apiclient, self.services)
self.physicalnetworks = PhysicalNetwork.list(self.apiclient, zoneid=self.zone.id)
self.assertNotEqual(len(self.physicalnetworks), 0, "Check if the list physical network API returns a non-empty response")
self.clusters = Cluster.list(self.apiclient, hypervisor='VMware')
self.assertNotEqual(len(self.clusters), 0, "Check if the list cluster API returns a non-empty response")
self.cleanup = []
return
def tearDown(self):
try:
self.debug("Cleaning up the resources")
cleanup_resources(self.apiclient, self.cleanup)
self.debug("Cleanup complete!")
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["device", "asa"])
def test_registerVnmc(self):
Vnmc = VNMC.create(self.apiclient, self.services["vnmc"]["ipaddress"], self.services["vnmc"]["username"], self.services["vnmc"]["password"], self.physicalnetworks[0].id)
self.debug("Cisco VNMC appliance with id %s deployed"%(Vnmc.id))
VnmcList = VNMC.list(self.apiclient, physicalnetworkid = self.physicalnetworks[0].id)
self.assertNotEqual(len(VnmcList), 0, "List VNMC API returned an empty response")
Vnmc.delete(self.apiclient)
@attr(tags=["device", "asa"])
def test_registerAsa1000v(self):
Asa = ASA1000V.create(self.apiclient, self.services["asa"]["ipaddress"], self.services["asa"]["insideportprofile"], self.clusters[0].id, self.physicalnetworks[0].id)
self.debug("Cisco ASA 1000v appliance with id %s deployed"%(Asa.id))
AsaList = ASA1000V.list(self.apiclient, physicalnetworkid = self.physicalnetworks[0].id)
self.assertNotEqual(len(AsaList), 0, "List ASA 1000v API returned an empty response")
Asa.delete(self.apiclient)
| 44.218978
| 177
| 0.551172
|
10a7c6d5ad36d9a07821a4e5b70c301030e8d031
| 4,243
|
py
|
Python
|
pong-server.py
|
hsubbaraj/pong-demo
|
dd747a6e25862e4dd0e3e4c553ae31a3f388553b
|
[
"MIT"
] | null | null | null |
pong-server.py
|
hsubbaraj/pong-demo
|
dd747a6e25862e4dd0e3e4c553ae31a3f388553b
|
[
"MIT"
] | null | null | null |
pong-server.py
|
hsubbaraj/pong-demo
|
dd747a6e25862e4dd0e3e4c553ae31a3f388553b
|
[
"MIT"
] | null | null | null |
from __future__ import print_function, absolute_import
from socketserver import ThreadingMixIn
from http.server import BaseHTTPRequestHandler, HTTPServer
import mimetypes
mimetypes.init()
import os
import requests
from datetime import datetime
import logging
import json
import sys
cur_dir = os.path.dirname(os.path.abspath(__file__))
static_dir = os.path.join(cur_dir, "static")
logging.basicConfig(
format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%y-%m-%d:%H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
PORT = 3000
# NOTE: This is definitely not secure
def in_static_dir(file):
# make both absolute
directory = os.path.join(os.path.realpath(static_dir), '')
file = os.path.realpath(file)
# return true, if the common prefix of both is equal to directory
# e.g. /a/b/c/d.rst and directory is /a/b, the common prefix is /a/b
return os.path.commonprefix([file, directory]) == directory
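# Illustrative caveat: os.path.commonprefix compares strings character by
# character, so a sibling such as "static-evil" would share a prefix with
# "static"; joining the directory with a trailing separator (as done above)
# guards against that, and os.path.commonpath would be a stricter alternative.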
class PongServer(BaseHTTPRequestHandler):
def _respond_not_found(self):
pass
# GET requests serve the corresponding file from the "static/" subdirectory
def do_GET(self):
if self.path == "/pong" or self.path == "/pong/":
self.path = "/pong/index.html"
if self.path.startswith("/pong/"):
self.path = self.path.replace("/pong/", "", 1)
local_path = os.path.abspath(os.path.join(static_dir, self.path))
logger.info("Local path: {}".format(local_path))
if not in_static_dir(local_path):
self.send_error(403, "Forbidden")
elif not os.path.exists(local_path) or not os.path.isfile(local_path):
self.send_error(404, "Not Found")
else:
with open(local_path, "rb") as f:
self.send_response(200)
mtype, encoding = mimetypes.guess_type(local_path)
self.send_header('Content-Type', mtype)
self.end_headers()
self.wfile.write(f.read())
return
def do_POST(self):
if not self.path == "/pong/predict":
self.send_error(404, "Not Found")
return
print(self.rfile)
clipper_url = "http://{}/pong/predict".format(self.server.clipper_addr)
content_length = int(self.headers['Content-Length'])
logger.info(content_length)
print(content_length)
logger.info(clipper_url)
# # Stupid workaround because Javascript's JSON.stringify will turn 1.0 into 1, which
# # Clipper's JSON parsing will parse as an integer not a double
req_json = json.loads(self.rfile.read(content_length).decode("utf-8"))
req_json["input"] = [float(i) for i in req_json["input"]]
print(req_json)
# logger.info("DATA ------------------------------------------------------------------------")
# logger.info(req_json)
logger.debug("Request JSON: {}".format(req_json))
headers = {'Content-Type': 'application/json'}
start = datetime.now()
clipper_response = requests.post(clipper_url, headers=headers, data=json.dumps(req_json))
end = datetime.now()
latency = (end - start).total_seconds() * 1000.0
logger.debug("Clipper responded with '{txt}' in {time} ms".format(
txt=clipper_response.text, time=latency))
self.send_response(clipper_response.status_code)
# Forward headers
print("Clipper responded with '{txt}' in {time} ms".format(
txt=clipper_response.text, time=latency))
print(clipper_response.headers)
print(type(clipper_response.headers))
for k, v in clipper_response.headers.items():
self.send_header(k, v)
self.end_headers()
self.wfile.write(clipper_response.text.encode())
class ThreadingServer(ThreadingMixIn, HTTPServer):
pass
def run(clipper_addr):
server_addr = ('', PORT)
logger.info("Starting Pong Server on localhost:{port}".format(port=PORT))
server = ThreadingServer(server_addr, PongServer)
server.clipper_addr = clipper_addr
server.serve_forever()
if __name__ == '__main__':
clipper_addr = sys.argv[1]
run(clipper_addr)
| 35.957627
| 102
| 0.63917
|
f5c7978a4bceb8949af73bf64e42a1743ec4117d
| 8,503
|
py
|
Python
|
app.py
|
reecestart/SessionManagerTGWControlTower
|
951c12e261ea46b9c37bfe3064878359e3b47118
|
[
"MIT"
] | null | null | null |
app.py
|
reecestart/SessionManagerTGWControlTower
|
951c12e261ea46b9c37bfe3064878359e3b47118
|
[
"MIT"
] | null | null | null |
app.py
|
reecestart/SessionManagerTGWControlTower
|
951c12e261ea46b9c37bfe3064878359e3b47118
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from enum import auto
from aws_cdk import (
aws_ec2 as ec2,
aws_rds as rds,
aws_secretsmanager as secretsmanager,
aws_iam as iam,
aws_autoscaling as autoscaling,
aws_glue as glue,
core,
)
from session_manager_tgw_control_tower.session_manager_tgw_control_tower_stack import SessionManagerTgwControlTowerStack
class SessionManagerTgwControlTowerStack(core.Stack):
def __init__(self, app: core.App, id: str, **kwargs) -> None:
super().__init__(app, id, **kwargs)
vpc = ec2.Vpc(
self, "VPC",
max_azs=3,
cidr='10.0.0.0/16',
enable_dns_hostnames=True,
enable_dns_support=True,
subnet_configuration= [
ec2.SubnetConfiguration(
name='DBSubnet',
subnet_type=ec2.SubnetType.ISOLATED,
cidr_mask=24
),
ec2.SubnetConfiguration(
name='Application-A',
subnet_type=ec2.SubnetType.PRIVATE,
cidr_mask=24
),
ec2.SubnetConfiguration(
name='Application-B',
subnet_type=ec2.SubnetType.PRIVATE,
cidr_mask=24
),
ec2.SubnetConfiguration(
name='Web',
subnet_type=ec2.SubnetType.PUBLIC,
cidr_mask=24
),
]
)
dbSecurityGroup = ec2.SecurityGroup(
self,
id= "dbSecurityGroup",
vpc=vpc,
security_group_name="DBSecurityGroup"
)
dbSubnetGroup = rds.SubnetGroup(
self, "dbSubnetGroup",
subnet_group_name="dbSubnetGroup",
vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED),
description="dbSubnetGroup",
vpc=vpc
)
dbPassword = secretsmanager.Secret(
self, "dbPassword",
description="dbPassword",
generate_secret_string=secretsmanager.SecretStringGenerator(
password_length=30,
secret_string_template='{"username": "dbAdmin"}',
generate_string_key="password",
exclude_characters='"@\\\/',
exclude_punctuation=True
),
secret_name="dbPassword"
)
WindowsASG = autoscaling.AutoScalingGroup(
self, "WindowsASG",
instance_type=ec2.InstanceType.of(
ec2.InstanceClass.BURSTABLE3_AMD,
ec2.InstanceSize.SMALL
),
machine_image=ec2.MachineImage.generic_windows(
ami_map={
'ap-northeast-2': 'ami-0133b1a5b9ca9be36' #Windows
}
),
vpc=vpc,
vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE),
desired_capacity=1,
min_capacity=1,
max_capacity=2
)
AppASG = autoscaling.AutoScalingGroup(
self, "AppASG",
instance_type=ec2.InstanceType.of(
ec2.InstanceClass.BURSTABLE3_AMD,
ec2.InstanceSize.SMALL
),
machine_image=ec2.MachineImage.generic_linux(
ami_map={
'ap-southeast-2': 'ami-044c46b1952ad5861', #RHEL
'ap-northeast-2': 'ami-07464b2b9929898f8' #AMZLNX2
}
),
vpc=vpc,
vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE),
user_data=ec2.UserData.custom('\n'.join([
"#!/bin/bash",
"yum install python3 -y",
"dnf install -y https://s3.ap-southeast-2.amazonaws.com/amazon-ssm-ap-southeast-2/latest/linux_amd64/amazon-ssm-agent.rpm",
"dnf install -y https://s3.ap-southeast-1.amazonaws.com/amazon-ssm-ap-southeast-1/latest/linux_amd64/amazon-ssm-agent.rpm",
"systemctl enable amazon-ssm-agent",
"systemctl start amazon-ssm-agent",
"yum install -y postgresql",
"yum install -y git",
"yum update -y",
"cd /home/ec2-user",
"DIR=\"aws-database-migration-samples\"",
"if [ ! -d \"$DIR\" ]; then",
"git clone https://github.com/aws-samples/aws-database-migration-samples.git",
"fi",
"cd aws-database-migration-samples/PostgreSQL/sampledb/v1/",
"kill -9 16673",
"dnf install python2-pip -y",
"dnf install python3-pip -y",
"pip2 --version",
"pip3 --version",
"cd /home/ec2-user",
"curl 'https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip' -o 'awscliv2.zip'",
"yum install zip unzip -y",
"unzip awscliv2.zip",
"./aws/install -i /usr/local/aws-cli -b /usr/local/bin",
"/usr/local/bin/aws --version",
"DBDETAILS=`/usr/local/bin/aws rds describe-db-instances`",
"sudo yum install jq -y",
"DBIDENTIFIER=$(echo $DBDETAILS | jq -r '.[\"DBInstances\"][0][\"DBInstanceIdentifier\"]')",
"/usr/local/bin/aws rds wait db-instance-available --db-instance-identifier $DBIDENTIFIER",
"SECRETSTRING=`/usr/local/bin/aws secretsmanager get-secret-value --secret-id dbPassword --query SecretString --output text`",
"PGPASSWORD=$(echo $SECRETSTRING | jq -r '.[\"password\"]')",
"PGUSER=$(echo $SECRETSTRING | jq -r '.[\"username\"]')",
"DBPROXY=`/usr/local/bin/aws rds describe-db-proxies`",
"PROXYENDPOINT=$(echo $DBPROXY | jq -r '.[\"DBProxies\"][0][\"Endpoint\"]')",
"PGDATABASE=$(echo $SECRETSTRING | jq -r '.[\"dbname\"]')",
"PGPORT=$(echo $SECRETSTRING | jq -r '.[\"port\"]')",
"cd /home/ec2-user",
"cd aws-database-migration-samples/PostgreSQL/sampledb/v1/",
"PGHOST=${PROXYENDPOINT} PGPORT=${PGPORT} PGDATABASE=${PGDATABASE} PGUSER=${PGUSER} PGPASSWORD=${PGPASSWORD} psql -f install-postgresql.sql"
])),
desired_capacity=1,
min_capacity=1,
max_capacity=2
)
dbSecurityGroup.connections.allow_from(
other=AppASG,
port_range=ec2.Port.tcp(5432),
description="Allow pg connection from AppInstance"
)
AppASG.role.add_managed_policy(
policy=iam.ManagedPolicy.from_aws_managed_policy_name(
managed_policy_name="AmazonSSMManagedInstanceCore"
)
)
WindowsASG.role.add_managed_policy(
policy=iam.ManagedPolicy.from_aws_managed_policy_name(
managed_policy_name="AmazonSSMManagedInstanceCore"
)
)
AppASG.role.add_managed_policy(
policy=iam.ManagedPolicy.from_aws_managed_policy_name(
managed_policy_name="AmazonRDSFullAccess"
)
)
AppASG.role.add_managed_policy(
policy=iam.ManagedPolicy.from_aws_managed_policy_name(
managed_policy_name="AmazonEC2FullAccess "
)
)
WindowsASG.role.add_managed_policy(
policy=iam.ManagedPolicy.from_aws_managed_policy_name(
managed_policy_name="AmazonEC2FullAccess "
)
)
WindowsASG.role.add_managed_policy(
policy=iam.ManagedPolicy.from_aws_managed_policy_name(
managed_policy_name="SecretsManagerReadWrite "
)
)
AppASG.role.add_managed_policy(
policy=iam.ManagedPolicy.from_aws_managed_policy_name(
managed_policy_name="SecretsManagerReadWrite"
)
)
S3Endpoint = ec2.GatewayVpcEndpointAwsService(
name="S3"
)
TransitGW = ec2.CfnTransitGateway(
self, "TransitGW",
auto_accept_shared_attachments="enable",
default_route_table_association="enable",
default_route_table_propagation="enable"
)
app = core.App()
SessionManagerTgwControlTowerStack(app, "session-manager-tgw-control-tower")
app.synth()
| 38.301802
| 156
| 0.552628
|
c006e2459a82b3a12e9d9c2bcea003fa6a6f50fb
| 15,521
|
py
|
Python
|
tools/accuracy_checker/accuracy_checker/metrics/reid.py
|
apankratovantonp/open_model_zoo
|
e372d4173e50741a6828cda415d55c37320f89cd
|
[
"Apache-2.0"
] | 5
|
2020-03-09T07:39:04.000Z
|
2021-08-16T07:17:28.000Z
|
tools/accuracy_checker/accuracy_checker/metrics/reid.py
|
ananda89/open_model_zoo
|
e372d4173e50741a6828cda415d55c37320f89cd
|
[
"Apache-2.0"
] | null | null | null |
tools/accuracy_checker/accuracy_checker/metrics/reid.py
|
ananda89/open_model_zoo
|
e372d4173e50741a6828cda415d55c37320f89cd
|
[
"Apache-2.0"
] | 3
|
2020-07-06T08:45:26.000Z
|
2020-11-12T10:14:45.000Z
|
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import defaultdict, namedtuple
from sklearn.metrics import auc, precision_recall_curve
# noinspection PyProtectedMember
from sklearn.metrics.base import _average_binary_score
import numpy as np
from ..representation import (
ReIdentificationClassificationAnnotation,
ReIdentificationAnnotation,
ReIdentificationPrediction
)
from ..config import BaseField, BoolField, NumberField
from .metric import FullDatasetEvaluationMetric
PairDesc = namedtuple('PairDesc', 'image1 image2 same')
class CMCScore(FullDatasetEvaluationMetric):
"""
Cumulative Matching Characteristics (CMC) score.
Config:
annotation: reid annotation.
prediction: predicted embeddings.
top_k: number of k highest ranked samples to consider when matching.
separate_camera_set: should identities from the same camera view be filtered out.
single_gallery_shot: each identity has only one instance in the gallery.
number_single_shot_repeats: number of repeats for single_gallery_shot setting.
first_match_break: break on first matched gallery sample.
"""
__provider__ = 'cmc'
annotation_types = (ReIdentificationAnnotation, )
prediction_types = (ReIdentificationPrediction, )
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'top_k': NumberField(
value_type=int, min_value=1, default=1, optional=True,
description="Number of k highest ranked samples to consider when matching."
),
'separate_camera_set': BoolField(
optional=True, default=False, description="Should identities from the same camera view be filtered out."
),
'single_gallery_shot': BoolField(
optional=True, default=False, description="Each identity has only one instance in the gallery."
),
'first_match_break': BoolField(
optional=True, default=True, description="Break on first matched gallery sample."
),
'number_single_shot_repeats': NumberField(
value_type=int, optional=True, default=10,
description="Number of repeats for single_gallery_shot setting (required for CUHK)."
)
})
return parameters
def configure(self):
self.top_k = self.get_value_from_config('top_k')
self.separate_camera_set = self.get_value_from_config('separate_camera_set')
self.single_gallery_shot = self.get_value_from_config('single_gallery_shot')
self.first_match_break = self.get_value_from_config('first_match_break')
self.number_single_shot_repeats = self.get_value_from_config('number_single_shot_repeats')
def evaluate(self, annotations, predictions):
dist_matrix = distance_matrix(annotations, predictions)
gallery_cameras, gallery_pids, query_cameras, query_pids = get_gallery_query_pids(annotations)
_cmc_score = eval_cmc(
dist_matrix, query_pids, gallery_pids, query_cameras, gallery_cameras, self.separate_camera_set,
self.single_gallery_shot, self.first_match_break, self.number_single_shot_repeats
)
return _cmc_score[self.top_k - 1]
class ReidMAP(FullDatasetEvaluationMetric):
"""
Mean Average Precision score.
Config:
annotation: reid annotation.
prediction: predicted embeddings.
interpolated_auc: should area under precision recall curve be computed using trapezoidal rule or directly.
"""
__provider__ = 'reid_map'
annotation_types = (ReIdentificationAnnotation, )
prediction_types = (ReIdentificationPrediction, )
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'interpolated_auc': BoolField(
optional=True, default=True, description="Should area under precision recall"
" curve be computed using trapezoidal rule or directly."
)
})
return parameters
def configure(self):
self.interpolated_auc = self.get_value_from_config('interpolated_auc')
def evaluate(self, annotations, predictions):
dist_matrix = distance_matrix(annotations, predictions)
gallery_cameras, gallery_pids, query_cameras, query_pids = get_gallery_query_pids(annotations)
return eval_map(
dist_matrix, query_pids, gallery_pids, query_cameras, gallery_cameras, self.interpolated_auc
)
class PairwiseAccuracy(FullDatasetEvaluationMetric):
__provider__ = 'pairwise_accuracy'
annotation_types = (ReIdentificationClassificationAnnotation, )
prediction_types = (ReIdentificationPrediction, )
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'min_score': BaseField(
optional=True, default='train_median',
description="Min score for determining that objects are different. "
"You can provide value or use train_median value which will be calculated "
"if annotations has training subset."
)
})
return parameters
def configure(self):
self.min_score = self.get_value_from_config('min_score')
def evaluate(self, annotations, predictions):
embed_distances, pairs = get_embedding_distances(annotations, predictions)
min_score = self.min_score
if min_score == 'train_median':
train_distances, _train_pairs = get_embedding_distances(annotations, predictions, train=True)
min_score = np.median(train_distances)
embed_same_class = embed_distances < min_score
accuracy = 0
for i, pair in enumerate(pairs):
same_label = pair.same
out_same = embed_same_class[i]
correct_prediction = same_label and out_same or (not same_label and not out_same)
if correct_prediction:
accuracy += 1
return float(accuracy) / len(pairs)
class PairwiseAccuracySubsets(FullDatasetEvaluationMetric):
__provider__ = 'pairwise_accuracy_subsets'
annotation_types = (ReIdentificationClassificationAnnotation, )
prediction_types = (ReIdentificationPrediction, )
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'subset_number': NumberField(
optional=True, min_value=1, value_type=int, default=10, description="Number of subsets for separating."
)
})
return parameters
def configure(self):
self.subset_num = self.get_value_from_config('subset_number')
self.accuracy_metric = PairwiseAccuracy(self.config, self.dataset)
def evaluate(self, annotations, predictions):
subset_results = []
first_images_annotations = list(filter(
lambda annotation: (len(annotation.negative_pairs) > 0 or len(annotation.positive_pairs) > 0), annotations
))
idx_subsets = self.make_subsets(self.subset_num, len(first_images_annotations))
for subset in range(self.subset_num):
test_subset = self.get_subset(first_images_annotations, idx_subsets[subset]['test'])
test_subset = self.mark_subset(test_subset, False)
train_subset = self.get_subset(first_images_annotations, idx_subsets[subset]['train'])
train_subset = self.mark_subset(train_subset)
subset_result = self.accuracy_metric.evaluate(test_subset+train_subset, predictions)
subset_results.append(subset_result)
return np.mean(subset_results)
@staticmethod
def make_subsets(subset_num, dataset_size):
subsets = []
if subset_num > dataset_size:
raise ValueError('It is impossible to divide the dataset into more subsets than there are annotations.')
for subset in range(subset_num):
lower_bnd = subset * dataset_size // subset_num
upper_bnd = (subset + 1) * dataset_size // subset_num
subset_test = [(lower_bnd, upper_bnd)]
subset_train = [(0, lower_bnd), (upper_bnd, dataset_size)]
subsets.append({'test': subset_test, 'train': subset_train})
return subsets
@staticmethod
def mark_subset(subset_annotations, train=True):
for annotation in subset_annotations:
annotation.metadata['train'] = train
return subset_annotations
@staticmethod
def get_subset(container, subset_bounds):
subset = []
for bound in subset_bounds:
subset += container[bound[0]: bound[1]]
return subset
def extract_embeddings(annotation, prediction, query):
return np.stack([pred.embedding for pred, ann in zip(prediction, annotation) if ann.query == query])
def get_gallery_query_pids(annotation):
gallery_pids = np.asarray([ann.person_id for ann in annotation if not ann.query])
query_pids = np.asarray([ann.person_id for ann in annotation if ann.query])
gallery_cameras = np.asarray([ann.camera_id for ann in annotation if not ann.query])
query_cameras = np.asarray([ann.camera_id for ann in annotation if ann.query])
return gallery_cameras, gallery_pids, query_cameras, query_pids
def distance_matrix(annotation, prediction):
gallery_embeddings = extract_embeddings(annotation, prediction, query=False)
query_embeddings = extract_embeddings(annotation, prediction, query=True)
return 1. - np.matmul(gallery_embeddings, np.transpose(query_embeddings)).T
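# Note on the matrix above: it is one minus a dot product, i.e. a cosine
# distance when the stored embeddings are L2-normalised; rows index query
# samples and columns index gallery samples.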
def unique_sample(ids_dict, num):
mask = np.zeros(num, dtype=bool)
for indices in ids_dict.values():
mask[np.random.choice(indices)] = True
return mask
def eval_map(distance_mat, query_ids, gallery_ids, query_cams, gallery_cams, interpolated_auc=False):
number_queries, _number_gallery = distance_mat.shape
# Sort and find correct matches
indices = np.argsort(distance_mat, axis=1)
matches = (gallery_ids[indices] == query_ids[:, np.newaxis]) # type: np.ndarray
# Compute AP for each query
average_precisions = []
for query in range(number_queries):
# Filter out the same id and same camera
valid = (gallery_ids[indices[query]] != query_ids[query]) | (gallery_cams[indices[query]] != query_cams[query])
y_true = matches[query, valid]
y_score = -distance_mat[query][indices[query]][valid]
if not np.any(y_true):
continue
average_precisions.append(binary_average_precision(y_true, y_score, interpolated_auc=interpolated_auc))
if not average_precisions:
raise RuntimeError("No valid query")
return np.mean(average_precisions)
def eval_cmc(distance_mat, query_ids, gallery_ids, query_cams, gallery_cams, separate_camera_set=False,
single_gallery_shot=False, first_match_break=False, number_single_shot_repeats=10, top_k=100):
number_queries, _number_gallery = distance_mat.shape
if not single_gallery_shot:
number_single_shot_repeats = 1
# Sort and find correct matches
indices = np.argsort(distance_mat, axis=1)
matches = gallery_ids[indices] == query_ids[:, np.newaxis] # type: np.ndarray
# Compute CMC for each query
ret = np.zeros(top_k)
num_valid_queries = 0
for query in range(number_queries):
valid = get_valid_subset(
gallery_cams, gallery_ids, query, indices, query_cams, query_ids, separate_camera_set
) # type: np.ndarray
if not np.any(matches[query, valid]):
continue
ids_dict = defaultdict(list)
if single_gallery_shot:
gallery_indexes = gallery_ids[indices[query][valid]]
for j, x in zip(np.where(valid)[0], gallery_indexes):
ids_dict[x].append(j)
for _ in range(number_single_shot_repeats):
if single_gallery_shot:
# Randomly choose one instance for each id
# required for correct validation on CUHK datasets
# http://www.ee.cuhk.edu.hk/~xgwang/CUHK_identification.html
sampled = (valid & unique_sample(ids_dict, len(valid)))
index = np.nonzero(matches[query, sampled])[0]
else:
index = np.nonzero(matches[query, valid])[0]
delta = 1. / (len(index) * number_single_shot_repeats)
for j, k in enumerate(index):
if k - j >= top_k:
break
if first_match_break:
ret[k - j] += 1
break
ret[k - j] += delta
num_valid_queries += 1
if num_valid_queries == 0:
raise RuntimeError("No valid query")
return ret.cumsum() / num_valid_queries
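# Hedged sketch (hypothetical names): eval_cmc returns a cumulative match curve of
# length top_k, so index 0 is the rank-1 accuracy and index 4 the rank-5 accuracy:
#   cmc = eval_cmc(dist, query_pids, gallery_pids, query_cams, gallery_cams,
#                  first_match_break=True, top_k=100)
#   rank1, rank5 = cmc[0], cmc[4]
# single_gallery_shot=True repeats the random gallery sampling
# number_single_shot_repeats times, the protocol referenced above for the CUHK datasets.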
def get_valid_subset(gallery_cams, gallery_ids, query_index, indices, query_cams, query_ids, separate_camera_set):
# Filter out the same id and same camera
valid = (
(gallery_ids[indices[query_index]] != query_ids[query_index]) |
(gallery_cams[indices[query_index]] != query_cams[query_index])
)
if separate_camera_set:
# Filter out samples from same camera
valid &= (gallery_cams[indices[query_index]] != query_cams[query_index])
return valid
def get_embedding_distances(annotation, prediction, train=False):
image_indexes = {}
for i, pred in enumerate(prediction):
image_indexes[pred.identifier] = i
pairs = []
for image1 in annotation:
if train != image1.metadata.get("train", False):
continue
for image2 in image1.positive_pairs:
pairs.append(PairDesc(image_indexes[image1.identifier], image_indexes[image2], True))
for image2 in image1.negative_pairs:
pairs.append(PairDesc(image_indexes[image1.identifier], image_indexes[image2], False))
embed1 = np.asarray([prediction[idx].embedding for idx, _, _ in pairs])
embed2 = np.asarray([prediction[idx].embedding for _, idx, _ in pairs])
return 0.5 * (1 - np.sum(embed1 * embed2, axis=1)), pairs
def binary_average_precision(y_true, y_score, interpolated_auc=True):
def _average_precision(y_true_, y_score_, sample_weight=None):
precision, recall, _ = precision_recall_curve(y_true_, y_score_, sample_weight)
if not interpolated_auc:
# Return the step function integral
# The following works because the last entry of precision is
# guaranteed to be 1, as returned by precision_recall_curve
return -1 * np.sum(np.diff(recall) * np.array(precision)[:-1])
return auc(recall, precision)
return _average_binary_score(_average_precision, y_true, y_score, average="macro")
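# Toy sketch, assuming precision_recall_curve/auc/_average_binary_score behave like
# their scikit-learn counterparts (presumably imported earlier in this module): a
# perfectly ranked binary problem yields an AP of 1.0, e.g.
#   binary_average_precision(np.array([1, 0, 1, 0]),
#                            np.array([0.9, 0.1, 0.8, 0.2]),
#                            interpolated_auc=False)   # -> 1.0
# With interpolated_auc=True the interpolated precision-recall curve is integrated
# via auc() instead of the step-function sum.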
| 38.418317
| 120
| 0.679918
|
cef3dfcf89b4c0012921ac579cf9f2950b4180b9
| 5,535
|
py
|
Python
|
pwncat/target.py
|
Mitul16/pwncat
|
b8d7876a9779c2c7796a9a29110d3f1cda721dff
|
[
"MIT"
] | 1,454
|
2020-05-07T02:20:52.000Z
|
2022-03-31T21:32:22.000Z
|
pwncat/target.py
|
akr3ch/pwncat
|
d67865bdaac60dd0761d0698062e7b443a62c6db
|
[
"MIT"
] | 187
|
2020-05-08T06:26:01.000Z
|
2022-03-07T21:15:29.000Z
|
pwncat/target.py
|
akr3ch/pwncat
|
d67865bdaac60dd0761d0698062e7b443a62c6db
|
[
"MIT"
] | 184
|
2020-05-07T02:31:58.000Z
|
2022-03-31T09:11:59.000Z
|
"""
A target is the data structure stored in the ZODB. It contains all enumerated
facts, installed implants, unique ID, last remote address identified and other
information needed across pwncat sessions to identify or interact with a target.
No information in this object is specific to a connection protocol or session.
"""
import enum
from typing import Tuple, Optional
import persistent
import persistent.list
from BTrees.OOBTree import OOBTree
class NAT(enum.Enum):
"""Indicates the current known state of NAT on the target host"""
UNKNOWN = enum.auto()
""" We currently don't have enough information to determine if NAT is used """
ENABLED = enum.auto()
""" NAT is definitely enabled. Public/private addresses differ. """
DISABLED = enum.auto()
""" NAT is definitely disabled. Public/private addresses are identical. """
class OS(enum.Enum):
"""Describes the operating system on the target host. This is normally
set by the platform type when connecting, however may be interrogated
from the target host directly. For example, in the case of similar OS's
like Linux, Mac, and BSD, the platform may double check the OS prior to
establishing a session.
If the OS doesn't match your platform specifically, session establishment
may fail, but any details collected so far will be stored (such as addresses
and target OS information).
"""
LINUX = enum.auto()
""" A linux-based operating system """
WINDOWS = enum.auto()
""" Windows NT based operating system """
MAC = enum.auto()
""" Apple Mac OS """
BSD = enum.auto()
""" A BSD variant """
UNKNOWN = enum.auto()
""" Unknown Operatin System """
class Target(persistent.Persistent):
"""Describes collected data on a target host. This replaces the database
in previous versions of pwncat. It collects enumeration facts, system info,
persistence state, and any other contextual information stored across
instances of pwncat. Properties added to this class are automatically stored
in the ZODB database as described by your configuration.
A target is initialized with no information, and has no requirement for what
data is available. Depending on the state of the active connection (if any)
and the type of system, some information may not be available. During
construction of a new session, some information is automatically queried such
as the public address (routable IP address from attacking perspective) and port
number, internal address (IP address from perspective of target) and port,
NAT state, hostname, and a platform specific unique identifier.
"""
def __init__(self):
self.name: Optional[str] = None
""" An optional friendly name that can be used to refer to this target """
self.public_address: Optional[Tuple[str, int]] = None
""" Public address as routable by the attacker """
        self.platform: Optional[str] = None
""" Name of the platform used to interact with this target """
self.internal_address: Optional[Tuple[str, int]] = None
""" Internal address as viewed by the target """
self.hostname: Optional[str] = None
""" Hostname from the targets perspective """
self.guid: Optional[str] = None
""" Globally unique identifier normally determined by a platform
specific algorithm. """
self.os: OS = OS.UNKNOWN
""" Target host operating system """
self.facts: persistent.list.PersistentList = persistent.list.PersistentList()
""" List of enumerated facts about the target host """
self.enumerate_state: OOBTree = OOBTree()
""" The state of all enumeration modules which drives the module schedule """
self.tampers: persistent.list.PersistentList = persistent.list.PersistentList()
""" List of files/properties of the target that have been modified and/or created. """
self.users: persistent.list.PersistentList = persistent.list.PersistentList()
""" List of users known on the target system (may not be all-encompassing depending on access) """
        self.utilities: OOBTree = OOBTree()
""" Mapping of utility names to paths. This is mainly used on Unix platforms to identify binaries available in the path. """
self.implants: persistent.list.PersistentList = persistent.list.PersistentList()
""" List of installed implants on this target host """
@property
def nat(self) -> NAT:
"""Determine if NAT is applied for this host. This simply tests
whether the target views it's IP in the same way we do. This simply
compares the public and internal addresses to infer the state of NAT
on the target network.
"""
if self.public_address is None or self.internal_address is None:
return NAT.UNKNOWN
return (
NAT.DISABLED
if self.public_address[0] == self.internal_address[0]
else NAT.ENABLED
)
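    # Illustrative sketch (the addresses are made up): a target reporting
    #   public_address   = ("203.0.113.5", 4444)
    #   internal_address = ("10.0.0.12", 4444)
    # resolves to NAT.ENABLED because only the IP portion (index 0) of each tuple is
    # compared; identical IPs on both sides resolve to NAT.DISABLED.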
def facts_with(self, **kwargs):
"""Return a generator yielding facts which match the given properties. This is
a relatively restrictive search and the properties must match exactly. For a more
general search of facts, you can use a Python generator expression over the ``facts``
list instead."""
return (
fact
for fact in self.facts
if all(getattr(fact, k, None) == v for k, v in kwargs.items())
)
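    # Hedged usage sketch (the fact attribute and value below are hypothetical):
    # because the comparison is exact, a lookup such as
    #   matching = list(target.facts_with(source="some.module.name"))
    # only yields facts whose `source` attribute equals that string; anything fuzzier
    # should iterate over `target.facts` with a plain generator expression instead.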
| 44.637097
| 132
| 0.685456
|
a2f01771aff7cf5b4fbd1b7f7ea0beddf92a62d0
| 549
|
py
|
Python
|
bslint/messages/handler.py
|
alexmakii/bslint
|
0795467166ca10c362fecc12ac17765cb85b659b
|
[
"BSD-3-Clause"
] | null | null | null |
bslint/messages/handler.py
|
alexmakii/bslint
|
0795467166ca10c362fecc12ac17765cb85b659b
|
[
"BSD-3-Clause"
] | null | null | null |
bslint/messages/handler.py
|
alexmakii/bslint
|
0795467166ca10c362fecc12ac17765cb85b659b
|
[
"BSD-3-Clause"
] | 1
|
2017-04-12T09:39:54.000Z
|
2017-04-12T09:39:54.000Z
|
import bslint.messages.error_constants as err_const
import bslint.messages.print_constants as print_const
def get_error_msg(key, params=None):
return get_message(key, err_const.MESSAGE_TABLE, params, "")
def get_print_msg(key, params=None):
return get_message(key, print_const.MESSAGE_TABLE, params, "\n")
def get_message(key, message_table, params, extra_chars):
params = params or []
if key not in message_table:
raise ValueError(err_const.NO_SUCH_KEY)
return message_table[key].get_message(params) + extra_chars
| 30.5
| 68
| 0.765027
|
4d0f61d7894d643dc577190297c341ac36ea806e
| 36,172
|
py
|
Python
|
flexget/manager.py
|
tvcsantos/Flexget
|
e08ce2957dd4f0668911d1e56347369939e4d0a5
|
[
"MIT"
] | null | null | null |
flexget/manager.py
|
tvcsantos/Flexget
|
e08ce2957dd4f0668911d1e56347369939e4d0a5
|
[
"MIT"
] | null | null | null |
flexget/manager.py
|
tvcsantos/Flexget
|
e08ce2957dd4f0668911d1e56347369939e4d0a5
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals
import atexit
import codecs
import copy
import fnmatch
import logging
import os
import shutil
import signal
import sys
import threading
from contextlib import contextmanager
from datetime import datetime, timedelta
import pkg_resources
import sqlalchemy
import yaml
from sqlalchemy.exc import OperationalError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import SingletonThreadPool
# These need to be declared before we start importing from other flexget modules, since they might import them
from flexget.utils.sqlalchemy_utils import ContextSession
Base = declarative_base()
Session = sessionmaker(class_=ContextSession)
from flexget import config_schema, db_schema, logger, plugin
from flexget.event import fire_event
from flexget.ipc import IPCClient, IPCServer
from flexget.options import CoreArgumentParser, get_parser, manager_parser, ParserError, unicode_argv
from flexget.task import Task
from flexget.task_queue import TaskQueue
from flexget.utils.tools import pid_exists
log = logging.getLogger('manager')
manager = None
DB_CLEANUP_INTERVAL = timedelta(days=7)
@sqlalchemy.event.listens_for(Session, 'before_commit')
def before_commit(session):
if not manager.has_lock and session.dirty:
log.debug('BUG?: Database writes should not be tried when there is no database lock.')
@sqlalchemy.event.listens_for(sqlalchemy.engine.Engine, 'connect')
def set_sqlite_pragma(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
# There were reported db problems with WAL mode on XFS filesystem, which is sticky and may have been turned
# on with certain FlexGet versions (e2c118e) #2749
cursor.execute('PRAGMA journal_mode=delete')
cursor.close()
class Manager(object):
"""Manager class for FlexGet
Fires events:
* manager.initialize
The first time the manager is initialized, before config is loaded
* manager.before_config_load
Before the config file is loaded from disk
* manager.before_config_validate
When updating the config, before the validator is run on it
* manager.config_updated
After a configuration file has been loaded or changed (and validated) this event is fired
* manager.startup
After manager has been initialized. This is when application becomes ready to use, however no database lock is
present, so the database must not be modified on this event.
* manager.lock_acquired
The manager does not always require a lock on startup, if one is requested, this event will run when it has been
acquired successfully
* manager.upgrade
If any plugins have declared a newer schema version than exists in the database, this event will be fired to
allow plugins to upgrade their tables
* manager.shutdown
When the manager is exiting
* manager.execute.completed
If execution in current process was completed
* manager.daemon.started
* manager.daemon.completed
* manager.db_cleanup
"""
unit_test = False
options = None
def __init__(self, args):
"""
:param args: CLI args
"""
global manager
assert not manager, 'Only one instance of Manager should be created at a time!'
if args is None:
# Decode all arguments to unicode before parsing
args = unicode_argv()[1:]
self.args = args
self.config_base = None
self.config_name = None
self.config_path = None
self.db_filename = None
self.engine = None
self.lockfile = None
self.database_uri = None
self.db_upgraded = False
self._has_lock = False
self.is_daemon = False
self.ipc_server = None
self.task_queue = None
self.persist = None
self.initialized = False
self.config = {}
try:
self.options, extra = CoreArgumentParser().parse_known_args(args)
except ParserError:
# If a non-built-in command was used, we need to parse with a parser that doesn't define the subparsers
self.options, extra = manager_parser.parse_known_args(args)
try:
self.find_config(create=False)
except:
logger.start(level=self.options.loglevel.upper(), to_file=False)
raise
else:
log_file = os.path.expanduser(self.options.logfile)
# If an absolute path is not specified, use the config directory.
if not os.path.isabs(log_file):
log_file = os.path.join(self.config_base, log_file)
logger.start(log_file, self.options.loglevel.upper(), to_console=not self.options.cron)
manager = self
log.debug('sys.defaultencoding: %s' % sys.getdefaultencoding())
log.debug('sys.getfilesystemencoding: %s' % sys.getfilesystemencoding())
log.debug('os.path.supports_unicode_filenames: %s' % os.path.supports_unicode_filenames)
if codecs.lookup(sys.getfilesystemencoding()).name == 'ascii' and not os.path.supports_unicode_filenames:
log.warning('Your locale declares ascii as the filesystem encoding. Any plugins reading filenames from '
'disk will not work properly for filenames containing non-ascii characters. Make sure your '
'locale env variables are set up correctly for the environment which is launching FlexGet.')
def __del__(self):
global manager
manager = None
def initialize(self):
"""
Load plugins, database, and config. Also initializes (but does not start) the task queue and ipc server.
This should only be called after obtaining a lock.
"""
if self.initialized:
raise RuntimeError('Cannot call initialize on an already initialized manager.')
plugin.load_plugins()
# Reparse CLI options now that plugins are loaded
self.options = get_parser().parse_args(self.args)
self.task_queue = TaskQueue()
self.ipc_server = IPCServer(self, self.options.ipc_port)
self.setup_yaml()
self.init_sqlalchemy()
fire_event('manager.initialize', self)
try:
self.load_config()
except ValueError as e:
log.critical('Failed to load config file: %s' % e.args[0])
raise
# cannot be imported at module level because of circular references
from flexget.utils.simple_persistence import SimplePersistence
self.persist = SimplePersistence('manager')
if db_schema.upgrade_required():
log.info('Database upgrade is required. Attempting now.')
fire_event('manager.upgrade', self)
if manager.db_upgraded:
fire_event('manager.db_upgraded', self)
fire_event('manager.startup', self)
self.initialized = True
@property
def tasks(self):
"""A list of tasks in the config"""
if not self.config:
return []
return self.config.get('tasks', {}).keys()
@property
def has_lock(self):
return self._has_lock
def execute(self, options=None, output=None, priority=1):
"""
Run all (can be limited with options) tasks from the config.
:param options: Either an :class:`argparse.Namespace` instance, or a dict, containing options for execution
:param output: If a file-like object is specified here, log messages and stdout from the execution will be
written to it.
:param priority: If there are other executions waiting to be run, they will be run in priority order,
lowest first.
:returns: a list of :class:`threading.Event` instances which will be
set when each respective task has finished running
"""
if options is None:
options = copy.copy(self.options.execute)
elif isinstance(options, dict):
options_namespace = copy.copy(self.options.execute)
options_namespace.__dict__.update(options)
options = options_namespace
task_names = self.tasks
# Handle --tasks
if options.tasks:
# Create list of tasks to run, preserving order
task_names = []
for arg in options.tasks:
matches = [t for t in self.tasks if fnmatch.fnmatchcase(unicode(t).lower(), arg.lower())]
if not matches:
msg = '`%s` does not match any tasks' % arg
log.error(msg)
if output:
output.write(msg)
continue
task_names.extend(m for m in matches if m not in task_names)
# Set the option as a list of matching task names so plugins can use it easily
options.tasks = task_names
# TODO: 1.2 This is a hack to make task priorities work still, not sure if it's the best one
task_names = sorted(task_names, key=lambda t: self.config['tasks'][t].get('priority', 65535))
finished_events = []
for task_name in task_names:
task = Task(self, task_name, options=options, output=output, priority=priority)
self.task_queue.put(task)
finished_events.append(task.finished_event)
return finished_events
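    # Usage sketch (the task pattern is made up): `options` may also be a plain dict,
    # which is merged onto the default execute namespace:
    #   events = manager.execute(options={'tasks': ['tv-*']}, priority=5)
    #   for finished in events:
    #       finished.wait()
    # Each returned threading.Event is set once the corresponding task has run.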
def start(self):
"""
Starting point when executing from commandline, dispatch execution to correct destination.
If there is a FlexGet process with an ipc server already running, the command will be sent there for execution
and results will be streamed back.
If not, this will attempt to obtain a lock, initialize the manager, and run the command here.
"""
# If another process is started, send the execution to the running process
ipc_info = self.check_ipc_info()
if ipc_info:
try:
log.info('There is a FlexGet process already running for this config, sending execution there.')
client = IPCClient(ipc_info['port'], ipc_info['password'])
except ValueError as e:
log.error(e)
else:
try:
client.handle_cli(self.args)
except KeyboardInterrupt:
log.error('Disconnecting from daemon due to ctrl-c. Executions will still continue in the '
'background.')
except EOFError:
log.error('Connection from daemon was severed.')
return
# No running process, we start our own to handle command
with self.acquire_lock():
self.initialize()
self.handle_cli()
self._shutdown()
def handle_cli(self, options=None):
"""
Dispatch a cli command to the appropriate function.
* :meth:`.execute_command`
* :meth:`.daemon_command`
* :meth:`.webui_command`
* CLI plugin callback function
The manager should have a lock and be initialized before calling this method.
:param options: argparse options for command. Defaults to options that manager was instantiated with.
"""
if not options:
options = self.options
command = options.cli_command
options = getattr(options, command)
# First check for built-in commands
if command in ['execute', 'daemon', 'webui']:
if command == 'execute':
self.execute_command(options)
elif command == 'daemon':
self.daemon_command(options)
elif command == 'webui':
self.webui_command(options)
else:
# Otherwise dispatch the command to the callback function
options.cli_command_callback(self, options)
def execute_command(self, options):
"""
Handles the 'execute' CLI command.
If there is already a task queue running in this process, adds the execution to the queue.
If FlexGet is being invoked with this command, starts up a task queue and runs the execution.
Fires events:
* manager.execute.started
* manager.execute.completed
:param options: argparse options
"""
fire_event('manager.execute.started', self, options)
if self.task_queue.is_alive():
if len(self.task_queue):
log.verbose('There is a task already running, execution queued.')
finished_events = self.execute(options, output=logger.get_output())
if not options.cron:
# Wait until execution of all tasks has finished
for event in finished_events:
event.wait()
else:
self.task_queue.start()
self.ipc_server.start()
self.execute(options)
self.shutdown(finish_queue=True)
self.task_queue.wait()
fire_event('manager.execute.completed', self, options)
def daemon_command(self, options):
"""
Handles the 'daemon' CLI command.
Fires events:
* manager.daemon.started
* manager.daemon.completed
:param options: argparse options
"""
if options.action == 'start':
if self.is_daemon:
log.error('Daemon already running for this config.')
return
if options.daemonize:
self.daemonize()
try:
signal.signal(signal.SIGTERM, self._handle_sigterm)
except ValueError as e:
# If flexget is being called from another script, e.g. windows service helper, and we are not the
# main thread, this error will occur.
log.debug('Error registering sigterm handler: %s' % e)
self.is_daemon = True
fire_event('manager.daemon.started', self)
self.task_queue.start()
self.ipc_server.start()
self.task_queue.wait()
fire_event('manager.daemon.completed', self)
elif options.action in ['stop', 'reload', 'status']:
if not self.is_daemon:
log.error('There does not appear to be a daemon running.')
return
if options.action == 'status':
log.info('Daemon running. (PID: %s)' % os.getpid())
elif options.action == 'stop':
self.shutdown(options.wait)
elif options.action == 'reload':
log.info('Reloading config from disk.')
try:
self.load_config()
except ValueError as e:
log.error('Error loading config: %s' % e.args[0])
else:
log.info('Config successfully reloaded from disk.')
def webui_command(self, options):
"""
Handles the 'webui' CLI command.
:param options: argparse options
"""
if self.is_daemon:
log.error('Webui or daemon is already running.')
return
# TODO: make webui an enablable plugin in regular daemon mode
try:
pkg_resources.require('flexget[webui]')
except pkg_resources.DistributionNotFound as e:
log.error('Dependency not met. %s' % e)
log.error('Webui dependencies not installed. You can use `pip install flexget[webui]` to install them.')
self.shutdown()
return
if options.daemonize:
self.daemonize()
self.is_daemon = True
from flexget.ui import webui
self.task_queue.start()
self.ipc_server.start()
webui.start(self)
self.task_queue.wait()
def _handle_sigterm(self, signum, frame):
log.info('Got SIGTERM. Shutting down.')
self.shutdown(finish_queue=False)
def setup_yaml(self):
"""Sets up the yaml loader to return unicode objects for strings by default"""
def construct_yaml_str(self, node):
# Override the default string handling function
# to always return unicode objects
return self.construct_scalar(node)
yaml.Loader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
yaml.SafeLoader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str)
# Set up the dumper to not tag every string with !!python/unicode
def unicode_representer(dumper, uni):
node = yaml.ScalarNode(tag=u'tag:yaml.org,2002:str', value=uni)
return node
yaml.add_representer(unicode, unicode_representer)
# Set up the dumper to increase the indent for lists
def increase_indent_wrapper(func):
def increase_indent(self, flow=False, indentless=False):
func(self, flow, False)
return increase_indent
yaml.Dumper.increase_indent = increase_indent_wrapper(yaml.Dumper.increase_indent)
yaml.SafeDumper.increase_indent = increase_indent_wrapper(yaml.SafeDumper.increase_indent)
def find_config(self, create=False):
"""
Find the configuration file.
:param bool create: If a config file is not found, and create is True, one will be created in the home folder
:raises: `IOError` when no config file could be found, and `create` is False.
"""
config = None
home_path = os.path.join(os.path.expanduser('~'), '.flexget')
options_config = os.path.expanduser(self.options.config)
possible = []
if os.path.isabs(options_config):
# explicit path given, don't try anything
config = options_config
possible = [config]
else:
log.debug('Figuring out config load paths')
try:
possible.append(os.getcwdu())
except OSError:
log.debug('current directory invalid, not searching for config there')
# for virtualenv / dev sandbox
if hasattr(sys, 'real_prefix'):
log.debug('Adding virtualenv path')
possible.append(sys.prefix.decode(sys.getfilesystemencoding()))
# normal lookup locations
possible.append(home_path)
if sys.platform.startswith('win'):
# On windows look in ~/flexget as well, as explorer does not let you create a folder starting with a dot
home_path = os.path.join(os.path.expanduser('~'), 'flexget')
possible.append(home_path)
else:
# The freedesktop.org standard config location
xdg_config = os.environ.get('XDG_CONFIG_HOME', os.path.join(os.path.expanduser('~'), '.config'))
possible.append(os.path.join(xdg_config, 'flexget'))
for path in possible:
config = os.path.join(path, options_config)
if os.path.exists(config):
log.debug('Found config: %s' % config)
break
else:
config = None
if create and not (config and os.path.exists(config)):
config = os.path.join(home_path, options_config)
log.info('Config file %s not found. Creating new config %s' % (options_config, config))
with open(config, 'w') as newconfig:
# Write empty tasks to the config
newconfig.write(yaml.dump({'tasks': {}}))
elif not config:
log.critical('Failed to find configuration file %s' % options_config)
log.info('Tried to read from: %s' % ', '.join(possible))
raise IOError('No configuration file found.')
if not os.path.isfile(config):
raise IOError('Config `%s` does not appear to be a file.' % config)
log.debug('Config file %s selected' % config)
self.config_path = config
self.config_name = os.path.splitext(os.path.basename(config))[0]
self.config_base = os.path.normpath(os.path.dirname(config))
self.lockfile = os.path.join(self.config_base, '.%s-lock' % self.config_name)
def load_config(self):
"""
Loads the config file from disk, validates and activates it.
:raises: `ValueError` if there is a problem loading the config file
"""
fire_event('manager.before_config_load', self)
with codecs.open(self.config_path, 'rb', 'utf-8') as f:
try:
raw_config = f.read()
except UnicodeDecodeError:
log.critical('Config file must be UTF-8 encoded.')
raise ValueError('Config file is not UTF-8 encoded')
try:
config = yaml.safe_load(raw_config) or {}
except Exception as e:
msg = str(e).replace('\n', ' ')
msg = ' '.join(msg.split())
log.critical(msg, exc_info=False)
print('')
print('-' * 79)
print(' Malformed configuration file (check messages above). Common reasons:')
print('-' * 79)
print('')
print(' o Indentation error')
print(' o Missing : from end of the line')
print(' o Non ASCII characters (use UTF8)')
print(' o If text contains any of :[]{}% characters it must be single-quoted ' \
'(eg. value{1} should be \'value{1}\')\n')
            # Not great practice, but several kinds of exceptions can end up here
            # (at least ReaderError and the YAML scanner/parser errors)
if hasattr(e, 'problem') and hasattr(e, 'context_mark') and hasattr(e, 'problem_mark'):
lines = 0
if e.problem is not None:
print(' Reason: %s\n' % e.problem)
if e.problem == 'mapping values are not allowed here':
print(' ----> MOST LIKELY REASON: Missing : from end of the line!')
print('')
if e.context_mark is not None:
print(' Check configuration near line %s, column %s' % (e.context_mark.line, e.context_mark.column))
lines += 1
if e.problem_mark is not None:
print(' Check configuration near line %s, column %s' % (e.problem_mark.line, e.problem_mark.column))
lines += 1
if lines:
print('')
if lines == 1:
print(' Fault is almost always in this or previous line\n')
if lines == 2:
print(' Fault is almost always in one of these lines or previous ones\n')
# When --debug escalate to full stacktrace
if self.options.debug:
raise
raise ValueError('Config file is not valid YAML')
# config loaded successfully
log.debug('config_name: %s' % self.config_name)
log.debug('config_base: %s' % self.config_base)
# Install the newly loaded config
self.update_config(config)
def update_config(self, config):
"""
Provide a new config for the manager to use.
:raises: `ValueError` and rolls back to previous config if the provided config is not valid.
"""
old_config = self.config
try:
self.config = self.validate_config(config)
except ValueError as e:
for error in getattr(e, 'errors', []):
log.critical('[%s] %s', error.json_pointer, error.message)
log.debug('invalid config, rolling back')
self.config = old_config
raise
log.debug('New config data loaded.')
fire_event('manager.config_updated', self)
def save_config(self):
"""Dumps current config to yaml config file"""
# Back up the user's current config before overwriting
backup_path = os.path.join(self.config_base,
'%s-%s.bak' % (self.config_name, datetime.now().strftime('%y%m%d%H%M%S')))
log.debug('backing up old config to %s before new save' % backup_path)
shutil.copy(self.config_path, backup_path)
with open(self.config_path, 'w') as config_file:
config_file.write(yaml.dump(self.config, default_flow_style=False))
def config_changed(self):
"""Makes sure that all tasks will have the config_modified flag come out true on the next run.
Useful when changing the db and all tasks need to be completely reprocessed."""
from flexget.task import config_changed
for task in self.tasks:
config_changed(task)
def validate_config(self, config=None):
"""
Check all root level keywords are valid. Config may be modified by before_config_validate hooks. Modified
config will be returned.
:param config: Config to check. If not provided, current manager config will be checked.
:raises: `ValueError` when config fails validation. There will be an `errors` attribute with the schema errors.
:returns: Final validated config.
"""
if not config:
config = self.config
config = fire_event('manager.before_config_validate', config, self)
errors = config_schema.process_config(config)
if errors:
err = ValueError('Did not pass schema validation.')
err.errors = errors
raise err
else:
return config
def init_sqlalchemy(self):
"""Initialize SQLAlchemy"""
try:
if [int(part) for part in sqlalchemy.__version__.split('.')] < [0, 7, 0]:
print('FATAL: SQLAlchemy 0.7.0 or newer required. Please upgrade your SQLAlchemy.', file=sys.stderr)
sys.exit(1)
except ValueError as e:
log.critical('Failed to check SQLAlchemy version, you may need to upgrade it')
# SQLAlchemy
if self.database_uri is None:
self.db_filename = os.path.join(self.config_base, 'db-%s.sqlite' % self.config_name)
if self.options.test:
db_test_filename = os.path.join(self.config_base, 'test-%s.sqlite' % self.config_name)
log.info('Test mode, creating a copy from database ...')
if os.path.exists(self.db_filename):
shutil.copy(self.db_filename, db_test_filename)
self.db_filename = db_test_filename
# Different database, different lock file
self.lockfile = os.path.join(self.config_base, '.test-%s-lock' % self.config_name)
log.info('Test database created')
# in case running on windows, needs double \\
filename = self.db_filename.replace('\\', '\\\\')
self.database_uri = 'sqlite:///%s' % filename
if self.db_filename and not os.path.exists(self.db_filename):
log.verbose('Creating new database %s ...' % self.db_filename)
# fire up the engine
log.debug('Connecting to: %s' % self.database_uri)
try:
self.engine = sqlalchemy.create_engine(self.database_uri,
echo=self.options.debug_sql,
poolclass=SingletonThreadPool,
connect_args={'check_same_thread': False, 'timeout': 10})
except ImportError:
print('FATAL: Unable to use SQLite. Are you running Python 2.5 - 2.7 ?\n'
'Python should normally have SQLite support built in.\n'
'If you\'re running correct version of Python then it is not equipped with SQLite.\n'
'You can try installing `pysqlite`. If you have compiled python yourself, '
'recompile it with SQLite support.', file=sys.stderr)
sys.exit(1)
Session.configure(bind=self.engine)
# create all tables, doesn't do anything to existing tables
try:
Base.metadata.create_all(bind=self.engine)
except OperationalError as e:
if os.path.exists(self.db_filename):
print('%s - make sure you have write permissions to file %s' %
(e.message, self.db_filename), file=sys.stderr)
else:
print('%s - make sure you have write permissions to directory %s' %
(e.message, self.config_base), file=sys.stderr)
raise
def _read_lock(self):
"""
Read the values from the lock file. Returns None if there is no current lock file.
"""
if self.lockfile and os.path.exists(self.lockfile):
result = {}
with open(self.lockfile) as f:
lines = [l for l in f.readlines() if l]
for line in lines:
try:
key, value = line.split(b':', 1)
except ValueError:
log.debug('Invalid line in lock file: %s' % line)
continue
result[key.strip().lower()] = value.strip()
for key in result:
if result[key].isdigit():
result[key] = int(result[key])
result.setdefault('pid', None)
if not result['pid']:
log.error('Invalid lock file. Make sure FlexGet is not running, then delete it.')
elif not pid_exists(result['pid']):
return None
return result
return None
def check_lock(self):
"""Returns True if there is a lock on the database."""
lock_info = self._read_lock()
if not lock_info:
return False
# Don't count it if we hold the lock
if os.getpid() == lock_info['pid']:
return False
return True
def check_ipc_info(self):
"""If a daemon has a lock on the database, return info to connect to IPC."""
lock_info = self._read_lock()
if lock_info and 'port' in lock_info:
return lock_info
return None
@contextmanager
def acquire_lock(self, event=True):
"""
:param bool event: If True, the 'manager.lock_acquired' event will be fired after a lock is obtained
"""
acquired = False
try:
# Don't do anything if we already have a lock. This means only the outermost call will release the lock file
if not self._has_lock:
# Exit if there is an existing lock.
if self.check_lock():
with open(self.lockfile) as f:
pid = f.read()
print('Another process (%s) is running, will exit.' % pid.split('\n')[0], file=sys.stderr)
print('If you\'re sure there is no other instance running, delete %s' % self.lockfile,
file=sys.stderr)
sys.exit(1)
self._has_lock = True
self.write_lock()
acquired = True
if event:
fire_event('manager.lock_acquired', self)
yield
finally:
if acquired:
self.release_lock()
self._has_lock = False
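    # Usage sketch: acquire_lock is re-entrant via the _has_lock flag, so the usual
    # pattern mirrors start() above:
    #   with self.acquire_lock():
    #       self.initialize()
    #       self.handle_cli()
    # Nested calls neither rewrite nor release the lock file; only the outermost
    # context manager removes it.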
def write_lock(self, ipc_info=None):
assert self._has_lock
with open(self.lockfile, 'w') as f:
f.write(b'PID: %s\n' % os.getpid())
if ipc_info:
for key in sorted(ipc_info):
f.write(b'%s: %s\n' % (key, ipc_info[key]))
def release_lock(self):
if os.path.exists(self.lockfile):
os.remove(self.lockfile)
log.debug('Removed %s' % self.lockfile)
else:
log.debug('Lockfile %s not found' % self.lockfile)
def daemonize(self):
"""Daemonizes the current process. Returns the new pid"""
if sys.platform.startswith('win'):
log.error('Cannot daemonize on windows')
return
if threading.activeCount() != 1:
log.critical('There are %r active threads. '
'Daemonizing now may cause strange failures.' % threading.enumerate())
log.info('Daemonizing...')
try:
pid = os.fork()
if pid > 0:
# Don't run the exit handlers on the parent
atexit._exithandlers = []
# exit first parent
sys.exit(0)
except OSError as e:
sys.stderr.write('fork #1 failed: %d (%s)\n' % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir('/')
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# Don't run the exit handlers on the parent
atexit._exithandlers = []
# exit from second parent
sys.exit(0)
except OSError as e:
sys.stderr.write('fork #2 failed: %d (%s)\n' % (e.errno, e.strerror))
sys.exit(1)
log.info('Daemonize complete. New PID: %s' % os.getpid())
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file('/dev/null', 'r')
so = file('/dev/null', 'a+')
se = file('/dev/null', 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# If we have a lock, update the lock file with our new pid
if self._has_lock:
self.write_lock()
def db_cleanup(self, force=False):
"""
Perform database cleanup if cleanup interval has been met.
Fires events:
* manager.db_cleanup
If interval was met. Gives session to do the cleanup as a parameter.
:param bool force: Run the cleanup no matter whether the interval has been met.
"""
expired = self.persist.get('last_cleanup', datetime(1900, 1, 1)) < datetime.now() - DB_CLEANUP_INTERVAL
if force or expired:
log.info('Running database cleanup.')
session = Session()
try:
fire_event('manager.db_cleanup', session)
session.commit()
finally:
session.close()
# Just in case some plugin was overzealous in its cleaning, mark the config changed
self.config_changed()
self.persist['last_cleanup'] = datetime.now()
else:
log.debug('Not running db cleanup, last run %s' % self.persist.get('last_cleanup'))
def shutdown(self, finish_queue=True):
"""
Request manager shutdown.
:param bool finish_queue: Should scheduler finish the task queue
"""
if not self.initialized:
raise RuntimeError('Cannot shutdown manager that was never initialized.')
self.task_queue.shutdown(finish_queue)
def _shutdown(self):
"""Runs when the manager is done processing everything."""
fire_event('manager.shutdown', self)
if not self.unit_test: # don't scroll "nosetests" summary results when logging is enabled
log.debug('Shutting down')
self.engine.dispose()
# remove temporary database used in test mode
if self.options.test:
            if 'test' not in self.db_filename:
raise Exception('trying to delete non test database?')
if self._has_lock:
os.remove(self.db_filename)
log.info('Removed test database')
if not self.unit_test: # don't scroll "nosetests" summary results when logging is enabled
log.debug('Shutdown completed')
| 40.780158
| 120
| 0.595488
|
c37ed6aec9858119f329b590a81bb91d689fd540
| 3,337
|
py
|
Python
|
cloudframe/manager/builder.py
|
cloudken/faas-build
|
ab72151c7a518377f368c10136ecadb3e86430b7
|
[
"Apache-2.0"
] | null | null | null |
cloudframe/manager/builder.py
|
cloudken/faas-build
|
ab72151c7a518377f368c10136ecadb3e86430b7
|
[
"Apache-2.0"
] | null | null | null |
cloudframe/manager/builder.py
|
cloudken/faas-build
|
ab72151c7a518377f368c10136ecadb3e86430b7
|
[
"Apache-2.0"
] | null | null | null |
from six.moves import http_client
from datetime import datetime
import logging
import os
from cloudframe.common import exception
from cloudframe.common.config import HostConfig
from cloudframe.common.config import get_faas_buildinfo
from cloudframe.common.config import FAAS_CONFIG_PATH
from cloudframe.common.http_rpc import HRPC
from cloudframe.pipeline.ans_docker import Faas
LOG = logging.getLogger(__name__)
MAX_INS = 20
RES_STATUS_DONE = 'done'
RES_STATUS_INIT = 'initializing'
RES_STATUS_DOING = 'doing'
RES_STATUS_ERROR = 'error'
class FaaSBuilder(object):
def __init__(self, host_config, base_package):
config_file = FAAS_CONFIG_PATH + host_config
base_file = FAAS_CONFIG_PATH + base_package
self.isReady = False
if not (os.path.isfile(config_file) and os.path.isfile(base_file)):
LOG.error('FaaSBuilder: config %(cnf)s or package %(pkg)s is invalid.',
{'cnf': config_file, 'pkg': base_file})
return
try:
hc = HostConfig(config_file)
rv = hc.get_host_info()
self.hosts = rv[0]
self.host_global = rv[1]
self.driver = Faas(self.hosts, self.host_global['registry'], base_package)
self.pipelines = {}
LOG.debug('---- config info ----')
LOG.debug('---- global: %(global)s', {'global': self.host_global})
for host in self.hosts:
LOG.debug('---- host: %(host)s', {'host': host})
self.isReady = True
except Exception as e:
LOG.error('Read host_config(%(config)s) failed, error_info: %(error)s',
{'config': config_file, 'error': e})
def get(self, res_name):
if not self.isReady:
raise exception.HttpError
if res_name not in self.pipelines:
raise exception.NotFound
return http_client.OK, self.pipelines[res_name]
def create_pipeline(self, info):
if not self.isReady:
raise exception.HttpError
res_name = info['res_name']
faas_desc = info['faas_desc']
faas_pkg = info['faas_pkg']
LOG.debug('Pipeline for %(res)s begin...', {'res': res_name})
resource = {
'name': res_name,
'package': faas_pkg,
'created_at': datetime.now(),
'status': RES_STATUS_INIT
}
self.pipelines[res_name] = resource
try:
faas_input = get_faas_buildinfo(faas_desc, resource)
resource['status'] = RES_STATUS_DOING
LOG.debug('Get description end, %(res)s', {'res': resource})
self.driver.create(resource)
host = os.environ['FAAS_API_SERVER']
rpc = HRPC(host, '/serverless/v1/faas')
rpc.put(faas_input)
resource['status'] = RES_STATUS_DONE
resource['finished_at'] = datetime.now()
LOG.debug('Pipeline for %(res_name)s end.', {'res_name': res_name})
LOG.debug('Resource info: %(res)s', {'res': resource})
except Exception as e:
resource['status'] = RES_STATUS_ERROR
resource['finished_at'] = datetime.now()
LOG.error('Pipeline for %(res_name)s failed, error info: %(err)s',
{'res_name': res_name, 'err': e})
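# Hedged usage sketch (file names and payload values are hypothetical): the builder
# expects its config and base package paths relative to FAAS_CONFIG_PATH and a request
# dict carrying the three keys read in create_pipeline:
#   builder = FaaSBuilder('hosts.yaml', 'base.tar.gz')
#   if builder.isReady:
#       builder.create_pipeline({'res_name': 'hello-fn',
#                                'faas_desc': 'hello-fn.yaml',
#                                'faas_pkg': 'hello-fn.tar.gz'})
#       status, resource = builder.get('hello-fn')   # (200, {'name': 'hello-fn', ...})
# get() raises exception.NotFound until create_pipeline has registered the resource.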
| 38.356322
| 86
| 0.603536
|
6208aedca6f392e06035f826e6922af4bd68bf3a
| 3,207
|
py
|
Python
|
requests/exceptions.py
|
hwms/requests
|
fa3fabc0732980230bae00bd8a30098f0a0bd30f
|
[
"Apache-2.0"
] | null | null | null |
requests/exceptions.py
|
hwms/requests
|
fa3fabc0732980230bae00bd8a30098f0a0bd30f
|
[
"Apache-2.0"
] | 3
|
2020-03-24T18:09:12.000Z
|
2021-02-02T22:28:25.000Z
|
requests/exceptions.py
|
hwms/requests
|
fa3fabc0732980230bae00bd8a30098f0a0bd30f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
requests.exceptions
~~~~~~~~~~~~~~~~~~~
This module contains the set of Requests' exceptions.
"""
from urllib3.exceptions import HTTPError as BaseHTTPError
class RequestException(IOError):
"""There was an ambiguous exception that occurred while handling your
request.
"""
def __init__(self, *args, **kwargs):
"""Initialize RequestException with `request` and `response` objects."""
response = kwargs.pop('response', None)
self.response = response
self.request = kwargs.pop('request', None)
if (response is not None and not self.request and
hasattr(response, 'request')):
self.request = self.response.request
super(RequestException, self).__init__(*args, **kwargs)
class HTTPError(RequestException):
"""An HTTP error occurred."""
class ConnectionError(RequestException): #@ReservedAssignment
"""A Connection error occurred."""
class ProxyError(ConnectionError):
"""A proxy error occurred."""
class SSLError(ConnectionError):
"""An SSL error occurred."""
class Timeout(RequestException):
"""The request timed out.
Catching this error will catch both
:exc:`~requests.exceptions.ConnectTimeout` and
:exc:`~requests.exceptions.ReadTimeout` errors.
"""
class ConnectTimeout(ConnectionError, Timeout):
"""The request timed out while trying to connect to the remote server.
Requests that produced this error are safe to retry.
"""
class ReadTimeout(Timeout):
"""The server did not send any data in the allotted amount of time."""
class URLRequired(RequestException):
"""A valid URL is required to make a request."""
class TooManyRedirects(RequestException):
"""Too many redirects."""
class MissingSchema(RequestException, ValueError):
"""The URL schema (e.g. http or https) is missing."""
class InvalidSchema(RequestException, ValueError):
"""See defaults.py for valid schemas."""
class InvalidURL(RequestException, ValueError):
"""The URL provided was somehow invalid."""
class InvalidHeader(RequestException, ValueError):
"""The header value provided was somehow invalid."""
class InvalidProxyURL(InvalidURL):
"""The proxy URL provided is invalid."""
class ChunkedEncodingError(RequestException):
"""The server declared chunked encoding but sent an invalid chunk."""
class ContentDecodingError(RequestException, BaseHTTPError):
"""Failed to decode response content"""
class StreamConsumedError(RequestException, TypeError):
"""The content for this response was already consumed"""
class RetryError(RequestException):
"""Custom retries logic failed"""
class UnrewindableBodyError(RequestException):
"""Requests encountered an error when trying to rewind a body"""
# Warnings
class RequestsWarning(Warning):
"""Base warning for Requests."""
pass
class FileModeWarning(RequestsWarning, DeprecationWarning):
"""A file was opened in text mode, but Requests determined its binary length."""
pass
class RequestsDependencyWarning(RequestsWarning):
"""An imported dependency doesn't match the expected version range."""
pass
| 25.251969
| 84
| 0.708762
|
4bfc3af22040261928a63c8e80461d789fbe86d4
| 19,982
|
py
|
Python
|
processit/preprocessor.py
|
tatkaal/preprocessor
|
f3002d976f3cedbb46d96122e47b1d922118abed
|
[
"MIT"
] | null | null | null |
processit/preprocessor.py
|
tatkaal/preprocessor
|
f3002d976f3cedbb46d96122e47b1d922118abed
|
[
"MIT"
] | null | null | null |
processit/preprocessor.py
|
tatkaal/preprocessor
|
f3002d976f3cedbb46d96122e47b1d922118abed
|
[
"MIT"
] | null | null | null |
# Install this
# pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib
import os
import re
import io
import unidecode
import pickle
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from googleapiclient.http import MediaIoBaseDownload
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from nltk.corpus import stopwords
from pycontractions import Contractions
from autocorrect import Speller
from processit.file_reader import prepare_text
from processit.contractions import to_replace
from gensim import downloader as api
from processit.configurations import pretrained_model, file_storage, token_file, credentials_json
java_path = "C:/Program Files/Java/jdk1.8.0_261/bin/java.exe"
os.environ['JAVAHOME'] = java_path
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/drive.readonly']
class PreProcessor():
def __init__(self, file_path=None,doc_link=None,folder_link=None,remove_stopwords=True, lower=True, tokenize_word=True,contraction_method='mapping',
remove_numbers=True, remove_html_tags=True,
remove_punctuations=True, remove_accented_chars=True, remove_whitespace=True,
lemmatize_method='wordnet',
embedding_method='word2vec',
auto_correct=True):
"""
This package contains functions that can help during the
preprocessing of text data.
:param remove_stopwords: boolean
default value = True
        :param contraction_method: str
            default value = 'mapping'
"""
if (type(remove_stopwords) != bool or
type(lower) != bool or
type(tokenize_word) != bool or
# type(tokenize_sent) != bool or
type(remove_numbers) != bool or
type(remove_html_tags) != bool or
type(remove_punctuations) != bool or
type(remove_accented_chars) != bool or
type(auto_correct) != bool or
type(remove_whitespace) != bool):
raise Exception("Error - expecting a boolean parameter")
if lemmatize_method not in ['wordnet', 'snowball']:
raise Exception("Error - lemmatizer method not supported")
else:
self.lemmatize = True
if contraction_method not in ['glove','word2vec','mapping']:
raise Exception("Error - contraction method not supported")
else:
self.contractions = True
if embedding_method not in ['glove','word2vec','bow']:
raise Exception("Error - embedding method not supported")
else:
self.word_embedding = True
if file_path == None and doc_link==None and folder_link==None:
raise Exception("Error - expecting the file path")
self.doc = None
self.sents = None
self.tweets = None
self.lemmatizer = None
self.file_path = file_path
self.doc_link = doc_link
self.folder_link = folder_link
self.lower = lower
self.remove_stopwords = remove_stopwords
self.contraction_method = contraction_method
self.embedding_method = embedding_method
self.remove_numbers = remove_numbers
self.remove_html_tags = remove_html_tags
self.remove_punctations = remove_punctuations
self.remove_accented_chars = remove_accented_chars
self.remove_whitespace = remove_whitespace
self.lemmatize_method = lemmatize_method
self.stopword_list = stopwords.words('english')
self.replacement_list = to_replace
self.tokenize_word = tokenize_word
# self.tokenize_sent = tokenize_sent
self.auto_correct = auto_correct
if self.lemmatize_method == 'wordnet':
self.lemmatizer = WordNetLemmatizer()
if self.lemmatize_method == 'snowball':
self.lemmatizer = SnowballStemmer('english')
def file_reader(self):
file_content = prepare_text(self.file_path, dolower=False)
return file_content
def doc_downloader(self,document_link,document_type,document_name):
# Extracting the ID from the given link
pattern = r"(?<=d/)(.+)(?=/)"
DOCUMENT_ID = re.findall(pattern,document_link)[0]
print (f"DOCUMENT ID: {DOCUMENT_ID}")
# Specifying the format in which the document will be downloaded
if document_type.lower() in ['docx',"doc"]:
file_format = "docx"
elif document_type.lower() in ['pdf']:
file_format = "pdf"
else:
print ("Document Format Not Supported. Only Docs, Doc and PDF are supported")
return None
creds = None
if os.path.exists(token_file):
with open(token_file,'rb') as token:
creds = pickle.load(token)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
credentials_json,SCOPES)
creds = flow.run_local_server(port=0)
with open(token_file,'wb') as token:
pickle.dump(creds,token)
service = build('drive','v3',credentials=creds)
file_name = '.'.join([document_name,file_format])
try:
print ("Downloading file")
request = service.files().get_media(fileId=DOCUMENT_ID)
fh = io.BytesIO()
downloader = MediaIoBaseDownload(fd=fh,request=request)
done = False
while done is False:
status, done = downloader.next_chunk()
print (f"Download {status.progress()*100}")
except:
print ("Downloading MS Word Document file")
request = service.files().export_media(fileId=DOCUMENT_ID,mimeType='application/vnd.openxmlformats-officedocument.wordprocessingml.document')
fh = io.BytesIO()
downloader = MediaIoBaseDownload(fd=fh,request=request)
done = False
while done is False:
status, done = downloader.next_chunk()
print (f"Download {status.progress()*100}")
fh.seek(0)
with open(os.path.join(file_storage,file_name),'wb') as f:
f.write(fh.read())
f.close()
print("SAVED")
def folder_downloader(self,folder_link):
# Extracting the ID from the given link
pattern = r'(?<=folders/)(\w+)'
DOCUMENT_ID = re.findall(pattern,folder_link)[0]
print (f"DOCUMENT ID: {DOCUMENT_ID}")
creds = None
if os.path.exists(token_file):
with open(token_file,'rb') as token:
creds = pickle.load(token)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
credentials_json,SCOPES)
creds = flow.run_local_server(port=0)
with open(token_file,'wb') as token:
pickle.dump(creds,token)
service = build('drive','v3',credentials=creds)
listofFiles = []
page_token = None
# docx_query = f"'{DOCUMENT_ID}' in parents and mimeType='application/vnd.openxmlformats-officedocument.wordprocessingml.document'"
# pdf_query = f"'{DOCUMENT_ID}' in parents and mimeType='application/pdf'"
# txt_query = f"'{DOCUMENT_ID}' in parents and mimeType='text/plain'"
query = f"'{DOCUMENT_ID}' in parents"
while True:
response = service.files().list(
q=query,
fields='nextPageToken, files(id, name)',
pageToken = page_token,
includeItemsFromAllDrives=True,
supportsAllDrives=True
).execute()
for file in response.get('files',[]):
listofFiles.append(file)
page_token = response.get('nextPageToken',None)
if page_token is None:
break
for item in listofFiles:
document_id = item['id']
file_name = item['name']
name_splitted = file_name.split(".")
if len(name_splitted) == 1:
file_name = '.'.join([file_name,"docx"])
try:
print ("Downloading docx file")
print (file_name)
request = service.files().get_media(fileId=document_id)
fh = io.BytesIO()
downloader = MediaIoBaseDownload(fd=fh,request=request)
done = False
while done is False:
status, done = downloader.next_chunk()
print (f"Download {status.progress()*100}")
except:
print ("Downloading doc file")
print (file_name)
request = service.files().export_media(fileId=document_id,mimeType='application/vnd.openxmlformats-officedocument.wordprocessingml.document')
fh = io.BytesIO()
downloader = MediaIoBaseDownload(fd=fh,request=request)
done = False
while done is False:
status, done = downloader.next_chunk()
print (f"Download {status.progress()*100}")
fh.seek(0)
with open(file_storage+'/'+file_name,'wb') as f:
f.write(fh.read())
f.close()
def lower_fun(self):
"""
        This function converts the text to lowercase
"""
self.doc = self.doc.lower()
def remove_stopwords_fun(self):
"""
This function removes stopwords from doc.
It works by tokenizing the doc and then
checking if the word is present in stopwords
"""
# tokens = str(self.doc).split()
tokens = word_tokenize(self.doc)
cleaned_tokens = [token for token in tokens if token.lower() not in self.stopword_list]
self.doc = ' '.join(cleaned_tokens)
def word_embedding_fun(self):
# if(self.tokenize_sent==False):
# self.doc = sent_tokenize(self.doc)
if(self.tokenize_word==False):
self.tokenize_word_fun()
if self.embedding_method == 'glove':
model = api.load("glove-twitter-25")
vecs=[]
for x in self.doc:
vec = [model[i] for i in x]
vecs.append(vec)
self.doc = vecs
# print(vecs)
elif self.embedding_method == 'word2vec':
pass
elif self.embedding_method == 'bow':
pass
def mapping_decontraction(self,phrase):
cleaned_doc = []
for word in str(self.doc).split():
if word.lower() in self.replacement_list.keys():
cleaned_doc.append(self.replacement_list[word.lower()])
else:
cleaned_doc.append(word)
phrase = ' '.join(cleaned_doc)
return phrase
def contractions_fun(self):
"""
        This function expands contractions by checking whether
        a word is present in a dictionary of contractions;
        if the word is present in the dictionary then it is replaced
        with its value from the dictionary
"""
if self.contraction_method == 'mapping':
self.doc = self.mapping_decontraction(str(self.doc))
elif self.contraction_method == 'word2vec':
model = pretrained_model
cont = Contractions(model)
cont.load_models()
self.doc = list(cont.expand_texts([str(self.doc)],precise=True))[0]
elif self.contraction_method == 'glove':
model = api.load("glove-twitter-25")
cont = Contractions(kv_model=model)
cont.load_models()
self.doc = list(cont.expand_texts([str(self.doc)],precise=True))[0]
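    # Illustrative sketch of the 'mapping' branch: given a replacement_list entry
    # such as {"can't": "cannot"} (the actual mapping lives in
    # processit.contractions.to_replace), mapping_decontraction turns "I can't go"
    # into "I cannot go" by replacing every lower-cased token found in the
    # dictionary and re-joining the tokens with spaces.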
def remove_numbers_fun(self):
"""
        This function uses regex to remove
all the numbers from the doc.
"""
self.doc = re.sub("[0-9]", "", self.doc)
self.doc = self.doc.strip()
self.doc = " ".join(self.doc.split())
def autocorrect_fun(self):
spell = Speller(lang='en')
self.doc = [spell(w) for w in word_tokenize(self.doc)]
def remove_html_tags_fun(self):
"""
        This function uses regex's compile method
to remove all the HTML tags from the doc
"""
cleaner = re.compile('<.*?>')
cleaned_text = re.sub(cleaner, '', self.doc)
cleaned_text = re.sub('[\n\t]', '', cleaned_text)
self.doc = cleaned_text.strip()
self.doc = " ".join(self.doc.split())
def remove_punctations_fun(self):
"""
        This function uses regex to remove all the
        punctuation from the doc.
"""
self.doc = re.sub('[^a-zA-Z0-9]', ' ', self.doc)
self.doc = self.doc.strip()
self.doc = " ".join(self.doc.split())
def remove_accented_chars_fun(self):
"""remove accented characters from text, e.g. café"""
self.doc = unidecode.unidecode(self.doc)
def remove_whitespace_fun(self):
"""remove extra whitespaces from text"""
text = self.doc.strip()
self.doc = " ".join(text.split())
def tokenize_word_fun(self):
"""tokenizes the sentences to words"""
self.doc = word_tokenize(self.doc)
# def tokenize_sent_fun(self):
# """tokenizes the paragraphs to sentences"""
# self.sents = sent_tokenize(self.doc)
def lemmatize_fun(self):
"""
        This function applies stemming or lemmatization to the words
It can be operated with either WordNetLemmatizer
or Snowball Stemmer
---------------------------
Example:
        lemmatize_method='snowball'
        default value = 'wordnet'
"""
cleaned_tokens = None
if self.lemmatize_method == 'wordnet':
cleaned_tokens = [self.lemmatizer.lemmatize(token) for token in self.doc]
elif self.lemmatize_method == 'snowball':
cleaned_tokens = [self.lemmatizer.stem(token) for token in self.doc]
self.doc = ' '.join(cleaned_tokens)
    def add_stopword(self, *args):
        """
        This function is used to add new stopwords
        to the predefined list.
        Parameters - ["new_stopword"]
        ------------------------------
        Example -
        obj = NLP()
        obj.add_stopword(["first_word", "second_word"])
        """
        if self.remove_stopwords is False:
            raise Exception("Please enable removal of stopwords")
        for arg in args:
            if isinstance(arg, list):
                # allow a whole list of stopwords to be passed in one call
                self.stopword_list.update(arg)
            else:
                self.stopword_list.add(arg)
    def print_stopwords(self):
        """
        This function prints all the stopwords
        that are present in the list.
        ------------------------------
        Example
        obj = NLP()
        obj.print_stopwords()
        """
        if not self.stopword_list:
            raise Exception("Error - stopword list is empty")
        print(self.stopword_list)
    def process(self):
        """
        This function processes the doc.
        If the remove_stopwords flag is True
        - it will remove stopwords from the doc
        If the clean_words flag is True
        - it will clean the doc by replacing words
        The input is read from file_path, doc_link or folder_link
        (whichever was supplied) and a list of processed sentences
        is returned.
        ------------------------------
        Example
        obj = NLP()
        obj.process()
        How to use with pandas?
        obj = NLP()
        df = df['text'].apply(obj.process)
        """
        if self.file_path is not None:
            data = self.file_reader()
        if self.doc_link is not None:
            self.doc_downloader(self.doc_link, "docx", "testing_document")
            path = file_storage + '/testing_document.docx'
            data = prepare_text(path, dolower=False)
        if self.folder_link is not None:
            self.folder_downloader(self.folder_link)
            data = 'test'
        output = []
        self.sents = sent_tokenize(data)
        for doc in self.sents:
            self.doc = doc
            if self.lower is True:
                self.lower_fun()
            if self.contractions is True:
                self.contractions_fun()
            if self.remove_html_tags is True:
                self.remove_html_tags_fun()
            if self.remove_numbers is True:
                self.remove_numbers_fun()
            if self.remove_punctations is True:
                self.remove_punctations_fun()
            if self.remove_accented_chars is True:
                self.remove_accented_chars_fun()
            if self.remove_stopwords is True:
                self.remove_stopwords_fun()
            if self.remove_whitespace is True:
                self.remove_whitespace_fun()
            if self.auto_correct is True:
                self.autocorrect_fun()
            if self.lemmatize is True:
                self.lemmatize_fun()
            if self.tokenize_word is True:
                self.tokenize_word_fun()
            if self.word_embedding is True:
                self.word_embedding_fun()
            output.append(self.doc)
        return output
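
    # Example (hedged): with lower, remove_numbers and remove_punctations
    # enabled, a sentence such as "Hello!!  123 World" comes out of the
    # pipeline above as "hello world".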
def local_processor(path):
    prepObj = PreProcessor(
        file_path=path,
        lower=True,
        tokenize_word=False,  # if False the output will be a list of sentences
        remove_stopwords=True,
        remove_numbers=True,
        remove_html_tags=True,
        remove_punctuations=True,
        remove_accented_chars=True,
        remove_whitespace=True,
        auto_correct=True,
        lemmatize_method='snowball',
        embedding_method='word2vec',
        contraction_method='mapping',
    )
    preprocessed = prepObj.process()
    with open(file_storage + '/local_processed.txt', 'w', encoding='utf-8') as f:
        f.write(str(preprocessed))
    return preprocessed
def url_file_processor(path):
    prepObj = PreProcessor(
        doc_link=path,
        lower=True,
        tokenize_word=False,  # if False the output will be a list of sentences
        remove_stopwords=True,
        remove_numbers=True,
        remove_html_tags=True,
        remove_punctuations=True,
        remove_accented_chars=True,
        remove_whitespace=True,
        auto_correct=True,
        lemmatize_method='snowball',
        embedding_method='word2vec',
        contraction_method='mapping',
    )
    preprocessed = prepObj.process()
    with open(file_storage + '/url_file_processed.txt', 'w', encoding='utf-8') as f:
        f.write(str(preprocessed))
    return preprocessed
def url_folder_processor(path):
    prepObj = PreProcessor(
        folder_link=path,
        lower=True,
        tokenize_word=False,  # if False the output will be a list of sentences
        remove_stopwords=True,
        remove_numbers=True,
        remove_html_tags=True,
        remove_punctuations=True,
        remove_accented_chars=True,
        remove_whitespace=True,
        auto_correct=True,
        lemmatize_method='snowball',
        embedding_method='word2vec',
        contraction_method='mapping',
    )
    preprocessed = prepObj.process()
    with open(file_storage + '/url_folder_processed.txt', 'w', encoding='utf-8') as f:
        f.write(str(preprocessed))
    return preprocessed
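
# Usage sketch (hedged): the path below is a placeholder, and `file_storage`
# is assumed to point at a writable directory configured earlier in this module.
if __name__ == '__main__':
    processed_sentences = local_processor('sample_docs/example.txt')
    print(processed_sentences[:3])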
avg_line_length: 38.57529 | max_line_length: 157 | alphanum_fraction: 0.590381
hexsha: b314c31995d4c078e1b65049b12c9366a038b216 | size: 5432 | ext: py | lang: Python
max_stars_repo_path: src/main.py | max_stars_repo_name: gfjiangly/RCNet | max_stars_repo_head_hexsha: ef6860f23943eb8e21fdec565019f2f8eda17673 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: src/main.py | max_issues_repo_name: gfjiangly/RCNet | max_issues_repo_head_hexsha: ef6860f23943eb8e21fdec565019f2f8eda17673 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: src/main.py | max_forks_repo_name: gfjiangly/RCNet | max_forks_repo_head_hexsha: ef6860f23943eb8e21fdec565019f2f8eda17673 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import torch
import torch.utils.data
from opts import opts
from models.model import create_model, load_model, save_model
from models.data_parallel import DataParallel
from logger import Logger
from datasets.dataset_factory import get_dataset
from trains.train_factory import train_factory
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    # torch.backends.cudnn.benchmark = False
    Dataset = get_dataset(opt.dataset, opt.task)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    logger = Logger(opt)
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
    print(torch.version.cuda)
    print(torch.backends.cudnn.version())
    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv, opt.fpn)
    # from thop import profile
    # input = torch.randn(1, 3, 512, 512)
    # flops, params = profile(model, (1, 3, 512, 512), device='cuda')
    # from thop.utils import clever_format
    # flops = clever_format(flops, "%.3f")
    # params = clever_format(params, "%.3f")
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(
            model, opt.load_model, optimizer, opt.resume, opt.lr, opt.lr_step)
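    # Note: when a checkpoint is given, load_model restores the network weights;
    # with opt.resume it also returns the optimizer state and the epoch to
    # resume training from.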
    Trainer = train_factory[opt.task]
    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)
    print('Setting up data...')
    val_loader = torch.utils.data.DataLoader(
        Dataset(opt, 'val'),
        batch_size=1,
        shuffle=False,
        num_workers=1,
        pin_memory=True
    )
    if opt.test:
        _, preds = trainer.val(0, val_loader)
        val_loader.dataset.run_eval(preds, opt.save_dir)
        return
    train_dataset = Dataset(opt, 'trainval')
    if opt.weighted:
        import _pickle as pickle
        weights = pickle.load(open('/CenterNet/data/log_weights.pkl', 'rb'))
        # seq_sample = torch.utils.data.sampler.SequentialSampler(train_dataset)
        # train_loader = torch.utils.data.DataLoader(
        #     train_dataset,
        #     shuffle=False,
        #     # num_workers=opt.num_workers,
        #     num_workers=0,
        #     pin_memory=False,
        #     # drop_last=True,
        #     sampler=seq_sample
        # )
        # for inds in train_loader:
        #     data = train_dataset[inds[0]['inds']]
        #     print(data)
        sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))
        train_loader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=opt.batch_size,
            # shuffle=False,
            num_workers=opt.num_workers,
            # num_workers=0,
            pin_memory=True,
            # drop_last=True,
            sampler=sampler
        )
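        # WeightedRandomSampler draws dataset indices with replacement, with
        # probability proportional to the loaded weights, so under-represented
        # samples can be drawn more often during an epoch.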
        # for i in range(100):
        #     print(torch.multinomial(torch.tensor(weights, dtype=torch.double), 16, replacement=True))
    else:
        train_loader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=opt.batch_size,
            shuffle=True,
            num_workers=opt.num_workers,
            pin_memory=True,
            drop_last=True,
            # collate_fn=collate_fn
        )
    print('Starting training...')
    best = 1e10
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
            with torch.no_grad():
                log_dict_val, preds = trainer.val(epoch, val_loader)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            if log_dict_val[opt.metric] < best:
                best = log_dict_val[opt.metric]
                save_model(os.path.join(opt.save_dir, 'model_best.pth'),
                           epoch, model)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'),
                       epoch, model, optimizer)
        logger.write('\n')
        if epoch in opt.lr_step:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                       epoch, model, optimizer)
            # Step decay: the learning rate is multiplied by an extra factor of
            # 0.1 each time a milestone in opt.lr_step is reached.
            lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    logger.close()
if __name__ == '__main__':
    opt = opts().parse()
    main(opt)
avg_line_length: 36.952381 | max_line_length: 104 | alphanum_fraction: 0.591863