| content | sha1 | id |
|---|---|---|
import numpy as np
import matplotlib.path as mplp
def outline_to_mask(line, x, y):
"""Create mask from outline contour
Parameters
----------
line: array-like (N, 2)
x, y: 1-D grid coordinates (input for meshgrid)
Returns
-------
mask : 2-D boolean array (True inside)
Examples
--------
>>> from shapely.geometry import Point
>>> poly = Point(0,0).buffer(1)
>>> x = np.linspace(-5,5,100)
>>> y = np.linspace(-5,5,100)
>>> mask = outline_to_mask(poly.boundary, x, y)
"""
mpath = mplp.Path(line)
X, Y = np.meshgrid(x, y)
points = np.array((X.flatten(), Y.flatten())).T
mask = mpath.contains_points(points).reshape(X.shape)
return mask | 1c1ab70ed949b10a052aae1b17239a5d7a08da64 | 3,633,300 |
from flask import request, jsonify
def check_ContentType():
    """
    How to apply pre-processing to all requests.
    Note: this can also be handled in a view class, as in django-rest-framework.
    """
if request.method != 'GET':
if (not request.content_type) or ('application/json' not in request.content_type):
msg = jsonify(
{"error": "content_type:'%s' not supported, please use 'application/json'." % request.content_type}
)
return msg, 400 | 7fc35fe40621a1cf0486d681b294ff636f293c06 | 3,633,301 |
import hashlib
from shapely.geometry import shape
def _hash_feature(feature):
"""Calculate SHA256 hash of feature geometry as WKT"""
geom = shape(feature["geometry"])
return hashlib.sha256(geom.to_wkt().encode("utf-8")).hexdigest() | bd1dc2ad46f0960a042066152b5622febe530411 | 3,633,302 |
import socket
def ssdp_scan(address=None, service=None, timeout=None):
"""
Returns a list of responses to an SSDP request
"""
if address is None:
address = DEFAULT_ADDR
if service is None:
service = DEFAULT_SERVICE
if timeout is None:
timeout = DEFAULT_TIMOUT
message = MESSAGE_TEMPLATE.format(host=address[0], port=address[1], service=service)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    sock.settimeout(timeout)
sock.sendto(message.encode(), address)
output = list()
while True:
try:
data = sock.recv(1024).decode()
output.append(data)
except socket.timeout:
break
return output | 43a0f557ed3f8b5b8a9c8085ce71a0e5a45a2f31 | 3,633,303 |
import numpy as np
import pandas as pd
def random_walk_timeseries(length: int = 10, freq: str = 'D', mean: float = 0, std: float = 1,
start_ts: pd.Timestamp = pd.Timestamp('2000-01-01')) -> 'TimeSeries':
"""
Creates a random walk TimeSeries by sampling a gaussian distribution with mean 'mean' and
standard deviation 'std'. The first value is one such random sample. Every subsequent value
is equal to the previous value plus a random sample.
:param mean: The mean of the gaussian distribution that is sampled at each step.
:param std: The standard deviation of the gaussian distribution that is sampled at each step.
:param length: The length of the returned TimeSeries.
:param freq: The time difference between two adjacent entries in the returned TimeSeries. A DateOffset alias is expected.
:param start_ts: The time index of the first entry in the returned TimeSeries.
:return: A random walk TimeSeries created as indicated above.
"""
times = pd.date_range(periods=length, freq=freq, start=start_ts)
values = np.cumsum(np.random.normal(mean, std, size=length))
return TimeSeries.from_times_and_values(times, values) | 754a52be186f6f05fd70c8019c61cf6c27059680 | 3,633,304 |
def provider_for(platform: str, source: str) -> ContentProvider:
"""
    A factory method that returns the appropriate data provider. Raises an exception to let you know if the
    arguments are unsupported.
    :param platform: One of the PLATFORM_* constants above.
    :param source: One of the PLATFORM_SOURCE_* constants above.
    :return: A ContentProvider for the requested platform/source pair.
"""
if (platform == PLATFORM_TWITTER) and (source == PLATFORM_SOURCE_TWITTER):
platform_provider = TwitterTwitterProvider(TWITTER_API_BEARER_TOKEN)
elif (platform == PLATFORM_REDDIT) and (source == PLATFORM_SOURCE_PUSHSHIFT):
platform_provider = RedditPushshiftProvider()
elif (platform == PLATFORM_YOUTUBE) and (source == PLATFORM_SOURCE_YOUTUBE):
platform_provider = YouTubeYouTubeProvider(YOUTUBE_API_KEY)
elif (platform == PLATFORM_ONLINE_NEWS) and (source == PLATFORM_SOURCE_MEDIA_CLOUD):
platform_provider = OnlineNewsMediaCloudProvider(MEDIA_CLOUD_API_KEY)
else:
raise UnknownProviderException(platform, source)
return platform_provider | 8f77453c6ec02d9bae571f12cd2de2a3420976b5 | 3,633,305 |
def create_app(config_name: str = "development") -> Flask:
"""
Factory for the creation of a Flask app.
:param config_name: the key for the config setting to use
:type config_name: str
:return: app: a Flask app instance
"""
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
db.init_app(app)
jwt.init_app(app)
ma.init_app(app)
@app.errorhandler(ValidationError)
def handle_marshmallow_validation_error(err):
return jsonify(error=str(err)), 400
    @app.errorhandler(401)
    def unauthorized(err):
        return jsonify(error=str(err)), 401
    @app.errorhandler(404)
    def resource_not_found(err):
        return jsonify(error=str(err)), 404
    @app.errorhandler(409)
    def conflict(err):
        return jsonify(error=str(err)), 409
app.register_blueprint(keynotes, url_prefix="/keynotes")
app.register_blueprint(meetings, url_prefix="/meetings")
app.register_blueprint(members, url_prefix="/members")
app.register_blueprint(permissions, url_prefix="/permissions")
app.register_blueprint(projects, url_prefix="/projects")
app.register_blueprint(roles, url_prefix="/roles")
app.register_blueprint(speakers, url_prefix="/speakers")
return app | ca38c3bb82e19db20aff7a26724f00e848d54b5b | 3,633,306 |
from docutils.parsers.rst import directives
def oneorzero(argument):
"""Conversion function for the various options that let you choose between 1 and 0."""
return directives.choice(argument, ('0', '1')) | 4de687a99c56c5a7e2074d0ae58312690f115472 | 3,633,307 |
def encode(s, c):
"""
    s is the secret code
c is the clear text
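    Example (uppercase letters only; spaces in the clear text are preserved):
    >>> encode('KEY', 'HELLO')
    'RIJVS'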
"""
secret_code_list = list(s)
clear_text_list = list(c)
encoded_text_list= []
count = 0
for letter in clear_text_list:
if letter == ' ':
encoded_text_list.append(' ')
continue
encoded_letter = (ord(secret_code_list[count]) - ord('A') +
ord(letter) - ord('A')) % 26 + ord('A')
encoded_letter = chr(encoded_letter)
count = (count + 1 ) % (len(secret_code_list))
encoded_text_list.append(encoded_letter)
encoded = ''.join(encoded_text_list)
return encoded | 3af6297fb79b77c542b19789ccf9bc0668f6afd5 | 3,633,308 |
import os
import sqlite3
import hostapd
def test_ap_wpa2_eap_sql(dev, apdev, params):
"""WPA2-Enterprise connection using SQLite for user DB"""
    try:
        import sqlite3
    except ImportError:
        return "skip"
dbfile = os.path.join(params['logdir'], "eap-user.db")
try:
os.remove(dbfile)
    except OSError:
pass
con = sqlite3.connect(dbfile)
with con:
cur = con.cursor()
cur.execute("CREATE TABLE users(identity TEXT PRIMARY KEY, methods TEXT, password TEXT, remediation TEXT, phase2 INTEGER)")
cur.execute("CREATE TABLE wildcards(identity TEXT PRIMARY KEY, methods TEXT)")
cur.execute("INSERT INTO users(identity,methods,password,phase2) VALUES ('user-pap','TTLS-PAP','password',1)")
cur.execute("INSERT INTO users(identity,methods,password,phase2) VALUES ('user-chap','TTLS-CHAP','password',1)")
cur.execute("INSERT INTO users(identity,methods,password,phase2) VALUES ('user-mschap','TTLS-MSCHAP','password',1)")
cur.execute("INSERT INTO users(identity,methods,password,phase2) VALUES ('user-mschapv2','TTLS-MSCHAPV2','password',1)")
cur.execute("INSERT INTO wildcards(identity,methods) VALUES ('','TTLS,TLS')")
cur.execute("CREATE TABLE authlog(timestamp TEXT, session TEXT, nas_ip TEXT, username TEXT, note TEXT)")
try:
params = int_eap_server_params()
params["eap_user_file"] = "sqlite:" + dbfile
hostapd.add_ap(apdev[0]['ifname'], params)
eap_connect(dev[0], apdev[0], "TTLS", "user-mschapv2",
anonymous_identity="ttls", password="password",
ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2")
dev[0].request("REMOVE_NETWORK all")
eap_connect(dev[1], apdev[0], "TTLS", "user-mschap",
anonymous_identity="ttls", password="password",
ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAP")
dev[1].request("REMOVE_NETWORK all")
eap_connect(dev[0], apdev[0], "TTLS", "user-chap",
anonymous_identity="ttls", password="password",
ca_cert="auth_serv/ca.pem", phase2="auth=CHAP")
eap_connect(dev[1], apdev[0], "TTLS", "user-pap",
anonymous_identity="ttls", password="password",
ca_cert="auth_serv/ca.pem", phase2="auth=PAP")
finally:
os.remove(dbfile) | cf5139cfc264e18fdb8e60eb4bce75797506db48 | 3,633,309 |
import numpy
def polyfit(data, time_axis, masked_array, outlier_threshold):
"""Fit polynomial to data."""
if not masked_array:
if outlier_threshold:
data, outlier_idx = timeseries.outlier_removal(data, outlier_threshold)
coeffs = numpy.ma.polyfit(time_axis, data, 3)[::-1]
elif data.mask.sum() > 0:
coeffs = numpy.array([data.fill_value]*4)
else:
if outlier_threshold:
data, outlier_idx = timeseries.outlier_removal(data, outlier_threshold)
coeffs = numpy.ma.polyfit(time_axis, data, 3)[::-1]
return coeffs | c58b04e31bea8a028ee464d4b70515c4e5b1e7e2 | 3,633,310 |
def update_board(position, board, player):
"""
Update the board with the user input position if position not taken
    Returns (board, taken): taken is True if the position was already taken (board unchanged),
    or False if the position was free and the board was updated.
args: position (int 1-9, user input)
board (np.array 2d)
player ("X" or "O")
"""
#make position 1-9 compatible with an 3x3 2d array indexed 0-8
position = position - 1
    #find position in array, obtain row/col index
    row = position // 3
    col = position % 3
#If position not taken, update board
if board[row][col] == '-':
board[row][col] = player
return board, False
#else position is taken, do not update board
else:
return board, True | eb53d24c4976499e6611c97757d0c33b4cb3254f | 3,633,311 |
def find_deployed_version(package_name, environment, version=None,
revision=None, apptypes=None, apptier=False):
"""Find a given deployed version for a given package in a given
environment for all related app types; search for full tier
or host only deployment specifically
"""
if apptier:
subq = (
Session.query(
Package.pkg_name,
Package.version,
Package.revision,
AppDefinition.app_type,
AppDeployment.environment
).join(AppDeployment)
.join(AppDefinition)
.filter(Package.pkg_name == package_name)
.filter(AppDeployment.environment == environment)
.filter(AppDeployment.status != 'invalidated'))
if apptypes is not None:
subq = subq.filter(AppDefinition.app_type.in_(apptypes))
if version is not None:
subq = subq.filter(Package.version == version)
if revision is not None:
subq = subq.filter(Package.revision == revision)
subq = (subq.order_by(AppDeployment.realized.desc(), AppDeployment.id.desc())
.subquery(name='t_ordered'))
# The actual column name must be used in the subquery
# usage below; DB itself should be corrected
versions = (Session.query(subq.c.appType,
subq.c.version,
subq.c.revision)
.group_by(subq.c.appType, subq.c.environment)
.all())
else:
hostsq = (Session.query(Host.hostname, Host.app_id,
Package.version, Package.revision)
.join(AppDefinition)
.join(HostDeployment)
.join(Package)
.filter(Package.pkg_name == package_name)
.filter(Host.environment == environment))
if apptypes is not None:
hostsq = hostsq.filter(AppDefinition.app_type.in_(apptypes))
versions = (hostsq.all())
return versions | 57bcd217e0a63a610f78a5ce49365578d7540d3e | 3,633,312 |
import itertools
import math
import networkx as nx
from networkx.utils import py_random_state
@py_random_state(3)
def stochastic_block_model(sizes, p, nodelist=None, seed=None,
                           directed=False, selfloops=False, sparse=True):
"""Returns a stochastic block model graph.
This model partitions the nodes in blocks of arbitrary sizes, and places
edges between pairs of nodes independently, with a probability that depends
on the blocks.
Parameters
----------
sizes : list of ints
Sizes of blocks
p : list of list of floats
Element (r,s) gives the density of edges going from the nodes
of group r to nodes of group s.
p must match the number of groups (len(sizes) == len(p)),
and it must be symmetric if the graph is undirected.
nodelist : list, optional
The block tags are assigned according to the node identifiers
in nodelist. If nodelist is None, then the ordering is the
range [0,sum(sizes)-1].
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
directed : boolean optional, default=False
Whether to create a directed graph or not.
selfloops : boolean optional, default=False
Whether to include self-loops or not.
sparse: boolean optional, default=True
Use the sparse heuristic to speed up the generator.
Returns
-------
g : NetworkX Graph or DiGraph
Stochastic block model graph of size sum(sizes)
Raises
------
NetworkXError
If probabilities are not in [0,1].
If the probability matrix is not square (directed case).
If the probability matrix is not symmetric (undirected case).
If the sizes list does not match nodelist or the probability matrix.
        If nodelist contains duplicates.
Examples
--------
>>> sizes = [75, 75, 300]
>>> probs = [[0.25, 0.05, 0.02],
... [0.05, 0.35, 0.07],
... [0.02, 0.07, 0.40]]
>>> g = nx.stochastic_block_model(sizes, probs, seed=0)
>>> len(g)
450
>>> H = nx.quotient_graph(g, g.graph['partition'], relabel=True)
>>> for v in H.nodes(data=True):
... print(round(v[1]['density'], 3))
...
0.245
0.348
0.405
>>> for v in H.edges(data=True):
... print(round(1.0 * v[2]['weight'] / (sizes[v[0]] * sizes[v[1]]), 3))
...
0.051
0.022
0.07
See Also
--------
random_partition_graph
planted_partition_graph
gaussian_random_partition_graph
gnp_random_graph
References
----------
.. [1] Holland, P. W., Laskey, K. B., & Leinhardt, S.,
"Stochastic blockmodels: First steps",
Social networks, 5(2), 109-137, 1983.
"""
# Check if dimensions match
if len(sizes) != len(p):
raise nx.NetworkXException("'sizes' and 'p' do not match.")
# Check for probability symmetry (undirected) and shape (directed)
for row in p:
if len(p) != len(row):
raise nx.NetworkXException("'p' must be a square matrix.")
if not directed:
p_transpose = [list(i) for i in zip(*p)]
for i in zip(p, p_transpose):
for j in zip(i[0], i[1]):
if abs(j[0] - j[1]) > 1e-08:
raise nx.NetworkXException("'p' must be symmetric.")
# Check for probability range
for row in p:
for prob in row:
if prob < 0 or prob > 1:
raise nx.NetworkXException("Entries of 'p' not in [0,1].")
# Check for nodelist consistency
if nodelist is not None:
if len(nodelist) != sum(sizes):
raise nx.NetworkXException("'nodelist' and 'sizes' do not match.")
if len(nodelist) != len(set(nodelist)):
raise nx.NetworkXException("nodelist contains duplicate.")
else:
nodelist = range(0, sum(sizes))
# Setup the graph conditionally to the directed switch.
block_range = range(len(sizes))
if directed:
g = nx.DiGraph()
block_iter = itertools.product(block_range, block_range)
else:
g = nx.Graph()
block_iter = itertools.combinations_with_replacement(block_range, 2)
# Split nodelist in a partition (list of sets).
size_cumsum = [sum(sizes[0:x]) for x in range(0, len(sizes) + 1)]
g.graph['partition'] = [set(nodelist[size_cumsum[x]:size_cumsum[x + 1]])
for x in range(0, len(size_cumsum) - 1)]
# Setup nodes and graph name
for block_id, nodes in enumerate(g.graph['partition']):
for node in nodes:
g.add_node(node, block=block_id)
g.name = "stochastic_block_model"
# Test for edge existence
parts = g.graph['partition']
for i, j in block_iter:
if i == j:
if directed:
if selfloops:
edges = itertools.product(parts[i], parts[i])
else:
edges = itertools.permutations(parts[i], 2)
else:
edges = itertools.combinations(parts[i], 2)
if selfloops:
edges = itertools.chain(edges, zip(parts[i], parts[i]))
for e in edges:
if seed.random() < p[i][j]:
g.add_edge(*e)
else:
edges = itertools.product(parts[i], parts[j])
if sparse:
                if p[i][j] == 1:  # Test edge cases p_ij = 0 or 1
for e in edges:
g.add_edge(*e)
elif p[i][j] > 0:
while True:
try:
logrand = math.log(seed.random())
skip = math.floor(logrand / math.log(1 - p[i][j]))
# consume "skip" edges
next(itertools.islice(edges, skip, skip), None)
e = next(edges)
g.add_edge(*e) # __safe
except StopIteration:
break
else:
for e in edges:
if seed.random() < p[i][j]:
g.add_edge(*e) # __safe
return g | 4beb0e20381aa65927a8b60278f8de6cb06f64ca | 3,633,313 |
import wikitextparser as wtp
def get_text_and_links(wikitext):
"""
Obtain text and links from a wikipedia text.
"""
parsed = wtp.parse(wikitext)
basic_info = parsed.sections[0]
saved_links = {}
num_links = len(basic_info.wikilinks)
for i in range(num_links):
index = num_links - i - 1
link = basic_info.wikilinks[index]
original_span = link.span
start = original_span[0]
end = original_span[1]
target = link.target
text = link.text
if not target.startswith('w:'):
basic_info[start:end] = ""
move_to_left = end - start
else:
basic_info[original_span[0]:original_span[1]] = text
move_to_left = end - start - len(text)
saved_links = shift_all(saved_links, move_to_left)
if target.startswith('w:'):
new_end = end - move_to_left
saved_links[tuple([start, new_end])] = target
return basic_info, saved_links | a0b05f72c12529b655dda216e32e129b5fcaad8f | 3,633,314 |
from scipy import signal
def SobelOperator(image, n):
    """Convolve the image with the Sobel kernels built from the Sobel smoothing
    and difference operators, using 'same' convolutions:
    the image matrix is first convolved with the smoothing kernel in the vertical
    direction, and that result is then convolved with the difference kernel in the
    horizontal direction, yielding the convolution of the image with the sobel_x kernel.
    Similarly, the image matrix is first convolved with the smoothing kernel in the
    horizontal direction, and that result is then convolved with the difference kernel
    in the vertical direction, yielding the convolution of the image with the
    sobel_y kernel.
    Args:
        image ([ndarray]): original input image for the Sobel operator
        n ([int]): order of the Sobel operator
    Returns:
        [ndarray]: horizontal Sobel convolution result; vertical Sobel convolution result
    """
    pascalSmoothKernel = PascalSmooth(n)
    pascalDiffKernel = PascalDiff(n)
    # -------- convolve with the horizontal Sobel kernel (sobel_x) --------
    # separable kernel: 1. smooth in the vertical direction first
    img_sobel_x = signal.convolve2d(image, pascalSmoothKernel.transpose(), mode="same")
    # separable kernel: 2. then difference in the horizontal direction
    img_sobel_x = signal.convolve2d(img_sobel_x, pascalDiffKernel, mode="same")
    # -------- convolve with the vertical Sobel kernel (sobel_y) --------
    # separable kernel: 1. smooth in the horizontal direction first
    img_sobel_y = signal.convolve2d(image, pascalSmoothKernel, mode="same")
    # separable kernel: 2. then difference in the vertical direction
    img_sobel_y = signal.convolve2d(img_sobel_y, pascalDiffKernel.transpose(), mode="same")
return img_sobel_x, img_sobel_y | e39b807ecff2e78f289a918e6f85ef7fced84427 | 3,633,315 |
import typing
import pickle
from sklearn.svm import SVR
def read_model(model_file: typing.IO) -> SVR:
"""Read the model from the given file."""
return pickle.loads(model_file.read()) | 32a49bd37da2b6fb33a64d0ff334b8e323030169 | 3,633,316 |
import numpy as np
def create_nonfixations(stimuli, fixations, index, adjust_n=True, adjust_history=True):
"""Create nonfixations from fixations for given index
stimuli of different sizes will be rescaled to match the
target stimulus
"""
x_factors, y_factors = calculate_nonfixation_factors(stimuli, index)
non_fixations = fixations[fixations.n != index]
other_ns = non_fixations.n
non_fixations.x = non_fixations.x * x_factors[other_ns]
non_fixations.y = non_fixations.y * y_factors[other_ns]
if adjust_history:
non_fixations.x_hist = non_fixations.x_hist * x_factors[other_ns][:, np.newaxis]
non_fixations.y_hist = non_fixations.y_hist * y_factors[other_ns][:, np.newaxis]
if adjust_n:
non_fixations.n = np.ones(len(non_fixations.n), dtype=int)*index
return non_fixations | 5c4462dd4bb5a3565158a9b2b085b88651d8b5a5 | 3,633,317 |
def get_commit_log(url, revnum):
"""Return the log message for a specific integer revision
number."""
out = launchsvn("log --incremental -r%d %s" % (revnum, url))
return recode_stdout_to_file("".join(out[1:])) | fb079051926292fdabf69da99008c6dd5dab47c9 | 3,633,318 |
import torch
def cov(x, rowvar=False, bias=False, ddof=None, aweights=None):
"""
Estimates covariance matrix like numpy.cov
https://github.com/pytorch/pytorch/issues/19037
"""
# ensure at least 2D
if x.dim() == 1:
x = x.view(-1, 1)
    # with rowvar=True each row is a variable, so transpose to put variables in columns
    if rowvar and x.shape[0] != 1:
x = x.t()
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
w = aweights
if w is not None:
if not torch.is_tensor(w):
w = torch.tensor(w, dtype=torch.float)
w_sum = torch.sum(w)
avg = torch.sum(x * (w / w_sum)[:, None], 0)
else:
avg = torch.mean(x, 0)
# Determine the normalization
if w is None:
fact = x.shape[0] - ddof
elif ddof == 0:
fact = w_sum
elif aweights is None:
fact = w_sum - ddof
else:
fact = w_sum - ddof * torch.sum(w * w) / w_sum
xm = x.sub(avg.expand_as(x))
if w is None:
X_T = xm.t()
else:
X_T = torch.mm(torch.diag(w), xm).t()
c = torch.mm(X_T, xm)
c = c / fact
return c.squeeze() | 376e89804374979fc21b1412d9db5ed588555d40 | 3,633,319 |
def tle_fmt_int(num, digits=5):
""" Return an integer right-aligned string with DIGITS of precision, all blank if num=0
Ignores sign.
"""
if num:
num = abs(num)
else:
return " "*digits
string_int = "{:>{DIGITS}d}".format(num,DIGITS=digits)
return string_int | 8db7938e7a88e68c4a22013b10debbc4f5a9ca72 | 3,633,320 |
import six
import numpy as np
def SignalCollection(metasignals): # noqa: C901
"""Class factory for ``SignalCollection`` objects."""
@six.add_metaclass(MetaCollection)
class SignalCollection(object):
_metasignals = metasignals
def __init__(self, psr):
self.psrname = psr.name
# instantiate all the signals with a pulsar
self._signals = [metasignal(psr) for metasignal in self._metasignals]
self._residuals = psr.residuals
self._set_cache_parameters()
def __add__(self, other):
return PTA([self, other])
# TODO: this could be implemented more cleanly
def _set_cache_parameters(self):
""" Sets the cache for various signal types."""
self.white_params = []
self.basis_params = []
self.delay_params = []
for signal in self._signals:
if signal.signal_type == "white noise":
self.white_params.extend(signal.ndiag_params)
elif signal.signal_type in ["basis", "common basis"]:
# to support GP coefficients, and yet do the right thing
# for common GPs, which do not have coefficients yet
self.delay_params.extend(getattr(signal, "delay_params", []))
self.basis_params.extend(signal.basis_params)
elif signal.signal_type in ["deterministic"]:
self.delay_params.extend(signal.delay_params)
else:
msg = "{} signal type not recognized! Caching ".format(signal.signal_type)
msg += "may not work correctly for this signal."
logger.error(msg)
# def cache_clear(self):
# for instance in [self] + self.signals:
# kill = [attr for attr in instance.__dict__ if attr.startswith("_cache")]
#
# for attr in kill:
# del instance.__dict__[attr]
# a candidate for memoization
@property
def params(self):
return sorted({param for signal in self._signals for param in signal.params}, key=lambda par: par.name)
@property
def param_names(self):
ret = []
for p in self.params:
if p.size:
for ii in range(0, p.size):
ret.append(p.name + "_{}".format(ii))
else:
ret.append(p.name)
return ret
@property
def signals(self):
return self._signals
def set_default_params(self, params):
for signal in self._signals:
signal.set_default_params(params)
def _combine_basis_columns(self, signals):
"""Given a set of Signal objects, each of which may return an
Fmat (through get_basis()), return a dict (indexed by signal)
of integer arrays that map individual Fmat columns to the
combined Fmat.
Note: The Fmat returned here is simply meant to initialize the
matrix to save computations when calling `get_basis` later.
"""
idx, hashlist, cc, nrow = {}, [], 0, None
for signal in signals:
Fmat = signal.get_basis()
if Fmat is not None:
nrow = Fmat.shape[0]
if not signal.basis_params:
idx[signal] = []
for i, column in enumerate(Fmat.T):
colhash = hash(column.tobytes())
if signal.basis_combine and colhash in hashlist:
# if we're combining the basis for this signal
# and we have seen this column already, make a note
# of where it was
j = hashlist.index(colhash)
idx[signal].append(j)
else:
# if we're not combining or we haven't seen it already
# save the hash and make a note it's new
hashlist.append(colhash)
idx[signal].append(cc)
cc += 1
elif signal.basis_params:
nf = Fmat.shape[1]
idx[signal] = list(range(cc, cc + nf))
cc += nf
if not idx:
return {}, None
else:
ncol = len(np.unique(sum(idx.values(), [])))
return ({key: np.array(idx[key]) for key in idx.keys()}, np.zeros((nrow, ncol)))
# goofy way to cache _idx
def __getattr__(self, par):
if par in ("_idx", "_Fmat"):
self._idx, self._Fmat = self._combine_basis_columns(self._signals)
return getattr(self, par)
else:
raise AttributeError("{} object has no attribute {}".format(self.__class__, par))
@cache_call("white_params")
def get_ndiag(self, params):
ndiags = [signal.get_ndiag(params) for signal in self._signals]
return sum(ndiag for ndiag in ndiags if ndiag is not None)
@cache_call("delay_params")
def get_delay(self, params):
delays = [signal.get_delay(params) for signal in self._signals]
return sum(delay for delay in delays if delay is not None)
@cache_call("delay_params")
def get_detres(self, params):
return self._residuals - self.get_delay(params)
# since this function has side-effects, it can only be cached
# with limit=1, so it will run again if called with params different
# than the last time
@cache_call("basis_params", limit=1)
def get_basis(self, params={}):
for signal in self._signals:
if signal in self._idx:
self._Fmat[:, self._idx[signal]] = signal.get_basis(params)
return self._Fmat
def get_phiinv(self, params):
return self.get_phi(params).inv()
# returns a KernelMatrix object
def get_phi(self, params):
if self._Fmat is None:
return None
phi = KernelMatrix(self._Fmat.shape[1])
for signal in self._signals:
if signal in self._idx:
phi = phi.add(signal.get_phi(params), self._idx[signal])
return phi
@cache_call(["basis_params", "white_params", "delay_params"])
def get_TNr(self, params):
T = self.get_basis(params)
if T is None:
return None
Nvec = self.get_ndiag(params)
res = self.get_detres(params)
return Nvec.solve(res, left_array=T)
@cache_call(["basis_params", "white_params"])
def get_TNT(self, params):
T = self.get_basis(params)
if T is None:
return None
Nvec = self.get_ndiag(params)
return Nvec.solve(T, left_array=T)
@cache_call(["white_params", "delay_params"])
def get_rNr_logdet(self, params):
Nvec = self.get_ndiag(params)
res = self.get_detres(params)
return Nvec.solve(res, left_array=res, logdet=True)
# TO DO: cache how?
def get_logsignalprior(self, params):
return sum(signal.get_logsignalprior(params) for signal in self._signals)
return SignalCollection | 627959eb695979f1b8922a5da60ae4a88144ddb0 | 3,633,321 |
def get_status():
"""Return classifier status."""
return 'ok' | 84aedac3659ac2321867b02d3f6e7acb523923a3 | 3,633,322 |
def get_config_info() -> dict:
"""Gets the config from core sqlfluff and sqlfluff plugins and merges them."""
plugin_manager = get_plugin_manager()
configs_info = plugin_manager.hook.get_configs_info()
return {
k: v for config_info_dict in configs_info for k, v in config_info_dict.items()
} | c51f5853a54080189c37b1dea49da126d402854d | 3,633,323 |
def landing_page(request, page):
"""Return resource landing page context."""
edit_resource = check_resource_mode(request)
return get_page_context(page, request.user, resource_edit=edit_resource, request=request) | 1a78491694a004ed784c50e8205c7341cbe58452 | 3,633,324 |
import torch
from torch import Tensor
def _smooth_l1_loss(pred: Tensor, target: Tensor, beta: float = 1.) -> Tensor:
    """Smooth L1 loss (equivalent to F.smooth_l1_loss()).
    :param pred: shape(N, In)
    :param target: shape(N, In)
    :param beta: threshold at which the loss transitions from L2 to L1
    :return: scalar loss tensor"""
diff = torch.abs(target - pred)
return torch.mean(torch.where(diff < beta, 0.5 * diff ** 2 / beta, diff - 0.5 * beta)) | 33cbfbf66360f9dd9d473b82c0f2103af19df676 | 3,633,325 |
import numpy as np
import scipy.optimize
def solve_assignment(weights, exclude_zero=False):
"""Finds matching that maximizes sum of edge weights.
Args:
weights: 2D array of edge weights.
exclude_zero: Exclude pairs with zero weight from result.
Returns:
Integer array of pairs with shape [num_matches, 2].
"""
  # maximize=True so the matching maximizes total weight, as documented above
  rs, cs = scipy.optimize.linear_sum_assignment(weights, maximize=True)
# Ensure that shape is correct if empty.
if not rs.size:
return np.empty([0, 2], dtype=int)
pairs = np.stack([rs, cs], axis=-1)
if exclude_zero:
is_nonzero = ~(weights[pairs[:, 0], pairs[:, 1]] == 0)
pairs = pairs[is_nonzero]
return pairs | d73d2c8b5dc7d5c5cbd08f38a4cc21d0b1ebf62a | 3,633,326 |
import os
import logging
def _initXDG():
"""Initialize config path per XDG basedir-spec and resolve the final location of state file storage.
Returns:
str: file path to state file as per XDG spec and current env.
"""
# per the XDG basedir-spec we adhere to $XDG_CONFIG_HOME if it's set, otherwise assume $HOME/.config
xdg_config_home = ''
if 'XDG_CONFIG_HOME' in os.environ:
xdg_config_home = os.environ['XDG_CONFIG_HOME']
        logging.debug('XDG basedir overridden: {0}'.format(xdg_config_home))
else:
xdg_config_home = "%s/.config" % os.path.expanduser('~')
# XDG base-dir: "If, when attempting to write a file, the destination directory is non-existant an attempt should be made to create it with permission 0700. If the destination directory exists already the permissions should not be changed."
if not os.path.isdir(xdg_config_home):
logging.debug('XDG basedir does not exist, creating: {0}'.format(xdg_config_home))
os.mkdir(xdg_config_home, 0o0700)
# finally create our own config dir
config_dir = "%s/%s" % (xdg_config_home, 'python-cozify')
if not os.path.isdir(config_dir):
logging.debug('XDG local dir does not exist, creating: {0}'.format(config_dir))
os.mkdir(config_dir, 0o0700)
state_file = "%s/python-cozify.cfg" % config_dir
logging.debug('state_file determined to be: {0}'.format(state_file))
return state_file | edb3a0355b16a339dcb3112f4fdd99492eb48457 | 3,633,327 |
import os
def get_stdout(jobname, jobid):
"""Get stdout for job <jobid>
Returns:
200 OK: file (on success)
404 Not Found: Job not found (on NotFoundWarning)
404 Not Found: Result not found (on NotFoundWarning)
500 Internal Server Error (on error)
"""
user = set_user()
try:
# Get job properties from DB
# job = Job(jobname, jobid, user, get_results=True)
logname = 'stdout'
logroot = '{}/{}'.format(JOBDATA_PATH, jobid)
if not os.path.isfile(os.path.join(logroot, logname + '.log')):
raise storage.NotFoundWarning('Log "{}" NOT FOUND for job "{}"'.format(logname, jobid))
# Return file
return static_file(logname + '.log', root=logroot, mimetype='text')
except JobAccessDenied as e:
abort_403(str(e))
except storage.NotFoundWarning as e:
abort_404(str(e))
except:
abort_500_except() | 11c859cc2100ae08b7f2d1724de657c9ded54f1b | 3,633,328 |
import numpy as np
from numpy import exp
from scipy.spatial.distance import pdist, squareform
from scipy.linalg import eigh
def stepwise_kpca(X, gamma, n_components):
"""
Implementation of a RBF kernel PCA.
Arguments:
X: A MxN dataset as NumPy array where the samples are stored as rows (M),
and the attributes defined as columns (N).
gamma: A free parameter (coefficient) for the RBF kernel.
n_components: The number of components to be returned.
"""
# Calculating the squared Euclidean distances for every pair of points
# in the MxN dimensional dataset.
sq_dists = pdist(X, 'sqeuclidean')
# Converting the pairwise distances into a symmetric MxM matrix.
mat_sq_dists = squareform(sq_dists)
# Computing the MxM kernel matrix.
K = exp(-gamma * mat_sq_dists)
# Centering the symmetric NxN kernel matrix.
N = K.shape[0]
one_n = np.ones((N,N)) / N
K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
# Obtaining eigenvalues in descending order with corresponding
# eigenvectors from the symmetric matrix.
eigvals, eigvecs = eigh(K)
# Obtaining the i eigenvectors that corresponds to the i highest eigenvalues.
    X_pc = np.column_stack([eigvecs[:, -i] for i in range(1, n_components + 1)])
return X_pc | 3e0abd47e5527191e681f68ccb9ed71587a5adb2 | 3,633,329 |
import functools
def make_val_and_grad_fn(value_fn):
"""Function decorator to compute both function value and gradient.
For example:
```
@tff.math.make_val_and_grad_fn
def quadratic(x):
return tf.reduce_sum(scales * (x - minimum) ** 2, axis=-1)
```
Turns `quadratic` into a function that accepts a point as a `Tensor` as input
and returns a tuple of two `Tensor`s with the value and the gradient of the
defined quadratic function evaluated at the input point.
    This is useful for constructing functions to optimize with tff.math.optimizer
methods.
Args:
value_fn: A python function to decorate.
Returns:
The decorated function.
"""
@functools.wraps(value_fn)
def val_and_grad(x):
return value_and_gradient(value_fn, x)
return val_and_grad | f08e889d62ce5d7e94e70bf96ae0ec8bca31f931 | 3,633,330 |
def showCatalog():
"""This showCatalog handler will diplay the home page for both a user not
logged in and one for a logged in user.
"""
categories = session.query(Category).order_by(asc(Category.name))
# Limit the query to a maximum of 10 and order by the most recent
# items added, which is identified by the highest id numbers.
#
items = session.query(Item).order_by(Item.id.desc()).limit(10)
if 'username' not in login_session:
picture = False
user_name = False
return render_template('publiccatalog.html',
categories=categories,
items=items,
picture=picture,
user_name=user_name,
)
else:
picture = login_session['picture']
return render_template('catalog.html',
categories=categories,
items=items,
picture=picture,
user_name=login_session['username'],
) | e2a50c7478536fe573b6f7860e0976de3a832692 | 3,633,331 |
def is_fill_compute_seq(seq):
"""Test whether *seq* can be converted to a FillComputeSeq.
True only if it is a FillCompute element
or contains at least one such,
and it is not a Source sequence.
"""
if is_source(seq):
return False
is_fcseq = False
try:
is_fcseq = any(map(is_fill_compute_el, seq))
except TypeError:
# seq is non-iterable
pass
if is_fill_compute_el(seq):
is_fcseq = True
return is_fcseq | f6cca52b2ed7065ae950eca48071120c83a735bc | 3,633,332 |
def upload_model(uploadfile, name=None): # noqa: E501
"""upload_model
# noqa: E501
:param uploadfile: The model YAML file to upload. Can be a GZip-compressed TAR file (.tgz, .tar.gz) or a YAML file (.yaml, .yml). Maximum size is 32MB.
:type uploadfile: werkzeug.datastructures.FileStorage
:param name:
:type name: str
:rtype: ApiModel
"""
return util.invoke_controller_impl() | 95a759bc6d5cc806a8a8df1945816275784ab132 | 3,633,333 |
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
def visual_callback_3d(fig=None, plot_each=1):
"""
Returns a callback than can be passed as the argument `iter_callback`
of `morphological_geodesic_active_contour` and
`morphological_chan_vese` for visualizing the evolution
of the levelsets. Only works for 3D images.
Parameters
----------
fig : matplotlib.figure.Figure
Figure where results will be drawn. If not given, a new figure
will be created.
plot_each : positive integer
The plot will be updated once every `plot_each` calls to the callback
function.
Returns
-------
callback : Python function
A function that receives a levelset and updates the current plot
accordingly. This can be passed as the `iter_callback` argument of
`morphological_geodesic_active_contour` and
`morphological_chan_vese`.
"""
# PyMCubes package is required for `visual_callback_3d`
    try:
        import mcubes
    except ImportError:
        raise ImportError("PyMCubes is required for 3D `visual_callback_3d`")
# Prepare the visual environment.
if fig is None:
fig = plt.figure()
fig.clf()
ax = fig.add_subplot(111, projection='3d')
plt.pause(0.001)
counter = [-1]
def callback(levelset):
counter[0] += 1
if (counter[0] % plot_each) != 0:
return
if ax.collections:
del ax.collections[0]
coords, triangles = mcubes.marching_cubes(levelset, 0.5)
ax.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2],
triangles=triangles)
plt.pause(0.1)
return callback | 1f69691a983f9afabbe53e761e95138a1aa64e40 | 3,633,334 |
import numpy as np
from scipy.special import erf
def _erfint(x):
"""
Integral of the error function.
Parameters
----------
x : float or array
Argument.
Returns
-------
float or array
Integral of the error function.
"""
return x * erf(x) - 1.0/np.sqrt(np.pi) * (1.0-np.exp(-x**2)) | e397f62fb35caf5d71eaebd3eb9dc8ce0ba5c9d5 | 3,633,335 |
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import Conv2d, MaxPool2d, InputLayer, ConcatLayer
def model(x, n_pos, mask_miss1, mask_miss2, is_train=False, reuse=None, data_format='channels_last'):
"""Defines the entire pose estimation model."""
def _conv2d(x, c, filter_size, strides, act, padding, name):
return Conv2d(
x, c, filter_size, strides, act, padding, W_init=W_init, b_init=b_init, name=name, data_format=data_format)
def _maxpool2d(x, name):
return MaxPool2d(x, (2, 2), (2, 2), padding='SAME', name=name, data_format=data_format)
def concat(inputs, name):
if data_format == 'channels_last':
concat_dim = -1
elif data_format == 'channels_first':
concat_dim = 1
else:
raise ValueError('invalid data_format: %s' % data_format)
return ConcatLayer(inputs, concat_dim, name=name)
def state1(cnn, n_pos, mask_miss1, mask_miss2, is_train):
"""Define the first stage of openpose."""
with tf.variable_scope("stage1/branch1"):
b1 = _conv2d(cnn, 128, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'c1')
b1 = _conv2d(b1, 128, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'c2')
b1 = _conv2d(b1, 128, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'c3')
b1 = _conv2d(b1, 128, (1, 1), (1, 1), tf.nn.relu, 'VALID', 'c4')
b1 = _conv2d(b1, n_pos, (1, 1), (1, 1), None, 'VALID', 'confs')
if is_train:
b1.outputs = b1.outputs * mask_miss1
with tf.variable_scope("stage1/branch2"):
b2 = _conv2d(cnn, 128, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'c1')
b2 = _conv2d(b2, 128, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'c2')
b2 = _conv2d(b2, 128, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'c3')
b2 = _conv2d(b2, 128, (1, 1), (1, 1), tf.nn.relu, 'VALID', 'c4')
b2 = _conv2d(b2, 38, (1, 1), (1, 1), None, 'VALID', 'pafs')
if is_train:
b2.outputs = b2.outputs * mask_miss2
return b1, b2
def stage2(cnn, b1, b2, n_pos, maskInput1, maskInput2, is_train, scope_name):
"""Define the archuecture of stage 2 and so on."""
with tf.variable_scope(scope_name):
net = concat([cnn, b1, b2], 'concat')
with tf.variable_scope("branch1"):
b1 = _conv2d(net, 128, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'c1')
b1 = _conv2d(b1, 128, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'c2')
b1 = _conv2d(b1, 128, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'c3')
b1 = _conv2d(b1, 128, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'c4')
b1 = _conv2d(b1, 128, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'c5')
b1 = _conv2d(b1, 128, (1, 1), (1, 1), tf.nn.relu, 'VALID', 'c6')
b1 = _conv2d(b1, n_pos, (1, 1), (1, 1), None, 'VALID', 'conf')
if is_train:
b1.outputs = b1.outputs * maskInput1
with tf.variable_scope("branch2"):
b2 = _conv2d(net, 128, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'c1')
b2 = _conv2d(b2, 128, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'c2')
b2 = _conv2d(b2, 128, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'c3')
b2 = _conv2d(b2, 128, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'c4')
b2 = _conv2d(b2, 128, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'c5')
b2 = _conv2d(b2, 128, (1, 1), (1, 1), tf.nn.relu, 'VALID', 'c6')
b2 = _conv2d(b2, 38, (1, 1), (1, 1), None, 'VALID', 'pafs')
if is_train:
b2.outputs = b2.outputs * maskInput2
return b1, b2
def vgg_network(x):
x = x - 0.5
# input layer
net_in = InputLayer(x, name='input')
# conv1
net = _conv2d(net_in, 64, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'conv1_1')
net = _conv2d(net, 64, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'conv1_2')
net = _maxpool2d(net, 'pool1')
# conv2
net = _conv2d(net, 128, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'conv2_1')
net = _conv2d(net, 128, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'conv2_2')
net = _maxpool2d(net, 'pool2')
# conv3
net = _conv2d(net, 256, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'conv3_1')
net = _conv2d(net, 256, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'conv3_2')
net = _conv2d(net, 256, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'conv3_3')
net = _maxpool2d(net, 'pool3')
# conv4
net = _conv2d(net, 512, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'conv4_1')
net = _conv2d(net, 512, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'conv4_2')
net = _conv2d(net, 256, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'conv4_3')
net = _conv2d(net, 128, (3, 3), (1, 1), tf.nn.relu, 'SAME', 'conv4_4')
return net
    with tf.variable_scope('model', reuse=reuse):
## Feature extraction part
cnn = vgg_network(x)
b1_list = []
b2_list = []
## stage 1
b1, b2 = state1(cnn, n_pos, mask_miss1, mask_miss2, is_train)
b1_list.append(b1)
b2_list.append(b2)
## stage 2 ~ 6
# for i in range(2, 7):
with tf.variable_scope("stage1/branch2"): # TODO: fix indent here and the names in npz
for i in [5, 6]: # only 3 stage in total
b1, b2 = stage2(cnn, b1, b2, n_pos, mask_miss1, mask_miss2, is_train, scope_name='stage%d' % i)
b1_list.append(b1)
b2_list.append(b2)
net = tl.layers.merge_networks([b1, b2])
return cnn, b1_list, b2_list, net | 895f9e8695b0564eb8fa3e2d5af320916d443025 | 3,633,336 |
from typing import Optional
from typing import Union
def zeros(shape: Optional[Union[int, tuple, list]] = None) -> 'ArrayInterval':
"""
Instantiate an `ArrayInterval` filled with zeros.
Note: The difference from numpy is that the argument shape is optional.
When shape is None, some operations aren't supported because the
length is unknown.
e.g. array_interval[:] fails because the length is unknown, while
array_interval[:1000] works.
Args:
shape: `None`, `int` or `tuple`/`list` that contains one `int`.
Returns:
`ArrayInterval` filled with zeros
Examples:
>>> ai = zeros(10)
>>> ai
ArrayInterval("", shape=(10,))
>>> ai[2:3] = 1
>>> ai
ArrayInterval("2:3", shape=(10,))
>>> ai[:] # getitem converts the ArrayInterval to numpy
array([False, False, True, False, False, False, False, False, False,
False])
>>> ai = zeros()
>>> ai
ArrayInterval("", shape=None)
>>> ai[2:3] = 1
>>> ai
ArrayInterval("2:3", shape=None)
>>> ai[:]
Traceback (most recent call last):
...
RuntimeError: You tried to slice an ArrayInterval with unknown shape without a stop value.
This is not supported, either the shape has to be known
or you have to specify a stop value for the slice (i.e. array_interval[:stop])
You called the array interval with:
array_interval[slice(None, None, None)]
>>> ai[:10] # getitem converts the ArrayInterval to numpy
array([False, False, True, False, False, False, False, False, False,
False])
"""
ai = ArrayInterval.__new__(ArrayInterval)
if isinstance(shape, int):
shape = [shape]
if shape is not None:
assert len(shape) == 1, shape
shape = tuple(shape)
ai.shape = shape
return ai | 2d6348e327798b3019c6fd2d42827c64660cad02 | 3,633,337 |
import numpy as np
def anova_total_mean_square(Ns, means, sigmas):
"""
This function performs an average over multiple sets of observations, each with its own standard deviation.
For example: 5 simulations compute RMSDs+-Sigma of the same protein. What is the aggregate?
See: http://www.burtonsys.com/climate/composite_standard_deviations.html
Here, Ns = the number of observations in each copy, means +- sigma are the values of each.
"""
copies=len(Ns)
grand_total = np.sum( Ns )
    grand_mean = np.sum(np.multiply(Ns, means)) / grand_total
GSS = np.sum( [ Ns[i]*(means[i] - grand_mean)**2.0 for i in range(copies) ] )
ESS = np.sum( [ (Ns[i]-1)*sigmas[i]**2.0 for i in range(copies) ] )
return (GSS+ESS)/(grand_total-1) | b3cc0aa7fd66521f6692bd10c66b1e7b00e59566 | 3,633,338 |
def compute_lsb(n_bits, fsr_min, fsr_max, half_bit=None):
"""
Computes the least significant bit (LSB) magnitude in the stage MDAC and
sub-ADC to achieve a desired full scale range (FSR). The input FSR and
output FSR are assumed to be the same so only one value is returned.
:param n_bits: Number of bits of the stage
:type n_bits: :class:`str`, :class:`float`, :class:`int`
:param fsr_min: Minimum value of the full voltage scale
:type fsr_min: :class:`float`
    :param fsr_max: Maximum value of the full voltage scale
    :type fsr_max: :class:`float`
:param half_bit: If the number of bits is half-bit or not
:type half_bit: :class:`bool`
:returns: The voltage value of the LSB.
:rtype: :class:`float`
.. seealso :: :func:`parse_bits`, for n_bits and half_bit specification.
"""
n_bits, half_bit = parse_bits(n_bits, half_bit)
n_codes = compute_n_codes(n_bits, half_bit)
diff = fsr_max - fsr_min
if half_bit:
lsb = diff/(n_codes + 1)
else:
lsb = diff/n_codes
return lsb | 03520c71de04e654841c47c40a109431d315163d | 3,633,339 |
import numpy as np
def proximity_matrix(rf, X, normalize=True):
"""
    Calculate the random-forest proximity matrix: entry (i, j) counts how often
    samples i and j fall in the same leaf across the trees.
    :param rf: fitted random forest (must support .apply(X))
    :param X: samples, shape (n_samples, n_features)
    :param normalize: if True, divide counts by the number of trees
    :return: proximity matrix of shape (n_samples, n_samples)
"""
leaves = rf.apply(X)
n_trees = leaves.shape[1]
prox_mat = np.zeros((leaves.shape[0], leaves.shape[0]))
for i in range(n_trees):
a = leaves[:, i]
prox_mat += 1 * np.equal.outer(a, a)
if normalize:
prox_mat = prox_mat / n_trees
return prox_mat | 781c08f242e52afa7430761124a344456c0efec1 | 3,633,340 |
import boto3
def upload_to_s3_v2(local_path: str, bucket_name: str, object_name: str):
"""
    local_path: local file path
    bucket_name: name of the S3 bucket
    object_name: key path + file name = object name
"""
s3 = boto3.client("s3")
response = s3.upload_file(local_path, bucket_name, object_name)
return response | 844b1c07daeb44f49e1071516da0802d843d4790 | 3,633,341 |
def marks(category, mark=None, category_marks=None, public=False):
"""Assign marks to a test or suite of tests, grouped by a category."""
def decorator(test_item):
if mark is None and category_marks is None:
raise ValueError("One of mark or category_marks must be defined")
test_item.__marks_category__ = category
test_item.__marks_mark__ = mark
test_item.__marks_category_marks__ = category_marks
test_item.__marks_public__ = public
return test_item
return decorator | 2d47a8df4f610dbc081dd57fce169e2f89b88ca4 | 3,633,342 |
import secrets
def get_random_ua():
"""return a random user-agent string from file"""
    # may raise if the file does not exist or is not readable
    with open('headers.txt') as hbuffer:
        lines = hbuffer.readlines()
    return secrets.choice(lines).strip() | 0d6a924c07bbad2398966bed590bf3307f5c475d | 3,633,343 |
def is_nondecreasing(arr):
""" Returns true if the sequence is non-decreasing. """
return all([x <= y for x, y in zip(arr, arr[1:])]) | 593ac54669ef217e258380bf41ce067935ee53f0 | 3,633,344 |
def underscore_to_camelcase(value):
"""
Converts underscore notation (something_named_this) to camelcase notation (somethingNamedThis)
>>> underscore_to_camelcase('country_code')
'countryCode'
>>> underscore_to_camelcase('country')
'country'
>>> underscore_to_camelcase('price_GBP')
'priceGBP'
>>> underscore_to_camelcase('recommended_horizontal_resolution')
'recommendedHorizontalResolution'
>>> underscore_to_camelcase('postal_or_zip_code')
'postalOrZipCode'
>>> underscore_to_camelcase('test_ABC_test')
'testABCTest'
"""
words = value.split('_')
return '%s%s' % (words[0], ''.join(x if x.isupper() else x.capitalize() for x in words[1:])) | 94bb5c007d3b50112c62ca9b3e97c5bf4f155fff | 3,633,345 |
def findCenter(S):
"""Find the approximate center atom of a structure.
The center of the structure is the atom closest to (0.5, 0.5, 0.5)
Returns the index of the atom.
"""
best = -1
bestd = len(S)
    center = [0.5, 0.5, 0.5]  # the canonical center
for i in range(len(S)):
d = S.lattice.dist(S[i].xyz, center)
if d < bestd:
bestd = d
best = i
return best | 634945a5560b3791f3835f3da090decd1b06b933 | 3,633,346 |
import numpy as np
def add_noise(rots, level):
"""adds random noise to a rotation matrix."""
noised_rots = [[np.random.uniform(-level,level,4)]*23]*rots.shape[0]
noised_rots = Quaternions(np.array(noised_rots))
return rots+noised_rots | 8886f11b90a2d0098b4776100ee9834d433012fb | 3,633,347 |
def content_tree_update(request):
"""Returns all content of a given contenttree."""
assert request.contenttree.patched, "contenttree should be patched here..."
# SANITIZE JSON DATA
jsoncontent = request.json_body['content']
data = remove_unvalidated_fields(jsoncontent, peerreview_update_schema)
modificationDate = data['update_date']
assert modificationDate, "modification date is missing (invalid format?)"
# Load Peerreviews
peerreview_manager = PeerreviewManager(request, contenttree=request.contenttree)
content_peerreviews = peerreview_manager.load_modified_peerreviews(modificationDate)
return({
'OK': True,
'update_date': arrow.utcnow(),
'peerreviews': content_peerreviews
}) | dcfc1932aa9fec431e138f205a143b792b6130f7 | 3,633,348 |
from osgeo import gdal
def compute_overlap_region(db_ref, db_new):
"""
Computes the overlapping/shared region between two images.
Outputs:
- Corner coordinates of the overlapping region in the SRS
- Corresponding pixel indexes in both input images
"""
cornerCoord_ref = gdal.Info(db_ref, format='json')['cornerCoordinates']
cornerCoord_new = gdal.Info(db_new, format='json')['cornerCoordinates']
cc_val_ref = [cornerCoord_ref.get(key) for key in cornerCoord_ref.keys() if key != 'center']
[ul_ref, dummy, lr_ref, _] = cc_val_ref
cc_val_new = [cornerCoord_new.get(key) for key in cornerCoord_new.keys() if key != 'center']
[ul_new, dummy, lr_new, _] = cc_val_new
# Checks if the images cover exactly the same regions.
if cc_val_ref == cc_val_new:
print("The images cover the same region (left corner:[{}, {}], right corner:[{}, {}])."
.format(ul_ref[0], ul_ref[1], lr_ref[0], lr_ref[1]))
return [ul_ref, lr_ref], [[0, 0], [db_ref.RasterXSize, db_ref.RasterYSize]], \
[[0, 0], [db_ref.RasterXSize, db_ref.RasterYSize]]
# Computes the overlapping region
overlap_corners = [[max(ul_ref[0], ul_new[0]), min(ul_ref[1], ul_new[1])],[min(lr_ref[0], lr_new[0]),
max(lr_ref[1], lr_new[1])]]
# Checks if the overlapping region is physically possible. If not, then the images don't cover the same region
if overlap_corners[0][0] > overlap_corners[1][0] or overlap_corners[0][1] < overlap_corners[1][1]:
print("The two regions represented by the images don't overlap.")
return None, None, None
print("Found an overlapping regions (left corner:[{}, {}], right corner:[{}, {}])."
.format(overlap_corners[0][0], overlap_corners[0][1], overlap_corners[1][0], overlap_corners[1][1]))
# If a shared region is found then compute the pixels indexes corresponding to its corner coordinates in
# for both images
col_ul_ref, row_ul_ref = geo2pix(db_ref.GetGeoTransform(), overlap_corners[0][0], overlap_corners[0][1])
col_lr_ref, row_lr_ref = geo2pix(db_ref.GetGeoTransform(), overlap_corners[1][0], overlap_corners[1][1])
col_ul_new, row_ul_new = geo2pix(db_new.GetGeoTransform(), overlap_corners[0][0], overlap_corners[0][1])
col_lr_new, row_lr_new = geo2pix(db_new.GetGeoTransform(), overlap_corners[1][0], overlap_corners[1][1])
return overlap_corners, [[row_ul_ref, col_ul_ref], [row_lr_ref, col_lr_ref]], [[row_ul_new, col_ul_new],
[row_lr_new, col_lr_new]] | 664f1825c75c0b33d0b0c5ba4686d132790ad363 | 3,633,349 |
from typing import List
import math
def _calculate_team_size_score(
projects: List[dict], assignments: List[AssignmentTuple], project: dict, student: dict
) -> int:
"""Calculates the weighted score based on how far away from the average team size this project would
be after assigning the student
Args:
projects (List[dict]): All of the projects
assignments (List[AssignmentTuple]): All of the current assignments
project (dict): The project the student is being scored for
student (dict): The student being scored
Returns:
int: A score representing how far the project size would be from the average
"""
student_track = student["fields"][SURVEY_TRACK_FIELD]
# Calculate the current average team size for this track
average_team_size = _get_average_team_size_for_track(projects, assignments, student_track)
# Calculate the current size of the team for the track
project_team_member_count = sum(
assignment.project["fields"]["id"] == project["fields"]["id"]
and assignment.student["fields"][SURVEY_TRACK_FIELD] == student_track
for assignment in assignments
)
score = math.ceil(TEAM_SIZE_WEIGHT * (average_team_size - project_team_member_count))
return score | 47fcb0ea3feb637b558e6df10abd0aa993dbf965 | 3,633,350 |
import string
def encode(data):
""" Encodes a string to the 'cstring' encoding supported by the replay DTD.
Args:
data: string value to be encoded
Returns:
String containing the encoded value
Raises:
None
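        Example:
            encode('a&b') returns 'a\\x26b' (the '&' falls outside the allowed set)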
"""
    chars = string.ascii_letters + string.digits + " ?!:."
return ''.join([x if x in chars else "\\x%02x" % ord(x) for x in data]) | 9fc3482c53eed42678aa3c7cbbf915c85b28cc68 | 3,633,351 |
import types
from typing import Dict
from typing import Any
from typing import List
def gen_frame_symbol(
data: types.PulseInstruction, formatter: Dict[str, Any], device: device_info.DrawerBackendInfo
) -> List[drawings.TextData]:
"""Generate a frame change symbol with instruction meta data from provided frame instruction.
Stylesheets:
- The `frame_change` style is applied.
- The symbol type in unicode is specified in `formatter.unicode_symbol.frame_change`.
- The symbol type in latex is specified in `formatter.latex_symbol.frame_change`.
Args:
data: Frame change instruction data to draw.
formatter: Dictionary of stylesheet settings.
device: Backend configuration.
Returns:
List of `TextData` drawings.
"""
if data.frame.phase == 0 and data.frame.freq == 0:
return []
style = {
"zorder": formatter["layer.frame_change"],
"color": formatter["color.frame_change"],
"size": formatter["text_size.frame_change"],
"va": "center",
"ha": "center",
}
program = []
for inst in data.inst:
if isinstance(inst, (instructions.SetFrequency, instructions.ShiftFrequency)):
try:
program.append(f"{inst.__class__.__name__}({inst.frequency:.2e} Hz)")
except TypeError:
# parameter expression
program.append(f"{inst.__class__.__name__}({inst.frequency})")
elif isinstance(inst, (instructions.SetPhase, instructions.ShiftPhase)):
try:
program.append(f"{inst.__class__.__name__}({inst.phase:.2f} rad.)")
except TypeError:
# parameter expression
program.append(f"{inst.__class__.__name__}({inst.phase})")
meta = {
"total phase change": data.frame.phase,
"total frequency change": data.frame.freq,
"program": ", ".join(program),
"t0 (cycle time)": data.t0,
"t0 (sec)": data.t0 * data.dt if data.dt else "N/A",
}
text = drawings.TextData(
data_type=types.SymbolType.FRAME,
channels=data.inst[0].channel,
xvals=[data.t0],
yvals=[0],
text=formatter["unicode_symbol.frame_change"],
latex=formatter["latex_symbol.frame_change"],
ignore_scaling=True,
meta=meta,
styles=style,
)
return [text] | 8b38222cd294f42eb58dfcc36b10361ce7869a81 | 3,633,352 |
import minecraft_launcher_lib
def leanlauncher_download_version(version=LATEST_VERSION, path=DEFAULT_INSTALL_PATH):
"""
Installs the specified version of Minecraft to the specified path
Parameters:
version (str): the version of Minecraft to be installed, by default the latest
path (str): the path to install Minecraft to, by default ~/.LeanLauncher/versions
Returns:
True on successful install, False on error
"""
try:
minecraft_launcher_lib.install.install_minecraft_version(version, path)
except minecraft_launcher_lib.exceptions.VersionNotFound:
print(f"Not a valid version {version}, please try again")
return False
print(f"Minecraft version {version} successfully installed to {path}!")
return True | 35193ec3ca5d1ad1b23b84d35fec002997bb948e | 3,633,353 |
import sympy as sp
def Weierstrass_Enneper(f, g, z, imag_unit=1j):
"""
Compute the Weierstrass Enneper parametrization for given 'Weierstrass data'.
:param sympy expression f: h'/g, with h the height function
:param sympy expression g: Gauss map.
:param sympy variable z: Complex variable.
:param imag_unit: Representation of imaginary unit, e.g. 1j (numpy) or sympy.I.
:return: Weierstrass Enneper parametrization as a tuple of sympy expressions.
"""
psi1 = sp.integrate(f*(1-g**2)/2, z)
psi2 = imag_unit*sp.integrate(f*(1+g**2)/2, z)
psi3 = sp.integrate(f*g, z)
return psi1, psi2, psi3 | a47648e52600fd5fff37fb0ee7477b67c587ad0b | 3,633,354 |
import os
import tensorflow as tf
def build_lstm(seq_length):
"""Builds an LSTM in Keras."""
model = tf.keras.Sequential([
tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(22),
input_shape=(seq_length, 3)), # output_shape=(batch, 44)
tf.keras.layers.Dense(4, activation="sigmoid") # (batch, 4)
])
model_path = os.path.join("./netmodels", "LSTM")
print("Built LSTM.")
if not os.path.exists(model_path):
os.makedirs(model_path)
return model, model_path | 392fbb4491c5469499a714e4e4f2a8d65c62c1e3 | 3,633,355 |
def human_size(size_bytes):
"""
format a size in bytes into a 'human' file size, e.g. B, KB, MB, GB, TB, PB
Note that bytes will be reported in whole numbers but KB and above will have
greater precision. e.g. 43 B, 443 KB, 4.3 MB, 4.43 GB, etc
"""
suffixes_table = [('B', 0), ('KB', 1), ('MB', 1), ('GB', 2), ('TB', 2), ('PB', 2)]
num = float(size_bytes)
for suffix, precision in suffixes_table:
if num < UNIT_SIZE:
break
num /= UNIT_SIZE
if precision == 0:
formatted_size = "%d" % num
else:
formatted_size = str(round(num, ndigits=precision))
return "%s%s" % (formatted_size, suffix) | d3489ee85c419711d82e35003426ef2915143c17 | 3,633,356 |
def html_color_to_rgba(html_colour, alpha):
"""
:param html_colour: Colour string like FF0088
:param alpha: Alpha value (opacity)
:return: RGBA semitransparent version of colour for use in css
"""
html_colour = html_colour.upper()
if html_colour[0] == '#':
html_colour = html_colour[1:]
r_str = html_colour[0:2]
g_str = html_colour[2:4]
b_str = html_colour[4:6]
r = int(r_str, 16)
g = int(g_str, 16)
b = int(b_str, 16)
return 'rgba(%s, %s, %s, %s)' % (r, g, b, alpha) | 4f28938aa89d62198cc3052a480e0e0744560a79 | 3,633,357 |
from typing import OrderedDict
def _assign_category_colors(uses, cmap, use_colors=None, assigned_colors=None):
"""Set a dictionary of nice colors for the use blocks.
Options allow specifing pre-defined elements for some categories."""
use_colors = OrderedDict() if use_colors is None else use_colors
assigned_colors = (
OrderedDict() if assigned_colors is None else assigned_colors
)
available_idxs = list(range(cmap.N))
used_colors = set(use_colors.values())
    for cmap_idx in range(cmap.N):
        # Skip gray because it is confusing against background
        this_cmap = cmap(cmap_idx)
        if this_cmap[0] == this_cmap[1] and this_cmap[0] == this_cmap[2]:
            available_idxs.remove(cmap_idx)
        # If a color has been used already, don't reuse it for something else
        # (elif so an index removed as gray is not removed a second time)
        elif cmap(cmap_idx) in used_colors:
            available_idxs.remove(cmap_idx)
for use, cmap_idx in assigned_colors.items():
if cmap_idx in available_idxs:
available_idxs.remove(cmap_idx)
use_colors[use] = cmap(cmap_idx)
else:
assert use_colors[use] == cmap(cmap_idx)
for use in uses:
if use not in use_colors:
use_idx = available_idxs[0]
use_colors[use] = cmap(use_idx)
available_idxs.remove(use_idx)
return use_colors | 085d0ca707990c84cd51464ee8f65f90500b7060 | 3,633,358 |
def GetClientContext(client_id, token):
"""Get context for the given client id.
Get platform, os release, and arch contexts for the client.
Args:
client_id: The client_id of the host to use.
token: Token to use for access.
Returns:
array of client_context strings
"""
client_context = []
client = aff4.FACTORY.Open(client_id, token=token)
system = client.Get(client.Schema.SYSTEM)
if system:
client_context.append("Platform:%s" % system)
release = client.Get(client.Schema.OS_RELEASE)
if release:
client_context.append(utils.SmartStr(release))
arch = utils.SmartStr(client.Get(client.Schema.ARCH)).lower()
# Support synonyms for i386.
if arch == "x86":
arch = "i386"
if arch:
client_context.append("Arch:%s" % arch)
return client_context | 3ecddfeb58e99d06951aab4fe359bc1291b43a2a | 3,633,359 |
import math
def calculate_slope_intercept(line):
"""
    Calculate the slope and intercept for a line
"""
for x1, y1, x2, y2 in line:
if x2-x1 == 0:
return math.inf, 0
slope = (y2-y1)/(x2-x1)
intercept = y1 - slope * x1
return slope, intercept | e21ff81a36cef7a995a98f7adf0302e8397a8139 | 3,633,360 |
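Usage sketch, assuming the Hough-style nested line format [[x1, y1, x2, y2]] that the loop expects:
line = [[0, 0, 4, 8]]
print(calculate_slope_intercept(line))  # -> (2.0, 0.0)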
def get_welcome_response():
""" If we wanted to initialize the session to have some attributes we could
add those here
"""
session_attributes = {}
card_title = "Welcome"
speech_output = "I'm the Magic Conch Shell. Ask me a question"
# If the user either does not reply to the welcome message or says something
# that is not understood, they will be prompted again with this text.
reprompt_text = "Please ask me a question"
should_end_session = False
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session)) | 8a900efd3ef7129c1a9ff408e49591945dad83a1 | 3,633,361 |
from typing import Optional
import numpy as np
from ctypes import c_void_p
def rmcgs(A: np.ndarray, m: Optional[int] = None, r: Optional[int] = None) -> np.ndarray:
"""
Compute the product: B <- G * S * A where G has size m * r and elements from the standard
normal distribution, rescaled by 1/sqrt(m), and S is a CountSketch of size r * n. The matrix
G is not explicitly formed, and its elements are computed on-the-fly only when required.
Args:
A (np.ndarray): matrix A in row-major ordering (C_CONTIGUOUS)
m (int): number of rows for the Gaussian sketch. If zero, the Gaussian sketch will
not be applied. If None, it defaults to 2*d.
r (int): number of rows to use for the CountSketch transform. If zero, this transform
will not be applied. If None, it defaults to 5 * (d**2 + d).
Returns:
np.ndarray: matrix B in row-major ordering (C_CONTIGUOUS)
"""
assert_dtype(A, 'float64')
assert_contiguous_type(A, 'C_CONTIGUOUS')
n = int(A.shape[0])
d = int(A.shape[1])
r = r if r is not None else 5 * (d**2 + d)
m = m if m is not None else 2 * d
if (m > n) or (r > n):
raise ValueError(f'Either m={m} or r={r} is larger than n={n}, the number of rows of A.')
n_rows_B = m or r # if m is zero, fallback to r
B = np.zeros((n_rows_B, d))
ext_lib.rmcgs(d, m, n, r, A.ctypes.data_as(c_void_p), B.ctypes.data_as(c_void_p))
return B | 25325d1b2c75a07468fdce63fb5481da99c93105 | 3,633,362 |
def start_multi_svf():
"""
    This function starts the multi SVF GUI
    :return: result dict with the start status and the CLI/REST port list
"""
app.logger.info("Try to start multi SVF GUI for test")
cli_rest_port_list = []
svf_num = int(request.form.get("svf_num"))
try:
cli_rest_port_list = StartMultiSvf(svf_num=svf_num, logger=app.logger).start_multi_svf_ports()
except Exception as e:
app.logger.error(e)
if len(cli_rest_port_list) != 0:
start_multi_svf_result = "success"
else:
start_multi_svf_result = "fail"
result = {"start_multi_svf_result": start_multi_svf_result,
"cli_rest_port_list": cli_rest_port_list,
}
return result | 1dd3585e5fcfa96d225645d382ee5ab064bc4073 | 3,633,363 |
import glob
def get_files_by_pattern(root, pattern='a/b/*.ext', strip_root=False):
"""Optionally to only return matched sub paths."""
# Get the abspath of each directory images.
ret = glob.glob(osp.join(root, pattern))
    # strip the root prefix so each entry is a sub-path such as 'images/train/xxx.jpg'
if strip_root:
ret = [r[len(root) + 1:] for r in ret]
return ret | 905e4c4d08d228074a8036cdf5511f9eb7330f8c | 3,633,364 |
import torch
def gumbel_softmax(logits, temperature=1, hard=False):
"""
    Straight-through (ST) Gumbel-Softmax
    input: logits of shape [*, n_class]
    return: [*, n_class] sample; a one-hot vector when hard=True
"""
y = gumbel_softmax_sample(logits, temperature)
if not hard:
return y
shape = y.size()
_, ind = y.max(dim=-1)
y_hard = torch.zeros_like(y).view(-1, shape[-1])
y_hard.scatter_(1, ind.view(-1, 1), 1)
y_hard = y_hard.view(*shape)
# Set gradients w.r.t. y_hard gradients w.r.t. y
y_hard = (y_hard - y).detach() + y
return y_hard | 4ce2b64115c4a4ce87677aa99e0422c00524c5d3 | 3,633,365 |
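A hedged usage sketch; gumbel_softmax_sample is a helper defined elsewhere in the same module and is assumed available.
import torch
logits = torch.randn(8, 10)                                 # batch of 8, 10 classes
soft = gumbel_softmax(logits, temperature=0.5)              # differentiable, soft sample
hard = gumbel_softmax(logits, temperature=0.5, hard=True)   # one-hot forward pass;
# gradients still flow through the (y_hard - y).detach() + y straight-through trick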
from imgaug import augmenters as iaa
def _load_augmentation_aug_all():
""" Load image augmentation model """
def sometimes(aug):
return iaa.Sometimes(0.5, aug)
return iaa.Sequential(
[
# apply the following augmenters to most images
iaa.Fliplr(0.5), # horizontally flip 50% of all images
            iaa.Flipud(0.5),  # vertically flip 50% of all images
# crop images by -5% to 10% of their height/width
sometimes(iaa.CropAndPad(
percent=(-0.05, 0.1),
pad_mode='constant',
pad_cval=(0, 255)
)),
sometimes(iaa.Affine(
# scale images to 80-120% of their size, individually per axis
scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
# translate by -20 to +20 percent (per axis)
translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
rotate=(-45, 45), # rotate by -45 to +45 degrees
shear=(-16, 16), # shear by -16 to +16 degrees
# use nearest neighbour or bilinear interpolation (fast)
order=[0, 1],
# if mode is constant, use a cval between 0 and 255
cval=(0, 255),
# use any of scikit-image's warping modes
# (see 2nd image from the top for examples)
mode='constant'
)),
# execute 0 to 5 of the following (less important) augmenters per
# image don't execute all of them, as that would often be way too
# strong
iaa.SomeOf((0, 5),
[
# convert images into their superpixel representation
sometimes(iaa.Superpixels(
p_replace=(0, 1.0), n_segments=(20, 200))),
iaa.OneOf([
# blur images with a sigma between 0 and 3.0
iaa.GaussianBlur((0, 3.0)),
# blur image using local means with kernel sizes
# between 2 and 7
iaa.AverageBlur(k=(2, 7)),
# blur image using local medians with kernel sizes
# between 2 and 7
iaa.MedianBlur(k=(3, 11)),
]),
iaa.Sharpen(alpha=(0, 1.0), lightness=(
0.75, 1.5)), # sharpen images
iaa.Emboss(alpha=(0, 1.0), strength=(
0, 2.0)), # emboss images
# search either for all edges or for directed edges,
# blend the result with the original image using a blobby mask
iaa.SimplexNoiseAlpha(iaa.OneOf([
iaa.EdgeDetect(alpha=(0.5, 1.0)),
iaa.DirectedEdgeDetect(
alpha=(0.5, 1.0), direction=(0.0, 1.0)),
])),
# add gaussian noise to images
iaa.AdditiveGaussianNoise(loc=0, scale=(
0.0, 0.05*255), per_channel=0.5),
iaa.OneOf([
# randomly remove up to 10% of the pixels
iaa.Dropout((0.01, 0.1), per_channel=0.5),
iaa.CoarseDropout((0.03, 0.15), size_percent=(
0.02, 0.05), per_channel=0.2),
]),
# invert color channels
iaa.Invert(0.05, per_channel=True),
# change brightness of images (by -10 to 10 of original value)
iaa.Add((-10, 10), per_channel=0.5),
# change hue and saturation
iaa.AddToHueAndSaturation((-20, 20)),
# either change the brightness of the whole image (sometimes
# per channel) or change the brightness of subareas
iaa.OneOf([
iaa.Multiply(
(0.5, 1.5), per_channel=0.5),
iaa.FrequencyNoiseAlpha(
exponent=(-4, 0),
first=iaa.Multiply(
(0.5, 1.5), per_channel=True),
second=iaa.ContrastNormalization(
(0.5, 2.0))
)
]),
# improve or worsen the contrast
iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5),
iaa.Grayscale(alpha=(0.0, 1.0)),
# move pixels locally around (with random strengths)
sometimes(iaa.ElasticTransformation(
alpha=(0.5, 3.5), sigma=0.25)),
# sometimes move parts of the image around
sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))),
sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1)))
],
random_order=True
)
],
random_order=True
) | e39a08f0166d8a6a379427a895b196891bf70fe1 | 3,633,366 |
import logging
import traceback
from kazoo.client import KazooClient
from kazoo.handlers.threading import KazooTimeoutError
def zk_get_mq_servers(zookeeper_servers, logger = logging.getLogger(__name__)):
"""
Get list of mq servers from zookeeper
:param zookeeper_servers: list of zookeeper servers
:param logger: logger to use
:return: list of mq servers or None
"""
mq_servers = None
# connect to zookeeper
zk = KazooClient(hosts=zookeeper_servers)
try:
zk.start(timeout=20)
except KazooTimeoutError:
logger.error('Failed to connect to zookeeper!')
return None
# get server list
try:
mq_servers = zk.get_children(constants.WORKER_CONFIG_MQ_SERVERS)
except Exception:
logger.error('Failed to obtain MQ server list from zookeeper!')
traceback.print_exc()
mq_servers = None
# close connection
try:
zk.stop()
zk.close()
except Exception:
logger.error('Failed to close connection to zookeeper!')
traceback.print_exc()
# if server list was not obtained, exit
if not mq_servers:
return None
# parse mq server list obtained from zookeeper
mq_servers = cf.server_list(mq_servers)
return mq_servers | 2201ac305ea1b44fe8baa0582bfc84bf39472dbe | 3,633,367 |
import os
def create_list_of_file_lists(possible_storage_locations, dir_path_to_files, dir_name_appendix):
"""This function creates a list of the files within each of the file storage locations so the user can inspect them to look for comparison options."""
# Output list of files
files_to_compare = []
for location in possible_storage_locations:
location_filepath = dir_path_to_files + location + "_" + dir_name_appendix
print("Filepath: ", location_filepath)
os.chdir(location_filepath)
all_files = os.listdir()
for file in all_files:
files_to_compare.append(file)
return files_to_compare | 11886751557e6994e3994f820dee21848584e265 | 3,633,368 |
from datetime import datetime
def _make_todays_date() -> str:
""" build today's date as a standard format """
return datetime.now().strftime("%a %d-%b") | fdb9bc420689081586ac19fe91a17ea871576d59 | 3,633,369 |
import re
def add_review_suggestion_flags(df,
text_col,
result_col='result_binary'):
"""
    Attempt to add some logical "manual review suggested" flags to cases to reduce false positive/negative classifications. Currently
    flags cases with "flora" in the text, >=1 species capture, and a current negative classification -- inspired by some hard-to-classify cases in our 2nd validation set.
"""
### flora flag testing
flora_bool1=df[text_col].apply(lambda x: re.search(r'flora',str(x).lower())is not None)
flora_bool2=df['species_capt'].apply(lambda x: len(x))>0
flora_bool3= df['result_num']==0
flora_flag= (flora_bool1) & (flora_bool2) & (flora_bool3)
df['flora_flag']=0
df.loc[flora_flag,'flora_flag']=1
    return df
import numpy as np
def create3DMatrix(data, trialTable, events, trialList, trialDur, fs, normalize, baselineDur=0.1):
    """Build a (trials, samples, electrodes) matrix from trial-wise data."""
trials = trialTable.copy()
trials = trials[trials['trialNum'].isin(trialList)]
totalTrialNum = np.max(trials['trialNum'])
m = trials.shape[0]
    print(m, totalTrialNum)
electrodeNumber = data.shape[1]
trialSamples = int(np.round((trialDur+baselineDur)*fs))
# number of features: each sample for each electrode
n = int(np.round(trialDur*fs*electrodeNumber))
# Get trial data
X = np.zeros((m, trialSamples, electrodeNumber))
    print('creating matrix of shape (trials=%d, time=%ds, electrodes=%d)' % (X.shape[0],
                                                                             X.shape[1]/fs,
                                                                             X.shape[2]))
count = 0
for i in range(totalTrialNum+1):
# Check if this trial is in our subset
if (i in trialList.unique()):
trial = getTrialDataNP(data.values, events=events,
trialNum=i, baselineDur=baselineDur,
startOffset=0,
trialDur=trialDur, fs=fs)
# Normalization
if (normalize):
trial = normalizeFromBaseline(trial,
baselineDur=baselineDur, fs=fs)
X[count, :, :] = trial
count += 1
    print(X.shape)
return X | 1627f506a03bb7f07c30c6c70581ac40bd64d7fc | 3,633,371 |
def followed_list(username):
"""关注列表
"""
current_user = models.get_current_user()
user = models.get_user(username=username)
page = request.args.get('page', 1, type=int)
followed_list = user.followed.paginate(page, error_out=False)
user_list = [i.followed for i in followed_list.items]
return render_template('user.html',
page=page,
user=user,
current_user=current_user,
followed_list=user_list) | 5634ce5e7fc6b344f7ad2b3461e9f294cee225d3 | 3,633,372 |
from collections import defaultdict
def add_edge_degree(graph, k=3):
"""
Add k edges to defend based on top edge degree centrality entries :cite:`tong2012gelling`.
:param graph: an undirected NetworkX graph
:param k: number of edges to add
:return: a list of edges to add
"""
info = defaultdict(list)
info['added'] = get_central_edges(graph, k, method='deg')
return info | eded75ffc4eabe155124fe95023694df367f3d01 | 3,633,373 |
def number_of_fishers():
""" Prompt the user for the number of fishermen entering the draw."""
try:
number = int(input("How many fishermen will enter the competition: "))
return number
except ValueError:
print("Please enter an integer for the number of competing fishermen") | bd3ff25865d67851c8a1742a8cfa808a317716f0 | 3,633,374 |
import matplotlib.pyplot as plt
def plot_histogram(df, x, bins, xlabel=None, ylabel=None, title=None, figsize=(8, 5)):
    """
    Plot a histogram of df[x] and return the matplotlib figure.
    """
fig = plt.figure(figsize=figsize)
ax = fig.gca()
ax.hist(df[x], bins=bins, color='#8d1a93')
ax.set_xlabel(xlabel, fontsize=16)
ax.set_ylabel(ylabel, fontsize=16)
ax.set_title(title, fontsize = 18)
return fig | 42801361386edea3065974bdca2a589be0b964ef | 3,633,375 |
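A self-contained usage sketch; the DataFrame and column name are assumptions for illustration.
import pandas as pd
df = pd.DataFrame({'price': [1.0, 2.5, 2.7, 3.1, 4.8, 5.0]})
fig = plot_histogram(df, x='price', bins=5,
                     xlabel='Price', ylabel='Count', title='Price distribution')
fig.savefig('price_hist.png')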
def pygmo_gaco(
criterion,
x,
lower_bounds,
upper_bounds,
*,
population_size=None,
batch_evaluator=None,
n_cores=1,
seed=None,
discard_start_params=False,
#
stopping_max_iterations=STOPPING_MAX_ITERATIONS_GENETIC,
kernel_size=63,
speed_parameter_q=1.0,
oracle=0.0,
accuracy=0.01,
threshold=1,
speed_of_std_values_convergence=7,
stopping_max_n_without_improvements=100000,
stopping_max_criterion_evaluations=STOPPING_MAX_CRITERION_EVALUATIONS_GLOBAL,
focus=0.0,
cache=False,
):
"""Minimize a scalar function using the generalized ant colony algorithm.
For details see :ref:`list_of_pygmo_algorithms`.
"""
_check_that_every_param_is_bounded(lower_bounds, upper_bounds)
population_size = _determine_population_size(
population_size=population_size, x=x, lower_bound=64
)
if isinstance(speed_of_std_values_convergence, float):
if not speed_of_std_values_convergence.is_integer():
raise ValueError(
"The speed_of_std_values_convergence parameter must be an integer. "
f"You specified {speed_of_std_values_convergence}."
)
algo_specific_options = {
"gen": int(stopping_max_iterations),
"ker": kernel_size,
"q": speed_parameter_q,
"oracle": oracle,
"acc": accuracy,
"threshold": threshold,
"n_gen_mark": int(speed_of_std_values_convergence),
"impstop": stopping_max_n_without_improvements,
"evalstop": stopping_max_criterion_evaluations,
"focus": focus,
"memory": cache,
}
algo_options = _create_algo_options(
population_size=population_size,
n_cores=n_cores,
seed=seed,
discard_start_params=discard_start_params,
batch_evaluator=batch_evaluator,
algo_specific_options=algo_specific_options,
)
res = _minimize_pygmo(
criterion=criterion,
x=x,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
method="gaco",
algo_options=algo_options,
)
return res | f8cf4e423b928393e4c028412da4f302376fb248 | 3,633,376 |
def get_project_by_id(project_id: str) -> Project:
"""
Get a project by its project_id, with project model and project data joined.
:param project_id: project id of the project
:return: Project with the project id
"""
query = (
Project.select(Project, ProjectModel, ProjectData)
.join_from(Project, ProjectModel, JOIN.LEFT_OUTER)
.join_from(Project, ProjectData, JOIN.LEFT_OUTER)
.where(Project.project_id == project_id)
.group_by(Project)
)
project = None
for res in query:
project = res
break
if not project:
_LOGGER.error("could not find project with project_id {}".format(project_id))
raise HTTPNotFoundError(
"could not find project with project_id {}".format(project_id)
)
project.model = None
for model in project.models:
project.model = model
break
return project | 2a8986c8e2541f43d30bee2cb1bcac14df68b716 | 3,633,377 |
import cv2
def hist_similarity(image_1, image_2):
"""color hist based image similarity
@param image_1: np.array(the first input image)
@param image_2: np.array(the second input image)
@return similarity: float(range from [0,1], the bigger the more similar)
"""
if image_1.ndim == 2 and image_2.ndim == 2:
hist_1 = cv2.calcHist([image_1], [0], None, [256], [0.0, 255.0])
hist_2 = cv2.calcHist([image_2], [0], None, [256], [0.0, 255.0])
        similarity = cv2.compareHist(hist_1, hist_2, cv2.HISTCMP_CORREL)
elif image_1.ndim == 3 and image_2.ndim == 3:
"""R,G,B split"""
b_1, g_1, r_1 = cv2.split(image_1)
b_2, g_2, r_2 = cv2.split(image_2)
hist_b_1 = cv2.calcHist([b_1], [0], None, [256], [0.0, 255.0])
hist_g_1 = cv2.calcHist([g_1], [0], None, [256], [0.0, 255.0])
hist_r_1 = cv2.calcHist([r_1], [0], None, [256], [0.0, 255.0])
hist_b_2 = cv2.calcHist([b_2], [0], None, [256], [0.0, 255.0])
hist_g_2 = cv2.calcHist([g_2], [0], None, [256], [0.0, 255.0])
hist_r_2 = cv2.calcHist([r_2], [0], None, [256], [0.0, 255.0])
        similarity_b = cv2.compareHist(hist_b_1, hist_b_2, cv2.HISTCMP_CORREL)
        similarity_g = cv2.compareHist(hist_g_1, hist_g_2, cv2.HISTCMP_CORREL)
        similarity_r = cv2.compareHist(hist_r_1, hist_r_2, cv2.HISTCMP_CORREL)
sum_bgr = similarity_b + similarity_g + similarity_r
similarity = sum_bgr/3.
    else:
        # mixed gray/color input: convert the color image to gray first
        gray_1 = cv2.cvtColor(image_1, cv2.COLOR_RGB2GRAY) if image_1.ndim == 3 else image_1
        gray_2 = cv2.cvtColor(image_2, cv2.COLOR_RGB2GRAY) if image_2.ndim == 3 else image_2
        hist_1 = cv2.calcHist([gray_1], [0], None, [256], [0.0, 255.0])
        hist_2 = cv2.calcHist([gray_2], [0], None, [256], [0.0, 255.0])
        similarity = cv2.compareHist(hist_1, hist_2, cv2.HISTCMP_CORREL)
return similarity | 76358cff7b3a33f44fecefd289805a1fea88e1c4 | 3,633,378 |
def slotter_obj():
""" Return basic slotter object """
return Slotter() | f4b8805c8ca26bfc22da79b49a9c287c35428f86 | 3,633,379 |
def get_ncbi_enrichment_domains():
""" Find all domains matched to given node id, then return dictionary with all domains as
result. All domains should have matching indices e.g. regulon[1] should be data from
matching same node as uniprot[1].
"""
# TODO: Validate incoming data using webargs + Marshmallow
data = request.get_json()
node_ids = data.get('nodeIds')
tax_id = data.get('taxID')
domains = data.get('domains')
nodes = {}
if node_ids is not None and tax_id is not None:
kg = get_kg_service()
regulon = kg.get_regulon_genes(node_ids) if Domain.REGULON.value in domains else {}
biocyc = kg.get_biocyc_genes(node_ids, tax_id) if Domain.BIOCYC.value in domains else {}
go = kg.get_go_genes(node_ids) if Domain.GO.value in domains else {}
string = kg.get_string_genes(node_ids) if Domain.STRING.value in domains else {}
uniprot = kg.get_uniprot_genes(node_ids) if Domain.UNIPROT.value in domains else {}
kegg = kg.get_kegg_genes(node_ids) if Domain.KEGG.value in domains else {}
nodes = {
node_id: {
'regulon': regulon.get(node_id, None),
'uniprot': uniprot.get(node_id, None),
'string': string.get(node_id, None),
'go': go.get(node_id, None),
'biocyc': biocyc.get(node_id, None),
'kegg': kegg.get(node_id, None),
'node_id': node_id
} for node_id in node_ids}
return jsonify({'result': nodes}), 200 | 2a60168fe953b00aca1870deb1692e018d9297ac | 3,633,380 |
import numpy as np
def gaussian(wavelength, w, sigma, amp=1., norm=True):
    r"""
    Computes a Gaussian for a given central wavelength, sigma and amp
    .. math::
        G = \frac{A}{\sigma \sqrt{2 \pi}} \exp\left( -\frac{(w - w_0)^2}{2 \sigma^2} \right)
Args:
wavelength (np.ndarray): wavelength array to calculate spec on
w (float): central wavelength (same units as wavelength array)
sigma (float): sigma for gaussian (same units as w)
amp (float): amplitude of spectrum, default=1.0
norm (bool): if true, the gaussian will be normalized, default=True
to integrate to 1 over infinity then the amp factor will be multiplied
Returns:
np.ndarray: spectrum evaluated on wavelength array
"""
if norm:
norm = 1. / (sigma * np.sqrt(2. * np.pi))
else:
norm = 1.
exp = np.exp(-0.5 * (wavelength - w) ** 2 / sigma ** 2)
return amp * norm * exp | 7e757691fbe27641a4cfd983678dd0ccd2cbdfbd | 3,633,381 |
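A short normalization check, as a sketch:
import numpy as np
wl = np.linspace(480, 520, 1001)         # wavelength grid, e.g. in nm
spec = gaussian(wl, w=500.0, sigma=2.0)
print(np.trapz(spec, wl))                # ~1.0 because norm=True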
def summarize_samples(samples, run_parallel):
"""Back compatibility for existing pipelines. Should be replaced with summary when ready.
"""
return samples | 20c742e751f9ea1f783572f031fe144baf73293e | 3,633,382 |
def get_keywords(string):
"""Get keywords for a given string.
Args:
string (str): A string to get keywords for.
Returns:
(list): A list of keywords.
"""
keywords = string.lower().split(' ')
keywords = [x.strip() for x in keywords if x]
keywords = list(set(keywords))
return keywords | 8d4e0781701dc3574583baf417c573967638e86f | 3,633,383 |
def calc_mass_loading_factor(OIII_results, OIII_error, hbeta_results, hbeta_error, hbeta_no_outflow_results, hbeta_no_outflow_error, statistical_results, z, header):
"""
Calculates the mass loading factor
eta = M_out/SFR
Using the calc_sfr.calc_sfr_koffee and the calc_mass_outflow_rate functions
Parameters
----------
OIII_results : :obj:'~numpy.ndarray'
array of outflow results from KOFFEE for OIII line. Used to calculate
the outflow velocity. Should be (7, statistical_results.shape)
OIII_error : :obj:'~numpy.ndarray'
array of the outflow result errors from KOFFEE for OIII line
hbeta_results : :obj:'~numpy.ndarray'
array of outflow results from KOFFEE for Hbeta line. Used to calculate
the Sigma SFR. Should be (7, statistical_results.shape)
hbeta_error : :obj:'~numpy.ndarray'
array of the outflow result errors from KOFFEE for Hbeta line
hbeta_no_outflow_results : :obj:'~numpy.ndarray'
array of single gaussian results from KOFFEE for Hbeta line. Used to
calculate the Sigma SFR. Should be (4, statistical_results.shape)
hbeta_no_outflow_error : :obj:'~numpy.ndarray'
array of the single gaussian result errors from KOFFEE for Hbeta line
statistical_results : :obj:'~numpy.ndarray'
array of statistical results from KOFFEE.
z : float
redshift
header : FITS header object
the header from the fits file
Returns
-------
    mlf : :obj:'~numpy.ndarray'
        mass loading factor
mlf_max : :obj:'~numpy.ndarray'
maximum mass loading factor if R_min is 350pc
mlf_min : :obj:'~numpy.ndarray'
minimum mass loading factor if R_max is 2000pc
"""
#calculate the mass outflow rate (in g/s)
m_out, m_out_max, m_out_min = calc_mass_outflow_rate(OIII_results, OIII_error, hbeta_results, hbeta_error, statistical_results, z)
#calculate the SFR (I wrote this to give the answer without units...)
#(I should probably change that!)
sfr, sfr_err, total_sfr, sigma_sfr, sfr_surface_density_err = calc_sfr.calc_sfr_koffee(hbeta_results, hbeta_error, hbeta_no_outflow_results, hbeta_no_outflow_error, statistical_results, z, header, include_extinction=False, include_outflow=False)
#put the units back onto the sfr (M_sun/yr)
sfr = sfr * (u.solMass/u.yr)
#put the sfr into g/s
sfr = sfr.to(u.g/u.s)
#calculate mass loading factor
mlf = m_out/sfr
mlf_max = m_out_max/sfr
mlf_min = m_out_min/sfr
return mlf, mlf_max, mlf_min | 6504b328e749e98eb4b3612533701e562973d882 | 3,633,384 |
import os as _os
import gzip as _gzip
import itertools as _itertools
import pickle as _pickle
import numpy as _np
from ..io import json as _json
def _write_calccache(calc_cache, key_fn, val_fn, json_too=False, comm=None):
"""
Write `caclcache`, a dictionary of compact polys, to disk in two files,
one for the keys and one for the values.
This function can be called by multiple ranks and passed `comm` to
synchronize collecting and writing a single set of cache files.
Parameters
----------
calc_cache : dict
The cache of calculated (compact) polynomial to save to disk.
key_fn, val_fn : str
key and value filenames.
json_too : bool, optional
When true, the keys are also written in JSON format (to facilitate
python2 & 3 compatibility)
comm : mpi4py.MPI.comm
        Communicator for synchronizing across multiple ranks (each with different
        `calc_cache` args that need to be gathered).
Returns
-------
None
"""
keys = list(calc_cache.keys())
def conv_key(ky): # converts key to native python objects for faster serialization (but *same* hashing)
return (ky[0], ky[1].tonative(), ky[2].tonative(), tuple([x.tonative() for x in ky[3]]))
ckeys = [conv_key(x) for x in keys]
#Gather keys onto rank 0 processor if necessary
# (Note: gathering relies on .gather and .Gather using the *same* rank ordering)
if comm is not None:
ckeys_list = comm.gather(ckeys, root=0)
else:
ckeys_list = [ckeys]
if (comm is None) or (comm.Get_rank() == 0):
ckeys = list(_itertools.chain(*ckeys_list))
print("Writing cache of size = ", len(ckeys))
with _gzip.open(key_fn, 'wb') as f:
_pickle.dump(ckeys, f, protocol=_pickle.HIGHEST_PROTOCOL)
print("Wrote %s" % key_fn)
if json_too: # for Python 2 & 3 compatibility
key_fn_json = _os.path.splitext(key_fn)[0] + ".json"
with open(key_fn_json, 'w') as f:
_json.dump(ckeys, f)
print("Wrote %s" % key_fn_json)
if len(keys) > 0: # some procs might have 0 keys (e.g. the "scheduler")
values = [calc_cache[k] for k in keys]
vtape = []; ctape = []
for v in values:
vt, ct = v # .compact() # Now cache hold compact polys already
vtape.append(vt)
ctape.append(ct)
vtape = _np.concatenate(vtape)
ctape = _np.concatenate(ctape)
if comm is not None:
comm.allgather(vtape.dtype)
comm.allgather(ctape.dtype)
else:
#Need to create vtape and ctape of length 0 and *correct type*
if comm is not None:
vtape_types = comm.allgather(None)
ctape_types = comm.allgather(None)
else:
vtape_types = ctape_types = [] # will cause us to use default type below
for typ in vtape_types:
if typ is not None:
vtape = _np.zeros(0, typ); break
else:
vtape = _np.zeros(0, _np.int64) # default type = int64
for typ in ctape_types:
if typ is not None:
ctape = _np.zeros(0, typ); break
else:
ctape = _np.zeros(0, complex) # default type = complex
#Gather keys onto rank 0 processor if necessary
if comm is not None:
sizes = comm.gather(vtape.size, root=0)
recvbuf = (_np.empty(sum(sizes), vtape.dtype), sizes) \
if (comm.Get_rank() == 0) else None
comm.Gatherv(sendbuf=vtape, recvbuf=recvbuf, root=0)
if comm.Get_rank() == 0: vtape = recvbuf[0]
sizes = comm.gather(ctape.size, root=0)
recvbuf = (_np.empty(sum(sizes), ctape.dtype), sizes) \
if (comm.Get_rank() == 0) else None
comm.Gatherv(sendbuf=ctape, recvbuf=recvbuf, root=0)
if comm.Get_rank() == 0: ctape = recvbuf[0]
if comm is None or comm.Get_rank() == 0:
_np.savez_compressed(val_fn, vtape=vtape, ctape=ctape)
print("Wrote %s" % val_fn) | d18f913c61c025acb294bda6b48457f57319ff8f | 3,633,385 |
import numpy as np
def truecircle(radius, rho):
"""Create a "true" circular mask with anti-aliasing.
Parameters
----------
samples : `int`, optional
number of samples in the square output array
radius : `float`, optional
radius of the shape in the square output array. radius=1 will fill the
rho : `numpy.ndarray`
radial coordinate, 2D
Returns
-------
`numpy.ndarray`
nonbinary ndarray representation of the mask
Notes
-----
Based on a more general algorithm by Jim Fienup
"""
if radius == 0:
return np.zeros_like(rho)
else:
samples = rho.shape[0]
one_pixel = 2 / samples
radius_plus = radius + (one_pixel / 2)
intermediate = (radius_plus - rho) * (samples / 2)
return np.minimum(np.maximum(intermediate, 0), 1) | e721d99d99b89ca24637e20d9577b2725cc29525 | 3,633,386 |
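Usage sketch: build rho on a [-1, 1] grid, then mask a half-radius circle.
import numpy as np
x = np.linspace(-1, 1, 256)
xx, yy = np.meshgrid(x, x)
rho = np.sqrt(xx**2 + yy**2)
mask = truecircle(0.5, rho)  # values in [0, 1]; edge pixels are fractional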
def distancia(ponto1, ponto2):
"""
    Calculate the distance between two points
"""
xdif = ponto2.getx() - ponto1.getx()
ydif = ponto2.gety() - ponto1.gety()
dif = (xdif**2 + ydif**2)**0.5
return dif | 36a980a1081133fb6496585c25cca5782ceef06d | 3,633,387 |
import time
def foo(x, sleep_time):
"""Dummy function for the tests"""
time.sleep(sleep_time)
return [{"type": "objective", "name": "objective", "value": x}] | 3d55a0b0776acec0badd10e38be724afc3015c2f | 3,633,388 |
import numpy as np
def aug_ims(ims, fliplr=0, flipud=0, T=0):
"""Augment images with flips and transposition."""
ims_aug = np.array(ims, copy=True)
for i in range(len(ims_aug)):
if fliplr: # flip left right
ims_aug[i] = np.fliplr(ims_aug[i])
if flipud: # flip up down
ims_aug[i] = np.flipud(ims_aug[i])
if T: # transpose
ims_aug[i,:,:,0] = ims_aug[i,:,:,0].T
return ims_aug | 59f8c44f0efcb70c17f828351b0f59787c3dd677 | 3,633,389 |
def bt_search(btree, key):
"""基于二叉树查询操作"""
bt = btree
while bt is not None:
entry = bt.data
if key < entry.key:
bt = bt.left
elif key > entry.key:
bt = bt.right
else:
return entry.values
return None | 1b358087c10a4d0d6fe79b023340fafeafb81914 | 3,633,390 |
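A hedged usage sketch; the node and entry classes below are assumptions, since the record does not show them.
class Entry:
    def __init__(self, key, values):
        self.key, self.values = key, values

class BTNode:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

root = BTNode(Entry(5, 'five'),
              left=BTNode(Entry(2, 'two')),
              right=BTNode(Entry(8, 'eight')))
print(bt_search(root, 8))  # -> 'eight'
print(bt_search(root, 3))  # -> None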
def make_reply(msgname, types, arguments, major):
"""Helper method for constructing a reply message from a list or tuple
Parameters
----------
msgname : str
Name of the reply message.
types : list of kattypes
The types of the reply message parameters (in order).
arguments : list of objects
The (unpacked) reply message parameters.
major : integer
Major version of KATCP to use when packing types
"""
status = arguments[0]
if status == "fail":
return Message.reply(
msgname, *pack_types((Str(), Str()), arguments, major))
if status == "ok":
return Message.reply(
msgname, *pack_types((Str(),) + types, arguments, major))
raise ValueError("First returned value must be 'ok' or 'fail'.") | 9c55089e1d6fe6b5a4345f444f2551a2c493f2e3 | 3,633,391 |
import numpy as np
def empty_coord():
"""Return an empty coordinate tensor representing 1 residue-level pad character."""
coord_padding = np.zeros((NUM_COORDS_PER_RES, 3))
coord_padding[:] = GLOBAL_PAD_CHAR
return coord_padding | e4d8c4f24ebed354f5b083a4fc072e354c79a149 | 3,633,392 |
def grib_clone(msgid_src):
"""
@brief Create a copy of a message.
Create a copy of a given message (\em msgid_src) resulting in a new
message in memory (\em msgid_dest) identical to the original one.
\b Examples: \ref grib_clone.py "grib_clone.py"
@param msgid_src id of message to be cloned
@return id of clone
@exception GribInternalError
"""
err, newmsgid_src = _internal.grib_c_clone(msgid_src, 0)
GRIB_CHECK(err)
return newmsgid_src | c01b3f626d11be8d218fdcd598e472c5fe748272 | 3,633,393 |
import tensorflow as tf
def layernorm(x, epsilon=1e-5, name='lnconv'):
"""Layer Normalization for conv. x must be [NCHW]"""
shape = x.get_shape().as_list()
with tf.variable_scope(name):
beta = tf.get_variable("beta", [1, shape[1], 1, 1], initializer=tf.constant_initializer(0.))
gamma = tf.get_variable("gamma", [1, shape[1], 1, 1], initializer=tf.constant_initializer(1.))
        mean, var = tf.nn.moments(x, list(range(1, len(shape))), keep_dims=True)
return beta * (x - mean) / tf.sqrt(var + epsilon) + gamma | 9ae3bb3f6e0238de92f167bf45b05e3095541665 | 3,633,394 |
import numpy as np
import networkx as nx
from numpy import linalg as LA
def create_graph_from_edges(edges):
"""
Create a graph from the `edges`
"""
G = nx.Graph()
for e in edges:
p1 = e[0]
p2 = e[1]
dist = LA.norm(np.array(p2) - np.array(p1))
G.add_edge(p1, p2, weight=dist)
return G | ac06c424fcfde720fbb4457c0baced0a0a41567d | 3,633,395 |
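Usage sketch with a small triangle of 2-D points; the point tuples themselves act as node ids.
edges = [((0, 0), (3, 0)), ((3, 0), (3, 4)), ((0, 0), (3, 4))]
G = create_graph_from_edges(edges)
print(G[(0, 0)][(3, 4)]['weight'])  # -> 5.0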
from numpy import sin, cos, exp, sqrt, pi
from scipy.constants import c, mu_0, epsilon_0
from scipy.special import j1
def far_field(frequency, radius, current, r, theta):
"""
Calculate the electric and magnetic far fields for a small circular loop.
:param r: The range to the field point (m).
:param theta: The angle to the field point (rad).
:param frequency: The operating frequency (Hz).
:param radius: The radius of the small circular loop (m).
:param current: The current on the small circular loop (A).
:return: The electric and magnetic far fields (V/m) & (A/m).
"""
# Calculate the wavenumber
k = 2.0 * pi * frequency / c
# Calculate the wave impedance
eta = sqrt(mu_0 / epsilon_0)
# Define the radial-component of the electric far field (V/m)
e_r = 0.0
# Define the theta-component of the electric far field (V/m)
e_theta = 0.0
# Define the phi-component of the electric far field (V/m)
e_phi = exp(-1j * k * r) * (eta * k * radius * current) / (2.0 * r) * j1(k * radius * sin(theta))
    # Define the r-component of the magnetic far field (A/m)
    h_r = (1j * k * radius**2 * current * cos(theta) / (2.0 * r**2)) * (1.0 + 1.0 / (1j * k * r)) * exp(-1j * k * r)
# Define the theta-component of the magnetic far field (A/m)
h_theta = -exp(-1j * k * r) * current * k * radius / (2.0 * r) * j1(k * radius * sin(theta))
# Define the phi-component of the magnetic far field (A/m)
h_phi = 0.0
# Return all six components of the far field
return e_r, e_theta, e_phi, h_r, h_theta, h_phi | 29940432e3e4dbc427398a18e1d026d9e4c205c3 | 3,633,396 |
import random as _rnd
import math as _math
def rng_laplace(lambd=1, trunc=None):
"""
Generate random numbers from a Laplace distribution
Parameters
----------
lambd: float
The scale of the distribution
trunc: None, tuple
Specifies whether the distribution is truncated. If it's
not None then it must be a 2-tuple indicating the range where
the distribution is defined.
Returns
-------
float
A Laplace distributed number, optionally in a
specific range if the distribution is truncated.
"""
while True:
while True:
u = 2 * _rnd.random() - 1
if u != 0:
break
s = -1 if u < 0 else 1
v = -s * lambd * _math.log(s * u)
if not trunc or trunc[0] <= v <= trunc[1]:
break
return v | d4ad9c23c5edef20babe59671e0464ef70ed74ae | 3,633,397 |
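A quick truncation check, as a sketch:
samples = [rng_laplace(lambd=1.0, trunc=(-2, 2)) for _ in range(10_000)]
print(min(samples) >= -2 and max(samples) <= 2)  # -> True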
from typing import Sequence
from typing import Dict
from typing import List
def settings_to_connections(
settings: amicus.options.Configuration,
suffixes: Sequence[str]) -> Dict[str, List[str]]:
"""[summary]
Args:
settings (amicus.options.Configuration): [description]
suffixes (Sequence[str]): [description]
Returns:
Dict[str, List[str]]: [description]
"""
connections = {}
for name, section in settings.items():
component_keys = [k for k in section.keys() if k.endswith(suffixes)]
for key in component_keys:
prefix, suffix = amicus.tools.divide_string(key)
values = amicus.tools.listify(section[key])
if prefix == suffix:
if name in connections:
connections[name].extend(values)
else:
connections[name] = values
else:
if prefix in connections:
connections[prefix].extend(values)
else:
connections[prefix] = values
return connections | e2ec469014d5d26848d011feda717191ae8c452b | 3,633,398 |
from astropy.coordinates import ICRS
def radec_from_pointing_object(pointing,
# default output in degrees
as_radians=False,
as_string=False):
"""Astropy object to ICRS format as strings"""
pnt_radec = pointing.transform_to(ICRS())
if as_string:
ra_hms, dec_dms = radec_to_string(pnt_radec.ra,
pnt_radec.dec)
return ra_hms, dec_dms
elif as_radians:
return pnt_radec.ra.rad, pnt_radec.dec.rad
else:
return pnt_radec.ra.deg, pnt_radec.dec.deg | 8cdd5c6671ccfd12624df88dfdfce0f6e0f41856 | 3,633,399 |