content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def applyAlign(mrt,al):
    """
    Takes meaning representation triples (mrt) and combines with alignments

    Each whitespace-separated alignment has the form
    ``fromNode:edgeLabel:toNode-tokenIndex`` (e.g. ``x9:arg0:sell:x11-39``);
    the outer parts are split off first because ``edgeLabel`` may itself
    contain ``:``.

    :param mrt: list of triples ``[fromNode, edge, toNode]``; ``edge[0]`` is
        the label and ``edge.align`` is a set that is extended in place.
    :param al: whitespace-separated alignment string.
    :return: the (mutated) mrt list.
    """
    for alignment in al.split():
        # Alignment: x9:arg0:sell:x11-39
        fromNode,rest = alignment.split(":",1)
        rest,toAlign = rest.rsplit("-",1)
        edgeLabel,toNode = rest.rsplit(":",1)
        hasAligned = False
        for i in xrange(len(mrt)):
            if mrt[i][0] == fromNode and mrt[i][2] == toNode and mrt[i][1][0] == edgeLabel:
                mrt[i][1].align.add(toAlign)
                hasAligned = True
        if not hasAligned:
            # NOTE(review): %d here is the last triple index from the inner
            # loop, not a sentence number — the message wording looks
            # misleading; confirm the intended index.
            print "Alignment failure in sentence #%d: (%s,%s,%s,%s)"%(i,fromNode,edgeLabel,toNode,toAlign)
    return mrt
def get_unverified_jwt_claims(encoded_token):
    """
    Returns the claims (payload) of an encoded JWT without verifying the actual signature of the JWT.

    Note: The signature is not verified, so the returned claims
    should not be fully trusted until signature verification is complete.

    :param encoded_token: The encoded JWT to get the claims from.
    :return: JWT claims as python dict()
    """
    return jwt.get_unverified_claims(encoded_token)
def dos_orbitals(
    folder,
    orbitals,
    output='dos_orbitals.png',
    fill=True,
    alpha=0.3,
    linewidth=1.5,
    sigma=0.05,
    energyaxis='x',
    color_list=None,
    legend=True,
    total=True,
    figsize=(4, 3),
    erange=[-6, 6],
    spin='up',
    soc_axis=None,
    combination_method='add',
    fontsize=12,
    save=True,
    shift_efermi=0,
):
    """
    This function plots the orbital projected density of states.

    Parameters:
        folder (str): This is the folder that contains the VASP files
        orbitals (list): List of orbitals to compare

            | 0 = s
            | 1 = py
            | 2 = pz
            | 3 = px
            | 4 = dxy
            | 5 = dyz
            | 6 = dz2
            | 7 = dxz
            | 8 = dx2-y2
            | 9 = fy3x2
            | 10 = fxyz
            | 11 = fyz2
            | 12 = fz3
            | 13 = fxz2
            | 14 = fzx3
            | 15 = fx3

        output (str): File name of the resulting plot.
        fill (bool): Determines whether or not to fill underneath the plot
        alpha (float): Alpha value for the fill
        linewidth (float): Linewidth of lines
        sigma (float): Standard deviation for gaussian filter
        energyaxis (str): Determines the axis to plot the energy on ('x' or 'y')
        color_list (list): List of colors that is the same length as the number of projections
            in the plot.
        legend (bool): Determines whether to draw the legend or not
        total (bool): Determines whether to draw the total density of states or not
        spin (str): Which spin direction to parse ('up' or 'down')
        soc_axis: Currently unused here; accepted for interface compatibility.
        figsize (list / tuple): Desired size of the image in inches (width, height)
        erange (list): Energy range for the DOS plot ([lower bound, upper bound])
        combination_method (str): If spin == 'both', this determines if the spin up and spin down
            densities are added or subtracted. ('add' or 'sub')
        fontsize (float): Font size of the text in the figure.
        save (bool): Determines whether to automatically save the figure or not. If not,
            the figure and axis are returned for further manipulation.
        shift_efermi (float): Energy shift applied to the Fermi level when parsing.

    Returns:
        If save == True, this function will return nothing and directly save the image as
        the output name. If save == False, the function will return the matplotlib figure
        and axis for further editing.
    """
    # parse the DOS data; soc_axis is not forwarded (see note above)
    dos = Dos(shift_efermi=shift_efermi, folder=folder, spin=spin, combination_method=combination_method)
    fig = plt.figure(figsize=figsize, dpi=400)
    ax = fig.add_subplot(111)
    _figure_setup_dos(ax=ax, fontsize=fontsize, energyaxis=energyaxis)
    dos.plot_orbitals(
        ax=ax,
        orbitals=orbitals,
        fill=fill,
        alpha=alpha,
        linewidth=linewidth,
        sigma=sigma,
        energyaxis=energyaxis,
        color_list=color_list,
        legend=legend,
        total=total,
        erange=erange,
    )
    plt.tight_layout(pad=0.4)
    if save:
        plt.savefig(output)
    else:
        return fig, ax
from .ginzburg_landau import GinzburgLandau2Components
from .flory_huggins import FloryHuggins2Components
from .general import FreeEnergy
from typing import Union
def get_free_energy_single(
    free_energy: Union[str, FreeEnergyBase] = "ginzburg-landau"
) -> FreeEnergyBase:
    """get free energy for systems with a single effective component

    Args:
        free_energy (str or FreeEnergyBase):
            Either a string naming a common free energy expression
            ('ginzburg-landau' or 'flory-huggins'), or an instance of
            :class:`~phasesep.free_energies.base.FreeEnergyBase` that
            provides the local free energy density and chemical potentials.

    Returns:
        FreeEnergyBase: An instance of
        :class:`~phasesep.free_energies.base.FreeEnergyBase` that
        represents the free energy
    """
    # resolve string shortcuts; anything else is assumed to be an instance
    if isinstance(free_energy, str):
        if free_energy == "ginzburg-landau":
            f_local: FreeEnergyBase = GinzburgLandau2Components()
        elif free_energy == "flory-huggins":
            f_local = FloryHuggins2Components()
        else:
            raise ValueError(f"Free energy `{free_energy}` is not defined")
    else:
        f_local = free_energy

    # consistency checks: exactly one effective component is supported
    if f_local.dim != 1:
        raise ValueError(f"Too many components ({f_local.dim})")
    if isinstance(f_local, FreeEnergy) and not f_local.squeeze_dims:
        raise ValueError(
            "Free energy for single component must have `squeeze_dims=True`."
        )
    return f_local
import sys
import termios
import tty
def unbuffered_input():
    """Read a single character from stdin, without waiting for the enter key.

    Adapted from http://code.activestate.com/recipes/134892/
    """
    stdin_fd = sys.stdin.fileno()
    saved_attrs = termios.tcgetattr(stdin_fd)
    try:
        # cbreak mode delivers characters immediately instead of line-buffering
        tty.setcbreak(stdin_fd)
        char = sys.stdin.read(1)
    finally:
        # always restore the terminal settings, even if the read raised
        termios.tcsetattr(stdin_fd, termios.TCSADRAIN, saved_attrs)
    # Echo the character ourselves (to emulate what buffered input would do)
    print(char)
    return char
def convert_bin_to_text(bin_str: str) -> str:
    """Convert a string of binary to text.

    Parameters:
    -----------
    bin_str:
        string: A string of binary, terminating with 00000000.

    Returns:
    --------
    text:
        string: A plaintext representation of the binary string.
    """
    # Number of characters, less one for the terminating 00000000 byte.
    # (// replaces the original int(len/8) float round-trip.)
    num_chars = len(bin_str) // 8 - 1
    # Bug fix: removed a leftover debug print(bin_str) statement.
    chars = []
    for i in range(num_chars):
        # each 8-bit chunk encodes one ASCII code point
        ascii_val = int(bin_str[i * 8:(i + 1) * 8], 2)
        chars.append(chr(ascii_val))
    # join once instead of quadratic string concatenation
    return "".join(chars)
import select
def _retrieve_transaction_type(t_type: str, connection) -> RowProxy:
    """ Retrieves Transaction Type

    Args:
        t_type (str): The transaction type that represents the transaction being recorded, 'archive' or 'compress'.
        connection: Open database connection used to execute the query.

    Returns:
        RowProxy: The transaction_type row (or None if no row matches).
    """
    # NOTE(review): `select` here must be SQLAlchemy's select(); the visible
    # `import select` above pulls in the stdlib select module instead —
    # confirm the real import at the top of the module.
    transaction = select([transaction_type.c.type_id]).where(transaction_type.c.type == t_type)
    rp = connection.execute(transaction)
    # first() returns the first row or None for an empty result
    record = rp.first()
    return record
def myDijkstra(graph, source, start, end):
    """
    Implements Dijkstra's single source shortest path algorithm
    for a directed graph

    Parameters:
        graph: the graph we are working on
        source (int): the vertex chosen as source
        start (string): beginning date in format "MM/YYYY"
        end (string): ending date in format "MM/YYYY"

    Returns:
        prev (dict): a dictionary with vertex as key and last previous vertex in the path from the source as value
                     -1 if it is not reachable
        dist (dict): a dictionary with vertex as key and the total distance from the source as value
    """
    # dates from string to int
    start = convertDate(start)
    end = convertDate(end)

    visited = set()
    unvisited = set(graph.keys())
    dist = dict()
    prev = dict()

    # initial values: infinite distance, -1 (unreachable) previous vertex
    for u in unvisited:
        dist[u] = float('inf')
        prev[u] = -1
    dist[source] = 0
    # mark source as visited
    visited.add(source)

    # Bug fix: the original loop condition referenced `neighbor` before it was
    # ever assigned (NameError the first time `unvisited` became empty, and a
    # crash in getMinUnvisited/remove when it kept looping on an empty set).
    # Iterating while unvisited vertices remain is the standard, safe
    # termination condition for Dijkstra.
    while unvisited:
        current_node = getMinUnvisited(unvisited, dist)
        unvisited.remove(current_node)
        visited.add(current_node)
        neighbor = getNeighbors(current_node, graph, start, end)
        for u in unvisited.intersection(neighbor.keys()):
            # relax the edge if it improves the known distance
            new_dist = dist[current_node] + neighbor[u]
            if new_dist < dist[u]:
                dist[u] = new_dist
                prev[u] = current_node
    return prev, dist
def scale_on_x_list(x_list, scaler):
    """Apply ``scaler.transform`` to each ndarray in *x_list*.

    :param x_list: list of arrays to scale
    :param scaler: object exposing a ``transform(array)`` method
    :return: new list with every element transformed
    """
    return list(map(scaler.transform, x_list))
from .model_store import get_model_file
import os
def get_centernet(backbone,
                  backbone_out_channels,
                  classes,
                  model_name=None,
                  pretrained=False,
                  ctx=cpu(),
                  root=os.path.join("~", ".mxnet", "models"),
                  **kwargs):
    """
    Create CenterNet model with specific parameters.

    Parameters
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int
        Number of output channels for the backbone.
    classes : int
        Number of classes.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns
    -------
    HybridBlock
        A network.
    """
    # fixed channel widths of the CenterNet head
    head_channels = [256, 128, 64]
    net = CenterNet(
        backbone=backbone,
        backbone_out_channels=backbone_out_channels,
        channels=head_channels,
        classes=classes,
        **kwargs)
    if pretrained:
        # both None and the empty string are invalid model names
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        weights_file = get_model_file(model_name=model_name,
                                      local_model_store_dir_path=root)
        net.load_parameters(filename=weights_file, ctx=ctx)
    return net
def calc_distance(origin, destination):
    """
    title::
        calc_distance

    description::
        Great-circle (haversine) distance in kilometres between two
        (latitude, longitude) points on a sphere.

    author::
        Stackoverflow User: user2514381
        https://stackoverflow.com/questions/17273120/distance-calculation-in-python-for-google-earth-coordinates
    """
    lat1, lon1 = origin
    lat2, lon2 = destination
    earth_radius_km = 6371

    half_dlat = radians(lat2 - lat1) / 2
    half_dlon = radians(lon2 - lon1) / 2

    # haversine formula
    a = (sin(half_dlat) * sin(half_dlat) +
         cos(radians(lat1)) * cos(radians(lat2)) * sin(half_dlon) * sin(half_dlon))
    central_angle = 2 * atan2(sqrt(a), sqrt(1 - a))
    return earth_radius_km * central_angle
import tokenizers
def tokenize(string, tokenizer=tokenizers.keras):
    """
    Tokenizes a string using the selected tokenizer.

    :param string: the string to tokenize
    :param tokenizer: which tokenizer to use (nltk or keras)
    :return: the list of tokens
    :raises NotImplementedError: if the tokenizer is not recognized
    """
    # dispatch on the requested tokenizer; keras does not lower-case here
    # because text_to_word_sequence lower-cases internally
    if tokenizer == tokenizers.keras:
        return keras.preprocessing.text.text_to_word_sequence(string)
    if tokenizer == tokenizers.nltk:
        return nltk.word_tokenize(string.lower())
    raise NotImplementedError()
import math
def move_point(pt: XY, distance: float, degrees: float) -> XY:
    """
    Create a new point that is the original point moved by distance (m) in direction degrees.
    """
    # convert the bearing once, then project onto the x/y axes
    angle = math.radians(degrees)
    new_x = pt.x + distance * math.cos(angle)
    new_y = pt.y + distance * math.sin(angle)
    return XY(new_x, new_y)
from datetime import datetime, timezone

import jinja2
def _generate_follow_up(the_date, vms, template='delete_followup.html'):
    """Create the HTML email body stating what VMs were randomly deleted.

    :Returns: String

    :param the_date: The specific time (Unix timestamp) when vLab randomly deleted a user's VM(s).
    :type the_date: Integer

    :param vms: The names of the VM(s) deleted.
    :type vms: List

    :param template: Name of the Jinja2 template file to render.
    :type template: String
    """
    # _get_template_abs resolves the template name to an absolute path
    with open(_get_template_abs(template)) as the_file:
        template_data = the_file.read()
    # render with a timezone-aware datetime; requires `timezone` to be
    # imported from the datetime module alongside `datetime`
    message = jinja2.Template(template_data).render(the_date=datetime.fromtimestamp(the_date, timezone.utc),
                                                    vms=vms)
    return message
def find_empty_node(grid):
    """There should be one and only one empty node. Find it
    and return its location as a tuple."""
    # scan row by row; USED is a module-level index into each node record
    for x, row in enumerate(grid):
        for y, node in enumerate(row):
            if node[USED] == 0:
                return (x, y)
    raise Exception('No empty node found')
import hashlib
def md5(ori_str):
    """MD5 hash helper.

    :param ori_str: original string to hash
    :return: hex digest of the UTF-8 encoded input
    """
    return hashlib.md5(ori_str.encode("utf8")).hexdigest()
import getpass
def getuser() -> str:
    """
    Get the username of the current user.

    Thin wrapper that delegates to the ``getpass`` package.

    Returns:
        str: The username of the current user
    """
    return getpass.getuser()
import requests
def order_depth(type_id: int, region_id: int = 10000002, system_id: int = None, order_type: str = 'sell'):
    """
    Pulls the orders for a specified typeid in a region.

    Args:
        type_id: typeid to pull the market orders for
        region_id: the region the orders should be pulled from. the default region_id is the Forge
        system_id: (optional) limits the orders to the systemID they were placed in
        order_type: specifies what orders should be returned ('sell', 'buy' or 'all')

    Returns:
        DataFrame: the market orders, price-sorted ascending for 'sell',
            descending for 'buy', unsorted for 'all'
        NoneType: None if the response contains no orders

    Raises:
        ValueError: on invalid arguments or a non-200 response from ESI
    """
    if order_type not in ('sell', 'buy', 'all'):
        raise ValueError("You didn't specify a valid order type ('sell','buy','all')")
    if type_id not in invTypes['typeID'].values:
        raise ValueError(f"{type_id} is not a valid type_id")
    if region_id not in mapRegions['regionID'].values:
        raise ValueError(f"{region_id} is not a valid region_id")
    if system_id is not None and system_id not in mapSolarSystems['solarSystemID'].values:
        raise ValueError(f"{system_id} is not a valid system_id")

    base_url = (f"https://esi.evetech.net/latest/markets/{region_id}/orders/"
                f"?datasource=tranquility&order_type={order_type}&type_id={type_id}")
    r = requests.get(f"{base_url}&page=1")
    # Bug fix: check the status code *before* parsing the body — the original
    # built a DataFrame from a possibly-error payload first.
    if r.status_code != 200:
        raise ValueError(f"request returned status code {r.status_code}")
    orders = pd.DataFrame(r.json())
    if orders.empty:
        return None

    # ESI paginates; fetch the remaining pages, if any
    pages = int(r.headers['X-Pages'])
    for page in range(2, pages + 1):
        page_json = requests.get(f"{base_url}&page={page}").json()
        orders = orders.append(page_json, ignore_index=True)

    if system_id is not None:
        orders = orders.loc[orders['system_id'] == system_id]
    if order_type == 'sell':
        return orders.sort_values('price', ascending=True).reset_index(drop=True)
    if order_type == 'buy':
        return orders.sort_values('price', ascending=False).reset_index(drop=True)
    return orders
import ctypes
def get_normal_amps():
    """This parameter will deliver the normal ampere rating for the active PDElement.

    Calls ``dsslib.CktElementF`` (presumably the OpenDSS C interface — confirm)
    with function code 0 and a 0.0 argument; the return value comes straight
    from the library.
    """
    return dsslib.CktElementF(ctypes.c_int32(0), ctypes.c_double(0))
def cal_pipe_equivalent_length(tot_bui_height_m, panel_prop, total_area_module):
    """
    Calculate the equivalent length of piping in buildings.

    :param tot_bui_height_m: total height of buildings
    :type tot_bui_height_m: float
    :param panel_prop: properties of the solar panels
    :type panel_prop: dict
    :param total_area_module: total installed module area
    :type total_area_module: float
    :return: equivalent lengths of piping in buildings (m per m2 of aperture)
    :rtype: dict
    """
    module_length = panel_prop['module_length_m']
    aperture_area = total_area_module * panel_prop['aperture_area_ratio']
    # estimated number of installed modules
    number_modules = round(total_area_module / panel_prop['module_area_m2'])

    # pipe length within the collectors
    l_ext_mperm2 = 2 * module_length * number_modules / aperture_area
    # pipe length from the building substation up to the rooftop collectors
    l_int_mperm2 = 2 * tot_bui_height_m / aperture_area

    return {
        'Leq_mperm2': l_int_mperm2 + l_ext_mperm2,  # total, in m/m2 aperture
        'l_ext_mperm2': l_ext_mperm2,
        'l_int_mperm2': l_int_mperm2,
    }
def embedding_to_padding(maxlen, sequence_length):
    """ Calculates the padding mask based on `sequence_length`.

    Args:
        maxlen: The maximum sequence length.
        sequence_length: Length of each sequence in `emb`,
            a Tensor with shape [batch_size, ]

    Returns: A float Tensor with shape [batch_size, maximum_sequence_length],
        where 1.0 for padding and 0.0 for non-padding.
    """
    # tf.sequence_mask marks valid positions with 1.0, so inverting it leaves
    # 1.0 exactly on the padded tail of each sequence.
    # NOTE(review): tf.to_int32 is TF1-only API (removed in TF2) — confirm the
    # project pins TensorFlow 1.x, or migrate to tf.cast(..., tf.int32).
    seq_mask = 1. - tf.sequence_mask(
        lengths=tf.to_int32(sequence_length),
        maxlen=tf.to_int32(maxlen),
        dtype=tf.float32)  # 1.0 for padding
    return seq_mask
from typing import List
import tqdm
import csv
def csv_fat_cross_time(arrival_enum: ArrivalEnum,
                       list_number_servers: List[int],
                       perform_param: PerformParameter, opt_method: OptMethod,
                       mc_dist: MonteCarloDist, target_util: float) -> dict:
    """Chooses parameters by Monte Carlo type random choice.

    For each server count, draws random arrival/service parameters, compares
    the computation times of the two approaches, and appends the resulting
    time ratios to a CSV file.

    :return: dict mapping the "Number_of_servers" header and each server
        count to the measured time ratio.
    """
    total_iterations = 10**5
    time_ratio = {"Number_of_servers": "Ratio"}
    for number_servers in list_number_servers:
        print(f"number of servers = {number_servers}")
        # 1 Parameter for service
        param_array = mc_enum_to_dist(arrival_enum=arrival_enum,
                                      mc_dist=mc_dist,
                                      number_flows=number_servers,
                                      number_servers=number_servers,
                                      total_iterations=total_iterations)
        time_array = np.empty([total_iterations, 2])
        # Bug fix: the module does `import tqdm`, so the progress-bar callable
        # is tqdm.tqdm — calling the module itself raises TypeError.
        for i in tqdm.tqdm(range(total_iterations)):
            if arrival_enum == ArrivalEnum.DM1:
                arrive_list = [
                    DM1(lamb=param_array[i, j]) for j in range(number_servers)
                ]
            elif arrival_enum == ArrivalEnum.MMOOFluid:
                arrive_list = [
                    MMOOCont(mu=param_array[i, j],
                             lamb=param_array[i, number_servers + j],
                             peak_rate=param_array[i, 2 * number_servers + j])
                    for j in range(number_servers)
                ]
            else:
                raise NotImplementedError(f"Arrival parameter "
                                          f"{arrival_enum.name} is infeasible")
            service_list = [
                ConstantRateServer(
                    rate=param_array[i,
                                     arrival_enum.number_parameters() *
                                     number_servers + j])
                for j in range(number_servers)
            ]
            fat_cross_setting = FatCrossPerform(arr_list=arrive_list,
                                                ser_list=service_list,
                                                perform_param=perform_param)
            computation_necessary = True
            # skip settings whose utilization is outside the target range
            if target_util > 0.0:
                util = fat_cross_setting.approximate_utilization()
                if util < target_util or util > 1:
                    time_array[i, ] = np.nan
                    computation_necessary = False
            if computation_necessary:
                time_array[i, 0], time_array[i, 1] = compare_time(
                    setting=fat_cross_setting,
                    opt_method=opt_method,
                    number_l=number_servers - 1)
        print(
            time_array_to_results(arrival_enum=arrival_enum,
                                  time_array=time_array,
                                  number_servers=number_servers,
                                  time_ratio=time_ratio))
    filename = (f"time_{perform_param.to_name()}_{arrival_enum.name}"
                f"_{opt_method.name}.csv")
    # newline='' is required by the csv module so rows are not double-spaced
    # on platforms that translate line endings
    with open(filename, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        for key, value in time_ratio.items():
            writer.writerow([key, value])
    return time_ratio
def service_status() -> Response:
    """
    Service status endpoint.

    Returns ``200 OK`` if the service is up and ready to handle requests.
    """
    # controllers.service_status returns (payload, HTTP status code, headers)
    data, code, headers = controllers.service_status(request.params)
    response: Response = jsonify(data)
    response.status_code = code
    response.headers.extend(headers)  # type: ignore
    return response
import os
def handler500(request, exception=None, template_name='templates/500.html'):
    """500 Error Page Controller

    Renders the 500 error template with the application context.

    :param request: incoming HTTP request
    :param exception: optional exception that triggered the error page
    :param template_name: template to render (default 'templates/500.html')
    """
    controller = Controller()
    helpers = Helpers()
    logger = helpers.get_logger(__name__)

    if exception is not None:
        logger.error("Server Error: %(exception)s" % {
            "exception": exception
        })

    # Bug fix: the original unconditionally reassigned `template_name` to the
    # default here, making the parameter dead; the caller-supplied template is
    # now honored.
    controller.autoload_options()
    controller.context_push({
        "page_title": _("500 · %s") % controller.context_get("app_name", os.getenv("APP_NAME", "Silverback"))
    })

    return render(request, template_name, controller.context_get(), status=500)
def gromov_wasserstein2(C1, C2, p, q, loss_fun, epsilon,
                        max_iter=1000, tol=1e-9, verbose=False, log=False):
    r"""
    Returns the gromov-wasserstein discrepancy between the two measured similarity matrices
    (C1,p) and (C2,q)

    The function solves the following optimization problem:

    .. math::
        \GW_Dist = \min_T \sum_{i,j,k,l} L(C1_{i,k},C2_{j,l})*T_{i,j}*T_{k,l}-\epsilon(H(T))

    Where :

        C1 : Metric cost matrix in the source space
        C2 : Metric cost matrix in the target space
        p : distribution in the source space
        q : distribution in the target space
        L : loss function to account for the misfit between the similarity matrices
        H : entropy

    Parameters
    ----------
    C1 : ndarray, shape (ns, ns)
        Metric cost matrix in the source space
    C2 : ndarray, shape (nt, nt)
        Metric cost matrix in the target space
    p : ndarray, shape (ns,)
        distribution in the source space
    q : ndarray, shape (nt,)
        distribution in the target space
    loss_fun : string
        loss function used for the solver either 'square_loss' or 'kl_loss'
    epsilon : float
        Regularization term >0
    max_iter : int, optional
        Max number of iterations
    tol : float, optional
        Stop threshold on error (>0)
    verbose : bool, optional
        Print information along iterations
    log : bool, optional
        record log if True

    Returns
    -------
    gw_dist : float
        Gromov-Wasserstein distance
    """
    # solve for the optimal coupling first, then evaluate the objective on it
    if log:
        gw, logv = gromov_wasserstein(
            C1, C2, p, q, loss_fun, epsilon, max_iter, tol, verbose, log)
    else:
        gw = gromov_wasserstein(C1, C2, p, q, loss_fun,
                                epsilon, max_iter, tol, verbose, log)
    if loss_fun == 'square_loss':
        gw_dist = np.sum(gw * tensor_square_loss(C1, C2, gw))
    elif loss_fun == 'kl_loss':
        gw_dist = np.sum(gw * tensor_kl_loss(C1, C2, gw))
    if log:
        return gw_dist, logv
    else:
        return gw_dist
import copy
def parseOptions(json_options):
    """Parse the raw json options.

    Expands parameter values into ranges and fills in missing information
    that can be inferred from other values.

    Returns the parsed options as a dictionary.
    """
    parsed = dict(json_options)

    # deep-copy the algorithm list so the caller's structure is never mutated
    if "algorithms" in json_options:
        algorithms = copy.deepcopy(json_options["algorithms"])
    else:
        algorithms = list()

    # drop algorithms explicitly marked as ignored
    algorithms = [alg for alg in algorithms
                  if not ("ignore" in alg and alg["ignore"] == "true")]

    for alg in algorithms:
        exe_name = extractName(alg["executable"])
        # check (or infer) positional/named argument types
        if "parameters_type" in alg:
            checkParameterTypes(alg["parameters_location"], alg["parameters_type"], exe_name)
        else:
            alg["parameters_type"] = checkParameterTypes(alg["parameters_location"], alg=exe_name)

        option = dict()
        for key in alg["parameters_location"]:
            # per-algorithm values win over the shared common values
            if key in alg["parameters_values"]:
                option[key] = parseParameterValues(alg["parameters_values"][key])
            elif "common_parameter_values" in json_options and key in json_options["common_parameter_values"]:
                option[key] = parseParameterValues(json_options["common_parameter_values"][key])
            else:
                raise ConfigException("Key '{}' not found in \"parameters_values\" or \"common_parameters_values\"".format(key))
        alg["parameters_values"] = option

    parsed["algorithms"] = algorithms
    return parsed
def numeric_type(num):
    """Verify that a value is given as a numeric data type.

    Return the number if the type is sensible or raise ValueError if not.
    ``None`` is treated as 0.

    :param num: value to validate; int, float, or None
    :return: the validated number (0 when *num* is None)
    :raises ValueError: if *num* is neither a number nor None
    """
    if num is None:
        return 0
    # Bug fix: the original also tested the Python-2-only `long` type, which
    # raises NameError on Python 3; int covers arbitrary precision there.
    if not isinstance(num, (int, float)):
        raise ValueError("value (%s) must be either a number or None" % num)
    return num
from typing import Union
async def load(payload: None, context: EventContext, *,
               item_id: str, update_status: bool = False) -> Union[Something, SomethingNotFound]:
    """
    Loads json file from redis as `Something` instance

    :param payload: unused
    :param context: EventContext
    :param item_id: str, item id to load
    :param update_status: unused flag, accepted for interface compatibility
    :return: Loaded `Something` object, or `SomethingNotFound` if the key is
        missing or validation fails
    """
    assert redis  # connection must have been initialized elsewhere
    # NOTE(review): a random suffix is appended to the requested id before the
    # lookup — this looks like demo/load-test behavior; confirm it is intended.
    my_id = item_id + str(randrange(0, 1999))
    logger.info(context, "load", extra=extra(something_id=my_id))
    something = await redis.get(key=my_id, datatype=Something)
    if something is None:
        logger.warning(context, "item not found", extra=extra(something_id=my_id))
        return SomethingNotFound(str('redis'), my_id)
    return something
def format_parameters(section):
    """Format the "Parameters" section.

    Each item in *section* is an iterable of three strings
    (name, type, description); surrounding whitespace is stripped from
    every field before formatting.
    """
    entries = [
        ' - **{0}**: *{1}*\n {2}'.format(*(field.strip() for field in item))
        for item in section
    ]
    return '**Parameters**\n\n{0}'.format('\n\n'.join(entries))
def get_kinds(cell, mf, kpts, tol=1e-6):
    """Given a list of kpts, return inds such that mf.kpts[inds] is a list of kpts equivalent to the input list"""
    # pairwise differences between every mf k-point and every requested k-point
    deltas = mf.kpts[np.newaxis] - kpts[:, np.newaxis]
    # convert to fractional coordinates of the reciprocal lattice
    frac = np.dot(deltas, cell.lattice_vectors().T) / (2 * np.pi)
    # wrap into [-0.5, 0.5) so lattice-translation-equivalent points compare equal
    wrapped = np.mod(frac + 0.5, 1) - 0.5
    return np.nonzero(np.linalg.norm(wrapped, axis=-1) < tol)[1]
from typing import List
def restore_checkpoints(
    models: List[tf.keras.Model], ckpt_dir: str) -> tf.keras.Model:
    """Restores weights from the checkpoint.

    Loads one checkpoint per attribute (the first two entries of ATTRIBUTES)
    into the corresponding model; a model whose checkpoint cannot be read is
    left untouched.

    :param models: models to restore, aligned with the attribute order.
    :param ckpt_dir: directory containing ``<attr>.ckpt`` files.
    :return: the (possibly partially restored) models.
    """
    attr_names = list(ATTRIBUTES.keys())
    # only the first two attributes carry checkpoints here
    for i in range(2):
        attr_name = attr_names[i]
        print("Restoring weights for attribute %s" % attr_name)
        ckpt_path = opj(ckpt_dir, "%s.ckpt" % attr_name)
        # Bug fix: the bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; catch only ordinary exceptions from the restore attempt.
        try:
            models[i].load_weights(ckpt_path).expect_partial()
        except Exception:
            print("Could not restore weights from %s" % ckpt_path)
    return models
def get_schema_piece(content_piece, uniprot_to_dcid):
    """Generate the MCF nodes (antibody + antigen) for one database record.

    Args:
        content_piece: one record as newline-separated "CODE  content" lines,
            for example:
                AAC  ABCD_AU181
                BIT  Nanobody
                AID  anti-SARS-CoV-2 Nb
                TTY  Protein
                TGP  UniProt:P0DTC2
                TDE  S, Spike protein, Spike glycoprotein
                TPE  Receptor-binding domain (RBD)
                AAP  ELISA, Flow cytometry, Immunofluorescence, Immunohistochemistry,
                Immunoprecipitation, Surface plasmon resonance, Western blot, X-ray crystallography
                ADR  PDB:6ZCZ
                ARX  DOI:10.1101/2020.06.12.148387
                //
        uniprot_to_dcid: mapping used by get_antigen_type to resolve the
            antigen target.
    Returns:
        a data mcf string, or None when the antigen type cannot be resolved
    """
    lines = content_piece.split('\n')
    # code and the meaning map:
    # {'AAC': 'Accession',
    # 'BIT': 'Antibody type',
    # 'AID': 'Identifier',
    # 'ASY': 'Synonyms',
    # 'TTY': 'Target type',
    # 'TGP': 'Target Protein',
    # 'TGC': 'Target Chemical',
    # 'TGO': 'Target Others',
    # 'TDE': 'Target description',
    # 'TPE': 'Epitope region',
    # 'AAP': 'Applications',
    # 'ACC': 'General comments',
    # 'DEP': 'Deposited by',
    # 'ADR': 'Cross-references',
    # 'ARX': 'References ID'}
    info = {}
    # line example:
    # AAC  ABCD_AU181
    for line in lines:
        # code is the first 3 chars
        # content and code are separated by two spaces
        code = line[:3]
        content = line[5:]
        info[code] = content
    antigen_type, recognizesAntigen = get_antigen_type(info, uniprot_to_dcid)
    if not antigen_type:
        # record cannot be mapped to a known antigen — skip it entirely
        return None
    # if there is antibody type information
    if 'BIT' in info:
        antibody_type_schema = get_antibody_type(info['BIT'])
    else:
        antibody_type_schema = ''
    name = get_class_name(info['AID'])
    reference_mcf = get_reference_mcf(info)
    # create the mcf for the antibody
    mcf_list = []
    mcf_list.append('Node: dcid:bio/' + name)
    mcf_list.append('typeOf: dcs:Antibody')
    mcf_list.append('name: "' + name + '"')
    mcf_list.append('alternateName: ' + info['AID'])
    if antibody_type_schema:
        mcf_list.append(antibody_type_schema)
    mcf_list.append('antigenType: ' + antigen_type)
    mcf_list.append('recognizesAntigen: ' + recognizesAntigen)
    mcf_list.append('abcdID: "' + info['AAC'] + '"')
    # if information map has application attribute
    if 'AAP' in info:
        mcf_list.append('antibodyApplication: ' + info['AAP'])
    mcf_list.append(reference_mcf)
    # create the mcf for antigen
    mcf_list.append('Node: dcid:bio/antigen_' + name)
    mcf_list.append('typeOf: dcs:Antigen')
    mcf_list.append('subClassOf: dcs:bio/' + name)
    mcf_list.append('name: "antigen_' + name + '"')
    mcf_list.append('antigenType: ' + antigen_type)
    # if there is epitope info
    if 'TPE' in info:
        mcf_list.append('epitope: "' + info['TPE'] + '"')
    return '\n'.join(mcf_list) + '\n'
import copy
def editViewData(uniqueValue):
    """Edit the source service data for the current unique value"""
    # deep-copy so the module-level source data is never mutated
    viewData = copy.deepcopy(_sourceServiceData)
    # point the layer definition at the current unique value
    # (assumes a string-typed field)
    viewData["layers"][0]["layerDefinition"]["definitionExpression"] = f"{_uniqueValueField} = '{uniqueValue}'"
    return viewData
import sys as _sys
def _add_dll_dir():
    r"""
    On windows for Python 3.8 or later, we have to add the bin directory to the search path for DLLs
    Because python will no longer use the PATH environment variable to find dlls.
    We assume here that this file is in $(RELEASE_DIR)\lib\python\htcondor and that the
    bin directory is relative to it at $(RELEASE_DIR)\bin
    """
    if _platform.system() in ["Windows"]:
        if _sys.version_info >= (3,8):
            bin_path = _path.realpath(_path.join(__file__,r'..\..\..\..\bin'))
            # add_dll_directory returns a handle that works as a context manager
            return _os.add_dll_directory(bin_path)
    # NOTE(review): memoryview(b'') appears to be an inert placeholder — a
    # memoryview supports the context-manager protocol (release()), so callers
    # can use the return value uniformly in a `with` block; confirm intent.
    return memoryview(b'')
import functools
def _borg_pod_set_with_safe_self_access(wrapped_method):
    """
    Wrap a @assimilate-decorated class's __setattr__ so that relevant instance
    methods assigned at runtime are also run through the self.queen injection
    wrapper.

    :param Function wrapped_method: The class's original __setattr__ method.
    :rtype: Function
    :return: The decorated __setattr__ method.
    """
    @functools.wraps(wrapped_method)
    def guarded_setattr(self, attribute, value):
        # only wrap values that need protected self access (e.g. new methods)
        if _should_protect_self_access(attribute, value):
            value = _safe_self_access_decorator(value)
        wrapped_method(self, attribute, value)
    return guarded_setattr
from typing import Union
from typing import Iterable
from typing import Any
def prepend(catch: Union[type, tuple[type]], *values: Iterable[Union[Any, Iterable[Any]]]):
    """
    Return a context manager that catches exception(s), prepends value(s) to the exception's
    message (first argument) and reraises the exception.

    Parameters:
    • catch: exception class or classes to catch
    • values: string values to prepend to exception's message
    """
    # Delegates to the shared helper; presumably the False flag selects
    # "prepend" (vs. append) behavior in _pend — confirm against its sibling.
    return _pend(False, catch, *values)
import hashlib
def sha256(message):
    """
    Returns the hexadecimal representation of the SHA256 hash digest.

    :param message: input accepted by the module-level ``to_bytes`` helper
        (defined elsewhere in this module).
    :return: 64-character lowercase hex digest string.
    """
    return hashlib.sha256(to_bytes(message)).hexdigest()
def _grouprule_aggs_filter(having, columns):
"""
Given (having) conditions, return what to filter on as a string, to be used
after groupbys as grouped.query(string returned by this function).
:param having:
:type having: list
:param columns: Columns on which the group by is made.
:type columns: list
:return: String to be used on a df.query to filter based on the "having" conditions.
:rtype: str
"""
# add first condition
cond = having[0]
operator_map = dict()
operator_map["gt"] = ">"
operator_map["lt"] = "<"
operator_map["eq"] = "=="
first_operator = cond["operator"]
if cond["aggregator"] == "count" and cond["column"] == "*":
result = "_groupby_agg_%s_%s %s %s" % (
columns[0], "size", operator_map[first_operator], cond["value"])
else:
result = "_groupby_agg_%s_%s %s %s" % (
cond["column"], cond["aggregator"], operator_map[first_operator], cond["value"])
# add the rest
for cond in having[1:]:
operator = cond["operator"]
if cond["aggregator"] == "count" and cond["column"] == "*":
result = result + " and _groupby_agg_%s_%s %s %s" % (
columns[0], "size", operator_map[operator], cond["value"])
else:
result = result + " and _groupby_agg_%s_%s %s %s" % (
cond["column"], cond["aggregator"], operator_map[operator], cond["value"])
return result | 86243383bc3bd6f66751effe275ffaa0c34edf5e | 3,631,937 |
def _trim(s):
    """ Trim long string to LOG_ENTRY_MAX_STRING(+3) length """
    # Non-strings and short strings pass through unchanged.
    if isinstance(s, str) and len(s) >= LOG_ENTRY_MAX_STRING:
        return s[:LOG_ENTRY_MAX_STRING] + '...'
    return s
def get_id_or_name(value, model):
    """Returns the id or name of a model instance from value. If a number or a
    string is supplied, a check will be made to make sure it exists in the
    data store.
    """
    if not issubclass(model, db.Model):
        raise TypeError('Invalid type (model); expected subclass of Model.')
    # A model instance carries its own key.
    if isinstance(value, model):
        return value.key().id_or_name()
    # Raw ids/names are validated against the data store first.
    if isinstance(value, (basestring, int, long)):
        if entity_exists(value, model):
            return value
        return None
    raise TypeError('Invalid type (value); expected number, string or '
                    '%s.' % model.__name__)
def validate_query_handler(query_string):
    """Verify the input query is some level of valid, right now it does not
    check the value sent, just the key.
    Currently only support one key, value pair -- but does verify this is
    supplied."""
    # likely throws an exception on parse error.
    query_dict = parse_qs(query_string, keep_blank_values=True)
    # user_id, since_id, and query
    if len(query_dict) <= 3:
        # currently, this only supports the notion of one query available.
        for key, handler in SUPPORTED_QUERIES.items():
            if key in query_dict:
                return handler, query_dict
    return QUERY_INVALID, None
from datetime import datetime
import time
def makevalue(t, value):
    """Get value of ctypes-compatible value in XDWAPI-compatible type.

    Converts *value* to the Python type matching the normalized XDWAPI
    attribute type *t* (int, str, datetime.date or bool); unrecognized
    types are returned unchanged.
    """
    # Local import: the enclosing record only does ``from datetime import
    # datetime``, under which ``datetime.date`` is the instance method —
    # ``datetime.date.fromtimestamp`` raised AttributeError (bug fix).
    from datetime import date
    t = XDW_ATTRIBUTE_TYPE.normalize(t)
    if t == XDW_ATYPE_INT:
        return int(value)
    elif t == XDW_ATYPE_STRING:
        return str(value)
    elif t == XDW_ATYPE_DATE:
        # Shift by the local timezone offset so the stored UTC timestamp maps
        # to the intended calendar date (original behavior preserved).
        return date.fromtimestamp(value + time.timezone)
    elif t == XDW_ATYPE_BOOL:
        return bool(value)
    return value
from typing import Optional
def parse_directive_definition(
    directive_definition_node: "DirectiveDefinitionNode",
    schema: "GraphQLSchema",
) -> Optional["GraphQLDirective"]:
    """
    Computes an AST directive definition node into a GraphQLDirective instance.
    :param directive_definition_node: AST directive definition node to treat
    :param schema: the GraphQLSchema instance linked to the engine
    :type directive_definition_node: DirectiveDefinitionNode
    :type schema: GraphQLSchema
    :return: the GraphQLDirective instance
    :rtype: Optional[GraphQLDirective]
    """
    if not directive_definition_node:
        return None
    locations = [loc.value for loc in directive_definition_node.locations]
    # NOTE(review): the description is parsed with parse_name, mirroring the
    # name field — confirm this is intentional (vs. a description parser).
    directive = GraphQLDirective(
        name=parse_name(directive_definition_node.name, schema),
        description=parse_name(directive_definition_node.description, schema),
        locations=locations,
        arguments=parse_arguments_definition(
            directive_definition_node.arguments, schema
        ),
    )
    schema.add_directive_definition(directive)
    return directive
def _find_literal(s, start, level, parts, exprs):
"""Roughly Python/ast.c:fstring_find_literal"""
i = start
parse_expr = True
while i < len(s):
ch = s[i]
if ch in ("{", "}"):
if level == 0:
if i + 1 < len(s) and s[i + 1] == ch:
i += 2
parse_expr = False
break
elif ch == "}":
raise SyntaxError("f-string: single '}' is not allowed")
break
i += 1
parts.append(s[start:i])
return i, parse_expr and i < len(s) | 39e7d97f8aa4bfcd79af00359395605c5910985c | 3,631,943 |
import collections
def evaluate(ref_intervals, ref_labels, est_intervals, est_labels, **kwargs):
    """Compute all metrics for the given reference and estimated annotations.
    Examples
    --------
    >>> (ref_intervals,
    ...  ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
    >>> (est_intervals,
    ...  est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
    >>> scores = mir_eval.segment.evaluate(ref_intervals, ref_labels,
    ...                                    est_intervals, est_labels)
    Parameters
    ----------
    ref_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    ref_labels : list, shape=(n,)
        reference segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    est_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    est_labels : list, shape=(m,)
        estimated segment labels, in the format returned by
        :func:`mir_eval.io.load_labeled_intervals`.
    kwargs
        Additional keyword arguments which will be passed to the
        appropriate metric or preprocessing functions.
    Returns
    -------
    scores : dict
        Dictionary of scores, where the key is the metric name (str) and
        the value is the (float) score achieved.
    """
    # Adjust timespan of estimations relative to ground truth
    ref_intervals, ref_labels = \
        util.adjust_intervals(ref_intervals, labels=ref_labels, t_min=0.0)
    est_intervals, est_labels = \
        util.adjust_intervals(est_intervals, labels=est_labels, t_min=0.0,
                              t_max=ref_intervals.max())
    # Now compute all the metrics
    scores = collections.OrderedDict()
    # Boundary detection
    # Force these values for window
    # NOTE: kwargs is mutated in place here, so any caller-supplied 'window'
    # value is deliberately overridden for the two detection passes below.
    kwargs['window'] = .5
    scores['Precision@0.5'], scores['Recall@0.5'], scores['F-measure@0.5'] = \
        util.filter_kwargs(detection, ref_intervals, est_intervals, **kwargs)
    kwargs['window'] = 3.0
    scores['Precision@3.0'], scores['Recall@3.0'], scores['F-measure@3.0'] = \
        util.filter_kwargs(detection, ref_intervals, est_intervals, **kwargs)
    # Boundary deviation
    scores['Ref-to-est deviation'], scores['Est-to-ref deviation'] = \
        util.filter_kwargs(deviation, ref_intervals, est_intervals, **kwargs)
    # Pairwise clustering
    (scores['Pairwise Precision'],
     scores['Pairwise Recall'],
     scores['Pairwise F-measure']) = util.filter_kwargs(pairwise,
                                                        ref_intervals,
                                                        ref_labels,
                                                        est_intervals,
                                                        est_labels, **kwargs)
    # Rand index
    scores['Rand Index'] = util.filter_kwargs(rand_index, ref_intervals,
                                              ref_labels, est_intervals,
                                              est_labels, **kwargs)
    # Adjusted rand index
    scores['Adjusted Rand Index'] = util.filter_kwargs(ari, ref_intervals,
                                                       ref_labels,
                                                       est_intervals,
                                                       est_labels, **kwargs)
    # Mutual information metrics
    (scores['Mutual Information'],
     scores['Adjusted Mutual Information'],
     scores['Normalized Mutual Information']) = \
        util.filter_kwargs(mutual_information, ref_intervals, ref_labels,
                           est_intervals, est_labels, **kwargs)
    # Conditional entropy metrics
    scores['NCE Over'], scores['NCE Under'], scores['NCE F-measure'] = \
        util.filter_kwargs(nce, ref_intervals, ref_labels, est_intervals,
                           est_labels, **kwargs)
    return scores
import argparse
def parse_arguments(args=None):
    """Arguments parsing.

    :param args: optional list of argument strings; defaults to
        ``sys.argv[1:]`` (standard argparse behaviour). Accepting an
        explicit list keeps the function testable without touching sys.argv.
    :return: the parsed :class:`argparse.Namespace`.
    """
    parser = argparse.ArgumentParser("my_agent", description="Launch my agent.")
    parser.add_argument("--name", default="my_agent", help="Name of the agent")
    parser.add_argument(
        "--oef-addr", default="127.0.0.1", help="TCP/IP address of the OEF Agent"
    )
    parser.add_argument(
        "--oef-port", default=10000, help="TCP/IP port of the OEF Agent"
    )
    parser.add_argument(
        "--agent-timeout",
        type=float,
        default=1.0,
        help="The time in (fractions of) seconds to time out an agent between act and react.",
    )
    parser.add_argument(
        "--private-key-pem",
        default=None,
        help="Path to a file containing a private key in PEM format.",
    )
    parser.add_argument(
        "--expected-version-id",
        type=str,
        # Typo fix in user-facing help text: "epected" -> "expected".
        help="The expected version id of the TAC.",
        default="tac_v1",
    )
    return parser.parse_args(args)
import re
def amex(value):
    """
    Return whether or not given value is a valid American Express card number.
    Examples::
        >>> amex('378282246310005')
        True
        >>> amex('4242424242424242')
        ValidationFailure(func=amex, args={'value': '4242424242424242'})
    .. versionadded:: 0.15.0
    :param value: American Express card number string to validate
    """
    # Amex numbers are 15 digits starting with 34 or 37; a tuple prefix test
    # replaces the per-call regex compilation (same truthiness for callers).
    return card_number(value) and len(value) == 15 and value.startswith(('34', '37'))
def rel_multihead_attn(q, k, v, pos_enc, seg_mat, attn_mask, d_model, n_head,
                       d_head, dropout, dropatt, is_training, initializer,
                       attn_bias=None, func_mask=None, scope="rel_attn",
                       reuse=None, rel_attn_type="factorized",
                       name='rel_attn'):
    """Multi-head attention with relative positional encoding.

    Runs the relative-position attention core over (q, k, v), projects the
    result back to ``d_model``, applies dropout, and finishes with a residual
    connection plus layer normalization around ``q``.

    Returns:
        output: attention output tensor (model dim ``d_model``).
        ret_dict: dict of monitoring tensors collected from the sub-steps.
    """
    ret_dict = {}
    with tf.variable_scope(scope, reuse=reuse) as scope:
        # attention core: relative-position attention (type selected by
        # ``rel_attn_type``, e.g. "factorized").
        attn_vec, attn_core_dict = rel_attn_core(
            d_model, n_head, d_head, q, k, v, pos_enc, seg_mat, attn_mask,
            attn_bias, dropatt, is_training, initializer, func_mask=func_mask,
            rel_attn_type=rel_attn_type)
        # post projection: merge the [n_head, d_head] dims back to d_model
        attn_out = dense(attn_vec, d_model, initializer=initializer,
                         inp_shape=[n_head, d_head], scope="o")
        attn_out = dropout_op(attn_out, dropout, training=is_training, name=name+"/rel_multihead_attn")
        # residual + layer normalization (residual is taken against ``q``)
        output, post_dict = residual_and_layer_norm(q, attn_out,
                                                    norm_shape=d_model)
        # things to monitor
        ret_dict = update_ret_dict(ret_dict, attn_core_dict)
        ret_dict = update_ret_dict(ret_dict, post_dict)
    return output, ret_dict
import os
import subprocess
def validate_move(main_prefix: str, original_path: str, new_file: str) -> bool:
    """Checks that a given file exists at the location in
    the main bucket and no longer exists in the upload bucket.

    :param main_prefix: GCS bucket/prefix the file was moved into.
    :param original_path: full ``gs://`` path the file was uploaded to.
    :param new_file: file name expected under ``<main_prefix>/batch0/``.
    Returns True if this is the case and False otherwise.
    """
    main_path = os.path.join('gs://', main_prefix, 'batch0', new_file)
    # ``gsutil -q stat`` returns exit code 0 iff the object exists.
    exists_main = subprocess.run(['gsutil', '-q', 'stat', main_path], check=False)
    exists_upload = subprocess.run(['gsutil', '-q', 'stat', original_path], check=False)
    # Exists at destination and not at source
    return exists_upload.returncode != 0 and exists_main.returncode == 0
def incident_created_modal_payload(pd_api_response):
    """Return the Slack Block Kit payload for the "Incident created" modal"""
    escaped_summary = slack_escape(pd_api_response["summary"])
    link = pd_api_response["html_url"]
    # Single section block linking to the newly created incident.
    message_block = {
        "type": "section",
        "text": {
            "type": "mrkdwn",
            "text": f"*The incident <{link}|{escaped_summary}> was successfully created*",
        },
    }
    return {
        "response_action": "update",
        "view": {
            "type": "modal",
            "title": {"type": "plain_text", "text": "Success"},
            "close": {"type": "plain_text", "text": "Close"},
            "blocks": [message_block],
        },
    }
import re
def split_name(package: str):
    """Split a package string into its name and version specification.

    The version is the first occurrence of ``-X.Y.Z`` (optionally followed by
    a release suffix, e.g. ``-4.el7``); everything before it is the name.

    :param package: e.g. ``"my-pkg-2.10.1-3.el7"``
    :return: ``(name, version)`` tuple, e.g. ``("my-pkg", "2.10.1-3.el7")``
    """
    # Raw string avoids invalid-escape warnings; a single search replaces the
    # original's separate re.search + re.split passes over the same pattern.
    version_re = re.compile(r'-\d{1,10}\.\d{1,10}\.\d{1,10}-?.{0,50}')
    match = version_re.search(package)
    pkg_name = package[:match.start()]
    version = match.group(0)[1:]  # drop the leading '-'
    return (pkg_name, version)
def buchdahl_find_alpha(wv, indices, wv_center, n_center, order=3, gtol=1.0e-9):
    """
    Find the Buchdahl alpha parameter which gives a refractive index versus omega curve that is closest to a straight line.
    Parameters
    ----------
    wv : array of float
        Wavelengths at which the refractive index data is provided. Units assumed nm if wv > 100.0.
    indices : array of float, same length as wv
        Refractive indices at the specified wavelengths.
    wv_center : float
        Center wavelength. If above 100.0 assumed nm, otherwise micron units.
    n_center : float
        Refractive index at the center wavelength.
    order : int
        The Buchdahl polynomial order. Either 2 or 3, default is 3rd order.
    gtol : float
        Controls the convergence accuracy. Default is 1.0e-9. Faster but less accurate convergence
        will be achieved using larger values. A value of 1.0e-8 will often suffice.
    Returns
    -------
    optimal_fit_parms : scipy.optimize.OptimizeResult
        Result object returned by scipy.optimize.least_squares. The fitted
        Buchdahl coefficients are in its ``x`` attribute: alpha, nu_1, nu_2
        and (if the fit order is 3) nu_3, chosen so that the index versus
        Buchdahl-omega curve is as close to a straight line as possible.
    """
    # Run the fit with alpha fitting to get initial values
    # Returned fit parms are alpha, n_center, nu_1, nu_2 and (if order=3) nu_3
    start_fit_parms = buchdahl_fit_alpha(wv, indices, wv_center, n_center, order)
    # The error function in this case is the non-linearity error
    optimal_fit_parms = least_squares(lambda parms, x, y: buchdahl_non_linear_error(parms, x, y, wv_center, n_center),
                                      x0=start_fit_parms, gtol=gtol, args=(wv, indices))
    return optimal_fit_parms
import os
import time
def ddpg_n_step_new(env_name, render_env=False,
                    actor_hidden_layers=[300, 300], critic_hidden_layers=[300, 300],
                    seed=0,
                    steps_per_epoch=5000, epochs=100, replay_size=int(1e6), gamma=0.99,
                    n_step=1, backup_method='mixed_n_step', exp_batch_size=10,
                    without_delay_train=False,
                    log_n_step_offline_and_online_expansion=False,
                    log_n_step_online_expansion_and_boostrapping=False,
                    polyak=0.995, pi_lr=1e-3, q_lr=1e-3, batch_size=100, start_steps=10000,
                    act_noise=0.1, policy_delay=2, max_ep_len=1000, logger_kwargs=dict(), save_freq=1):
    """
    DDPG with configurable n-step Bellman backups.

    NOTE(review): ``actor_hidden_layers``/``critic_hidden_layers``/``logger_kwargs``
    are mutable default arguments; they are only read (never mutated) in this
    function, so sharing across calls is harmless here.

    Args:
        env_fn : A function which creates a copy of the environment.
            The environment must satisfy the OpenAI Gym API.
        actor_critic: A function which takes in placeholder symbols
            for state, ``x_ph``, and action, ``a_ph``, and returns the main
            outputs from the agent's Tensorflow computation graph:
            ===========  ================  ======================================
            Symbol       Shape             Description
            ===========  ================  ======================================
            ``pi``       (batch, act_dim)  | Deterministically computes actions
                                           | from policy given states.
            ``q``        (batch,)          | Gives the current estimate of Q* for
                                           | states in ``x_ph`` and actions in
                                           | ``a_ph``.
            ``q_pi``     (batch,)          | Gives the composition of ``q`` and
                                           | ``pi`` for states in ``x_ph``:
                                           | q(x, pi(x)).
            ===========  ================  ======================================
        ac_kwargs (dict): Any kwargs appropriate for the actor_critic
            function you provided to DDPG.
        seed (int): Seed for random number generators.
        steps_per_epoch (int): Number of steps of interaction (state-action pairs)
            for the agent and the environment in each epoch.
        epochs (int): Number of epochs to run and train agent.
        replay_size (int): Maximum length of replay buffer.
        gamma (float): Discount factor. (Always between 0 and 1.)
        polyak (float): Interpolation factor in polyak averaging for target
            networks. Target networks are updated towards main networks
            according to:
            .. math:: \\theta_{\\text{targ}} \\leftarrow
                \\rho \\theta_{\\text{targ}} + (1-\\rho) \\theta
            where :math:`\\rho` is polyak. (Always between 0 and 1, usually
            close to 1.)
        pi_lr (float): Learning rate for policy.
        q_lr (float): Learning rate for Q-networks.
        batch_size (int): Minibatch size for SGD.
        start_steps (int): Number of steps for uniform-random action selection,
            before running real policy. Helps exploration.
        act_noise (float): Stddev for Gaussian exploration noise added to
            policy at training time. (At test time, no noise is added.)
        max_ep_len (int): Maximum length of trajectory / episode / rollout.
        logger_kwargs (dict): Keyword args for EpochLogger.
        save_freq (int): How often (in terms of gap between epochs) to save
            the current policy and value function.
    """
    logger = EpochLogger(**logger_kwargs)
    logger.save_config(locals())
    tf.set_random_seed(seed)
    np.random.seed(seed)
    env, test_env = gym.make(env_name), gym.make(env_name)
    obs_dim = env.observation_space.shape[0]
    act_dim = env.action_space.shape[0]
    # Action limit for clamping: critically, assumes all dimensions share the same bound!
    act_limit = env.action_space.high[0]
    # Inputs to computation graph
    x_ph = tf.placeholder(dtype=tf.float32, shape=(None, obs_dim))
    a_ph = tf.placeholder(dtype=tf.float32, shape=(None, act_dim))
    # x2_ph/r_ph/d_ph carry n-step trajectories: second axis is the step index.
    x2_ph = tf.placeholder(dtype=tf.float32, shape=(None, None, obs_dim))
    r_ph = tf.placeholder(dtype=tf.float32, shape=(None, None))
    d_ph = tf.placeholder(dtype=tf.float32, shape=(None, None))
    n_step_ph = tf.placeholder(dtype=tf.float32, shape=())
    batch_size_ph = tf.placeholder(dtype=tf.int32)
    actor_hidden_sizes = actor_hidden_layers
    critic_hidden_sizes = critic_hidden_layers
    actor_hidden_activation = tf.keras.activations.relu
    actor_output_activation = tf.keras.activations.tanh
    critic_hidden_activation = tf.keras.activations.relu
    critic_output_activation = tf.keras.activations.linear
    # Main outputs from computation graph
    with tf.variable_scope('main'):
        actor = MLP(layer_sizes=actor_hidden_sizes + [act_dim],
                    hidden_activation=actor_hidden_activation, output_activation=actor_output_activation)
        critic = MLP(layer_sizes=critic_hidden_sizes + [1],
                     hidden_activation=critic_hidden_activation, output_activation=critic_output_activation)
        pi = act_limit * actor(x_ph)
        q = tf.squeeze(critic(tf.concat([x_ph, a_ph], axis=-1)), axis=1)
        q_pi = tf.squeeze(critic(tf.concat([x_ph, pi], axis=-1)), axis=1)
    # Target networks
    with tf.variable_scope('target'):
        # Note that the action placeholder going to actor_critic here is
        # irrelevant, because we only need q_targ(s, pi_targ(s)).
        actor_targ = MLP(layer_sizes=actor_hidden_sizes + [act_dim],
                         hidden_activation=actor_hidden_activation, output_activation=actor_output_activation)
        critic_targ = MLP(layer_sizes=critic_hidden_sizes + [1],
                          hidden_activation=critic_hidden_activation, output_activation=critic_output_activation)
        # One bootstrapped target Q per lookahead depth 1..n_step.
        n_step_bootstrapped_q = []
        for n_step_i in range(n_step):
            # slice next_obs for different n
            next_obs_tmp = tf.reshape(tf.slice(x2_ph, [0, n_step_i, 0], [batch_size_ph, 1, obs_dim]),
                                      [batch_size_ph, obs_dim])
            pi_targ_tmp = act_limit * actor_targ(next_obs_tmp)
            q_pi_targ_tmp = tf.squeeze(critic_targ(tf.concat([next_obs_tmp, pi_targ_tmp], axis=-1)), axis=1)
            n_step_bootstrapped_q.append(q_pi_targ_tmp)
    # Experience buffer
    replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)
    # Bellman backup for Q function
    dis_acc_first, dis_acc_following, dis_boots, n_step_backups = [], [], [], []
    for n_step_i in range(1, n_step + 1):
        # for k = 0,..., n-1: (1-done) * gamma**(k) * reward
        dis_rate = tf.tile(tf.reshape(tf.pow(gamma, tf.range(0, n_step_i, dtype=tf.float32)), [1, -1]),
                           [batch_size_ph, 1])
        dis_rate = tf.multiply(dis_rate, 1 - tf.slice(d_ph, [0, 0], [batch_size_ph, n_step_i]))  # multiply done slice
        n_step_dis_rew = tf.multiply(dis_rate, tf.slice(r_ph, [0, 0], [batch_size_ph, n_step_i]))
        # first step reward
        n_step_first_rew = n_step_dis_rew[:, 0]
        # discounted following step reward
        n_step_following_rew = n_step_dis_rew[:, 1:]
        n_step_offline_acc_rew = tf.reduce_sum(n_step_following_rew, axis=1)
        # discounted bootstrapped reward
        boots_q = gamma ** n_step_i * (1 - tf.reshape(tf.slice(d_ph, [0, n_step_i], [batch_size_ph, 1]), [-1])) * \
                  n_step_bootstrapped_q[n_step_i - 1]
        # whole n-step backup
        backup_tmp = tf.stop_gradient(n_step_first_rew + n_step_offline_acc_rew + boots_q)
        # Separately save for logging
        dis_acc_first.append(n_step_first_rew), dis_acc_following.append(n_step_offline_acc_rew)
        dis_boots.append(boots_q), n_step_backups.append(backup_tmp)
    # Define different backup methods
    backup_avg_n_step = tf.stop_gradient(tf.reduce_mean(tf.stack(n_step_backups, axis=1), axis=1))
    backup_min_n_step = tf.stop_gradient(tf.reduce_min(tf.stack(n_step_backups, axis=1), axis=1))
    backup_avg_n_step_exclude_1 = tf.stop_gradient(tf.reduce_mean(tf.stack(n_step_backups[1:], axis=1), axis=1))
    backups = [backup_avg_n_step, backup_min_n_step, backup_avg_n_step_exclude_1] + n_step_backups
    # Crucial: if statement here does not work in tensorflow
    # (the backup is selected at graph-construction time via a one-hot flag).
    backup_flag = np.zeros((3 + int(n_step)))
    if backup_method == 'avg_n_step':
        backup_flag[0] = 1
    elif backup_method == 'min_n_step':
        backup_flag[1] = 1
    elif backup_method == 'avg_n_step_exclude_1':
        backup_flag[2] = 1
    else:
        # e.g. "2_step" selects the plain 2-step backup.
        tmp_step, _ = backup_method.split('_')
        tmp_step = int(tmp_step)
        if 1 <= tmp_step and tmp_step <= n_step:
            backup_flag[3 + tmp_step - 1] = 1  # index start from 0
        else:
            raise Exception('Wrong backup_method!')
    if np.sum(backup_flag) != 1:
        raise Exception('Wrong backup_flag!')
    backup_index = np.where(backup_flag == 1)[0][0]
    backup = backups[backup_index]
    print("backup_index={}".format(backup_index))
    # DDPG losses
    pi_loss = -tf.reduce_mean(q_pi)
    q_loss = tf.reduce_mean((q - backup) ** 2)
    # Separate train ops for pi, q
    pi_optimizer = tf.train.AdamOptimizer(learning_rate=pi_lr)
    train_pi_op = pi_optimizer.minimize(loss=pi_loss, var_list=actor.variables)
    q_optimizer = tf.train.AdamOptimizer(learning_rate=q_lr)
    train_q_op = q_optimizer.minimize(loss=q_loss, var_list=critic.variables)
    # Polyak averaging for target variables
    target_update = tf.group([tf.assign(v_targ, polyak * v_targ + (1 - polyak) * v_main)
                              for v_main, v_targ in zip(actor.variables + critic.variables,
                                                        actor_targ.variables + critic_targ.variables)])
    # Initializing targets to match main variables
    target_init = tf.group([tf.assign(v_targ, v_main)
                            for v_main, v_targ in zip(actor.variables + critic.variables,
                                                      actor_targ.variables + critic_targ.variables)])
    # Initialize variables and target networks
    sess = tf.keras.backend.get_session()
    sess.run(tf.global_variables_initializer())
    sess.run(target_init)
    # restore actor-critic model
    # NOTE(review): hard-coded debugging switch and absolute Windows path.
    restore_actor_critic_model = False
    if restore_actor_critic_model:
        model_path = r"C:\Users\Lingheng\Google Drive\git_repos\spinup_data\2020-01-12_ddpg_n_step_new_AntPyBulletEnv_v0\2020-01-12_10-15-53-ddpg_n_step_new_AntPyBulletEnv_v0_s0"
        actor.load_weights(os.path.join(model_path, 'checkpoints', 'epoch90_actor'))
        critic.load_weights(os.path.join(model_path, 'checkpoints', 'epoch90_critic'))
        sess.run(target_init)

    def get_action(o, noise_scale):
        # Deterministic policy action plus Gaussian exploration noise,
        # clipped to the action bounds.
        a = sess.run(pi, feed_dict={x_ph: o.reshape(1, -1)})[0]
        a += noise_scale * np.random.randn(act_dim)
        return np.clip(a, -act_limit, act_limit)

    def test_agent(n=10):
        # Evaluate the deterministic policy over n test episodes.
        for j in range(n):
            o, r, d, ep_ret, ep_len = test_env.reset(), 0, False, 0, 0
            while not (d or (ep_len == max_ep_len)):
                # Take deterministic actions at test time (noise_scale=0)
                o, r, d, _ = test_env.step(get_action(o, 0))
                ep_ret += r
                ep_len += 1
            logger.store(TestEpRet=ep_ret, TestEpLen=ep_len)

    start_time = time.time()
    # For PyBulletGym envs, must call env.render() before env.reset().
    if render_env:
        env.render()
    o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
    o_sim_state, o_elapsed_steps = get_sim_state_and_elapsed_steps(env, env_name)
    total_steps = steps_per_epoch * epochs
    # Main loop: collect experience in env and update/log each epoch
    for t in range(total_steps):
        """
        Until start_steps have elapsed, randomly sample actions
        from a uniform distribution for better exploration. Afterwards,
        use the learned policy (with some noise, via act_noise).
        """
        if t > start_steps:
            a = get_action(o, act_noise)
        else:
            a = env.action_space.sample()
        # Step the env
        if render_env:
            env.render()
        o2, r, d, _ = env.step(a)
        o2_sim_state, o2_elapsed_steps = get_sim_state_and_elapsed_steps(env, env_name)
        ep_ret += r
        ep_len += 1
        # Ignore the "done" signal if it comes from hitting the time
        # horizon (that is, when it's an artificial terminal signal
        # that isn't based on the agent's state)
        d = False if ep_len == max_ep_len else d
        # Store experience to replay buffer
        replay_buffer.store(o, o_sim_state, o_elapsed_steps, a, r,
                            o2, o2_sim_state, o2_elapsed_steps, d)
        # Super critical, easy to overlook step: make sure to update
        # most recent observation!
        o, o_sim_state, o_elapsed_steps = o2, o2_sim_state, o2_elapsed_steps
        if without_delay_train:
            # batch = replay_buffer.sample_batch(batch_size)
            batch = replay_buffer.sample_batch_n_step(batch_size, n_step=n_step)
            feed_dict = {x_ph: batch['obs1'],
                         x2_ph: batch['obs2'],
                         a_ph: batch['acts'],
                         r_ph: batch['rews'],
                         d_ph: batch['done'],
                         n_step_ph: n_step,
                         batch_size_ph: batch_size}
            # Q-learning update
            outs = sess.run([q_loss, q, train_q_op], feed_dict)
            logger.store(LossQ=outs[0], QVals=outs[1])
            # # Policy update
            # if t % policy_delay == 0:
            # Delayed policy update
            outs = sess.run([pi_loss, train_pi_op, target_update], feed_dict)
            logger.store(LossPi=outs[0])
        if d or (ep_len == max_ep_len):
            """
            Perform all DDPG updates at the end of the trajectory,
            in accordance with tuning done by TD3 paper authors.
            """
            if not without_delay_train:
                for j in range(ep_len):
                    # batch = replay_buffer.sample_batch(batch_size)
                    batch = replay_buffer.sample_batch_n_step(batch_size, n_step=n_step)
                    feed_dict = {x_ph: batch['obs1'],
                                 x2_ph: batch['obs2'],
                                 a_ph: batch['acts'],
                                 r_ph: batch['rews'],
                                 d_ph: batch['done'],
                                 n_step_ph: n_step,
                                 batch_size_ph: batch_size}
                    # critic update
                    outs = sess.run([q_loss, q, backups, train_q_op], feed_dict)
                    logger.store(LossQ=outs[0], QVals=outs[1])
                    logger.store(QBackupAvgNStep=outs[2][0], QBackupMinNStep=outs[2][1],
                                 QBackupAvgNStepExclude1=outs[2][2])
                    logger.store(
                        **{'QBackup{}Step'.format(n_step_i + 1): outs[2][3 + n_step_i] for n_step_i in range(n_step)})
                    # actor update
                    outs = sess.run([pi_loss, train_pi_op, target_update], feed_dict)
                    logger.store(LossPi=outs[0])
            """
            ###############################################
            """
            if log_n_step_offline_and_online_expansion:
                # Logging: n-step offline + online expansion
                # start_time_1 = time.time()
                exp_after_n_step = 0  # indicates expand based on online policy after observing exp_n_step offline experiences
                ground_truth_q, predicted_q = online_expand_to_end(sess, q, pi, x_ph, a_ph, gamma,
                                                                   env_name, env,
                                                                   replay_buffer, n_step, exp_batch_size,
                                                                   exp_after_n_step)
                logger.store(PredictedQ=predicted_q, GroundTruthQ=ground_truth_q)
                # end_time_1 = time.time()
            if log_n_step_online_expansion_and_boostrapping:
                # Logging: n-step online expansion + bootstrapped Q thereafter
                # start_time_2 = time.time()
                exp_backup, exp_first, exp_second, exp_third, \
                n_s_backup, n_s_first, n_s_second, n_s_third = online_expand_first_n_step(sess, pi,
                                                                                         backups, dis_acc_first,
                                                                                         dis_acc_following, dis_boots,
                                                                                         x_ph, x2_ph, r_ph, d_ph,
                                                                                         batch_size_ph,
                                                                                         env_name, env,
                                                                                         replay_buffer, n_step,
                                                                                         exp_batch_size)
                logger.store(NStepOfflineBackup=n_s_backup, NStepOfflineFir=n_s_first,
                             NStepOfflineSec=n_s_second, NStepOfflineThi=n_s_third,
                             NStepOnlineBackup=exp_backup, NStepOnlineFir=exp_first,
                             NStepOnlineSec=exp_second, NStepOnlineThi=exp_third)
                # end_time_2 = time.time()
                # print('Method 1: {}, Method 2:{}'.format(end_time_1 - start_time_1, end_time_2-start_time_2))
            """
            ###############################################
            """
            logger.store(EpRet=ep_ret, EpLen=ep_len)
            o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
            o_sim_state, o_elapsed_steps = get_sim_state_and_elapsed_steps(env, env_name)
        # End of epoch wrap-up
        if t > 0 and t % steps_per_epoch == 0:
            epoch = t // steps_per_epoch
            # Save actor-critic model
            if (epoch % save_freq == 0) or (epoch == epochs - 1):
                model_save_dir = os.path.join(logger.output_dir, 'checkpoints')
                if not os.path.exists(model_save_dir):
                    os.makedirs(model_save_dir)
                actor.save_weights(os.path.join(model_save_dir, 'epoch{}_actor'.format(epoch)))
                critic.save_weights(os.path.join(model_save_dir, 'epoch{}_critic'.format(epoch)))
            # Test the performance of the deterministic version of the agent.
            test_agent()
            # Log info about epoch
            logger.log_tabular('Epoch', epoch)
            logger.log_tabular('EpRet', with_min_and_max=True)
            logger.log_tabular('TestEpRet', with_min_and_max=True)
            logger.log_tabular('EpLen', average_only=True)
            logger.log_tabular('TestEpLen', average_only=True)
            logger.log_tabular('TotalEnvInteracts', t)
            logger.log_tabular('QVals', with_min_and_max=True)
            logger.log_tabular('LossPi', average_only=True)
            logger.log_tabular('LossQ', average_only=True)
            for n_step_i in range(n_step):
                logger.log_tabular('QBackup{}Step'.format(n_step_i + 1), with_min_and_max=True)
            logger.log_tabular('QBackupAvgNStep', with_min_and_max=True)
            logger.log_tabular('QBackupMinNStep', with_min_and_max=True)
            logger.log_tabular('QBackupAvgNStepExclude1', with_min_and_max=True)
            if log_n_step_offline_and_online_expansion:
                logger.log_tabular('PredictedQ', with_min_and_max=True)
                logger.log_tabular('GroundTruthQ', with_min_and_max=True)
            if log_n_step_online_expansion_and_boostrapping:
                logger.log_tabular('NStepOfflineBackup', with_min_and_max=True)
                logger.log_tabular('NStepOnlineBackup', with_min_and_max=True)
                logger.log_tabular('NStepOfflineFir', with_min_and_max=True)
                logger.log_tabular('NStepOnlineFir', with_min_and_max=True)
                logger.log_tabular('NStepOfflineSec', with_min_and_max=True)
                logger.log_tabular('NStepOnlineSec', with_min_and_max=True)
                logger.log_tabular('NStepOfflineThi', with_min_and_max=True)
                logger.log_tabular('NStepOnlineThi', with_min_and_max=True)
            logger.log_tabular('Time', time.time() - start_time)
            logger.dump_tabular()
from re import T
def shn_pentity_represent(id, default_label="[No ID Tag]"):
    """ Represent a Person Entity in option fields or list views

    Resolves the pe_id to its instance record (person / group /
    organisation / office) and formats a human-readable label; falls back
    to "None (no such record)" when the entity cannot be resolved.
    """
    pe_str = T("None (no such record)")
    pe_table = db.pr_pentity
    pe = db(pe_table.pe_id == id).select(pe_table.instance_type,
                                         pe_table.pe_label,
                                         limitby=(0, 1)).first()
    if not pe:
        return pe_str
    instance_type = pe.instance_type
    instance_type_nice = pe_table.instance_type.represent(instance_type)
    table = db.get(instance_type, None)
    if not table:
        return pe_str
    label = pe.pe_label or default_label
    if instance_type == "pr_person":
        person = db(table.pe_id == id).select(
                    table.first_name, table.middle_name, table.last_name,
                    limitby=(0, 1)).first()
        if person:
            pe_str = "%s %s (%s)" % (vita.fullname(person),
                                     label, instance_type_nice)
    elif instance_type in ("pr_group", "org_organisation", "org_office"):
        # These three entity types are all represented by their "name" field;
        # one branch replaces the original's three identical lookups.
        record = db(table.pe_id == id).select(table.name,
                                              limitby=(0, 1)).first()
        if record:
            pe_str = "%s (%s)" % (record.name, instance_type_nice)
    else:
        pe_str = "[%s] (%s)" % (label,
                                instance_type_nice)
    return pe_str
def calculate_mean_SD_CV(df, ranking, mean_col_name):
    """Per-promoter mean and standard deviation of the TFs binding a promoter.

    Note: despite the historical name, no coefficient of variation is
    computed — only the per-promoter mean and sample SD are returned.

    :param df: dataframe with a "promoter_AGI" column and the *ranking* column.
    :param ranking: name of the numeric column to aggregate.
    :param mean_col_name: output column name for the per-promoter mean.
    :return: dataframe with columns ["promoter_AGI", mean_col_name, "sd"].
    """
    # A single grouped aggregation replaces the original's two separate
    # group-bys plus merge; output columns and row order are identical.
    merged = df.groupby("promoter_AGI")[ranking].agg(["mean", "std"]).reset_index()
    merged.columns = ["promoter_AGI", mean_col_name, "sd"]
    return merged
def normalize_trinucleotide(trinucleotide):
    """Return the normalized representation of the input trinucleotide sequence

    Notes
    -----
    Each trinucleotide sequence has two possible representations (the sequence
    and its reverse complement). For example, 5'-ACG-3' and 5'-CGT-3' are two
    representations of the same trinucleotide sequence. To prevent ambiguity,
    the representation where the central nucleotide of the trinucleotide
    context is a C or a T is chosen.
    """
    # Consistency checks
    assert len(trinucleotide) == 3
    assert all(letter in 'ACGT' for letter in trinucleotide)
    # str.translate + slice reversal replaces the hand-rolled loop.
    complement = str.maketrans('ACGT', 'TGCA')
    reverse_complement = trinucleotide.translate(complement)[::-1]
    # Choose the seq where the middle nucleotide is a 'C' or a 'T'
    if trinucleotide[1] in ('C', 'T'):
        return trinucleotide
    elif reverse_complement[1] in ('C', 'T'):
        return reverse_complement
    else:
        raise Exception("Unexpected error.")
import os
def extract_file_by_file(location, target_dir, arch_type='*', skip_symlinks=True):
    """
    Extract all files using a one-by-one process from a 7zip-supported archive
    file at location in the `target_dir` directory.
    Return a list of warning messages if any or an empty list.
    Raise exception on errors.
    `arch_type` is the type of 7zip archive passed to the -t 7zip option.
    Can be None.

    NOTE(review): `skip_symlinks` is accepted but never read in this body —
    presumably honored by a collaborator or dead; confirm with callers.
    """
    # NOTE(review): abs_location and errors_msgs are computed/unpacked but
    # never used below in this function.
    abs_location = os.path.abspath(os.path.expanduser(location))
    abs_target_dir = os.path.abspath(os.path.expanduser(target_dir))
    entries, errors_msgs = list_entries(location, arch_type)
    entries = list(entries)
    # Determine if we need a one-by-one approach: technically the aproach is to
    # check if we have files that are in the same dir and have the same name
    # when the case is ignored. We take a simpler approach: we check if all
    # paths are unique when we ignore the case: for that we only check that the
    # length of two paths sets are the same: one set as-is and the other
    # lowercased.
    paths_as_is = set(e.path for e in entries)
    paths_no_case = set(p.lower() for p in paths_as_is)
    need_by_file = len(paths_as_is) != len(paths_no_case)
    if not need_by_file:
        # use regular extract
        return extract_all_files_at_once(
            location=location,
            target_dir=target_dir,
            arch_type=arch_type)
    # now we are extracting one file at a time. this is a tad painful because we
    # are dealing with a full command execution at each time.
    errors = {}
    warnings = {}
    tmp_dir = fileutils.get_temp_dir(prefix='extractcode-extract-')
    for i, entry in enumerate(entries):
        if not entry.is_file:
            continue
        # Each entry gets its own numbered temp dir so same-name (case-insensitive)
        # files cannot clobber each other before the final move.
        tmp_extract_dir = os.path.join(tmp_dir, str(i))
        fileutils.create_dir(tmp_extract_dir)
        ex_args = build_7z_extract_command(
            location=location,
            target_dir=tmp_extract_dir,
            single_entry=entry,
            arch_type=arch_type,
        )
        rc, stdout, stderr = command.execute2(**ex_args)
        error = get_7z_errors(stdout, stderr)
        if error or rc != 0:
            error = error or UNKNOWN_ERROR
            if TRACE:
                logger.debug(
                    'extract: failure: {rc}\n'
                    'stderr: {stderr}\nstdout: {stdout}'.format(**locals()))
            # Collect the error and keep going: all failures are reported at once.
            errors[entry.path] = error
            continue
        # these are all for a single file path
        warns = get_7z_warnings(stdout) or {}
        wmsg = '\n'.join(warns.values())
        if wmsg:
            if entry.path in warnings:
                warnings[entry.path] += '\n' + wmsg
            else:
                warnings[entry.path] = wmsg
        # finally move that extracted file to its target location, possibly renamed
        source_file_name = fileutils.file_name(entry.path)
        source_file_loc = os.path.join(tmp_extract_dir, source_file_name)
        if not os.path.exists(source_file_loc):
            if entry.path in errors:
                errors[entry.path] += '\nNo file name extracted.'
            else:
                errors[entry.path] = 'No file name extracted.'
            continue
        # Sanitize the archive path and pick a unique name so case-colliding
        # entries end up side by side instead of overwriting one another.
        safe_path = paths.safe_path(entry.path, posix=True)
        target_file_loc = os.path.join(target_dir, safe_path)
        target_file_dir = os.path.dirname(target_file_loc)
        fileutils.create_dir(target_file_dir)
        unique_target_file_loc = extractcode.new_name(target_file_loc, is_dir=False)
        if TRACE:
            logger.debug('extract: unique_target_file_loc: from {} to {}'.format(
                target_file_loc, unique_target_file_loc))
        if os.path.isfile(source_file_loc):
            fileutils.copyfile(source_file_loc, unique_target_file_loc)
        else:
            fileutils.copytree(source_file_loc, unique_target_file_loc)
    extractcode.remove_backslashes_and_dotdots(abs_target_dir)
    if errors:
        raise ExtractErrorFailedToExtract(errors)
    return convert_warnings_to_list(warnings) | 895504f153318c3c84f6e4a1bcf58ac0867aa344 | 3,631,956 |
def r2z(data):
    """Apply Fisher's r-to-z transform elementwise: z = 0.5 * ln((1+r)/(1-r))."""
    numerator = 1 + data
    denominator = 1 - data
    return 0.5 * np.log(numerator / denominator)
def loader_shift(loader, frame, relative=True):
    """Shift global in time by i preserving duration
    This moves the loader by i frames preserving global duration. When relative
    is False it will shift the global in to the start frame.
    Args:
        loader (tool): The fusion loader tool.
        frame (int): The amount of frames to move.
        relative (bool): When True the shift is relative, else the shift will
            change the global in to frame.
    Returns:
        int: The resulting relative frame change (how much it moved)
    """
    comp = loader.Comp()
    # TIME_UNDEFINED addresses the tool's static (non-animated) value.
    time = comp.TIME_UNDEFINED
    old_in = loader["GlobalIn"][time]
    old_out = loader["GlobalOut"][time]
    if relative:
        shift = frame
    else:
        # Absolute mode: compute the delta that lands GlobalIn on `frame`.
        shift = frame - old_in
    # Shifting global in will try to automatically compensate for the change
    # in the "ClipTimeStart" and "HoldFirstFrame" inputs, so we preserve those
    # input values to "just shift" the clip
    with preserve_inputs(loader, inputs=["ClipTimeStart",
                                         "ClipTimeEnd",
                                         "HoldFirstFrame",
                                         "HoldLastFrame"]):
        # GlobalIn cannot be set past GlobalOut or vice versa
        # so we must apply them in the order of the shift.
        if shift > 0:
            loader["GlobalOut"][time] = old_out + shift
            loader["GlobalIn"][time] = old_in + shift
        else:
            loader["GlobalIn"][time] = old_in + shift
            loader["GlobalOut"][time] = old_out + shift
    return int(shift) | 2593473b58aad8e073aaf7d4adc978e12df20762 | 3,631,958 |
import time
def get_current_timestamp():  # pylint: disable=unused-variable
    """Return the current local time formatted as ``YYYY-MM-DD_HH-MM-SS``."""
    now = time.localtime()
    return time.strftime("%Y-%m-%d_%H-%M-%S", now)
from datetime import datetime
def get_fight_updates(game_ids=None, before=None, after=None, order=None, count=None, page_size=1000, lazy=False, cache_time=5):
    """
    Return a list of boss fight event updates
    Args:
        game_ids: list or comma-separated string of fight IDs.
        before: return elements before this string or datetime timestamp.
        after: return elements after this string or datetime timestamp.
        order: sort in ascending ('asc') or descending ('desc') order.
        count: number of entries to return.
        page_size: number of elements to get per-page (1..1000).
        lazy: whether to return a list or a generator
        cache_time: response cache lifetime in seconds, or `None` for infinite cache

    Raises:
        ValueError: if `order` is not 'asc'/'desc' or `page_size` is out of range.
    """
    # Accept datetime objects by serializing them with the module's format.
    if isinstance(before, datetime):
        before = before.strftime(TIMESTAMP_FORMAT)
    if isinstance(after, datetime):
        after = after.strftime(TIMESTAMP_FORMAT)
    params = {}
    if before:
        params["before"] = before
    if after:
        params["after"] = after
    if order:
        if order.lower() not in ('asc', 'desc'):
            raise ValueError("Order must be 'asc' or 'desc'")
        params["order"] = order
    if page_size:
        if page_size < 1 or page_size > 1000:
            raise ValueError("page_size must be between 1 and 1000")
        # NOTE(review): the API's "count" query parameter carries the per-page
        # size; the total `count` is enforced client-side by paged_get below.
        params["count"] = page_size
    if game_ids:
        params["fight"] = prepare_id(game_ids)
    s = session(cache_time)
    return paged_get(f'{BASE_URL}/fights/updates', params=params, session=s, total_count=count, page_size=page_size, lazy=lazy) | 69b32e224cd2651de850b03fd3ba2fef05e327cc | 3,631,960 |
import math
def format_float(number, decimal_places):
    """
    Round *number* half-up to *decimal_places* and return it as a string,
    padded with trailing zeros (useful for formatting results).

    The previous implementation used ``int(value)``, which truncates toward
    zero, so negative numbers rounded inconsistently (e.g. -1.6 at 0 places
    became '-1.0'); ``math.floor`` makes half-up rounding uniform for all
    signs.  Fixed-point f-string formatting supplies the trailing zeros and
    never falls back to scientific notation.

    Assumes decimal_places >= 0.
    """
    divisor = 10 ** decimal_places
    # floor(x + 0.5) == round-half-up (toward +infinity) for any sign.
    value = math.floor(number * divisor + 0.5) / divisor
    return f"{value:.{decimal_places}f}"
def make_mean_edisp(
    observations,
    position,
    e_true,
    e_reco,
    low_reco_threshold=Energy(0.002, "TeV"),
    high_reco_threshold=Energy(150, "TeV"),
):
    """Compute the mean energy dispersion of a set of observations.

    The EDISP of each observation is evaluated at ``position`` and the
    results are stacked via :func:`~gammapy.irf.IRFStacker.stack_edisp`.

    Parameters
    ----------
    observations : `~gammapy.data.Observations`
        Observations for which to compute the EDISP
    position : `~astropy.coordinates.SkyCoord`
        Position at which to compute the EDISP
    e_true : `~gammapy.utils.energy.EnergyBounds`
        True energy axis
    e_reco : `~gammapy.utils.energy.EnergyBounds`
        Reconstructed energy axis
    low_reco_threshold : `~gammapy.utils.energy.Energy`
        low energy threshold in reco energy, default 0.002 TeV
    high_reco_threshold : `~gammapy.utils.energy.Energy`
        high energy threshold in reco energy, default 150 TeV

    Returns
    -------
    stacked_edisp : `~gammapy.irf.EnergyDispersion`
        Stacked EDISP for the set of observations
    """
    aeff_list = []
    edisp_list = []
    livetime_list = []
    for obs in observations:
        # Per-observation offset between the requested position and pointing.
        offset = position.separation(obs.pointing_radec)
        aeff_list.append(obs.aeff.to_effective_area_table(offset, energy=e_true))
        edisp_list.append(
            obs.edisp.to_energy_dispersion(offset, e_reco=e_reco, e_true=e_true)
        )
        livetime_list.append(obs.observation_live_time_duration)
    n_obs = len(observations)
    stacker = IRFStacker(
        list_aeff=aeff_list,
        list_edisp=edisp_list,
        list_livetime=livetime_list,
        list_low_threshold=[low_reco_threshold] * n_obs,
        list_high_threshold=[high_reco_threshold] * n_obs,
    )
    stacker.stack_edisp()
    return stacker.stacked_edisp
import time
def formatTime ( sec, nsec, fmt ):
    """ Convert given time to a string presentation according to
    a given control sequence.

    `sec` is a POSIX timestamp (seconds), `nsec` the sub-second part in
    nanoseconds, and `fmt` a strftime-style control string extended with
    a %f directive (matched by the module-level `_ffmtre` regex) for
    fractional seconds. %f and %z are expanded here before the remaining
    directives are handed to time.strftime. """
    # replace %f (and its variations) with fractional seconds
    match = _ffmtre.search ( fmt, 0 )
    while match :
        # make replacement string
        subsec = ".%09d" % nsec
        if match.group(2) :
            # precision is given in the format string
            precision = int(match.group(2))
            # bring it into range 1..9
            precision = max ( min ( precision, 9 ), 1 )
            # truncate replacement string (keep the leading dot plus digits)
            subsec = subsec[:precision+1]
        # replace %f with this string
        fmt = fmt[:match.start()] + subsec + fmt[match.end():]
        # try again, resuming at the point of the replacement
        match = _ffmtre.search ( fmt, match.start() )
    # Python strftime has trouble with %z, we replace it ourselves
    zi = fmt.find("%z")
    while zi >= 0 :
        # Use the DST flag of the timestamp itself to pick the UTC offset.
        lt = time.localtime(sec)
        if lt.tm_isdst > 0 and time.daylight:
            utc_offset_minutes = - int(time.altzone/60)
        else:
            utc_offset_minutes = - int(time.timezone/60)
        # Render as +HHMM / -HHMM.
        utc_offset_str = "%+03d%02d" % (utc_offset_minutes/60.0, utc_offset_minutes % 60)
        fmt = fmt[:zi] + utc_offset_str + fmt[zi+2:]
        # try again
        zi = fmt.find("%z")
    # format seconds according to format string
    t = time.localtime ( sec )
    return time.strftime ( fmt, t ) | ad7e2b553545093b7834007453774bdb4cd62507 | 3,631,963 |
def CalculateChi6ch(mol):
    """
    #################################################################
    Molecular connectivity chi index for cycles of size 6 ---->Chi6ch

    Usage:
        result = CalculateChi6ch(mol)

    Input: mol is a molecule object.
    Output: result is a numeric value.
    #################################################################
    """
    result = _CalculateChinch(mol, NumCycle=6)
    return result
def decode_transaction_filter(metadata_bytes):
    """Decode a transaction filter from its encoded metadata.

    Args:
        metadata_bytes (str): Encoded sequence of transaction filter flags.

    Returns:
        list: one int per element of ``metadata_bytes``, or None when the
        input is empty/falsy.
    """
    if not metadata_bytes:
        return None
    return [int(flag) for flag in metadata_bytes]
def get_signin_box(burl):
    """Build the HTML for the sign-in box shown to anonymous visitors.

    `burl` is the site base URL used to build absolute asset/join links.
    Returns the assembled HTML string, or an empty string when the current
    user is already logged in (user_is_login() != 0).
    """
    box_content = ''
    if user_is_login() == 0:
        l_app_header_title = 'Create unlimited optimized trading strategies'
        l_app_header_desc = 'Chart patterns, price movements, '+\
        'and news analysed using quantitative methods '+\
        'with the power of artificial intelligence to generate trading signals. '+\
        'Generate more profitable trades by using SmartAlpha as your trading assistant.'
        l_app_call_to_action_link = 'Join now.'
        etoro_logo_form = go_to_url(get_broker_affiliate_link('eToro', 'affiliate'),
                                    'form',
                                    'eToro')
        etoro_logo_link = go_to_url(get_broker_affiliate_link('eToro', 'affiliate'),
                                    'link',
                                    'eToro')
        box_content = etoro_logo_form +\
        '<div class="box-sign"><div class="row">' +\
        '   <div class="col-lg-12 col-md-12 col-sm-12 col-xs-12">'+\
        '       <div class="sign-part">'+\
        '       <div class="row sign-row">'+\
        '       <div class="col-lg-6 col-md-6 col-sm-23 col-xs-12 sa-signin-box">'+\
        '       <div>&nbsp;</div>'+\
        '       <h1 style="text-align: left; font-size:x-large; font-weight:bolder;">'+\
        l_app_header_title +'</h1> '+\
        '       <div>'+\
        l_app_header_desc +' <a href="'+\
        burl +'join/?" class="text-info">'+\
        l_app_call_to_action_link +'</a></div>'+\
        '       <div class="row d-none d-sm-block style="margin: 20px;">'+\
        '       <a '+\
        etoro_logo_link +'" target="_blank"><img src="'+\
        burl +'static/etoro-logo.png" height="50px" style="margin:20px;" /></a>'+\
        '       <a href="#" target=""><img src="'+\
        burl +'static/tradingview-logo.png" height="50px" style="margin:20px;" /></a>'+\
        '       <a href="#" target=""><img src="'+\
        burl +'static/aws-logo.png" height="50px" style="margin:20px;" /></a>'+\
        '       </div>'+\
        '       </div>'+\
        '       <div class="col-lg-6 col-md-6 col-sm-12 col-xs-12" '+\
        'style="padding: 50px;">'+\
        get_login_form(burl, 'dark', '') +\
        '       </div>'+\
        '   </div>'+\
        '   </div>'+\
        '   </div>'+\
        '</div></div>'
    return box_content | 4181779ad6e8ce7924c2854e4e6e9b7d0d47926b | 3,631,966 |
import json
def jsonify(*args, **kwargs):
    """Flask-style jsonify with support for MongoDB ObjectId values.

    Builds a dict from the given args/kwargs, serializes it with
    MongoJSONEncoder, and wraps the result in an application/json Response.
    """
    payload = dict(*args, **kwargs)
    body = json.dumps(payload, cls=MongoJSONEncoder)
    return Response(body, mimetype='application/json')
def parse_variable(srcline, funcname=None):
    """Return a Variable for the variable declared on the line (or None).

    Only ``static`` declarations are considered; a static declaration that
    looks like a function (parentheses without brackets) is skipped too.
    """
    # XXX Handle more than just static variables.
    stripped = srcline.strip()
    if not stripped.startswith('static '):
        return None, None
    looks_like_function = '(' in stripped and '[' not in stripped
    if looks_like_function:
        # a function, not a variable
        return None, None
    return parse_variable_declaration(stripped)
def _assert_df_is_valid_cforest(candidate_model):
    """Validate that *candidate_model* encodes a causal forest.

    A valid causal forest is a pd.DataFrame satisfying:
    1 (MultiIndex). The index is a pd.MultiIndex with layers 'tree_id'
      and 'node_id'.
    2 (Column names). The columns are exactly "left_child", "right_child",
      "level", "split_feat", "split_value", "treat_effect".
    3 (Column dtype). The first four of those columns hold integer values
      (NaN entries are allowed, e.g. stored as floats for leaf nodes).

    Args:
        candidate_model (pd.DataFrame): Data frame representing a causal
            forest model, which might have columns that represent integer
            dtypes but are stored as floats.

    Returns: True if *candidate_model* constitutes a valid causal forest.

    Raises:
        ValueError, if *candidate_model* does not represent a valid causal
            forest model.
    """
    # 1. MultiIndex with the expected layer names.
    index = candidate_model.index
    if not isinstance(index, pd.MultiIndex):
        raise ValueError(
            "Candidate model does not represent a valid causal forest as the "
            "index is not of type pd.MultiIndex."
        )
    if list(index.names) != ["tree_id", "node_id"]:
        raise ValueError(
            "Candidate model does not represent a valid causal forest as "
            "names of index are not 'tree_id' and 'node_id'."
        )
    # 2. Exactly the expected set of columns.
    expected_columns = [
        "left_child",
        "right_child",
        "level",
        "split_feat",
        "split_value",
        "treat_effect",
    ]
    if set(candidate_model.columns) != set(expected_columns):
        raise ValueError(
            "Candidate model does not represent a valid causal forest as the "
            "set of column names is not equal to {'left_child', 'right_child',"
            "'level', 'split_feat', 'split_value', 'treat_effect'}"
        )
    # 3. Child/level/feature columns must be integral (NaN permitted).
    def _entry_is_integral(x):
        return True if np.isnan(x) else float.is_integer(float(x))
    for column in expected_columns[:4]:
        if not candidate_model[column].apply(_entry_is_integral).all():
            raise ValueError(f"Data type of column {column} is not int.")
    return True
def is_ip(str_value: str) -> bool:
    """Return True if the string is an IP address (IPv4 or IPv6), else False.

    :param str str_value: String to evaluate.
    """
    if is_ipv4(str_value):
        return True
    return is_ipv6(str_value)
def parse_ply(fin):
    """Parse vertex data from a PLY-format stream.

    Returns a dict mapping each attribute name to a numpy array of that
    attribute's values (one entry per vertex), using the per-attribute
    dtypes reported by parse_ply_header.
    """
    num_pts, attr_key, attr_type = parse_ply_header(fin)
    # One accumulator list per attribute column.
    columns = [[] for _ in attr_key]
    for _ in range(num_pts):
        tokens = next(fin).split()
        for j, token in enumerate(tokens):
            columns[j].append(np.array(token, dtype=attr_type[j]))
    return {
        key: np.array(columns[j], dtype=attr_type[j])
        for j, key in enumerate(attr_key)
    }
def energy_change_charge_qa_atom(
    df_qc, df_qats, target_label, delta_charge, target_initial_charge=0,
    change_signs=False, basis_set='aug-cc-pV5Z', use_ts=True,
    ignore_one_row=True, considered_lambdas=None, return_qats_vs_qa=False):
    """Calculate the energy difference to change the charge of a target atom
    using quantum alchemy with or without a Taylor series.
    Parameters
    ----------
    df_qc : :obj:`pandas.DataFrame`
        A pandas dataframe with quantum chemistry data. It should have the
        following columns (from `get_qc_dframe`): system, atomic_numbers,
        charge, multiplicity, n_electrons, qc_method, basis_set, lambda_range,
        finite_diff_delta, finite_diff_acc, poly_coeff.
    df_qats : :obj:`pandas.DataFrame`
        A pandas dataframe with QATS data. It should have the
        following columns (from `get_qats_dframe`): system, atomic_numbers,
        charge, multiplicity, n_electrons, qc_method, basis_set, lambda,
        electronic_energy, hf_energy, and correlation_energy.
    target_label : :obj:`str`
        Atoms in the system. For example, ``'c'``, ``'si'``, or ``'f.h'``.
    delta_charge : :obj:`str`
        Overall change in the initial target system.
    target_initial_charge : :obj:`int`
        Specifies the initial charge state of the target system. For example,
        the first ionization energy is the energy difference going from
        charge ``0 -> 1``, so ``target_initial_charge`` must equal ``0``.
    change_signs : :obj:`bool`, optional
        Multiply all predictions by -1. Used to correct the sign for computing
        electron affinities. Defaults to ``False``.
    basis_set : :obj:`str`, optional
        Specifies the basis set to use for predictions. Defaults to
        ``'aug-cc-pV5Z'``.
    use_ts : :obj:`bool`, optional
        Use a Taylor series approximation (with finite differences) to make
        QATS-n predictions (where n is the order). Defaults to ``True``.
    ignore_one_row : :obj:`bool`, optional
        Used to control errors in ``state_selection`` when there is missing
        data (i.e., just one state). If ``True``, no errors are raised. Defaults
        to ``True``.
    considered_lambdas : :obj:`list`, optional
        Allows specification of lambda values that will be considered. ``None``
        will allow all lambdas to be valid, ``[1, -1]`` would only report
        predictions using references using a lambda of ``1`` or ``-1``.
        Defaults to ``None``.
    return_qats_vs_qa : :obj:`bool`, optional
        Return the difference of QATS-n - QA predictions; i.e., the error of
        using a Taylor series with repsect to the alchemical
        PES. Defaults to ``False``.
    Returns
    -------
    :obj:`dict`
        Quantum alchemy predictions with or without a Taylor series for the
        energy required to change the charge of an atom. Keys are system labels
        of the references and values are :obj:`numpy.ndarray` of energy
        predictions in Hartrees.
    """
    # Guard rails: differences vs the alchemical PES require Taylor-series
    # predictions; only single atoms are supported; the charge must change.
    if return_qats_vs_qa: assert use_ts == True
    assert len(df_qc.iloc[0]['atomic_numbers']) != 2
    assert delta_charge != 0
    if delta_charge < 0: assert change_signs == True
    ### GETS INFORMATION ON TARGET SYSTEM ###
    # Selects initial target ground state QC data.
    target_initial_qc = df_qc[
        (df_qc.system == target_label) & (df_qc.charge == target_initial_charge)
        & (df_qc.lambda_value == 0.0) & (df_qc.basis_set == basis_set)
    ]
    target_initial_qc = select_state(
        target_initial_qc, 0, ignore_one_row=ignore_one_row
    )
    assert len(target_initial_qc) == 1
    target_initial_n_electrons = target_initial_qc.n_electrons.values[0]
    target_atomic_numbers = target_initial_qc.iloc[0]['atomic_numbers']
    # Selects final target ground state QC data.
    target_final_n_electrons = target_initial_n_electrons - delta_charge
    ### GETS QUANTUM ALCHEMY REFERENCES ###
    # Get all available references for the initial target based on ground state
    # energies.
    avail_ref_final_sys = set(
        df_qats[
            (df_qats.system != target_label)
            & (df_qats.n_electrons == target_final_n_electrons)
            & (df_qats.basis_set == basis_set)
        ].system.values
    )
    ref_initial_qats = get_qa_refs(
        df_qc, df_qats, target_label, target_initial_n_electrons,
        basis_set=basis_set
    )
    # Keep only references that can also describe the final charge state.
    ref_initial_qats = ref_initial_qats[
        ref_initial_qats['system'].isin(avail_ref_final_sys)
    ]
    ref_initial_qats = select_state(
        ref_initial_qats, 0, ignore_one_row=ignore_one_row
    )
    # Get all available references for the final target based on ground state
    # energies.
    ref_final_qats = get_qa_refs(
        df_qc, df_qats, target_label, target_final_n_electrons,
        basis_set=basis_set
    )
    ref_final_qats = ref_final_qats[
        ref_final_qats['system'].isin(ref_initial_qats.system)
    ]
    ref_final_qats = select_state(
        ref_final_qats, 0, ignore_one_row=ignore_one_row
    )
    # Checks that the size of initial and final dataframe is the same
    assert len(ref_initial_qats) == len(ref_final_qats)
    ### MAKE PREDICTIONS ###
    predictions = {}
    for system in ref_initial_qats.system:
        # Gets lambda value to go from reference to target.
        ref_initial = ref_initial_qats.query('system == @system')
        ref_final = ref_final_qats.query('system == @system')
        lambda_initial = get_lambda_value(
            ref_initial.iloc[0]['atomic_numbers'], target_atomic_numbers,
            specific_atom=None, direction=None
        )
        lambda_final = get_lambda_value(
            ref_final.iloc[0]['atomic_numbers'], target_atomic_numbers,
            specific_atom=None, direction=None
        )
        # The same nuclear-charge path must connect both charge states.
        assert lambda_initial == lambda_final
        if considered_lambdas is not None:
            if lambda_initial not in considered_lambdas:
                continue
        # Predictions with a Taylor series (one entry per expansion order).
        if use_ts or return_qats_vs_qa == True:
            order_preds = []
            for order in range(len(ref_initial.iloc[0]['poly_coeffs'])):
                e_target_initial = qats_prediction(
                    ref_initial.iloc[0]['poly_coeffs'], order, lambda_initial
                )
                e_target_final = qats_prediction(
                    ref_final.iloc[0]['poly_coeffs'], order, lambda_final
                )
                e_diff = (e_target_final - e_target_initial)[0]
                if change_signs:
                    e_diff *= -1
                order_preds.append(e_diff)
            predictions[system] = np.array(order_preds)
        # Predictions without a Taylor series (exact alchemical PES energies),
        # or compute the QATS-n minus QA difference.
        if not use_ts or return_qats_vs_qa == True:
            chrg_ref_initial = ref_initial.iloc[0]['charge']
            mult_ref_initial = ref_initial.iloc[0]['multiplicity']
            ref_initial_qc = df_qc.query(
                'system == @system & lambda_value == @lambda_initial'
                '& charge == @chrg_ref_initial'
                '& multiplicity == @mult_ref_initial'
                '& basis_set == @basis_set'
            )
            assert len(ref_initial_qc) == 1
            e_target_initial = ref_initial_qc.iloc[0]['electronic_energy']
            chrg_ref_final = ref_final.iloc[0]['charge']
            mult_ref_final = ref_final.iloc[0]['multiplicity']
            ref_final_qc = df_qc.query(
                'system == @system & lambda_value == @lambda_initial'
                '& charge == @chrg_ref_final'
                '& multiplicity == @mult_ref_final'
                '& basis_set == @basis_set'
            )
            e_target_final = ref_final_qc.iloc[0]['electronic_energy']
            e_diff = e_target_final - e_target_initial
            if change_signs:
                e_diff *= -1
            if return_qats_vs_qa:
                pred_diff = [i - e_diff for i in predictions[system]]
                predictions[system] = np.array(pred_diff)
            else:
                predictions[system] = np.array([e_diff])
    return predictions | 30c8b15a5e25edd0d05352066a1e3533c5182cd6 | 3,631,972 |
import click
def initiate_XY_data(config):
    """Create the empty XY-dictionary used for the reference run.

    The returned dict has one empty pd.Series per column of the sample
    matrix: the polygon ID and geometry first, then one entry per variable
    listed in the cfg-file's [data] section, then the boolean conflict
    history at t-1, the neighbouring-polygon conflict at t-1, and finally
    the binary conflict target at t.

    Args:
        config (ConfigParser-object): object containing the parsed
            configuration-settings of the model.

    Returns:
        dict: empty dictionary to be filled, containing keys for each
        variable (X), binary conflict data (Y) plus meta-data.
    """
    XY = {
        'poly_ID': pd.Series(),
        'poly_geometry': pd.Series(),
    }
    # One float column per variable declared in the cfg-file.
    for entry in config.items('data'):
        XY[str(entry[0])] = pd.Series(dtype=float)
    XY['conflict_t_min_1'] = pd.Series(dtype=bool)
    XY['conflict_t_min_1_nb'] = pd.Series(dtype=float)
    XY['conflict'] = pd.Series(dtype=bool)
    if config.getboolean('general', 'verbose'):
        click.echo('DEBUG: the columns in the sample matrix used are:')
        for key in XY:
            click.echo('...{}'.format(key))
    return XY
def load_key_string_pubkey(string, callback=util.passphrase_callback):
    # type: (str, Callable) -> PKey
    """Load an M2Crypto.EC.PKey from a PEM-encoded public key string.

    :param string: key material in PEM format.
    :param callback: Python callable invoked to acquire a passphrase with
        which to protect the key.
    :return: M2Crypto.EC.PKey object.
    """
    buffer = BIO.MemoryBuffer(string)
    with buffer:
        return EVP.load_key_bio_pubkey(buffer, callback)
import os
def lookuptemplate(ui, topic, tmpl):
    """Find the template matching the given -T/--template spec 'tmpl'
    'tmpl' can be any of the following:
    - a literal template (e.g. '{rev}')
    - a map-file name or path (e.g. 'changelog')
    - a reference to [templates] in config file
    - a path to raw template file
    A map file defines a stand-alone template environment. If a map file
    selected, all templates defined in the file will be loaded, and the
    template matching the given topic will be rendered. No aliases will be
    loaded from user config.
    If no map file selected, all templates in [templates] section will be
    available as well as aliases in [templatealias].

    Note: the order of the checks below defines precedence; literal
    templates win, then stock styles, then [templates] config entries,
    then on-disk files.
    """
    # looks like a literal template?
    if '{' in tmpl:
        return templatespec('', tmpl, None)
    # perhaps a stock style?
    if not os.path.split(tmpl)[0]:
        mapname = (templater.templatepath('map-cmdline.' + tmpl)
                   or templater.templatepath(tmpl))
        if mapname and os.path.isfile(mapname):
            return templatespec(topic, None, mapname)
    # perhaps it's a reference to [templates]
    if ui.config('templates', tmpl):
        return templatespec(tmpl, None, None)
    if tmpl == 'list':
        # special value: print the available styles and abort
        ui.write(_("available styles: %s\n") % templater.stylelist())
        raise error.Abort(_("specify a template"))
    # perhaps it's a path to a map or a template
    if ('/' in tmpl or '\\' in tmpl) and os.path.isfile(tmpl):
        # is it a mapfile for a style?
        if os.path.basename(tmpl).startswith("map-"):
            return templatespec(topic, None, os.path.realpath(tmpl))
        with util.posixfile(tmpl, 'rb') as f:
            tmpl = f.read()
        return templatespec('', tmpl, None)
    # constant string?
    return templatespec('', tmpl, None) | 604b0d0b1a773464ce5cd61df0b1a2d0800a7314 | 3,631,975 |
def merge(left, right, path=None):
    """Recursively merge *right* into *left* (in place) and return *left*.

    Nested dicts are merged key by key, lists are unioned (items from
    *right* appended when missing), equal leaves are left alone, and
    diverging leaves raise an Exception naming the dotted conflict path.
    """
    if path is None:
        path = []
    for key, right_value in right.items():
        if key not in left:
            left[key] = right_value
            continue
        left_value = left[key]
        if isinstance(left_value, dict) and isinstance(right_value, dict):
            merge(left_value, right_value, path + [str(key)])
        elif left_value == right_value:
            pass  # same leaf value
        elif isinstance(left_value, list) and isinstance(right_value, list):
            for item in right_value:
                if item not in left_value:
                    left_value.append(item)
        else:
            raise Exception('Conflict at %s' %
                            '.'.join(path + [str(key)]))
    return left
def resize_min_side(pil_img, min_len):
    """Resize the image so that its shorter side is *min_len* pixels,
    preserving the aspect ratio.

    :param pil_img: PIL image (anything exposing ``.size`` and ``.resize``).
    :param min_len: target length of the shorter side, in pixels.
    :return: the resized image.
    """
    width, height = pil_img.size
    if width < height:
        target_w = min_len
        # Scale the longer side by the same factor to keep the aspect ratio.
        target_h = int(np.round(height * (target_w / float(width))))
    else:
        target_h = min_len
        target_w = int(np.round(width * (target_h / float(height))))
    return pil_img.resize((target_w, target_h))
def get_numeric_boundaries(df: DataFrame, column_name: str) -> (float, float):
    """Return the (min_value, max_value) of a numeric column.

    The column is cast to float first; if the column cannot be cast this
    will raise an error which is currently not trapped.

    :param df: source DataFrame.
    :param column_name: name of the column to inspect.
    :return: (min_value, max_value)
    """
    casted = df.select(col(column_name).cast("float").alias(column_name))
    aggregated = casted.select(min(column_name), max(column_name))
    return aggregated.first()
from typing import Optional
def concatenate(data: tvm.te.Tensor, axis: Optional[int] = 0):
    """Join a sequence of arrays along an existing axis. Optimized for CPU exeution.
    Parameters
    ----------
    data : tuple of tvm.te.Tensor
        The arrays to concatenate
    axis : int, optional
        The axis along which the arrays will be joined. Default is 0.
    Returns
    -------
    ret : tvm.te.Tensor
    """
    def gen_ir_1d(data_bufs, in_outers_tensor, in_cumsum_tensor, out_buf):
        """Custom conactenation execution.

        Serial copy of each input into its cumsum-offset slot of the output;
        used for the shapes where parallelizing the outer dim buys nothing.
        """
        i_b = tvm.tir.ir_builder.create()
        data_bufs1 = [i_b.buffer_ptr(data_buf) for data_buf in data_bufs]
        out_buf = i_b.buffer_ptr(out_buf)
        outers = i_b.buffer_ptr(in_outers_tensor)
        cumsum = i_b.buffer_ptr(in_cumsum_tensor)
        for i in range(len(data)):
            with i_b.for_range(0, outers[i], name="j") as j:
                out_buf[cumsum[i] + j] = data_bufs1[i][j]
        return i_b.get()
    def gen_ir(data_bufs, in_outers_tensor, in_cumsum_tensor, out_buf, inner, outer):
        """Common case of conactenation execution.

        Parallelizes over the `inner` (pre-axis) dimension when it exists,
        otherwise over each input's copy loop.
        """
        i_b = tvm.tir.ir_builder.create()
        data_bufs1 = [i_b.buffer_ptr(data_buf) for data_buf in data_bufs]
        out_buf = i_b.buffer_ptr(out_buf)
        outers = i_b.buffer_ptr(in_outers_tensor)
        cumsum = i_b.buffer_ptr(in_cumsum_tensor)
        if inner > 1:
            with i_b.for_range(0, inner, name="inn", kind="parallel") as inn:
                pos = inn * outer
                for i in range(len(data)):
                    offset = inn * outers[i]
                    with i_b.for_range(0, outers[i], name="j") as j:
                        out_buf[pos + cumsum[i] + j] = data_bufs1[i][offset + j]
        else:
            for i in range(len(data)):
                with i_b.for_range(0, outers[i], name="j", kind="parallel") as j:
                    out_buf[cumsum[i] + j] = data_bufs1[i][j]
        return i_b.get()
    # Normalize negative axis to a non-negative index.
    if axis < 0:
        axis += len(data[0].shape)
    concat_axis_sizes = [int(t.shape[axis]) for t in data]
    join_size = int(np.sum(concat_axis_sizes))
    # Per-input element count from the concat axis onward, and the running
    # offsets of each input within the flattened output.
    in_outers = [int(np.prod(i.shape[axis:])) for i in data]
    in_outers_cumsum = [0, *np.cumsum(in_outers, dtype="int64")[0:-1]]
    dtype = data[0].dtype
    out_shape = data[0].shape[:axis] + [join_size] + data[0].shape[axis + 1 :]
    in_outers_tensor = const_vector(in_outers)
    in_cumsum_tensor = const_vector(in_outers_cumsum, name="cumsum")
    right_val = np.prod(out_shape[axis:])
    left_val = np.prod(out_shape[:axis])
    if (
        len(data[0].shape) == 1
        or right_val == 1
        or (left_val == 1 and axis == len(data[0].shape) - 1)
        or (left_val == 1 and right_val == 1)
    ):
        # badly parallelized case
        return te.extern(
            [out_shape],
            list(data) + [in_outers_tensor, in_cumsum_tensor],
            lambda ins, outs: gen_ir_1d(ins, ins[-2], ins[-1], outs[0]),
            dtype=dtype,
            name="concatenate_ext",
        )
    inner = get_const_int(int(left_val))
    outer = get_const_int(int(right_val))
    return te.extern(
        [out_shape],
        list(data) + [in_outers_tensor, in_cumsum_tensor],
        lambda ins, outs: gen_ir(ins, ins[-2], ins[-1], outs[0], inner, outer),
        dtype=dtype,
        name="concatenate_ext",
    ) | d9bb934f9518a565dab341316247294c525ea2c1 | 3,631,979 |
def defgrad_from_strain(E, kappa, flatten=1):
    """Compute the deformation gradient from the strain measure.

    Parameters
    ----------
    E : ndarray (6,)
        Strain measure
    kappa : int or float
        Seth-Hill strain parameter (0 selects the logarithmic strain)
    flatten : bool, optional
        If True (default), return a flattened array

    Returns
    -------
    F : ndarray
        The deformation gradient
    """
    rotation = np.eye(3)
    identity = np.eye(3)
    strain = matrix_rep(E, 0)
    # Invert the Seth-Hill strain definition to recover the stretch U.
    if kappa == 0:
        stretch = la.expm(strain)
    else:
        stretch = la.powm(kappa * strain + identity, 1. / kappa)
    F = np.dot(rotation, stretch)
    if la.det(F) <= 0.0:
        raise Exception("negative jacobian encountered")
    return F.flatten() if flatten else F
import hashlib
def get_url_gravatar(email):
    """Return the Gravatar avatar URL (300px) for the given email address."""
    digest = hashlib.md5(email.encode('utf-8')).hexdigest()
    return "http://www.gravatar.com/avatar/{0}.jpg?s=300".format(digest)
def volume_type_qos_disassociate_all(context, qos_specs_id):
    """Remove every volume-type association from the given qos specs.

    Delegates to the active database backend implementation (``IMPL``).
    """
    return IMPL.volume_type_qos_disassociate_all(context, qos_specs_id)
def create_command_at_set(command_set, command):
    """Create and persist a CommandEntry linking ``command`` to ``command_set``.

    Returns the newly created CommandEntry instance.
    """
    return CommandEntry.objects.create(
        command_set=command_set,
        command=command,
    )
def get_index_image():
    """Return the index.html template used to display a single image.

    The ``[[image_ext]]`` placeholder is substituted with the actual
    image file extension by the caller.

    Returns:
        str: HTML page content containing the image-extension placeholder.
    """
    return """<!DOCTYPE HTML><html lang="en-us">
    <head>
    </head>
    <body style='margin:0'>
    <img src='image.[[image_ext]]'>
    </body>
    </html>"""
def merge_schema(original: dict, other: dict) -> dict:
    """Merge two schema dictionaries into a single dict.

    Keys present only in ``other`` are added; list values are
    concatenated, dict values are merged recursively, and scalar values
    in ``other`` override those in ``original``.

    Neither input is mutated: the original implementation did
    ``source[key].extend(value)`` on a shallow copy, which modified the
    list still referenced by ``original``.  A fresh list is built here
    instead.

    Args:
        original (dict): Source schema dictionary
        other (dict): Schema dictionary to append to the source

    Returns:
        dict: Dictionary value of new merged schema
    """
    merged = original.copy()
    for key, value in other.items():
        if key not in merged:
            merged[key] = value
        elif isinstance(value, list):
            # New list rather than in-place extend, so `original` stays intact.
            merged[key] = list(merged[key]) + list(value)
        elif isinstance(value, dict):
            merged[key] = merge_schema(merged[key], value)
        else:
            merged[key] = value
    return merged
def noise_eq_bandwidth(window, axis=-1):
    """
    Calculate the noise equivalent bandwidth (NEB) of a windowing function
    as

        sqrt(window.size * window.max ** 2 / sum(window ** 2))

    See https://analog.intgckts.com/equivalent-noise-bandwidth/

    Args:
        window : float ndarray
        axis : int, axis along which to calculate NEB

    Returns
        neb : float or ndarray
            Noise equivalent bandwidth of the window
    """
    # ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``float`` (float64 accumulator) is the supported spelling.
    return np.sqrt(window.shape[axis] * np.max(window, axis=axis) ** 2
                   / np.sum(window ** 2, dtype=float, axis=axis))
import hashlib
def hash160(s: bytes) -> bytes:
    """Return RIPEMD-160 of the SHA-256 of ``s`` (Bitcoin's HASH160).

    :param s: data
    :return: 20-byte hash digest
    """
    sha = hashlib.sha256(s).digest()
    ripemd = hashlib.new('ripemd160')
    ripemd.update(sha)
    return ripemd.digest()
def trim_frame(fr: NDFrame, freq: str) -> NDFrame:
    """Trim the index of a frame so only full periods of ``freq`` remain.

    Parameters
    ----------
    fr : NDFrame
        The (untrimmed) pandas series or dataframe.
    freq : str
        Frequency to trim to, e.g. 'MS' to only keep full months.

    Returns
    -------
    NDFrame
        Subset of ``fr`` with the same frequency.
    """
    trimmed_index = stamps.trim_index(fr.index, freq)
    return fr.loc[trimmed_index]
def settings_alert_rules(request):
    """Render the alert-rules settings page so users can manage alert
    rules for their sites.

    Args:
        request: the Django HTTP request of the logged-in user.

    Returns:
        HttpResponse rendering ``sites_alert_rules.html``.
    """
    # The original called _get_user_sites(request) twice with identical
    # arguments ('sites' and 'user_sites'); fetch once and reuse.
    sites = _get_user_sites(request)
    context_dict = {
        'permitted': get_org_edit_permissions(request.user),
        'sites_stats': get_quick_status(sites),
        'sites': sites,
    }
    return render(request, 'seshdash/settings/sites_alert_rules.html', context_dict)
from typing import List
def generate_states_1qubit(c_sys: CompositeSystem, names: List[str]) -> List[State]:
    """Return a list of states on a common 1-qubit system.

    Parameters
    ----------
    c_sys: CompositeSystem
        1-qubit system
    names: List[str]
        list of 1-qubit state names

    Returns
    -------
    List[State]
    """
    # Sanity-check the system really is a single qubit.
    assert c_sys.num_e_sys == 1
    assert c_sys.dim == 2
    # Validate every requested name before generating anything.
    known_names = get_state_names_1qubit()
    for state_name in names:
        assert state_name in known_names
    return [
        generate_qoperation(mode="state", name=state_name, c_sys=c_sys)
        for state_name in names
    ]
def fix_columns(data, text_1_name=None, text_2_name=None, label_name=None):
    """
    Rename columns in an input data frame to the ones bisemantic expects. Drop unused columns. If an argument is not
    None the corresponding column must already be in the raw data.

    :param data: raw data
    :type data: pandas.DataFrame
    :param text_1_name: name of column in data that should be mapped to text1
    :type text_1_name: str or None
    :param text_2_name: name of column in data that should be mapped to text2
    :type text_2_name: str or None
    :param label_name: name of column in data that should be mapped to label
    :type label_name: str or None
    :return: data frame containing just the needed columns
    :rtype: pandas.DataFrame
    """
    # Every explicitly requested source column must exist in the raw data.
    for required in (text_1_name, text_2_name, label_name):
        if required is not None and required not in data.columns:
            raise ValueError("Missing column %s" % required)
    renamed = data.rename(columns={text_1_name: text_1, text_2_name: text_2, label_name: label})
    # Keep the label column only when it was actually mapped.
    if label in renamed.columns:
        return renamed[[text_1, text_2, label]]
    return renamed[[text_1, text_2]]
import os
def substitute_placeholders_from_file_to_memory(filename, verb, data):
    """Replace all variable placeholders in ``filename`` and return the result.

    Args:
        filename: path of the template file to read.
        verb: verb forwarded to the in-memory substitution helper.
        data: substitution data forwarded to the in-memory helper.

    Returns:
        The file contents with placeholders substituted.

    Raises:
        RuntimeError: if ``filename`` does not exist.
    """
    if not os.path.exists(filename):
        # The original f-string had no placeholder and always reported
        # "No such file: (unknown)"; interpolate the actual path instead.
        raise RuntimeError(f"No such file: {filename}")
    with open(filename, "r") as in_file:
        return substitute_placeholders_from_memory_to_memory(
            in_file.read(),
            verb,
            data
        )
def unpack_kgrid(n, vals, log=null_log):
    """
    Unpack the 'pyramid' of values u>=v>=w into the (n,n,n) k-grid.

    n - the size of the grid
    vals - m(m+1)(m+2)/6 values in the pyramid

    returns out - (n,n,n) float64 array.
    """
    lib = _initlib(log)
    # C-contiguous float64 buffer for the native call.
    pyramid = require(vals, dtype=float64, requirements=['C'])
    m = 1 + n // 2
    assert len(vals) == (m * (m + 1) * (m + 2)) // 6
    grid = empty(n * n * n, dtype=float64)
    lib.unpack_kgrid(n, pyramid, grid)
    return grid.reshape(n, n, n)
import re
def valid_email(email):
    """Check for a valid email address.

    Args:
        email (str): Email.

    Returns:
        bool: True if in valid email format and False if not.
    """
    # Raw string avoids the invalid-escape warning on ``\.``. The character
    # class now contains the ASCII apostrophe ``'`` — the original had a
    # typographic quote (U+2019), rejecting legal local parts like o'brien.
    pattern = r"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*$"
    return bool(re.match(pattern, email))
import traceback
def db_remove(key):
    """ Endpoint Function to interact with PupDB's remove() method. """
    try:
        if not key:
            return {'error': "Missing parameter 'key'"}, 400
        try:
            removed = DB.remove(key)
        except KeyError as key_err:
            # Strip the quotes KeyError wraps around its message.
            return {'error': str(key_err)[1:-1]}, 404
        if removed:
            return {'message': "Key '{}' removed from DB.".format(key)}, 200
        return {
            'error':
            "There was a problem removing Key '{}' from the DB.".format(key)
        }, 400
    except Exception:
        # Catch-all boundary: report the traceback as an unprocessable request.
        return {
            'error':
            'Unable to process this request. Details: %s' %
            traceback.format_exc(),
        }, 422
def rgb2gray(img):
    """Convert an RGB image to gray scale.

    Uses the luma weights from
    http://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale:
    gray = 0.299 R + 0.587 G + 0.114 B
    """
    print('Converting RGB image to gray scale.')
    luma_weights = [0.299, 0.587, 0.114]
    return np.uint8(np.dot(img[..., :3], luma_weights))
def search():
    """Render the blog's search-box form widget (used with the random
    recommendations block)."""
    form_html = (
        '<form action="/blog/search/" method="get" name="form" >'
        '<div class="my-search" >'
        '<input type="text" name="q" autocomplete="off" placeholder="请输入搜索内容" class="search-input">'
        '<i class="layui-icon layui-icon-search search-btn" onclick="javascript:form.submit()"></i>'
        '</div></form>'
    )
    return format_html(form_html)
def find_number(text, ignore_spaces=False, make_int=True,
                ignore_chars=None):
    """
    Find the number in the `text`.

    :param text: unicode or byte-string text
    :param ignore_spaces: if True then groups of digits delimited
        by spaces are considered as one number
    :param make_int: if True, convert the matched value to ``int``
    :param ignore_chars: characters removed from ``text`` before searching
    :raises: :class:`DataNotFound` if number was not found.
    """
    for char in (ignore_chars or ()):
        text = text.replace(char, '')
    pattern = RE_NUMBER_WITH_SPACES if ignore_spaces else RE_NUMBER
    match = pattern.search(text)
    if not match:
        raise DataNotFound
    value = match.group(0)
    if ignore_spaces:
        value = drop_space(value)
    return int(value) if make_int else value
def PeerDownHasBgpNotification(reason):
    """Determine whether or not a BMP Peer Down message as a BGP notification.

    Args:
      reason: the Peer Down reason code (from the draft)

    Returns:
      True if there will be a BGP Notification, False if not
    """
    return reason in (1, 3)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.