content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import os
def record_mode(request):
    """Manage compatibility with DD client libraries.

    Configures pytest VCR behavior from the ``RECORD`` environment variable:

    * ``RECORD=none``  -> disable VCR entirely.
    * ``RECORD=true``  -> record all cassettes (``vcr_record="all"``).
    * ``RECORD=false`` -> replay only (``vcr_record="none"``); the default.

    Any other value raises ``KeyError``, as before.

    :param request: pytest ``request`` fixture; its config options are mutated.
    :return: the raw ``RECORD`` value that was applied.
    """
    mode = os.getenv("RECORD", "false")
    # os.getenv with a default never returns None, so the original
    # `if mode is not None` guard was dead code and has been removed.
    if mode == "none":
        request.config.option.disable_vcr = True
    else:
        # Map the boolean-ish env value onto VCR record modes; an
        # unexpected value fails fast with a KeyError, matching the
        # original behavior.
        request.config.option.vcr_record = {"true": "all", "false": "none"}[mode]
        request.config.option.disable_vcr = False
    return mode
def get_number_of_engine_threads():
    """
    Returns the number of engine threads.
    """
    # Count processes whose command line mentions the engine binary,
    # excluding the grep process itself from the count.
    ps_pipeline = 'ps aux | grep "./build/engine" | grep -v "grep" | wc -l'
    return int(common.run_local_cmd(ps_pipeline, get_output=True))
def metadata_repr_as_list(metadata_list):
    """
    Turn a list of metadata into a list of printable representations
    """
    output = []
    for entry in metadata_list:
        try:
            metadata_type = MetadataType.objects.get(pk=entry['id'])
            output.append('%s - %s' % (metadata_type, entry.get('value', '')))
        except Exception:
            # Best effort: silently skip entries whose metadata type
            # cannot be resolved (missing 'id' key or unknown pk).
            pass
    return output
def dip_reconstructor(dataset='ellipses', name=None):
    """
    Build the Deep Image Prior (DIP) reconstructor for a dataset.

    :param dataset: Can be 'ellipses' or 'lodopab'
    :param name: Optional display name for the reconstructor; defaults to 'DIP'
    :return: The Deep Image Prior (DIP) method for the specified dataset
    :raises Exception: if the dataset or its parameter file cannot be loaded
    """
    try:
        standard_dataset = load_standard_dataset(dataset)
        params = Params.load('{}_dip'.format(dataset))
        if name is None:
            name = 'DIP'
        reconstructor = DeepImagePriorReconstructor(standard_dataset.ray_trafo,
                                                    hyper_params=params.dict,
                                                    name=name)
        return reconstructor
    except Exception as e:
        # Chain the original error so the real cause (bad dataset name,
        # missing params file, ...) is not silently swallowed.
        raise Exception('The reconstructor doesn\'t exist') from e
def convert_listofrollouts(paths):
    """
    Take a list of rollout dictionaries
    and return separate arrays,
    where each array is a concatenation of that array from across the rollouts
    """
    def stacked(key):
        # Concatenate a single field across every rollout.
        return np.concatenate([path[key] for path in paths])

    unconcatenated_rewards = [path["reward"] for path in paths]
    return (stacked("observation"),
            stacked("action"),
            stacked("next_observation"),
            stacked("terminal"),
            stacked("reward"),
            unconcatenated_rewards)
def get_iex_corporate_actions(start=None, **kwargs):
    """
    Top-level function to retrieve IEX Corporate Actions from the ref-data
    endpoints
    Parameters
    ----------
    start: datetime.datetime, default None, optional
        A month to use for retrieval (a datetime object)
    kwargs: Additional Request Parameters (see base class)
    """
    # Thin wrapper: builds the CorporateActions reader and executes the
    # request immediately via fetch().
    return CorporateActions(start=start, **kwargs).fetch()
import math
def mc_generation_costs(df_ren, h2_demand, year_diff, capex_extra, capex_h2, lifetime_hours, electrolyser_efficiency,
                        elec_opex,
                        other_capex_elec, water_cost,
                        capex_wind, opex_wind, capex_solar, opex_factor_solar,
                        interest=0.08, full_load_hours=2000):
    """Calculates the cost of H2 generation on a yearly and per kg basis. Requires the main dataframe as input.
    Optional inputs are the H2 demand (kt/yr) year (2019 or 2050), electrolyser type (alkaline, SOEC, or other),
    interest rate, and full load hours (hours/yr).

    Mutates and returns `df_ren`, adding per-location cost columns for solar,
    wind, and electrolysis, plus the final generation cost per kg of H2.
    Relies on a module-level `annualise(capex, interest, years)` helper
    defined elsewhere in the file.
    """
    # Fixed technology assumptions; values are hard-coded design constants.
    wind_efficiency = 0.4  # []
    blade = 50  # [m]
    turbine_size = 2  # [MW]
    comp_elec = 4  # [MWh/ton H2]
    # Determination of solar parameters
    # NOTE(review): linear efficiency trend in `year_diff`; presumably
    # year_diff is years after a 2019 baseline -- confirm against callers.
    solar_efficiency = 0.64 + 0.003333 * year_diff  # []
    lifetime = lifetime_hours / full_load_hours
    # Calculate required size of electricity generation (MWh/yr)
    # 39 is the (HHV) energy content of hydrogen in MWh-ish units per ton --
    # TODO confirm units with the data source.
    h2_demand_hourly = h2_demand * 1000 / full_load_hours  # [ton/hr]
    elec_demand = h2_demand_hourly * 39 / electrolyser_efficiency + comp_elec * h2_demand_hourly  # [MW]
    elec_demand_yearly = h2_demand * 1000 * 39 / electrolyser_efficiency + comp_elec * h2_demand * 1000  # [MWh/yr]
    # Calculate the capex of the solar array required
    df_ren['Solar Array Size'] = elec_demand_yearly * 1000 / df_ren['Solar Energy Potential']  # [kWp]
    df_ren['Solar CapEx'] = df_ren['Solar Array Size'] * capex_solar  # [Eur]
    # Calculate the capex of wind turbines required
    capex_turbine = turbine_size * capex_wind * 1000
    df_ren['Wind Turbine Power'] = df_ren['Wind Power Density'] * wind_efficiency * (blade ** 2) * math.pi / 1e6  # [MW]
    df_ren['No. of Turbines'] = elec_demand / df_ren['Wind Turbine Power']  # []
    df_ren['Wind CapEx'] = df_ren['No. of Turbines'] * capex_turbine  # [Eur]
    # Get minimum cost location from solar and wind and calculate cost/yr and cost/kWh
    df_ren['Yearly Cost Solar'] = annualise(df_ren['Solar CapEx'], interest, 25) + opex_factor_solar * df_ren[
        'Solar CapEx']  # [Eur/yr]
    df_ren['Yearly Cost Wind'] = annualise(df_ren['Wind CapEx'], interest,
                                           20) + opex_wind * elec_demand_yearly  # [Eur/yr]
    df_ren['Elec Cost Solar'] = df_ren['Yearly Cost Solar'] / elec_demand_yearly  # [Eur/MWh]
    df_ren['Elec Cost Wind'] = df_ren['Yearly Cost Wind'] / elec_demand_yearly  # [Eur/MWh]
    df_ren['Cheaper source'] = ['Solar' if x < y else 'Wind' for x, y in
                                zip(df_ren['Yearly Cost Solar'], df_ren['Yearly Cost Wind'])]
    # Calculate the cost of the electrolyser
    total_capex_h2 = (capex_h2 + other_capex_elec) * elec_demand * 1000  # [Eur]
    yearly_cost_h2 = annualise(total_capex_h2, interest, lifetime) + elec_opex * total_capex_h2 + (
            capex_extra + water_cost) * h2_demand * 1000 * 1000  # [Eur/yr]
    # Calculate total generation cost/yr
    # The cheaper of solar/wind per location, plus the electrolyser cost.
    df_ren['Yearly gen. cost'] = [min(x, y) + yearly_cost_h2 for x, y in
                                  zip(df_ren['Yearly Cost Solar'], df_ren['Yearly Cost Wind'])]  # [Eur/yr]
    df_ren['Gen. cost per kg H2'] = df_ren['Yearly gen. cost'] / (h2_demand * 1000 * 1000)  # [Eur/kg H2]
    return df_ren
def relax(u, f, nu):
    """
    Weighted Jacobi
    """
    n = len(u)
    # Inverse diagonal of the 1-D model problem operator (factor 2/h^2
    # with h = 1/(n+1)); `residual` is defined elsewhere in this file.
    diag_inv = 1.0 / (2.0 * ((n + 1) ** 2))
    weight = 2.0 / 3.0
    smoothed = u.copy()
    for _ in range(nu):
        smoothed = smoothed + weight * diag_inv * residual(smoothed, f)
    return smoothed
import requests
import sys
def doi_to_id(doi, timestamp):
    """Query translator to convert book DOI to specified schema."""
    params = {
        'uri': doi,
        'filter': f'uri_scheme:{URI_SCHEME}',
        'strict': URI_STRICT
    }
    headers = {'Authorization': AUTH} if AUTH else {}
    response = requests.get(URI_API_ENDP, params=params, headers=headers)
    if response.status_code != 200:
        # Log the translator's error message to stderr and return an
        # empty result instead of raising.
        payload = response.json()
        print(
            f"{payload['message']}: {payload['parameters']['uri']} ({timestamp})",
            file=sys.stderr
        )
        return []
    return response.json()['data']
def forward_influence_centrality(graph, weight=None):
    """Returns the forward influence centrality of the nodes in a network as an array.
    Parameters
    ----------
    graph : Graph, array
        A NetworkX graph or numpy/sparse array
    weight : string or None
        If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
        graph instance, otherwise the default is None.
    Returns
    -------
    forward influence centrality : array
        A Nx1 dimensional array indexed by the nodes, in the same order as graph.nodes, holding the value of their forward influence centralities.
    References
    ----------
    .. [1] Moutsinas, G., Shuaib, C., Guo, W., & Jarvis, S. (2019).
        Graph hierarchy and spread of infections.
        arXiv preprint arXiv:1908.04358."""
    # Three equivalent branches, dispatched on the input container type.
    # In every branch: m[i] is the weighted average of the hierarchical
    # differences TD[i] over node i's incoming edges (A is transposed),
    # with m[i] = 0 for nodes that have no in-edges.
    if isinstance(graph, ndarray):
        # Dense numpy array input.
        A = graph.transpose()
        TD = forward_hierarchical_differences(graph, weight=weight)
        m = zeros((TD.shape[0], 1))
        for i in range(m.shape[0]):
            if A[i].sum() == 0:
                m[i] = 0
            else:
                m[i] = average(TD[i], weights=A[i])
    elif isinstance(graph, spmatrix):
        # Scipy sparse matrix input: elementwise multiply replaces
        # numpy.average, avoiding densification.
        A = graph.transpose()
        TD = sparse_forward_hierarchical_differences(graph, weight=weight).tocsc()
        m = zeros((TD.shape[0], 1))
        for i in range(m.shape[0]):
            if A[i].sum() == 0:
                m[i] = 0
            else:
                m[i] = (A[i].multiply(TD[i])).sum() / A[i].sum()
    elif isinstance(graph, Graph):
        # NetworkX graph input: convert to a sparse adjacency matrix first.
        A = adjacency_matrix(graph, weight=weight).transpose()
        TD = sparse_forward_hierarchical_differences(graph, weight=weight).tocsc()
        m = zeros((TD.shape[0], 1))
        for i in range(m.shape[0]):
            if A[i].sum() == 0:
                m[i] = 0
            else:
                m[i] = (A[i].multiply(TD[i])).sum() / A[i].sum()
    # NOTE(review): if `graph` matches none of the three types, `m` is
    # unbound and this raises UnboundLocalError -- confirm intended.
    return ones((m.shape[0], 1)) - m
import json
def try_to_replace_line_json(line, json_type, new_json, json_prefix=""):
    """Attempts to replace a JSON declaration if it's on the line.

    If `line` starts (after leading whitespace) with a declaration of the
    form "var [prefix][name]PlotJSON = {" (or "var [prefix]countJSON = {"
    for json_type "count"), everything from the first "{" onward is
    replaced with the serialized `new_json` followed by ";\\n".

    Parameters
    ----------
    line: str
        A line from a JavaScript code file; a matching declaration is
        assumed to fit on this single line.
    json_type: str
        One of "rank", "sample", or "count"; anything else raises
        ValueError.
    new_json: dict
        The JSON object to substitute into a matching declaration.
    json_prefix: str (default value: "")
        Optional prefix required on the variable name; when non-empty,
        unprefixed declarations are left untouched.

    Returns
    -------
    (line, replacement_made): str, bool
        The (possibly rewritten) line, and whether a replacement happened.
    """
    var_names = {
        "rank": "rankPlotJSON",
        "sample": "samplePlotJSON",
        "count": "countJSON",
    }
    if json_type not in var_names:
        raise ValueError(
            "Invalid json_type argument. Must be 'rank', "
            "'sample', or 'count'."
        )
    declaration = "var {}{} = {{".format(json_prefix, var_names[json_type])
    if line.lstrip().startswith(declaration):
        new_line = (
            line[: line.index("{")]
            + json.dumps(new_json, sort_keys=True)
            + ";\n"
        )
        return new_line, True
    return line, False
def createInvoice(request):
    """
    Invoice Generator page it will have Functionality to create new invoices,
    this will be protected view, only admin has the authority to read and make
    changes here.

    GET renders empty Invoice/LineItem forms; POST validates both, creates
    the Invoice with its LineItems, totals the amounts, tries to render a
    PDF, then redirects to the invoice list.
    """
    heading_message = 'Formset Demo'
    if request.method == 'GET':
        formset = LineItemFormset(request.GET or None)
        form = InvoiceForm(request.GET or None)
    elif request.method == 'POST':
        formset = LineItemFormset(request.POST)
        form = InvoiceForm(request.POST)
        if form.is_valid():
            invoice = Invoice.objects.create(tenant=form.data["tenant"],
                                             tenant_email=form.data["tenant_email"],
                                             billing_address=form.data["billing_address"],
                                             date=form.data["date"],
                                             due_date=form.data["due_date"],
                                             message=form.data["message"],
                                             )
        # NOTE(review): if `form` is invalid but `formset` is valid, `invoice`
        # below is unbound and this raises NameError -- confirm intended flow.
        if formset.is_valid():
            # Extract data from each line-item form and persist it.
            # BUGFIX: the loop variable is now `item_form` -- the original
            # reused `form`, clobbering the InvoiceForm that is passed to
            # the template context below.
            total = 0
            for item_form in formset:
                service = item_form.cleaned_data.get('service')
                description = item_form.cleaned_data.get('description')
                quantity = item_form.cleaned_data.get('quantity')
                rate = item_form.cleaned_data.get('rate')
                if service and description and quantity and rate:
                    amount = float(rate) * float(quantity)
                    total += amount
                    LineItem(tenant=invoice,
                             service=service,
                             description=description,
                             quantity=quantity,
                             rate=rate,
                             amount=amount).save()
            invoice.total_amount = total
            invoice.save()
            try:
                generate_PDF(request, id=invoice.id)
            except Exception as e:
                print(f"********{e}********")
            return redirect('invoice:invoice-list')
    context = {
        "title": "Invoice Generator",
        "formset": formset,
        "form": form,
    }
    return render(request, 'invoice/invoice-create.html', context)
def GetTopLevelParent(*args, **kwargs):
    """GetTopLevelParent(Window win) -> Window"""
    # SWIG-generated wrapper: delegates directly to the native wx binding.
    return _misc_.GetTopLevelParent(*args, **kwargs)
def slotnick(x, k):
    """
    Relation between velocity and depth
    Parameters
    ----------
    x : 1-d ndarray
        Depth to convert
    k : scalar
        velocity gradient
    Returns
    -------
    Velocity at each depth, computed as v0 + k*x where v0 is the
    module-level initial velocity.
    Notes
    -----
    typical values of velocity gradient k falls in the range 0.6-1.0s-1
    References
    ----------
    .. [1] M. Slotnick, "On seismic computations, with applications, I,"
       Geophysics, vol. 1, no. 1, pp. 9-22, 1936.
    """
    # `global` is not required for a read-only access, but is kept as-is;
    # v0 must be defined at module level before calling this function.
    global v0
    return v0 + k*x
def NodeEvolution(tensor, directed=False):
    """Temporal evolution of all nodes' input and output communicability or flow.
    Parameters
    ----------
    tensor : ndarray of rank-3
        Temporal evolution of the network's dynamic communicability. A tensor
        of shape timesteps x n_nodes x n_nodes, where n_nodes is the number of nodes.
    Returns
    -------
    nodedyncom : tuple.
        Two ndarrays, each of shape (timesteps x n_nodes): the first sums
        the communicability interactions over every node's inputs, the
        second over its outputs.
    """
    # 0) SECURITY CHECKS
    shape = np.shape(tensor)
    assert len(shape) == 3, 'Input not aligned. Tensor of rank-3 expected'
    n_t, n_rows, n_cols = shape
    assert n_rows == n_cols, 'Input not aligned. Shape (timesteps x n_nodes x n_nodes) expected'
    # 1) Summing over axis 1 aggregates each node's inputs; axis 2 its outputs.
    return (tensor.sum(axis=1), tensor.sum(axis=2))
def transform_symbol(ctx, name):
    """Transform the symbol NAME using the renaming rules specified
    with --symbol-transform.  Return the transformed symbol name.

    Each (pattern, replacement) pair in ctx.symbol_transforms is applied
    in order; a transform that changes the name is logged and its output
    feeds the next transform."""
    for (pattern, replacement) in ctx.symbol_transforms:
        newname = pattern.sub(replacement, name)
        if newname != name:
            # Parenthesized print is valid under both Python 2 and 3
            # (the original bare print statement was Python-2-only).
            print(" symbol '%s' transformed to '%s'" % (name, newname))
            name = newname
    return name
def get_active_resources_in_grid(grid):
    """Get active resources in grid.
    :param powersimdata.input.grid.Grid grid: a Grid instance.
    :return: (*set*) -- name of active resources in grid.
    """
    _check_grid_type(grid)
    plant = grid.plant
    # A resource type is active when at least one of its plants has Pmax > 0.
    return set(plant.loc[plant["Pmax"] > 0, "type"].unique())
def getReflectionandTransmission(
    sig1,
    sig2,
    f,
    theta_i,
    eps1=epsilon_0,
    eps2=epsilon_0,
    mu1=mu_0,
    mu2=mu_0,
    dtype="TE",
):
    """
    Compute reflection and refraction coefficient of plane waves.

    Parameters
    ----------
    sig1, sig2 : float
        Conductivities of the incidence and transmission media.
    f : float
        Frequency (Hz).
    theta_i : float
        Incidence angle in degrees.
    eps1, eps2 : float, optional
        Permittivities of the two media (default: vacuum permittivity).
    mu1, mu2 : float, optional
        Permeabilities of the two media (default: vacuum permeability).
    dtype : {"TE", "TM"}, optional
        Polarization of the incident wave.

    Returns
    -------
    (r, t, theta_t) : tuple
        Reflection coefficient, transmission coefficient, and refraction
        angle in degrees.

    Raises
    ------
    Exception
        If `dtype` is neither "TE" nor "TM".
    """
    theta_i = np.deg2rad(theta_i)
    omega = 2 * np.pi * f
    # Complex wavenumbers of both media (lossy when sigma > 0).
    k1 = np.sqrt(omega ** 2 * mu1 * eps1 - 1j * omega * mu1 * sig1)
    k2 = np.sqrt(omega ** 2 * mu2 * eps2 - 1j * omega * mu2 * sig2)
    if dtype == "TE":
        bunmo = (
            mu2 * k1 * np.cos(theta_i)
            + mu1 * (k2 ** 2 - k1 ** 2 * np.sin(theta_i) ** 2) ** 0.5
        )
        bunja_r = (
            mu2 * k1 * np.cos(theta_i)
            - mu1 * (k2 ** 2 - k1 ** 2 * np.sin(theta_i) ** 2) ** 0.5
        )
        bunja_t = 2 * mu2 * k1 * np.cos(theta_i)
    elif dtype == "TM":
        bunmo = mu2 * k1 * (
            k2 ** 2 - k1 ** 2 * np.sin(theta_i) ** 2
        ) ** 0.5 + mu1 * k2 ** 2 * np.cos(theta_i)
        bunja_r = mu2 * k1 * (
            k2 ** 2 - k1 ** 2 * np.sin(theta_i) ** 2
        ) ** 0.5 - mu1 * k2 ** 2 * np.cos(theta_i)
        bunja_t = 2 * mu1 * k2 ** 2 * np.cos(theta_i)
    else:
        # Replaced the original opaque `Exception("XXX")` placeholder with
        # an actionable message; the exception type is unchanged so broad
        # `except Exception` handlers keep working.
        raise Exception("dtype must be 'TE' or 'TM', got {!r}".format(dtype))
    # bunmo/bunja = denominator/numerator of the Fresnel coefficients.
    r = bunja_r / bunmo
    t = bunja_t / bunmo
    theta_t = np.rad2deg(abs(np.arcsin(k1 / k2 * np.sin(theta_i))))
    return r, t, theta_t
import math
def tile(lng, lat, zoom, truncate=False):
"""Get the tile containing a longitude and latitude
Parameters
----------
lng, lat : float
A longitude and latitude pair in decimal degrees.
zoom : int
The web mercator zoom level.
truncate : bool, optional
Whether or not to truncate inputs to limits of web mercator.
Returns
-------
Tile
"""
if truncate:
lng, lat = truncate_lnglat(lng, lat)
lat = math.radians(lat)
n = 2.0 ** zoom
xtile = int(math.floor((lng + 180.0) / 360.0 * n))
try:
ytile = int(math.floor((1.0 - math.log(
math.tan(lat) + (1.0 / math.cos(lat))) / math.pi) / 2.0 * n))
except ValueError:
raise InvalidLatitudeError(
"Y can not be computed for latitude {} radians".format(lat))
else:
return Tile(xtile, ytile, zoom) | 4c5ad0ee802a61b1fe091a431d6cb53667a68d9c | 3,633,718 |
def read(file_path, lines=False):
    """Returns contents of file either as a string or list of lines."""
    with open(file_path, 'r') as handle:
        # readlines() preserves line terminators, matching the contract.
        return handle.readlines() if lines else handle.read()
from typing import Union
from typing import Tuple
from typing import List
def diff(tv: vs.VideoNode, bd: vs.VideoNode,
         thr: float = 72,
         height: int = 288,
         return_array: bool = False,
         return_frames: bool = False) -> Union[vs.VideoNode, Tuple[vs.VideoNode, List[int]]]:
    """
    Creates a standard :py:class:`lvsfunc.comparison.Stack` between frames from two clips that have differences.
    Useful for making comparisons between TV and BD encodes, as well as clean and hardsubbed sources.
    There are two methods used here to find differences.
    If `thr` is below 1, PlaneStatsDiff is used to figure out the differences.
    Else, if `thr` is equal than or higher than 1, PlaneStatsMin/Max are used.
    Recommended is PlaneStatsMin/Max, as those seem to catch
    more outrageous differences more easily and not return
    too many starved frames.
    Note that this might catch artifacting as differences!
    Make sure you verify every frame with your own eyes!
    Alias for this function is `lvsfunc.diff`.
    :param tv:            TV clip
    :param bd:            BD clip
    :param thr:           Threshold, <= 1 uses PlaneStatsDiff, >1 uses Max/Min.
                          Value must be below 128 (Default: 72)
    :param height:        Height in px to downscale clips to if `return_array` is ``False``
                          (MakeDiff clip will be twice this resolution) (Default: 288)
    :param return_array:  Return frames as an interleaved comparison (using :py:class:`lvsfunc.comparison.Interleave`)
                          (Default: ``False``)
    :param return_frames: Adds `frames list` to the return. (Default: ``False``)
    :return:              Either an interleaved clip of the differences between the two clips
                          or a stack of both input clips on top of MakeDiff clip.
                          Optionally, you can allow the function to also return a list of frames as well.
    :raises ValueError:   If `thr` >= 128, on variable-format clips, or when
                          no differing frames are found.
    """
    if thr >= 128:
        raise ValueError("diff: `thr` must be below 128")
    if None in (tv.format, bd.format):
        raise ValueError("diff: variable-format clips not supported")
    # Work in 8-bit so the Min/Max thresholds are in a known value range.
    tv, bd = vsutil.depth(tv, 8), vsutil.depth(bd, 8)
    frames = []
    if thr <= 1:
        # Method 1: per-frame normalized difference via PlaneStatsDiff.
        for i, f in enumerate(core.std.PlaneStats(tv, bd).frames()):
            print(f"Progress: {i}/{tv.num_frames} frames", end="\r")
            if get_prop(f, 'PlaneStatsDiff', float) > thr:
                frames.append(i)
        diff = core.std.MakeDiff(tv, bd)
    else:
        # Method 2: flag frames whose difference clip has extreme
        # Min/Max plane statistics (outside [thr, 255 - thr]).
        diff = core.std.MakeDiff(tv, bd).std.PlaneStats()
        if diff.format is None:
            raise ValueError("diff: variable-format clips not supported")  # this is for mypy
        t = float if diff.format.sample_type == vs.SampleType.FLOAT else int
        for i, f in enumerate(diff.frames()):
            print(f"Progress: {i}/{diff.num_frames} frames", end='\r')
            if get_prop(f, 'PlaneStatsMin', t) <= thr or get_prop(f, 'PlaneStatsMax', t) >= 255 - thr > thr:
                frames.append(i)
    if not frames:
        raise ValueError("diff: no differences found")
    if return_array:
        # Interleaved TV/BD comparison of only the differing frames.
        tv, bd = tv.text.FrameNum(9), bd.text.FrameNum(9)
        comparison = Interleave({'TV': core.std.Splice([tv[f] for f in frames]),
                                 'BD': core.std.Splice([bd[f] for f in frames])}).clip
        return comparison if not return_frames else (comparison, frames)
    else:
        # Side-by-side TV/BD stack on top of the (double-sized) diff clip.
        scaled_width = vsutil.get_w(height, only_even=False)
        diff = diff.resize.Spline36(width=scaled_width * 2, height=height * 2).text.FrameNum(9)
        tv, bd = (c.resize.Spline36(width=scaled_width, height=height).text.FrameNum(9) for c in (tv, bd))
        tvbd_stack = Stack({'TV': core.std.Splice([tv[f] for f in frames]),
                            'BD': core.std.Splice([bd[f] for f in frames])}).clip
        diff = diff.text.Text(text='diff', alignment=8)
        diff = core.std.Splice([diff[f] for f in frames])
        comparison = Stack((tvbd_stack, diff), direction=Direction.VERTICAL).clip
        return comparison if not return_frames else (comparison, frames)
from typing import List
import os
def extract_wheel(wheel_file: str, extras: List[str]) -> str:
    """Extracts wheel into given directory and creates a py_library target.

    Side effects: creates a directory named after the sanitized wheel name
    under the current working directory, unzips the wheel into it, writes a
    BUILD file there, and deletes the original .whl file.

    Args:
        wheel_file: the filepath of the .whl
        extras: a list of extras to add as dependencies for the installed wheel
    Returns:
        The Bazel label for the extracted wheel, in the form '//path/to/wheel'.
    """
    whl = Wheel(wheel_file)
    directory = bazel.sanitise_name(whl.name)
    os.mkdir(directory)
    whl.unzip(directory)
    # Note: Order of operations matters here
    purelib.spread_purelib_into_root(directory)
    bazel.setup_namespace_pkg_compatibility(directory)
    # Generate a BUILD file whose deps are the wheel's (extras-expanded)
    # dependencies, each referenced by its own sanitized label.
    with open(os.path.join(directory, "BUILD"), "w") as build_file:
        build_file.write(
            bazel.generate_build_file_contents(
                bazel.sanitise_name(whl.name),
                [
                    '"//%s"' % bazel.sanitise_name(d)
                    for d in sorted(whl.dependencies(extras_requested=extras))
                ],
            )
        )
    # The wheel archive is no longer needed once extracted.
    os.remove(whl.path)
    return "//%s" % directory
from typing import List
def random_choice(choices: List[float]) -> float:
    """Selects a random choice within a list.

    Uses numpy's global RNG to draw a uniform random index; raises
    ValueError if `choices` is empty.
    """
    return choices[np.random.choice(len(choices), size=1)[0]]
def UnescapeUnderscores(s: str):
    """Reverses EscapeWithUnderscores.

    Escapes are of the form `_HEX_` (decoded via chr(int(HEX, 16))),
    with `__` decoding to a literal underscore. Raises ValueError on an
    unterminated escape.
    """
    pieces = []
    pos = 0
    length = len(s)
    while pos < length:
        ch = s[pos]
        if ch != '_':
            pieces.append(ch)
            pos += 1
            continue
        end = s.find('_', pos + 1)
        if end == -1:
            raise ValueError('Not a valid string escaped with `_`')
        code = s[pos + 1:end]
        # An empty escape (`__`) stands for a literal underscore.
        pieces.append('_' if not code else chr(int(code, 16)))
        pos = end + 1
    return ''.join(pieces)
def login(base_config):
    """
    Log in against the configured endpoint and return the access token
    (empty string on failure).
    :return: accessToken string from the login response, or "" on error
    """
    # Pull credentials and endpoint configuration from the config mapping.
    username = base_config.get("username")
    password = base_config.get("password")
    base_url = base_config.get("base_url")
    company_id = base_config.get("company_id")
    app_id = base_config.get("app_id")
    app_secret = base_config.get("app_secret")
    login_url = base_url + "/" + base_config.get("login_url")
    username = username.strip()
    password = password.strip()
    # Login parameters; trim or extend as the API requires.
    login_values = {'accountName': username, "password": password, "companyId": company_id}
    # Sign the login request parameters; comment this line out if no
    # signing/encryption is required.
    login_values = get_sign_key(login_values, "POST", login_url, "", app_id, app_secret)
    headers = {'content-type': 'application/json'}
    response = requests.post(login_url, data=json.dumps(login_values), headers=headers)
    if response.status_code == 200:
        response_data = json.loads(response.text)
        print response.text
        access_token = response_data["data"]['token']["accessToken"]
        return access_token
    else:
        # Best-effort diagnostics on failure (note: Python 2 print syntax).
        print "login failed code: {} , response text: {}".format(response.status_code, response.text)
        print "login_values: {}".format(login_values)
        return ""
def filter_(stream_spec, filter_name, *args, **kwargs):
    """Alternate name for ``filter``, so as to not collide with the
    built-in python ``filter`` operator.
    """
    # Delegates to the module-level ``filter`` helper (presumably defined
    # elsewhere in this package, shadowing the builtin -- confirm), passing
    # all arguments through unchanged.
    return filter(stream_spec, filter_name, *args, **kwargs)
def angle_normalization_0_2pi(angle):
    """Automatically normalize angle value(s) to the range of 0-2pi.
    This function relies on modular arithmetic.
    Parameters
    ----------
    angle : array_like
        The angles to be converted
    Returns
    -------
    normalized_angles : ndarray
        The angles after being normalized to be between 0-2pi.
    """
    # Validate/coerce the input into a float array.
    angle = Robust.valid.validate_float_array(angle)
    # Wrap with modular arithmetic, then lift any non-positive result by a
    # full turn so values land in (0, 2pi].
    wrapped = angle % (2 * np.pi)
    normalized_angles = np.where(wrapped <= 0, wrapped + 2 * np.pi, wrapped)
    return np.array(normalized_angles, dtype=float)
def config_resolve_context(cookie, in_context, in_size):
    """ Auto-generated UCS XML API Method.

    Builds a ConfigResolveContext request with the given session cookie,
    context DN and page size, and returns its XML serialization
    (only dirty/modified fields are emitted).
    """
    method = ExternalMethod("ConfigResolveContext")
    method.cookie = cookie
    # The UCS XML API expects string-typed attributes.
    method.in_context = str(in_context)
    method.in_size = str(in_size)
    xml_request = method.to_xml(option=WriteXmlOption.DIRTY)
    return xml_request
def bots_endpoint(page=1):
    """
    Return bots from the BotList.
    Use the url parameters `url` or `username` to perform a search on the BotList.
    The @-character in usernames can be omitted.
    :param page: The page to display
    :return: All bots (paginated) or the search result if url parameters were used.
    """
    if request.method == 'GET':
        results = list()
        if len(request.args) > 0:
            # Return the bot matching the request arguments (after ?).
            id_arg = request.args.get('id', None)
            username_arg = request.args.get('username', None)
            if id_arg:
                results = Bot.select().where(Bot.id == id_arg).limit(1)
            elif username_arg:
                # Allow for omitting the `@` in the username.
                results = Bot.select().where(
                    (Bot.username == username_arg) | (Bot.username == '@' + username_arg)).limit(1)
            # BUGFIX: the original indexed `results[0]` unconditionally,
            # raising IndexError when the query matched nothing or when
            # neither `id` nor `username` was supplied.
            results = list(results)
            data = results[0].serialize if results else None
            if data:
                res = jsonify({
                    'search_result': data,
                    'meta': {'url': request.url}
                })
                res.status_code = 200
            else:
                res = _error('No bot found with your search parameters.')
                res.status_code = 404
            return res
        else:
            # Return all bots (paginated).
            per_page = 50
            results = Bot.select().paginate(page, per_page)
            data = [i.serialize for i in results]
            if data:
                res = jsonify({
                    'bots': data,
                    'meta': {'page': page, 'per_page': per_page, 'page_url': request.url}
                })
                res.status_code = 200
            else:
                res = _error('No bots found.')
                res.status_code = 500
            return res
import os
import sys
import shutil
import time
import math
def launch_visit_test(args):
    """
    Runs a single VisIt test.

    `args` is a 3-tuple of (test index, test script path, options dict).
    Builds the VisIt CLI command line, runs the test script in its own
    run directory, collects results/cores/logs, and returns a
    TestScriptResult. Tests found in the skip list are not executed.
    """
    # Unpack the (index, script, options) work item.
    idx = args[0]
    test = args[1]
    opts = args[2]
    top_dir = visit_root()
    test_script = abs_path(test_path(),"visit_test_main.py")
    test_dir, test_file = os.path.split(test)
    test_cat = os.path.split(test_dir)[1]
    test_base = os.path.splitext(test_file)[0]
    # Assemble the VisIt command line from the options and per-test flags.
    rcmd = opts["executable"] + " "
    rcmd += opts["vargs"] + " "
    # check for vargs embedded in the test file header
    rcmd += parse_test_specific_vargs(test) + " "
    rcmd += "-exec-timeout %d -idle-timeout %d " % (opts["limit"],opts["limit"])
    rcmd += "-numrestarts 0 "
    if not opts["interactive"]:
        rcmd += "-nowin "
    if not opts["use_pil"]:
        rcmd += "-noPIL "
    if not opts["no_timings"]:
        rcmd += "-timing"
    rcmd += " -cli "
    # Use a per-test config file when one exists next to the test script.
    cfile = pjoin(test_dir,test_base + ".config")
    if os.path.isfile(cfile):
        rcmd += "-config " + cfile + " "
    else:
        rcmd += "-noconfig "
    rcmd += "-geometry %dx%d+32+32 " % (opts["width"],opts["height"])
    rcmd += " -s %s " % os.path.abspath(test_script)
    modes_list = opts["modes"].split(",")
    if "dlb" in modes_list:
        rcmd += " -allowdynamic "
    if "icet" in modes_list:
        rcmd += " -icet "
    modes = ""
    if opts["verbose"]:
        rcmd += " -verbose "
    # Normalize the requested modes into a canonical comma-separated string;
    # scalable mode implies fuzzy image matching unless explicitly disabled.
    fuzzy = opts["fuzzy"]
    if "serial" in modes_list:
        modes = "serial"
    else:
        if "scalable" in modes_list:
            modes = "scalable"
            if not opts["no_fuzzy"]:
                fuzzy = True
        if "parallel" in modes_list:
            if len(modes) > 0:
                modes +=","
            modes +="parallel"
        if "icet" in modes_list:
            if len(modes) > 0:
                modes +=","
            modes +="icet"
    # Each test gets its own scratch run directory under the result dir.
    run_dir = pjoin(opts["result_dir"],"_run","_%s_%s" % (test_cat, test_base))
    # set opts vars
    tparams = {}
    tparams["script"]         = test
    tparams["category"]       = test_cat
    tparams["name"]           = test_base
    tparams["file"]           = test_file
    tparams["modes"]          = modes
    tparams["run_dir"]        = run_dir
    tparams["result_dir"]     = opts["result_dir"]
    tparams["fuzzy_match"]    = fuzzy
    tparams["skip_file"]      = None
    tparams["interactive"]    = opts["interactive"]
    tparams["use_pil"]        = opts["use_pil"]
    tparams["threshold_diff"] = opts["threshold_diff"]
    tparams["threshold_error"]= opts["threshold_error"]
    tparams["pixdiff"]        = opts["pixdiff"]
    tparams["avgdiff"]        = opts["avgdiff"]
    tparams["numdiff"]        = opts["numdiff"]
    tparams["top_dir"]        = top_dir
    tparams["data_dir"]       = opts["data_dir"]
    tparams["src_dir"]        = opts["src_dir"]
    tparams["data_host"]      = opts["data_host"]
    tparams["baseline_dir"]   = opts["baseline_dir"]
    tparams["tests_dir"]      = opts["tests_dir"]
    tparams["visit_bin"]      = opts["executable"]
    tparams["width"]          = opts["width"]
    tparams["height"]         = opts["height"]
    tparams["ctest"]          = opts["ctest"]
    tparams["display_failed"] = opts["display_failed"]
    tparams["parallel_launch"]= opts["parallel_launch"]
    tparams["host_profile_dir"] = opts["host_profile_dir"]
    tparams["sessionfiles"]   = opts["sessionfiles"]
    tparams["cmake_cmd"]      = opts["cmake_cmd"]
    # The simulation dir is the install root (one level above bin/ on
    # non-Windows; the bin dir itself on Windows).
    exe_dir, exe_file = os.path.split(tparams["visit_bin"])
    if sys.platform.startswith("win"):
        tparams["sim_dir"] = os.path.abspath(exe_dir)
    else:
        tparams["sim_dir"] = os.path.abspath(os.path.join(exe_dir, ".."))
    if not opts["no_skip"]:
        tparams["skip_file"] = opts["skip_file"]
    skip = check_skip(opts["skip_list"],modes,test_cat,test_file)
    if skip:
        Log("[Skipping: %s/%s (found in skip list)]" % (test_cat,test_file))
        result = TestScriptResult(idx,
                                  test_cat,
                                  test_base,
                                  test_file,
                                  116, # full skip return code
                                  0,
                                  0)
    else:
        Log("[Launching: %s/%s]" % (test_cat,test_file))
        # run the test in a unique sub dir
        if os.path.isdir(run_dir):
            try:
                shutil.rmtree(run_dir)
            except OSError as e:
                Log("<Error Cleaning up Script Run Directory before launch> %s" % run_dir)
        os.mkdir(run_dir)
        # Hand the parameters to the test harness via a JSON file.
        pfile = open(pjoin(run_dir,"params.json"),"w")
        pfile.write("%s" % json_dumps(tparams))
        pfile.close()
        rcmd += " --params=%s" % os.path.abspath(pjoin(run_dir,"params.json"))
        # get start timestamp
        stime = time.time()
        # change to working dir to our run dir
        curr_dir = os.getcwd()
        os.chdir(run_dir)
        rcode = 0
        # A per-test timeout embedded in the script header overrides the
        # global limit.
        test_specific_limit = parse_test_specific_limit(test)
        if test_specific_limit != -1:
            use_limit = test_specific_limit
        else:
            use_limit = opts["limit"]
        sexe_res = sexe(rcmd,
                        suppress_output=(not (opts["verbose"] or opts["less_verbose"])),
                        echo=opts["verbose"],
                        timeout=use_limit * 1.1) # proc kill switch at 110% of the selected timeout
        # The harness writes its result code to a per-test JSON file.
        json_res_file = pjoin(opts["result_dir"],"json","%s_%s.json" %(test_cat,test_base))
        if os.path.isfile(json_res_file):
            results = json_load(json_res_file)
            if "result_code" in results:
                rcode = results["result_code"]
        # os.mkdir(run_dir)
        if sexe_res["killed"]:
            Log("<Limit killed> %s" % os.path.abspath(test))
            # wait for process chain to die
            time.sleep(1)
        # get end timestamp
        etime = time.time()
        dtime = math.ceil(etime - stime)
        # wait for any logs to flush
        time.sleep(1)
        # change back to prev working dir
        os.chdir(curr_dir)
        # check for core files
        ncores_files = process_cores(run_dir,opts["result_dir"],test_cat,test_base)
        # move logs and timings to the html output dir
        process_runtime_logs(run_dir,opts["result_dir"],test_cat,test_base)
        # parse any output files
        result = TestScriptResult(idx,
                                  test_cat,
                                  test_base,
                                  test_file,
                                  rcode,
                                  ncores_files,
                                  dtime)
        if opts["cleanup"]:
            try:
                shutil.rmtree(run_dir)
            except OSError as e:
                Log("<Error Removing Script Run Directory> %s" % run_dir)
    log_test_result(opts["result_dir"],result)
    return result
from typing import List
from typing import Dict
def parse_secrets(raw: List[str]) -> Dict[str, str]:
    """Parses secrets.

    Each entry must be "KEY=VALUE"; only the first '=' splits, so values
    may themselves contain '='. Raises ValueError on entries with no '='.
    """
    parsed: Dict[str, str] = {}
    for entry in raw:
        key, sep, value = entry.partition('=')
        if not sep:
            raise ValueError(f'Invalid secret "{entry}"')
        parsed[key] = value
    return parsed
async def detect_custom(model: str = Form(...), image: UploadFile = File(...)):
    """
    Performs a prediction for a specified image using one of the available models.
    :param model: Model name or model hash
    :param image: Image file
    :return: Model's Bounding boxes, or an ApiResponse describing the failure
    """
    # Plain detection endpoint: no box drawing, single-image prediction.
    draw_boxes = False
    predict_batch = False
    try:
        output = await dl_service.run_model(model, image, draw_boxes, predict_batch)
        error_logging.info('request successful;' + str(output))
        return output
    except ApplicationError as e:
        # Known/expected failures are logged at warning level and returned
        # to the client with their specific error.
        error_logging.warning(model + ';' + str(e))
        return ApiResponse(success=False, error=e)
    except Exception as e:
        # Anything else is logged but not leaked to the client.
        error_logging.error(model + ' ' + str(e))
        return ApiResponse(success=False, error='unexpected server error')
def ensure_databases_alive(max_retries: int = 100,
                           retry_timeout: int = 5,
                           exit_on_failure: bool = True) -> bool:
    """
    Checks every database alias in ``settings.DATABASES`` until it becomes available. After ``max_retries``
    attempts to reach any backend are failed it returns ``False``. If ``exit_on_failure`` is set it shuts down with
    ``exit(1)``.
    For every database alias it tries to ``SELECT 1``. If no errors raised it checks the next alias.
    :param exit_on_failure: set to ``True`` if there's no sense to continue
    :param int max_retries: number of attempts to reach every database; default is ``100``
    :param int retry_timeout: timeout in seconds between attempts
    :return: ``True`` if all backends are available, ``False`` if any backend check failed
    """
    template = """
    =============================
    Checking database connection `{CONNECTION}`:
        Engine: {ENGINE}
        Host: {HOST}
        Database: {NAME}
        User: {USER}
        Password: {PASSWORD}
    =============================\n"""
    for connection_name in connections:
        _db_settings = dict.fromkeys(['ENGINE', 'HOST', 'NAME', 'USER', 'PASSWORD'])
        _db_settings.update(settings.DATABASES[connection_name])
        _db_settings['CONNECTION'] = connection_name
        # Never echo the real password; only report that one is configured.
        if _db_settings.get('PASSWORD'):
            _db_settings['PASSWORD'] = 'set'
        wf(template.format(**_db_settings))
        wf('Checking db connection alive... ', False)
        for i in range(max_retries):
            try:
                cursor = connections[connection_name].cursor()
                cursor.execute('SELECT 1')
                cursor.fetchone()
                wf('[+]\n')
                break
            except OperationalError as e:
                wf(str(e))
                sleep(retry_timeout)
        else:
            # for/else: every attempt raised OperationalError for this alias.
            wf('Tried %s time(s). Shutting down.\n' % max_retries)
            exit_on_failure and exit(1)
            return False
    return True
return True | e583d3b1cceca43e66c246fa4fea8eb58727ef6c | 3,633,732 |
def __maxCrossingSubArr(seq, low, mid, high):
"""
寻找seq[low..high]跨越了中点mid的最大子数组
总循环次数为high-low+1,线性的
"""
leftSum = float('-Inf')
sumTemp = 0
for i in range(mid, low - 1, -1):
sumTemp += seq[i]
if sumTemp > leftSum:
leftSum = sumTemp
maxLeft = i
rightSum = float('-Inf')
sumTemp = 0
for j in range(mid + 1, high + 1):
sumTemp += seq[j]
if sumTemp > rightSum:
rightSum = sumTemp
maxRight = j
return maxLeft, maxRight, leftSum + rightSum | 542f07214438297623518046c51974cf461b3aa5 | 3,633,733 |
def transition_matrix(embeddings, word_net=False, first_order=False, sym=False, trans=False, **kwargs):
    """
    Build a probabilistic transition matrix from word embeddings.

    :param embeddings: embedding object; must expose ``.m`` (a sparse matrix)
        when ``first_order`` is True, otherwise it is passed on to the
        similarity helpers.
    :param word_net: build the similarity matrix from WordNet instead.
    :param first_order: use the raw (first-order) matrix ``embeddings.m``.
    :param sym: return the symmetric normalization D^-1/2 L D^-1/2.
    :param trans: if True (and not ``sym``), return the transpose of the
        column-normalized matrix.
    :param kwargs: forwarded to :func:`similarity_matrix`.
    :return: dense numpy array of transition weights.
    """
    if word_net:
        L = wordnet_similarity_matrix(embeddings)
    elif not first_order:
        L = similarity_matrix(embeddings, **kwargs)
    else:
        L = embeddings.m.todense().A
    if sym:
        # Symmetric normalization; rows/columns with zero mass get weight 0.
        Dinv = np.diag([1. / np.sqrt(L[i].sum()) if L[i].sum() > 0 else 0 for i in range(L.shape[0])])
        return Dinv.dot(L).dot(Dinv)
    else:
        # Column-normalize. NOTE(review): unlike the symmetric branch this
        # branch does not guard against zero row sums — confirm inputs
        # always have positive mass. (Removed a leftover debug print here.)
        Dinv = np.diag([1. / L[i].sum() for i in range(L.shape[0])])
        L = L.dot(Dinv)
        if trans:
            return L.T
        return L
import os
import sys
def read_met_data():
    """
    Read the CAMELS daymet basin-mean forcing files and derive met series.

    Side effects: changes the process working directory (first to the script
    directory, then into the daymet forcing tree rooted at the module-level
    ``org_os`` path) and prints progress.

    :return: list of five dataframes [precipitation, mean temperature,
        evapotranspiration, snow storage, snow melt]; columns are basin ids
        (file-name prefixes), index is the date.
    """
    # Set the cwd to the directory of the file
    os.chdir(os.path.dirname(sys.argv[0]))
    # Read in the files
    list_prec = []
    list_temp = []
    list_et = []
    list_snowmelt = []
    list_snow_stor = []
    os.chdir(org_os + os.sep + "basin_dataset_public_v1p2" + os.sep + "basin_mean_forcing" + os.sep + "daymet")
    temp_cwd = os.getcwd()
    # Go through the directory order of the CAMELS data set
    for name in os.listdir(temp_cwd):
        print("Directory = " + name)
        os.chdir(temp_cwd + os.sep + name)
        for file in os.listdir(os.getcwd()):
            print(file)
            if file.endswith(".txt"):
                # Use a regular expression to seperate by different
                # amounts of whitespaces
                temp_df = pd.read_csv(file, skiprows=4, sep="\t", header=None,
                                      engine="python",
                                      na_values=-999.00, index_col=0)
                # Get the date as index
                temp_df.index = pd.to_datetime(temp_df.index)
                temp_df.columns = ["dayl(s)","prcp(mm/day)", "srad(W/m2)", "swe(mm)", "tmax(C)", "tmin(C)","vp(Pa)"]
                # Daily mean temperature from min/max.
                temp_df["tmean(C)"] = (temp_df["tmax(C)"] + temp_df["tmin(C)"]) / 2
                prec_df = temp_df["prcp(mm/day)"]
                prec_df.name = file.split("_")[0]
                temperature_df = temp_df["tmean(C)"]
                temperature_df.name = file.split("_")[0]
                # Hargreaves ET from radiation (converted W/m2 -> MJ) and temps.
                et_rad = watt_to_MJ(temp_df["srad(W/m2)"])
                et_df = hargreaves(temp_df["tmin(C)"], temp_df["tmax(C)"], temp_df["tmean(C)"], et_rad)
                et_df.name = file.split("_")[0]
                snow_storage_df, snow_melt_df = simple_snowmelt.snow_model(prec_df, temperature_df)
                snow_storage_df.name = file.split("_")[0]
                snow_melt_df.name = file.split("_")[0]
                list_prec.append(prec_df)
                list_temp.append(temperature_df)
                list_et.append(et_df)
                list_snowmelt.append(snow_melt_df)
                list_snow_stor.append(snow_storage_df)
    # Combine all separate streams into one dataframe.
    return [pd.concat(dfs, axis=1) for dfs in [list_prec, list_temp, list_et, list_snow_stor, list_snowmelt]]
def gen_review_vecs(reviews, model, num_features):
    """
    Build an m-by-n feature matrix from all reviews, where m is
    len(reviews) and n is num_features.

    Input:
        reviews: a list of lists.
                 Inner lists are words from each review.
                 Outer lists consist of all reviews.
        model: trained word2vec model.
        num_features: dimension of word2vec vectors.
    Output: m-by-n float32 numpy array of one vector per review.
    """
    review_feature_vecs = np.zeros((len(reviews), num_features), dtype="float32")
    # enumerate replaces the manual counter; compare against int 0, not 0.
    for index, review in enumerate(reviews):
        if index % 1000 == 0:
            # Progress log every 1000 reviews.
            print("Vectorizing review %d of %d" % (index, len(reviews)))
        review_feature_vecs[index] = review_to_vec(review, model, num_features)
    return review_feature_vecs
def ott(high, low, close, length=None,_shift=None, multiplier=None, **kwargs):
    """Indicator: Optimized Trend Tracker (OTT).

    Note: the header previously said "Supertrend", but the code builds OTT
    columns (OTTSL / OTT / OTTd) from a VIDYA moving average with
    percent-based bands.

    :param high, low, close: price series (high/low are validated but only
        close is used below).
    :param length: VIDYA length (default 7).
    :param _shift: forward shift applied to the OTT column (default 0).
    :param multiplier: band width in percent (default 2.0).
    :return: DataFrame with OTTSL (VIDYA), OTT line and OTTd direction.
    """
    # Validate Arguments
    high = verify_series(high)
    low = verify_series(low)
    close = verify_series(close)
    length = int(length) if length and length > 0 else 7
    shift = int(_shift) if _shift and _shift > 0 else 0
    multiplier = float(multiplier) if multiplier and multiplier > 0 else 2.0
    # Calculate Results
    m = close.size
    dir_, trend, _ott = [0] * m, [0] * m, [0] * m
    # long, short = [npNaN] * m, [npNaN] * m
    MAvg = vidya(close,length)
    # Bands are a percentage of the moving average (multiplier is in percent).
    fark = MAvg*multiplier*0.01
    upperband = MAvg + fark
    lowerband = MAvg - fark
    for i in range(1, m):
        # Direction flips when the average crosses the previous bar's band.
        if MAvg.iloc[i] > upperband.iloc[i - 1]:
            dir_[i] = 1
        elif MAvg.iloc[i] < lowerband.iloc[i - 1]:
            dir_[i] = -1
        else:
            dir_[i] = dir_[i - 1]
        # Ratchet the bands: they only tighten in the current direction.
        if dir_[i] > 0 and lowerband.iloc[i] < lowerband.iloc[i - 1]:
            lowerband.iloc[i] = lowerband.iloc[i - 1]
        if dir_[i] < 0 and upperband.iloc[i] > upperband.iloc[i - 1]:
            upperband.iloc[i] = upperband.iloc[i - 1]
        if dir_[i] > 0:
            # lowerband.iloc[i] = lowerband.iloc[i]*(200+multiplier)/200
            trend[i] = lowerband.iloc[i]
        else:
            # upperband.iloc[i] = upperband.iloc[i]*(200-multiplier)/200
            trend[i] = upperband.iloc[i]
        if MAvg.iloc[i]>trend[i]:
            _ott[i] = trend[i]*(200+multiplier)/200
        else:
            _ott[i] = trend[i]*(200-multiplier)/200
    # Prepare DataFrame to return
    _props = f"_{length}_{multiplier}"
    df = DataFrame({
            f"OTTSL{_props}": MAvg,
            f"OTT{_props}": _ott,
            f"OTTd{_props}": dir_,
        }, index=close.index)
    df.name = f"OTT{_props}"
    df.category = "overlap"
    # Apply offset if needed
    df[f"OTT{_props}"] = df[f"OTT{_props}"].shift(shift)
    # Handle fills
    if 'fillna' in kwargs:
        df.fillna(kwargs['fillna'], inplace=True)
    if 'fill_method' in kwargs:
        df.fillna(method=kwargs['fill_method'], inplace=True)
    return df
def lambda_cut_series(x, mfx, n):
    """
    Determines a series of lambda-cuts in a sweep from 0+ to 1.0 in n steps.
    Parameters
    ----------
    x : 1d array
        Universe function for fuzzy membership function mfx.
    mfx : 1d array
        Fuzzy membership function for x.
    n : int
        Number of steps.
    Returns
    -------
    z : 2d array, (n, 3)
        Lambda cut intevals. Each row is [lambda, interval_min, interval_max].
    """
    x = np.asarray(x)
    mfx = np.asarray(mfx)
    # Evenly spaced lambda levels from min(mfx) to max(mfx) inclusive;
    # eps nudges the endpoint so arange includes mfx.max().
    step = (mfx.max() - mfx.min()) / float(n - 1)
    lambda_cuts = np.arange(mfx.min(), mfx.max() + np.finfo(float).eps, step)
    z = np.zeros((n, 3))
    z[:, 0] = lambda_cuts.T
    # Row 0 is the support of the membership function.
    z[0, [1, 2]] = _support(x, mfx)
    for ii in range(1, n):
        xx = _lcutinterval(x, mfx, lambda_cuts[ii])
        z[ii, [1, 2]] = xx
    return z
def validate_schema(request, schema_instance):
    """Decorator factory: validate the request's JSON payload against a schema.

    The wrapped view executes only after ``schema_instance`` has successfully
    loaded the JSON body obtained from ``request``.
    """
    def decorator(view_func):
        @wraps(view_func)
        def wrapped(*args, **kwargs):
            payload = request.get_json()
            schema_instance.load_json_into_schema(payload)
            return view_func(*args, **kwargs)
        return wrapped
    return decorator
def has_inference_based_loaders(cfg: CfgNode) -> bool:
    """
    Return True if at least one inference-based loader must be
    instantiated for training (i.e. BOOTSTRAP_DATASETS is non-empty).
    """
    return bool(cfg.BOOTSTRAP_DATASETS)
def get_terrain_for_coord(x, y):
    """Get the terrain type for a coordinate.
    :param int x: The x coordinate
    :param int y: The y coordinate
    :returns tuple(Terrain, bool): The terrain type and whether it is diverse
    """
    # Render a 1x1 map window at (x, y); each field comes back as a 2D grid,
    # so [0][0] extracts the single cell's value.
    elevation, moisture, temperature, diversity = _render_map_data(
        1, 1, (x, y))
    terrain = TERRAIN.get_terrain_for_point(
        elevation[0][0], moisture[0][0], temperature[0][0])
    return terrain, terrain.is_diverse(diversity[0][0])
def read_Image8(Object, Channel, iFlags=0):
    """
    read_Image8(Object, Channel, iFlags=0) -> bool
    read_Image8(Object, Channel) -> bool
    """
    # Thin wrapper delegating to the generated _Channel extension module.
    return _Channel.read_Image8(Object, Channel, iFlags)
import math
def Linear(in_features, out_features, dropout=0):
    """Weight-normalized Linear layer (input: N x T x C).

    Weights are drawn from N(0, (1 - dropout) / in_features); bias starts
    at zero. The layer is wrapped with weight normalization.
    """
    layer = nn.Linear(in_features, out_features)
    std = math.sqrt((1 - dropout) / in_features)
    layer.weight.data.normal_(mean=0, std=std)
    layer.bias.data.zero_()
    return nn.utils.weight_norm(layer)
def get_state(initial, input_value=None):
    """Get new state, filling initial and optional input_value."""
    state = {
        'last_position': None,
        'initial': [initial],
        'input': [],
        'output': [],
    }
    # Only an explicitly provided input_value (None means "absent") is queued.
    if input_value is not None:
        state['input'] = [input_value]
    return state
import operator
def predictkNNLabels(closest_neighbors, y_train):
    """Predict the label of a single test point from its nearest neighbours.

    Tallies how often each label occurs among the neighbours and returns
    the most frequent one (first-seen label wins on ties, matching the
    stable descending sort of the original implementation)."""
    counts = {}
    for neighbor in closest_neighbors:
        label = y_train[neighbor][0]
        counts[label] = counts.get(label, 0) + 1
    # max() returns the first label reaching the top count.
    return max(counts.items(), key=operator.itemgetter(1))[0]
def fetch_dataset(filename):
    """
    Useful util function for fetching records
    """
    # 32 MiB read buffer per file keeps TFRecord streaming efficient.
    return tf.data.TFRecordDataset(filename, buffer_size=32 * 1024 * 1024)
def get_alma_project(ra,de, radius_arcsec=10/3600):
    """Return ALMA project IDs (if exists) given coordinates.

    :param ra: right ascension (same units as the alma_obslog table).
    :param de: declination.
    :param radius_arcsec: half-width of the square search box; the default
        10/3600 suggests the value is in degrees despite the name — confirm.
    """
    # set up connection
    cnx = db.get_cnx(cfg.mysql['user'], cfg.mysql['passwd'],
                     cfg.mysql['host'], cfg.mysql['db_sdb'])
    if cnx is None:
        return
    cursor = cnx.cursor(buffered=True)
    # Square-box search (BETWEEN on both axes), not a true cone search.
    cursor.execute("SELECT DISTINCT project_code FROM alma_obslog WHERE "
                   "ra BETWEEN {}-{} AND {}+{} AND "
                   "dec_ BETWEEN {}-{} AND {}+{};"
                   "".format(ra,radius_arcsec,ra,radius_arcsec,
                             de,radius_arcsec,de,radius_arcsec))
    return [s for (s,) in cursor]
from typing import Optional
def read_certification_data(reader: PdfFileReader) -> Optional[DocMDPInfo]:
    """
    Read the certification information for a PDF document, if present.
    :param reader:
        Reader representing the input document.
    :return:
        A :class:`.DocMDPInfo` object containing the relevant data, or ``None``.
    """
    try:
        # A certification signature lives under /Perms -> /DocMDP in the
        # document catalog; absence of either key means "not certified".
        certification_sig = reader.root['/Perms']['/DocMDP']
    except KeyError:
        return
    perm = _extract_docmdp_for_sig(certification_sig)
    ref = _extract_reference_dict(certification_sig, '/DocMDP')
    md = None
    if ref is not None:
        # Strip the leading '/' from the PDF name and normalise to lowercase.
        md = misc.get_and_apply(
            ref, '/DigestMethod', lambda x: x[1:].lower()
        )
    return DocMDPInfo(perm, md, certification_sig)
from re import T
def from_independent_matroid(matroid: tuple[set[T], list[set[T]]]) -> list[set[T]]:
    """Construct circuits from a matroid defined by independent sets.

    Args:
        matroid: pair (ground set E, list of independent sets).

    Returns:
        The circuits of the matroid: minimal dependent subsets of E.
    """
    ground_set, independents = matroid
    # Dependent sets: subsets of E that are not independent.
    dependents = [subset for subset in powset(ground_set) if subset not in independents]
    # Circuits are the minimal dependent sets (no proper dependent subset).
    return [candidate for candidate in dependents
            if not any(other < candidate for other in dependents)]
from .ir import ModularIndexing
import sympy
def join_dimensions(expr: sympy.Expr) -> sympy.Expr:
    """
    ModularIndexing(i0, 1, 32) + 32 * ModularIndexing(i0, 32, 4)
    becomes
    ModularIndexing(i0, 1, 128)
    This type of pattern can come from view operations
    """
    # Only sums of terms can contain the split-dimension pattern.
    if not isinstance(expr, sympy.Add):
        return expr
    scale = sympy.Wild("scale", exclude=[0])
    base = sympy.Wild("base")
    divisor = sympy.Wild("divisor")
    mod1 = sympy.Wild("modulus")
    mod2 = sympy.Wild("modulus2")
    for term1 in expr.args:
        # Low half of a split index: scale * ModularIndexing(base, divisor, mod1)
        m1 = term1.match(scale * ModularIndexing(base, divisor, mod1))
        if m1:
            for term2 in expr.args:
                # Matching high half: same base, scaled by scale * mod1.
                m2 = term2.match(
                    m1[scale] * m1[mod1] * ModularIndexing(m1[base], m1[mod1], mod2)
                )
                if m2 and term1 != term2:
                    # Merge the pair into one wider ModularIndexing and
                    # recurse in case more pairs can be folded.
                    expr = join_dimensions(
                        expr
                        - term1
                        - term2
                        + m1[scale]
                        * ModularIndexing(m1[base], m1[divisor], m1[mod1] * m2[mod2])
                    )
                    return expr
    return expr
import gzip
import time
import sys
def map_trans_tfrecord(vcf_tfrecord, phenotype_df, covariates_df, interaction_s=None, return_sparse=True, pval_threshold=1e-5, maf_threshold=0.05, batch_size=50000, logger=None):
    """Run trans-QTL mapping from genotypes in tfrecord.

    Streams genotype batches from ``vcf_tfrecord`` through a TF1 graph and
    computes genotype-phenotype associations, optionally with an interaction
    term. Returns a DataFrame of p-values: sparse long format when
    ``return_sparse`` (thresholded at ``pval_threshold``) else a dense
    variant-by-phenotype matrix; optionally filtered by ``maf_threshold``.
    """
    if logger is None:
        logger = SimpleLogger()
    # Sample order must agree between phenotypes and covariates.
    assert np.all(phenotype_df.columns==covariates_df.index)
    with open(vcf_tfrecord+'.samples') as f:
        vcf_sample_ids = f.read().strip().split('\n')
    n_samples_vcf = len(vcf_sample_ids)
    with gzip.open(vcf_tfrecord+'.variants.gz', 'rt') as f:
        variant_ids = f.read().strip().split('\n')
    variant_dict = {i:j for i,j in enumerate(variant_ids)}
    n_variants = len(variant_ids)
    # index of VCF samples corresponding to phenotypes
    ix_t = get_sample_indexes(vcf_sample_ids, phenotype_df)
    n_samples = phenotype_df.shape[1]
    # batched_dataset = dataset.apply(tf.contrib.data.padded_batch(batch_size, padded_shapes=[[batch_size], [None]]))
    # batched_dataset = dataset.padded_batch(batch_size, padded_shapes=(batch_size,n_samples), padding_values=0)
    with tf.device('/cpu:0'):
        batched_dataset = genotypeio.make_batched_dataset(vcf_tfrecord, batch_size, ix_t=ix_t)
        iterator = batched_dataset.make_one_shot_iterator()
        next_element = iterator.get_next()
        next_element = genotypeio.pad_up_to(next_element, [batch_size, n_samples]) # not clear if right/best way to do this
    logger.write('  * {} samples'.format(n_samples))
    logger.write('  * {} phenotypes'.format(phenotype_df.shape[0]))
    logger.write('  * {} covariates'.format(covariates_df.shape[1]))
    logger.write('  * {} variants'.format(n_variants))
    if interaction_s is not None:
        logger.write('  * including interaction term')
    num_batches = int(np.ceil(np.true_divide(n_variants, batch_size)))
    # calculate correlation threshold for sparse output
    if return_sparse:
        # Translate the p-value cutoff into an equivalent r^2 threshold so
        # filtering can happen inside the graph.
        dof = n_samples - 2 - covariates_df.shape[1]
        t = stats.t.ppf(pval_threshold/2, dof)**2 / dof
        r2_threshold = t / (1+t)
    else:
        r2_threshold = None
    if interaction_s is None:
        genotypes, phenotypes, covariates = initialize_data(phenotype_df, covariates_df, batch_size=batch_size)
        with tf.device('/gpu:0'):
            p_values, maf = calculate_association(genotypes, phenotypes, covariates, return_sparse=return_sparse, r2_threshold=r2_threshold)
    else:
        genotypes, phenotypes, covariates, interaction = initialize_data(phenotype_df, covariates_df, batch_size=batch_size, interaction_s=interaction_s)
        p_values, maf = calculate_association(genotypes, phenotypes, covariates, interaction_t=interaction, return_sparse=return_sparse, r2_threshold=r2_threshold)
    # g = _parse_function(next_element, batch_size, n_samples, ix_t)
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    start_time = time.time()
    with tf.Session() as sess:
        # run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        # run_metadata = tf.RunMetadata()
        # writer = tf.summary.FileWriter('logs', sess.graph, session=sess)
        sess.run(init_op)
        pval_list = []
        maf_list = []
        for i in range(num_batches):
            sys.stdout.write('\rProcessing batch {}/{}'.format(i+1, num_batches))
            sys.stdout.flush()
            g_iter = sess.run(next_element)
            # g_iter = sess.run(g)
            p_ = sess.run([p_values, maf], feed_dict={genotypes:g_iter})#, options=run_options, run_metadata=run_metadata)
            # writer.add_run_metadata(run_metadata, 'batch%d' % i)
            pval_list.append(p_[0])
            maf_list.append(p_[1])
        if return_sparse:
            pval = tf.sparse_concat(0, pval_list).eval()
        else:
            pval = tf.concat(pval_list, 0).eval()
        maf = tf.concat(maf_list, 0).eval()
        print()
        # writer.close()
    logger.write('Time elapsed: {:.2f} min'.format((time.time()-start_time)/60))
    if return_sparse:
        # The final batch is padded; drop indices beyond the real variants.
        ix = pval.indices[:,0]<n_variants  # truncate last batch
        v = [variant_dict[i] for i in pval.indices[ix,0]]
        pval_df = pd.DataFrame(
            np.array([v, phenotype_df.index[pval.indices[ix,1]], pval.values[ix], maf[ix]]).T,
            columns=['variant_id', 'phenotype_id', 'pval', 'maf']
        )
        pval_df['pval'] = pval_df['pval'].astype(np.float64)
        pval_df['maf'] = pval_df['maf'].astype(np.float32)
    else:
        # truncate last batch
        pval = pval[:n_variants]
        maf = maf[:n_variants]
        # add indices
        pval_df = pd.DataFrame(pval, index=variant_ids, columns=[i for i in phenotype_df.index])
        pval_df['maf'] = maf
        pval_df.index.name = 'variant_id'
    if maf_threshold is not None and maf_threshold>0:
        logger.write('  * filtering output by MAF >= {}'.format(maf_threshold))
        pval_df = pval_df[pval_df['maf']>=maf_threshold]
    return pval_df
def evolve(model, mutator, population, tournament_size=4):
    """
    Performs crossover and mutation and doubles population size
    :param model: Instance of Model
    :param mutator: Instance of Mutator
    :param population: List of points
    :param tournament_size: Size of tournament
    :return: List of population + List of mutants
    """
    kids = []
    clones = [one.clone() for one in population]
    # range (not the Python-2-only xrange) keeps this Python-3 compatible.
    for _ in range(len(clones)):
        mom = binary_tournament_selection(model, population, tournament_size)
        # Re-draw dad until it differs from mom to avoid self-crossover.
        while True:
            dad = binary_tournament_selection(model, population, tournament_size)
            if not mom == dad: break
        kid = mutator.cross_over(mom, dad)
        kid = mutator.mutate(kid, population)
        kids.append(kid)
    return clones + kids
def cell_snippet(x, is_date=False):
    """create the proper cell snippet depending on the value type"""
    # Exact type checks are deliberate: bool (a subclass of int) must fall
    # through to the string branch, so isinstance() is not used here.
    if type(x) == int:
        number_format = {'type': 'NUMBER', 'pattern': '#,##0'}
    elif type(x) == float:
        if is_date:
            number_format = {'type': 'DATE', 'pattern': 'yyyy/mm/dd hh:mm:ss'}
        else:
            number_format = {'type': 'NUMBER', 'pattern': '#,##0.00'}
    else:
        return {'userEnteredValue': {'stringValue': x}}
    return {
        'userEnteredValue': {'numberValue': x},
        'userEnteredFormat': {'numberFormat': number_format},
    }
import io
def load_image(image_path: str) -> np.ndarray:
    """
    Read image from disk.
    :param image_path: Path to input image.
    :return: uint8 numpy array sized H x W.
    """
    # load image
    # NOTE(review): `io` here must be an image reader (e.g. skimage.io) —
    # the stdlib `io` module has no `imread`; confirm the module-level import.
    img = io.imread(image_path)
    # assert img dtype
    assert img.dtype == 'uint8'
    return img
def lowerUserList(inputList):
    """Lowercase user inputLists in case there are misspellings. (e.g. 3-6KB)"""
    return [entry.lower() for entry in inputList]
def na_cmp():
    """Binary operator for comparing NA values.
    Should return a function of two arguments that returns
    True if both arguments are (scalar) NA for your type.
    By default both must be ``None``.
    """
    def both_none(left, right):
        return left is None and right is None
    return both_none
def get_list_files(initializer):
    """Collect files matching the configured extensions below the configured roots.

    :param initializer: callable returning the search settings (roots,
        extensions and ignore lists).
    :return: flat list of path fragments ('#'-joined, then split back).
    """
    # get settings for find
    # NOTE(review): initializer() returns five values but the last three all
    # rebind `ignoreLists`, so only the final one survives — confirm intended.
    roots, listOfExtension, ignoreLists, ignoreLists, ignoreLists = initializer()
    resultArgv = ''
    for pathes in roots:
        for at in listOfExtension:
            listSlice = list()
            listSlice.append(at)
            # search
            resultList, msg = dirs_walker.find_files_down_tree_(pathes, listSlice, ignoreLists)
            # the list has been obtained and can be processed here
            # (a processing callback could be passed in instead)
            resultList.append('#')
            resultArgv += '#'.join(resultList)
    # open editor
    resultArgv = resultArgv.split('#')
    return resultArgv
def constant(name, shape, value, dtype=tf.float32):
    """ Creates a variable which is initiated to a constant value.
    :param name: The name of the variable.
    :param shape: The shape of the variable.
    :param value: The constant value of the tensor.
    :param dtype: The data type.
    :return: A constant-initiated variable.
    """
    return tf.get_variable(
        name=name,
        shape=shape,
        initializer=tf.constant_initializer(value=value, dtype=dtype),
    )
def _cat_blob(repo, obj, bad_ok=False):
    """Call `git cat-file blob OBJ`.
    Parameters
    ----------
    repo : GitRepo
    obj : str
        Blob object.
    bad_ok : boolean, optional
        Don't fail if `obj` doesn't name a known blob.
    Returns
    -------
    Blob's content (str) or None if `obj` is not and `bad_ok` is true.
    """
    if bad_ok:
        # Allow the git call to fail quietly; we inspect stderr below.
        kwds = {"expect_fail": True, "expect_stderr": True}
    else:
        kwds = {}
    try:
        out_cat = repo.call_git(["cat-file", "blob", obj], **kwds)
    except CommandError as exc:
        # Only swallow "bad file" errors, and only when the caller opted in.
        if bad_ok and "bad file" in exc.stderr:
            out_cat = None
        else:
            raise
    return out_cat
import torch
def get_disp_samples(max_dis, feature_map, stage_id=0, disprity_map=None, step=1, samp_num=9, sample_spa_size=None) :
    """function: get the sampled disparities
    args:
        max_dis: the maximum disparity;
        feature map: left or right feature map, N*C*H*W;
        disprity_map: if it is not the first stage, we need disparity map to be the sampling center in new cost volume, N*H*W;
        step: the step size between each samples, where -1 represents the traditional sampling from 0 to max_disp;
        samp_num: the total number of samples;
        sample_spa_size: spatial window used to pool the disparity bounds
            (defaults per stage: 3, 5 or 7);
    return:
        the sampled disparities for each pixel, N*S*H*W;
    """
    # print("disprity_map: {}".format(disprity_map.size()))
    batch_size, channels, height, width = feature_map.size()
    if disprity_map is None or step==-1 or stage_id==0 :
        # First stage (or dense mode): sample every integer disparity 0..max_dis-1.
        disp_samples = torch.arange(max_dis, dtype=feature_map.dtype, device=feature_map.device).expand(batch_size,height,width,-1).permute(0,3,1,2)
    else :
        # # get the range only from one pixel
        # lower_bound = disprity_map-(samp_num/2)*step
        # upper_bound = disprity_map+(samp_num/2)*step
        # lower_bound = lower_bound.clamp_(min=0.0)
        # upper_bound = upper_bound.clamp_(max=max_dis)
        # get the range from the pixel and its neighbors
        if sample_spa_size is None :
            kernel_size = 3 if stage_id==1 else 5 if stage_id==2 else 7
        else :
            kernel_size = sample_spa_size
        # -max_pool(-x) is a min-pool; abs() recovers the (assumed
        # non-negative) local minimum disparity — confirm disparities >= 0.
        lower_bound = torch.abs( torch.max_pool2d(-disprity_map, kernel_size=kernel_size, stride=1, padding=int((kernel_size-1)//2)) )
        upper_bound = torch.max_pool2d(disprity_map, kernel_size=kernel_size, stride=1, padding=int((kernel_size-1)//2) )
        # Widen too-narrow ranges symmetrically so they span samp_num*step.
        modified_disp_range = (samp_num*step - (upper_bound-lower_bound)).clamp(min=0) / 2
        lower_bound = (lower_bound - modified_disp_range).clamp(min=0, max=max_dis)
        upper_bound = (upper_bound + modified_disp_range).clamp(min=0, max=max_dis)
        # Evenly place samp_num samples between the per-pixel bounds.
        new_step = (upper_bound-lower_bound) / (samp_num-1)
        disp_samples = lower_bound.unsqueeze(1) + (torch.arange(0, samp_num, device=disprity_map.device,
                                        dtype=disprity_map.dtype, requires_grad=False).reshape(1, -1, 1, 1) * new_step.unsqueeze(1))
        # disp_samples = []
        # for i in np.arange(samp_num) :
        #     disp_samples.append(lower_bound+i*step)
        # disp_samples = torch.stack(disp_samples,dim=1)
        # disp_samples = []
        # for i in np.arange(-(samp_num//2)*step, samp_num//2*step, step) :
        #     disp_samples.append(disprity_map+i)
        # disp_samples = torch.stack(disp_samples,dim=1)
        # disp_samples = disp_samples.clamp_(min=0,max=max_dis-1)
    # print("disp_samples: {}".format(disp_samples.size()))
    return disp_samples
def load_data(database_filepath):
    """
    Load cleaned data from database_filepath
    INPUT
    database_filepath --filepath to the sqlite database file
    OUTPUT
    X - message column used to predict Y values
    Y - dataframe of columns to be predicted
    category_names - names of the Y columns
    """
    # load data from database
    database_filepath = 'sqlite:///' + database_filepath
    #engine = create_engine('sqlite:///disasterResponse.db')
    engine = create_engine(database_filepath)
    df = pd.read_sql('SELECT * FROM disaster_msg_clean',con=engine,index_col='id')
    #select message column
    X = df['message'].dropna()
    #skip first three columns : message, original, genre and select the rest of the columns
    # NOTE(review): X and Y are dropna'd independently, so their indices may
    # diverge — confirm alignment is handled downstream.
    Y = df.iloc[:,3:].dropna()
    #name of Y column names
    category_names = Y.columns.values
    return X,Y,category_names
def sigma_clip(array, flags=None, sigma=4.0, axis=0, min_N=4):
    """
    one-iteration robust sigma clipping algorithm. returns clip_flags array.
    Warning: this function will directly replace flagged and clipped data in array with
    a np.nan, so as to not make a copy of array.
    Parameters:
    -----------
    array : ndarray of complex visibility data. If 2D, [0] axis is samples and [1] axis is freq.
    flags : ndarray matching array shape containing boolean flags. True if flagged.
    sigma : float, sigma threshold to cut above
    sigma : float, sigma threshold to cut above
    axis : int, axis of array to sigma clip
    min_N : int, minimum length of array to sigma clip, below which no sigma
        clipping is performed.
    Output:
    -------
    clip_flags : type=boolean ndarray, has same shape as input array, but has clipped
        values set to True. Also inherits any flagged data from flags array
        if passed.
    """
    # ensure array is an array
    if not isinstance(array, np.ndarray):
        array = np.array(array)
    # ensure array passes min_N criteria:
    if array.shape[axis] < min_N:
        # np.bool was removed from modern numpy; builtin bool is the dtype.
        return np.zeros_like(array, bool)
    # create empty clip_flags array
    clip_flags = np.zeros_like(array, bool)
    # inherit flags if fed and apply flags to data (mutates caller's array)
    if flags is not None:
        clip_flags += flags
        array[flags] *= np.nan
    # robust location: median, ignoring NaNs
    location = np.nanmedian(array, axis=axis)
    # robust scale: MAD * 1.482579 approximates a Gaussian std
    scale = np.nanmedian(np.abs(array - location), axis=axis) * 1.482579
    # get clipped data
    clip = np.abs(array - location) / scale > sigma
    # set clipped data to nan and set clipped flags to True
    array[clip] *= np.nan
    clip_flags[clip] = True
    return clip_flags
from genrl.core import get_actor_critic_from_name
from genrl.core import get_value_from_name
from genrl.core import get_policy_from_name
from typing import Union
def get_model(type_: str, name_: str) -> Union:
    """
    Utility to get the class of required function
    :param type_: "ac" for Actor Critic, "v" for Value, "p" for Policy
    :param name_: Name of the specific structure of model. (
        Eg. "mlp" or "cnn")
    :type type_: string
    :returns: Required class. Eg. MlpActorCritic
    """
    # Dispatch table replaces the if/elif chain; unknown types still
    # raise a bare ValueError like the original.
    lookups = {
        "ac": get_actor_critic_from_name,
        "v": get_value_from_name,
        "p": get_policy_from_name,
    }
    if type_ not in lookups:
        raise ValueError
    return lookups[type_](name_)
import os
def _preprocess_data(data, data_type, auth=None):
    """Preprocess input data according to the specified type.
    Possoble data types are:
    - "raw" use data as is provided in the request
    - "json_pgframe" create a PandasPGFrame from the provided JSON repr
    - "nexus_dataset" download a JSON dataset from Nexus and
      create a PandasPGFrame from this representation
    # - collection of Nexus resources to build a PG from
    # - (then i guess we need a bucket/org/project/token)

    :param data: request payload; for "nexus_dataset" must carry
        "endpoint", "bucket" and "resource_id" keys.
    :param data_type: one of "raw", "json_pgframe", "nexus_dataset".
    :param auth: authentication token, required for "nexus_dataset".
    :raises ValueError: missing token or unknown data type.
    """
    if data_type == "raw":
        # Use passed data as is
        return data
    elif data_type == "json_pgframe":
        return PandasPGFrame.from_json(data)
    elif data_type == "nexus_dataset":
        if auth is None:
            raise ValueError(
                "To use Nexus-hosted property graph as the dataset "
                "authentication token should be provided in the "
                "request header")
        forge = KnowledgeGraphForge(
            app.config["FORGE_CONFIG"], endpoint=data["endpoint"],
            bucket=data["bucket"], token=auth)
        resource = forge.retrieve(data["resource_id"])
        forge.download(
            resource, "distribution.contentUrl",
            app.config["DOWNLOAD_DIR"])
        downloaded_file = os.path.join(
            app.config["DOWNLOAD_DIR"], resource.distribution.name)
        graph = PandasPGFrame.load_json(downloaded_file)
        # Clean up the temporary download after parsing.
        os.remove(downloaded_file)
        return graph
    else:
        raise ValueError("Unknown data type")
def short_comment(x):
    """Ham comments are often short, such as 'cool video!'"""
    word_count = len(x.text.split())
    return word_count < 5
def xxyy_basis_rotation(pairs, clean_xxyy=False):
    """Generate the measurement circuits."""
    ops = []
    for left, right in pairs:
        # Same per-pair op order as before: rz(a), rz(b), two-qubit gate.
        ops.append(cirq.rz(-np.pi * 0.25).on(left))
        ops.append(cirq.rz(np.pi * 0.25).on(right))
        if clean_xxyy:
            ops.append(cirq.ISWAP.on(left, right)**0.5)
        else:
            ops.append(cirq.FSimGate(-np.pi / 4, np.pi / 24).on(left, right))
    return ops
def partitions_class_attribute(data_points, attr_index):
    """Partition data points by a class attribute.

    Points sharing the same value at ``attr_index`` are grouped into one
    partition.

    :param data_points: List of tuples representing the data points.
    :param attr_index: Index of the attribute inside the tuple used for
        partitioning.
    :return: dict mapping attribute value -> list of points.
    """
    partitions = {}
    for data_point in data_points:
        add_to_list(partitions, data_point[attr_index], data_point)
    return partitions
def set_crop_to_volume(volume, bb_min, bb_max, sub_volume):
    """
    Set a subregion of an nd image in place.

    :param volume: volume image (2D, 3D or 4D ndarray), modified in place.
    :param bb_min: inclusive box region minimum per axis.
    :param bb_max: inclusive box region maximum per axis.
    :param sub_volume: values written into the box region.
    :return: the (mutated) volume.
    :raises ValueError: if the box is not 2, 3 or 4 dimensional.
    """
    dim = len(bb_min)
    if dim not in (2, 3, 4):
        raise ValueError("array dimension should be 2, 3 or 4")
    # One slice per axis replaces the per-dimension np.ix_ branches;
    # bounds are inclusive, hence the +1.
    region = tuple(slice(lo, hi + 1) for lo, hi in zip(bb_min, bb_max))
    volume[region] = sub_volume
    return volume
def display_image_grid(bounding_pts,image,skip_dilate=True,size = 2.5):
    """
    Construct a 9x9 grid image from the padded digit images.

    :param bounding_pts: iterable of (top-left, bottom-right) point pairs,
        one per digit cell.
    :param image: source image; a copy is taken for each extracted block.
    :param skip_dilate: forwarded (inverted) to draw_block.
    :param size: scale passed to scale_and_center.
    :return: (grid image, list of padded digit images).
        NOTE(review): despite the name, nothing is displayed here — the
        plt.imshow call is commented out.
    """
    if not skip_dilate:
        list_digits = [draw_block(i[0],i[1],image.copy(),skip_dilate = False) for i in bounding_pts]
    else:
        list_digits = [draw_block(i[0],i[1],image.copy()) for i in bounding_pts]
    list_padded_digits = [scale_and_center(i,size=size) for i in list_digits]
    list_padded_digits = [pad_scale(i) for i in list_padded_digits]
    # NOTE(review): h, w and sudoku_number are assigned but never used.
    h,w = list_padded_digits[0].shape[:2]
    new_column = 1
    sudoku_number = []
    for idx, i in enumerate(list_padded_digits):
        #store the digit as a 9 by 9 grid
        # Start a new row every 9 digits, otherwise append horizontally.
        if (idx+1)%9 == 1:
            horizontal_stack = i
        else:
            horizontal_stack = np.concatenate((horizontal_stack,i),axis = 1)
        # At the end of each row, stack it below the grid built so far.
        if (idx+1)%9 == 0 and new_column==1:
            vertical_stack = horizontal_stack
            new_column += 1
        elif (idx+1)%9 == 0:
            vertical_stack = np.concatenate((vertical_stack, horizontal_stack),axis = 0)
    # plt.imshow(vertical_stack, cmap = 'gray')
    return vertical_stack, list_padded_digits
import random
import string


def generate_random_name():
    """Return an 8-character lowercase ASCII name usable as a job handle."""
    letters = string.ascii_lowercase
    return "".join(random.choice(letters) for _ in range(8))
# c793c77289e7813cfd679b23613a9b1cd38af941 | 3,633,770
import os


def _compile_explicit_relpath(verb):
    """Refuse to map a verb written as an explicit relative path.

    The verb must contain '/' or '.'; the returned handler only logs a
    "No such file or directory" warning, with an extra note when the path
    exists locally but is simply not on the bash path.
    """
    assert ("/" in verb) or ("." in verb)
    if os.path.exists(verb):
        message = "bash.py: warning: {}: No such file or directory in bash path".format(verb)
    else:
        message = "bash.py: warning: {}: No such file or directory".format(verb)
    return _compile_log_error(message)
# 869874a768a85d113904c515e1ccb31747315bf7 | 3,633,771
import os


def get_commands():
    """
    Return a list of available command names (cached).

    Commands are discovered by looking inside ``crichtoncli.commands``
    via ``find_commands``. Items from this list can then be used in calls
    to ``load_command_class(command_name)``.

    The list is cached in the module-level ``_commands`` on the first call
    and reused on subsequent calls.
    """
    global _commands
    if _commands is None:
        # `list(find_commands(...))` replaces the original redundant
        # `list([name for name in ...])` identity comprehension.
        _commands = list(find_commands(os.path.join(__path__[0], 'commands')))
    return _commands
# c565edbd43e95de2424cd3f3370c6b1b011f2e51 | 3,633,772
def import_skymodel_from_hdf5(filename):
    """Read a SkyModel previously exported to HDF5.

    :param filename: path of the HDF5 file to read
    :return: SkyModel assembled from the stored skycomponents and images
    """
    with h5py.File(filename, 'r') as f:
        n_components = f.attrs['number_skycomponents']
        components = [convert_hdf_to_skycomponent(f['skycomponent%d' % idx])
                      for idx in range(n_components)]
        n_images = f.attrs['number_images']
        images = [convert_hdf_to_image(f['image%d' % idx])
                  for idx in range(n_images)]
        return SkyModel(components=components, images=images)
# b57e8ce7af13f303e6985690c45d1b99fb3b1755 | 3,633,773
def _get_initial_ktensor(init, X, rank, random_state, scale_norm=True):
    """Build the initial KTensor for an optimization run.

    Parameters
    ----------
    init : str or KTensor
        'randn' / 'rand' to draw a random initialization, or an existing
        KTensor to copy.
    X : ndarray
        Tensor that the decomposition is fit to.
    rank : int
        Rank of the decomposition.
    random_state : RandomState or int
        Seed for the random number generator.
    scale_norm : bool
        If True, the norm is scaled to match X (default: True).

    Returns
    -------
    factors : KTensor
        Initial factor matrices used for optimization.
    normX : float or None
        Frobenius norm of the tensor data (None when scale_norm is False).
    """
    normX = linalg.norm(X) if scale_norm else None
    if init == 'randn':
        # TODO - match the norm of the initialization to the norm of X.
        factors = randn_ktensor(X.shape, rank, norm=normX, random_state=random_state)
    elif init == 'rand':
        # TODO - match the norm of the initialization to the norm of X.
        factors = rand_ktensor(X.shape, rank, norm=normX, random_state=random_state)
    elif isinstance(init, KTensor):
        factors = init.copy()
    else:
        raise ValueError("Expected 'init' to either be a KTensor or a string "
                         "specifying how to initialize optimization. Valid "
                         "strings are ('randn', 'rand').")
    return factors, normX
# aab154e034465961e3aee336ad615677d353e487 | 3,633,774
import numpy


def grabocka_params_to_shapelet_size_dict(n_ts, ts_sz, n_classes, l, r):
    """Compute the number and length of shapelets via the Grabocka heuristic.

    Parameters
    ----------
    n_ts: int
        Number of time series in the dataset
    ts_sz: int
        Length of time series in the dataset
    n_classes: int
        Number of classes in the dataset
    l: float
        Fraction of the time-series length used for the base shapelet length
    r: int
        Number of different shapelet lengths to use

    Returns
    -------
    dict
        Maps each shapelet length to the number of shapelets to generate.

    Examples
    --------
    >>> d = grabocka_params_to_shapelet_size_dict(n_ts=100, ts_sz=100, n_classes=3, l=0.1, r=2)
    >>> keys = sorted(d.keys())
    >>> print(keys)
    [10, 20]
    >>> print([d[k] for k in keys])
    [4, 4]

    References
    ----------
    .. [1] J. Grabocka et al. Learning Time-Series Shapelets. SIGKDD 2014.
    """
    base_size = int(l * ts_sz)
    return {
        base_size * step: int(numpy.log10(n_ts * (ts_sz - base_size * step + 1) * (n_classes - 1)))
        for step in range(1, r + 1)
    }
# bc1016e57487374762d801de64059c4b5a5acd07 | 3,633,775
import csv
def gslib(FileName, deli=' ', useTab=False, numIgLns=0, pdo=None):
    """
    Read a GSLIB-format file into a vtkTable.

    The GSLIB format has header lines followed by delimiter-separated ASCII
    data. The first header line is a title (it may carry grid dimensions).
    The second line is the number n of data columns; the next n lines are
    the column (variable) names. The data rows follow.

    Parameters
    ----------
    `FileName` : str
        - The absolute file name with path to read.
    `deli` : str
        - The input file's delimiter. To use a tab delimiter set `useTab`.
    `useTab` : boolean
        - Whether to use a tab delimiter (overrides `deli`).
    `numIgLns` : int
        - The integer number of leading lines to ignore.
    `pdo` : vtk.vtkTable, optional
        - A pointer to the output data object.

    Returns
    -------
    (vtkTable, str) : the populated table and the raw title/header line.
    """
    if pdo is None:
        pdo = vtk.vtkTable() # vtkTable
    if (useTab):
        deli = '\t'
    titles = []
    data = []
    with open(FileName) as f:
        reader = csv.reader(f, delimiter=deli)
        # Skip defined lines.  Calling next(f) directly advances the same
        # underlying file object that `reader` later consumes.
        for i in range(numIgLns):
            next(f)
        # Get file header (part of format)
        header = next(f) # TODO: do something with the header
        #print(os.path.basename(FileName) + ': ' + header)
        # Get titles (one variable name per line)
        numCols = int(next(f))
        for i in range(numCols):
            titles.append(next(f).rstrip('\r\n'))
        # Read data: the reader resumes where the header parsing stopped.
        for row in reader:
            data.append(row)
    _rows2table(data, titles, pdo)
    return pdo, header
# be8259e987627df32254a4231791caf1571c8fad | 3,633,776
from typing import List
def precision_recall(
    similarity_melted_df: pd.DataFrame, replicate_groups: List[str], k: int,
) -> pd.DataFrame:
    """
    Determine the precision and recall at k for all unique replicate groups
    based on a predefined similarity metric (see cytominer_eval.transform.metric_melt).

    Arguments:
    similarity_melted_df - a long pandas dataframe output from transform.metric_melt
    replicate_groups - a list of metadata column names in the original profile
        dataframe to use as replicate columns
    k - an integer indicating how many pairwise comparisons to threshold

    Output:
    pandas DataFrame of precision and recall metrics for all replicate groups
    """
    # Determine pairwise replicates and make sure to sort based on the metric!
    similarity_melted_df = assign_replicates(
        similarity_melted_df=similarity_melted_df, replicate_groups=replicate_groups
    ).sort_values(by="similarity_metric", ascending=False)
    # Check to make sure that the melted dataframe is full
    assert_melt(similarity_melted_df, eval_metric="precision_recall")
    # Extract out specific columns: suffix each replicate column with the
    # first pair-id suffix so the names match the melted frame's columns.
    pair_ids = set_pair_ids()
    replicate_group_cols = [
        "{x}{suf}".format(x=x, suf=pair_ids[list(pair_ids)[0]]["suffix"])
        for x in replicate_groups
    ]
    # Calculate precision and recall for all groups
    precision_recall_df = similarity_melted_df.groupby(replicate_group_cols).apply(
        lambda x: calculate_precision_recall(x, k=k)
    )
    # Rename the columns back to the replicate groups provided
    rename_cols = dict(zip(replicate_group_cols, replicate_groups))
    return precision_recall_df.reset_index().rename(rename_cols, axis="columns")
# 518e667597d3b85683d3b49dd0426a514ef9997f | 3,633,777
def validate_index(n: int, ind: int, command: str):
    """Check that ``ind`` is a valid (possibly negative) index into ``n`` items.

    Args:
        n (int): number of entries in the model repository listing
        ind (int): selected index; negative values count from the end
        command (str): command name, used to tailor the error message

    Returns:
        int: the index unchanged when it lies in ``[-n, n)``.

    Raises:
        IndexError: when the index falls outside the valid range.
    """
    if not (-n <= ind < n):
        raise IndexError(f"Index {ind} does not exist... Run `kaos {command} list` again")
    return ind
# 3aef711caef041d2f4aa1dfdf0b5135d9f626b3c | 3,633,778
import requests
def search_page(request):
    """Page with search results.

    Queries Open Food Facts for the user's search term, looks up candidate
    substitution products in the local database (ordered by nutrition
    grade), records bookmarks on POST, and paginates results 9 per page.
    """
    def _articles_filter(name):
        """Delete articles in product names
        Arguments:
            name {str} -- Name or generic name of product
        """
        exclude = ("de", "des", "au", "aux", 'en', "le",
                   "la", "les", "et", "un", "une", "du", "à")
        produit = name.lower()
        produit = produit.split(" ")
        for e in exclude:
            for x in produit:
                # NOTE(review): `x in e` is a substring test, not equality,
                # and the bare except returns a single word; this looks
                # fragile -- confirm intended behaviour.
                if x in e:
                    try:
                        produit.remove(e)
                    except:
                        return x
        keyword = (" ".join(produit[0:5]))
        keyword = keyword.replace(",", "")
        return keyword
    query = request.GET.get('query')
    URL = 'https://fr.openfoodfacts.org/cgi/search.pl?'
    PARAMETERS = {
        'action': 'process',
        'search_terms': query,
        'sort_by': 'unique_scans_n',
        'axis_x': 'energy',
        'axis_y': 'products_n',
        'page_size': '1',
        'page': '1',
        'json': '1'
    }
    # Fetch the single most-scanned matching product from Open Food Facts.
    r = requests.get(URL, params=PARAMETERS)
    requested_product = r.json()
    try:
        # Match on the raw query first, then fall back to the OFF product
        # name, then to its French generic name.
        substitution_products = Product.objects.annotate(
            search=SearchVector('product_name'),).filter(
            search=_articles_filter(query))
        substitution_products = substitution_products.order_by(
            'nutrition_grades')
        if not substitution_products:
            substitution_products = Product.objects.annotate(
                search=SearchVector('product_name'),).filter(
                search=_articles_filter(requested_product['products'][0]['product_name']))
            substitution_products = substitution_products.order_by(
                'nutrition_grades')
        if not substitution_products:
            substitution_products = Product.objects.annotate(
                search=SearchVector('product_name'),).filter(
                search=_articles_filter(requested_product['products'][0]['generic_name_fr']))
            substitution_products = substitution_products.order_by(
                'nutrition_grades')
    except:
        # NOTE(review): bare except turns every failure into a 404.
        raise Http404
    # Ids of the current user's bookmarked products (marked in the template).
    bookmark_id = list()
    if request.user.is_authenticated:
        qs_bookmark = Bookmark.objects.filter(user=request.user)
        for i in qs_bookmark:
            bookmark_id.append(i.bookmark.id)
    if request.method == 'POST':
        request_post = request.POST.get('bookmark_product_code')
        Bookmark.objects.create(user=request.user, bookmark_id=request_post)
        return HttpResponseRedirect(request.get_full_path())
    # PAGINATION
    # https://docs.djangoproject.com/fr/2.2/topics/pagination/
    paginator = Paginator(substitution_products, 9)  # 9 products by page
    page = request.GET.get('page')
    try:
        substitution_products = paginator.page(page)
    except PageNotAnInteger:
        substitution_products = paginator.page(1)
    except EmptyPage:
        substitution_products = paginator.page(paginator.num_pages)
    # /PAGINATION
    template_name = 'app/search.html'
    context = {
        'query': query,
        'img': IMG,
        'requested_title': f"1er résultat Open Food Facts pour : {query}",
        'requested_product_name': requested_product['products'][0]['product_name'],
        'requested_product_brands': requested_product['products'][0]['brands'],
        'requested_product_quantity': requested_product['products'][0]['quantity'],
        'requested_product_image': requested_product['products'][0]['image_url'],
        'products': substitution_products,
        'paginate': True,
        'bookmarks': bookmark_id,
    }
    return render(request, template_name, context)
# a41143e49cd1e94aa3341f6e30b3dc1d98b22882 | 3,633,779
def calculate_sa_expected_feature_counts(pi, mdp, epsilon=0.0001):
    """Return a dict of expected feature counts for every (state, action) pair.

    Each entry is phi(s) + gamma * sum_{s'} T(s, a, s') * fcounts(s'),
    where fcounts are the per-state feature expectations of policy ``pi``.
    """
    # Per-state feature expectations of the policy.
    state_fcounts = calculate_expected_feature_counts(pi, mdp, epsilon)
    sa_fcounts = dict()
    for state in mdp.states:
        for action in mdp.actions(state):
            sa_fcounts[state, action] = mdp.get_state_features(state)
            # Discounted expectation over successor states.
            for prob, successor in mdp.T(state, action):
                sa_fcounts[state, action] += mdp.gamma * prob * state_fcounts[successor]
    return sa_fcounts
# 19ebd9da4b722078d1f961da0774573176017742 | 3,633,780
def xmlToTag(tag):
    """The opposite of tagToXML(): map an XML-safe tag name back to a table tag.

    "OS_2" maps back to "OS/2"; 8-character identifiers are decoded with
    identifierToTag(); shorter names are right-padded with spaces to a
    4-character width.
    """
    if tag == "OS_2":
        return "OS/2"
    if len(tag) == 8:
        return identifierToTag(tag)
    # Pad short tags to the canonical 4-character width.
    # (The original trailing `return tag` was unreachable dead code and
    # has been removed.)
    return tag + " " * (4 - len(tag))
# 4c6e26f429e273f9b25adc25d262a523fda6467c | 3,633,781
def translate(num_list, transl_dict):
    """Translate an integer list into a number-word list.

    Args:
        num_list: list with integer items (no error handling is performed!).
        transl_dict: dictionary with integer keys and number-word values.

    Returns:
        List of strings: the numbers translated into words, in input order.
    """
    # __getitem__ keeps the original KeyError behaviour for unknown keys.
    return list(map(transl_dict.__getitem__, num_list))
# dec1b25d64acf99dc04885f773ea42a55865bf8d | 3,633,782
def _1d_overlap_filter(x, n_h, n_edge, phase, cuda_dict, pad, n_fft):
    """Do one-dimensional overlap-add FFT FIR filtering.

    Assumed parameter meanings (from usage -- TODO confirm against caller):
    x is the 1-D signal, n_h the FIR filter length, n_edge the number of
    edge samples to pad then trim, phase selects zero-phase compensation
    when it starts with 'zero', cuda_dict holds the precomputed FFT filter,
    pad is the padding mode and n_fft the per-segment FFT length.
    """
    # pad to reduce ringing
    x_ext = _smart_pad(x, (n_edge, n_edge), pad)
    n_x = len(x_ext)
    x_filtered = np.zeros_like(x_ext)
    # Each input segment of n_seg samples yields n_fft output samples that
    # overlap neighbouring segments (overlap-add decomposition).
    n_seg = n_fft - n_h + 1
    n_segments = int(np.ceil(n_x / float(n_seg)))
    # Zero-phase filtering compensates the group delay of half the filter.
    shift = ((n_h - 1) // 2 if phase.startswith('zero') else 0) + n_edge
    # Now the actual filtering step is identical for zero-phase (filtfilt-like)
    # or single-pass
    for seg_idx in range(n_segments):
        start = seg_idx * n_seg
        stop = (seg_idx + 1) * n_seg
        seg = x_ext[start:stop]
        seg = np.concatenate([seg, np.zeros(n_fft - len(seg))])
        prod = _fft_multiply_repeated(seg, cuda_dict)
        # Clip the overlap-add window of this segment to the valid range.
        start_filt = max(0, start - shift)
        stop_filt = min(start - shift + n_fft, n_x)
        start_prod = max(0, shift - start)
        stop_prod = start_prod + stop_filt - start_filt
        x_filtered[start_filt:stop_filt] += prod[start_prod:stop_prod]
    # Remove mirrored edges that we added and cast (n_edge can be zero)
    x_filtered = x_filtered[:n_x - 2 * n_edge].astype(x.dtype)
    return x_filtered
# 4c6825a2df425415d546e47f0c2ba24166e0bf2c | 3,633,783
def LED(state):
    """Drive the Arduino-controlled LED from the webserver state.

    Writes b'1' (on) or b'0' (off) on the serial link, echoes the applied
    state to stdout and returns it as a boolean.
    """
    is_on = (state == True)
    arduino.write(b'1' if is_on else b'0')
    if is_on:
        print('the arduino is in the ON state')
    else:
        print('the arduino is in the OFF state')
    return is_on
# bcdc9bbc315215631af8ca0ef5bc48eb3b0b3b03 | 3,633,784
def noise_mean(corr, scale_factor=1., mode="corr"):
    """Compute the scaled mean noise (delta) from correlation data.

    This is the delta parameter used for weighted normalization.

    Parameters
    ----------
    corr : ndarray
        Correlation function ("corr") or difference function ("diff") model.
    scale_factor : ndarray
        Scaling factor; defaults to 1 for data computed with scale = True.
    mode : str
        Either "corr" or "diff", matching the kind of data in `corr`.

    Returns
    -------
    delta : ndarray
        Scaled delta value, clipped to [0, 1].

    Raises
    ------
    ValueError
        If `mode` is neither "corr" nor "diff".
    """
    scale = np.asarray(scale_factor)
    g = np.divide(corr, scale[..., None])
    baseline = g[..., 0]
    if mode == "corr":
        return np.clip(1 - baseline, 0, 1)
    if mode == "diff":
        return np.clip(baseline / 2, 0, 1)
    raise ValueError("Wrong mode.")
# e94f04664888f981131fd406a1f55e404dcb1581 | 3,633,785
def get_python(uid):
    """Return the path of the virtualenv python binary for the given UID."""
    venv_root = get_venv_folder(uid)
    return venv_root + '/bin/python'
# b8e73ff14df2151915a55d11c1aaba9457dac4c3 | 3,633,786
def login():
    """View of login to the backstage.

    Redirects authenticated users straight to the admin index; otherwise
    validates the login form, signs the administrator in, and honours a
    safe `next` redirect target.
    """
    if current_user.is_authenticated:
        return redirect(url_for('admin.index'))
    form = LoginForm()
    if form.validate_on_submit():
        admin = Administrator.query \
            .filter_by(name=form.name.data).first()
        if admin is not None and admin.verify_password(form.password.data):
            login_user(admin, form.remember_me.data)
            redirect_url = request.args.get('next')
            # Only allow relative redirect targets (open-redirect guard).
            if redirect_url is None or not redirect_url.startswith('/'):
                redirect_url = url_for('admin.index')
            return redirect(redirect_url)
        flash('Invalid administrator name or password')
    return render_template('/auth/login.html', form=form)
# e653f078c9f31477d13cc773db25a9bb2bb16ceb | 3,633,787
def vgg_retinanet(num_classes, backbone='vgg16', inputs=None, modifier=None, **kwargs):
    """ Constructs a retinanet model using a vgg backbone.
    Args
        num_classes: Number of classes to predict.
        backbone: Which backbone to use ('vgg16', 'vgg19', or one of the
            project variants: 'vgg16_5f', 'vgg16_3f', 'vgg16_flow_s',
            'vgg16_flow_y', 'vgg16_flow_3d', 'vgg16_sf', 'vgg16_sf_flow').
        inputs: The inputs to the network (defaults to a Tensor of shape (None, None, 3)).
        modifier: A function handler which can modify the backbone before using it in retinanet (this can be used to freeze backbone layers for example).
    Returns
        RetinaNet model with a VGG backbone.
    """
    # The '_5f' / '_3f' variants are built by dedicated constructors.
    if backbone == 'vgg16_5f':
        return vgg16_retinanet_5f(num_classes=num_classes, inputs=inputs, modifier=modifier, **kwargs)
    if backbone == 'vgg16_3f':
        return vgg16_retinanet_3f(num_classes=num_classes, inputs=inputs, modifier=modifier, **kwargs)
    # choose default input
    if inputs is None and '_sf' not in backbone:
        inputs = keras.layers.Input(shape=(None, None, 3))
    # create the vgg backbone
    if backbone == 'vgg16':
        vgg = keras.applications.VGG16(input_tensor=inputs, include_top=False)
        # weights = '/store/datasets/UAV/models/vgg16-rn-w-s-1-on/snapshots/vgg16_csv_14.h5'
        # weights = '/store/datasets/UA-Detrac/models2/vgg16-1-on/snapshots/vgg16_csv_07.h5'
        # vgg.load_weights(weights, by_name=True)
        # for layer in vgg.layers[:-4]:
        # layer.trainable = False
    elif backbone == 'vgg19':
        vgg = keras.applications.VGG19(input_tensor=inputs, include_top=False)
    elif backbone == 'vgg16_flow_s':
        # Flow variants build their own 6-channel input tensor.
        inputs = keras.layers.Input(shape=(None, None, 6))
        vgg = VGG16_flow_s(input_tensor=inputs, include_top=False)
    elif backbone == 'vgg16_flow_y':
        inputs = keras.layers.Input(shape=(None, None, 6))
        vgg = VGG16_flow_y(input_tensor=inputs, include_top=False)
    elif backbone == 'vgg16_flow_3d':
        inputs = keras.layers.Input(shape=(2, None, None, 3))
        vgg = VGG16_flow_3d(input_tensor=inputs, include_top=False)
    elif backbone == 'vgg16_sf':
        if inputs is None:
            inputs = keras.layers.Input(shape=(None, None, 15))
        vgg = VGG16_sf(input_tensor=inputs, include_top=False)
    elif backbone == 'vgg16_sf_flow':
        if inputs is None:
            inputs = keras.layers.Input(shape=(None, None, 11))
        vgg = VGG16_sf_flow(input_tensor=inputs, include_top=False)
    # elif backbone == 'vgg16_flow_c':
    #     inputs = keras.layers.Input(shape=(None, None, 6))
    #     vgg = VGG16_flow_c(input_tensor=inputs, include_top=False)
    else:
        raise ValueError("Backbone '{}' not recognized.".format(backbone))
    if modifier:
        vgg = modifier(vgg)
    # create the full model
    # The last three pooling stages become the FPN feature levels.
    layer_names = ["block3_pool", "block4_pool", "block5_pool"]
    layer_outputs = [vgg.get_layer(name).output for name in layer_names]
    model = retinanet.retinanet(inputs=inputs, num_classes=num_classes, backbone_layers=layer_outputs, **kwargs)
    # model.save('/store/datasets/ILSVRC2015/models/rn-vgg16-sm256/model.h5')
    # exit(0)
    return model
# cc1e14f45108ede7bed38c40601bcd35a487fb97 | 3,633,788
def fetchWorkflowsSpec(config, listOfWfs):
    """
    Fetch the workload of a list of workflows, keeping only a few
    useful keys per workflow (via ``filterKeys``).

    :param config: component configuration holding the central RequestDB
        URL and couchapp name
    :param listOfWfs: a single workflow name or a list of names
    :return: dict mapping each workflow name to its filtered request dict
    """
    # Accept a single workflow name as well as a list of names.
    # (`str` replaces the Python 2-only `basestring`, which raises
    # NameError under Python 3.)
    if isinstance(listOfWfs, str):
        listOfWfs = [listOfWfs]
    wfDBReader = RequestDBReader(config.AnalyticsDataCollector.centralRequestDBURL,
                                 couchapp=config.AnalyticsDataCollector.RequestCouchApp)
    tempWfs = wfDBReader.getRequestByNames(listOfWfs, True)
    wfShortDict = {}
    for wf in listOfWfs:
        wfShortDict[wf] = filterKeys(tempWfs[wf])
    return wfShortDict
# 77ac4fb477673acc0e08388cc9075c00f7dbbcf5 | 3,633,789
def rel_error(x, y):
    """Return the maximum elementwise relative error between arrays x and y."""
    assert x.shape == y.shape, "tensors do not have the same shape. %s != %s" \
        % (x.shape, y.shape)
    difference = np.abs(x - y)
    denominator = np.maximum(1e-8, np.abs(x) + np.abs(y))
    return np.max(difference / denominator)
# 34166fe707ad3733c7e798319f634eaac549811d | 3,633,790
def wrap_ddp(cls):
    """Return a wrapper subclass for torch DDP / apex classes.

    The wrapper delegates attribute lookups that the DDP class itself
    cannot satisfy to the wrapped inner ``module``.
    """
    class _Wrap(cls):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)

        def __getattr__(self, name):
            # Ask the parent for the wrapped module, then prefer its
            # attributes before falling back to the parent's lookup.
            inner = super().__getattr__('module')
            if hasattr(inner, name):
                return getattr(inner, name)
            return super().__getattr__(name)

    return _Wrap
# d8d4148a4e26e28bca76bbac7ecd0e0cea64b3ba | 3,633,791
def mortgage_max(buyer_dsr):
    """
    Compute the maximum loan available, given the buyer's downpayment and
    the minimum downpayment percentage implied by the home price
    (5% below $500k, 10% up to $1M, 20% above).

    Returns
    -------
    loan : float or None
        Maximum available loan, or None when the home price is unreadable.
    """
    try:
        if buyer_dsr.home_price <= 500000:
            percent = 0.05
        elif buyer_dsr.home_price <= 1000000:
            percent = 0.1
        else:
            percent = 0.2
    except Exception:
        # Price missing or not comparable: no offer can be computed.
        return None
    return buyer_dsr.downpayment / percent
# c1fa429d61100dce20b5a4cbeaef365d39ad14af | 3,633,792
import jinja2
def generate_newsletter(cal_dict):
    """ Given a JSON formatted calendar dictionary, make the text for
    a fascinating newsletter.

    :param cal_dict: calendar dict with at least 'summary' and 'items' keys
    :return: rendered newsletter text (str)
    """
    # Bucket the events by day, limited to the configured horizon.
    sorted_items = organize_events_by_day(
        cal_dict['items'],
        config.NEWSLETTER_MAX_DAYS,
    )
    # pprint.pprint(sorted_items)
    template_loader = jinja2.FileSystemLoader(
        searchpath=TEMPLATE_DIR,
    )
    template_env = jinja2.Environment(
        loader=template_loader,
        lstrip_blocks=True,
        trim_blocks=True,
    )
    # Register the date/URL helper filters used by the newsletter template.
    template_env.filters['humandate'] = get_human_datestring
    template_env.filters['humandateonly'] = get_human_dateonly
    template_env.filters['timeonly'] = get_human_timeonly
    template_env.filters['shorturl'] = shorten_url
    template_env.filters['underline'] = get_underline
    template_env.filters['addtz'] = add_timezone
    template = template_env.get_template( NEWSLETTER_TEMPLATE )
    template_vars = {
        "title": cal_dict['summary'],
        "items" : sorted_items,
        "header" : config.NEWSLETTER_HEADER,
    }
    output_newsletter = template.render(template_vars)
    return output_newsletter
# efc39101c668e7744d95334204778e854747c362 | 3,633,793
import json
def request_api(request,        # type: Request
                path,           # type: Str
                method="GET",   # type: Str
                data=None,      # type: Optional[Union[JSON, Str]]
                headers=None,   # type: Optional[HeadersType]
                cookies=None,   # type: Optional[CookiesType]
                ):              # type: (...) -> AnyResponseType
    """
    Use a pyramid sub-request to request Magpie API routes via the UI. This avoids max retries and closed connections
    when using 1 worker (eg: during tests).
    Some information is retrieved from :paramref:`request` to pass down to the sub-request (eg: cookies).
    If they are passed as argument, corresponding values will override the ones found in :paramref:`request`.
    All sub-requests to the API are assumed to be :py:data:`magpie.common.CONTENT_TYPE_JSON` unless explicitly
    overridden with :paramref:`headers`. Headers are also looked for for additional ``Set-Cookie`` header in case they
    need to be passed down to :paramref:`cookies`.

    :param request: incoming Magpie UI request that requires sub-request to Magpie API, to retrieve required details.
    :param path: local Magpie API path (relative to root without URL).
    :param method: HTTP method to send the API sub-request.
    :param data: JSON dictionary or literal string content of the request body.
    :param headers: override headers to employ for the API sub-request. Defaults to JSON Accept & Content-Type headers.
    :param cookies:
        Override cookies to employ for the API sub-request. Defaults to current logged user.
        For empty cookies (no user), explicitly provide an empty dictionary.
    """
    method = method.upper()
    extra_kwargs = {"method": method}
    if headers:
        headers = dict(headers)
    else:
        headers = {"Accept": CONTENT_TYPE_JSON, "Content-Type": CONTENT_TYPE_JSON}
    # although no body is required per-say for HEAD/GET requests, add it if missing
    # this avoid downstream errors when 'request.POST' is accessed
    # we use a plain empty byte str because empty dict `{}` or `None` cause errors on each case
    # of local/remote testing with corresponding `webtest.TestApp`/`requests.Request`
    if not data:
        data = ""
    # Serialize dict bodies only when the Content-Type says JSON.
    if isinstance(data, dict) and get_header("Content-Type", headers, split=[",", ";"]) == CONTENT_TYPE_JSON:
        data = json.dumps(data)
    if hasattr(cookies, "items"):  # any dict-like implementation
        cookies = list(cookies.items())
    # Explicit override cookies are forwarded as Set-Cookie headers too.
    if cookies and isinstance(headers, dict):
        headers = list(headers.items())
        for cookie_name, cookie_value in cookies:
            headers.append(("Set-Cookie", "{}={}".format(cookie_name, cookie_value)))
    if cookies is None:
        cookies = request.cookies
    # cookies must be added to kw only if populated, iterable error otherwise
    if cookies:
        # at this point, can be either the internal RequestCookies object (from request), that we can pass directly
        # otherwise we have a list (or dict pre-converted to list items), that we must clean up
        # dict/list format happens only when explicitly overriding the input cookies to ignore request ones
        if isinstance(cookies, list):
            # cookies passed as dict/list are expected to provide only the token value without any additional details
            # must trim extra options such as Path, Domain, Max-age, etc. for Authentication to succeed
            cookies = [(name, value.split(";")[0]) for name, value in cookies]
        extra_kwargs["cookies"] = cookies
    subreq = Request.blank(path, base_url=request.application_url, headers=headers, POST=data, **extra_kwargs)
    resp = request.invoke_subrequest(subreq, use_tweens=True)
    return resp
# 4d6131ee53b948298652b357be62e52112015854 | 3,633,794
def unit_vector(vector):
    """Return ``vector`` scaled to unit length (unchanged when its norm is 0)."""
    magnitude = np_norm(vector)
    return vector if magnitude == 0.0 else vector / magnitude
# fbff470eae5bdc6fe43fd883fec83d3ec3e0482e | 3,633,795
import uuid
import base64
import os


def save_files(images):
    """
    Decode a base64-encoded image and store it as a JPEG on disk.

    The image is saved under a fresh UUID file name inside the "imstore"
    directory.

    :param images: base64-encoded image payload (string)
    :return: file name of the stored image (string)
    """
    raw = base64.b64decode(images)
    file_name = "{}.jpg".format(uuid.uuid1())
    destination = os.path.join('imstore/', file_name)
    with open(destination, 'wb') as handle:
        handle.write(raw)
    return file_name
# ba3e6f99570b540470fe687f69fc13ac803c9a39 | 3,633,796
import re
def _generate_nsa_options(query):
    """
    Build (namespace, alias) candidate pairs for a sequence lookup query.

    >>> _generate_nsa_options("NM_000551.3")
    [('refseq', 'NM_000551.3')]
    >>> _generate_nsa_options("ENST00000530893.6")
    [('ensembl', 'ENST00000530893.6')]
    >>> _generate_nsa_options("gi:123456789")
    [('gi', '123456789')]
    >>> _generate_nsa_options("01234abcde")
    [('MD5', '01234abcde%'), ('VMC', 'GS_ASNKvN4=%')]
    """
    if ":" in query:
        # interpret as fully-qualified identifier
        nsa_options = [tuple(query.split(sep=":", maxsplit=1))]
        return nsa_options
    namespaces = infer_namespaces(query)
    if namespaces:
        nsa_options = [(ns, query) for ns in namespaces]
        return nsa_options
    # if hex, try md5 and TRUNC512 (the '%' suffix is a SQL LIKE wildcard
    # for prefix matching -- TODO confirm against the lookup layer)
    if re.match(r"^(?:[0-9A-Fa-f]{8,})$", query):
        nsa_options = [("MD5", query + "%")]
        # TRUNC512 isn't in seqrepo; synthesize equivalent VMC
        id_b64u = hex_to_base64url(query)
        nsa_options += [("VMC", "GS_" + id_b64u + "%")]
        return nsa_options
    # Fall back to an unqualified (None-namespace) lookup.
    return [(None, query)]
# 07f6e9a703464b1c97cac94dfc5806f2ef39f420 | 3,633,797
def identify_place_type(place_type):
    """
    Identify the numeric category of a geoname from its metadata type string.

    "Lieu" entries map to street/area codes (11-19, 30, 40; -1 for hamlets),
    "Edifice" entries map to 9 (or -1 for statues and private buildings),
    anything else logs a warning and returns -1.

    :param place_type: type string from the metadata file
    :return: integer category code
    """
    if re.search("Lieu", place_type):
        if re.search("RUE$", place_type):
            return 11
        elif re.search("AVENUE$", place_type):
            return 12
        elif re.search("ALLEE$", place_type):
            return 13
        elif re.search("ALLEES$", place_type):
            return 14
        elif re.search("PLACE$", place_type):
            return 15
        elif re.search("SQUARE$", place_type):
            return 16
        elif re.search("QUAI$", place_type):
            return 17
        elif re.search("BOULEVARD$", place_type):
            return 18
        elif re.search("QUARTIER", place_type):
            return 18
        elif re.search("PORT$", place_type):
            return 19  # Classified as Edifice, but Street for our purposes
        elif re.search("COMMUNE$", place_type):
            return 30
        elif re.search("HAMEAU$", place_type):
            return -1
        else:
            return 40
    elif re.search("Edifice", place_type):
        if re.search("STATUE$", place_type):
            return -1
        if re.search("PARTICULIER$", place_type):
            return -1
        else:
            return 9
    else:
        # Fixed: the original used a Python 2 print statement (a syntax
        # error under Python 3) referencing the undefined name `geoname`;
        # report the actual parameter instead.
        print("Cannot match geoname %s" % place_type)
        return -1
# edfe5dd42f3ce3e0e1d609a4d73328b2995184ec | 3,633,798
def InterferenceDict(data_list):
    """Build a nested {lat: {lon: data}} dict from (lat, lon, data) triples.

    A single triple (anything that is not a list) is accepted and treated
    as a one-item list.
    """
    if not isinstance(data_list, list):
        data_list = [data_list]
    nested = {}
    for lat, lon, payload in data_list:
        nested.setdefault(lat, {})[lon] = payload
    return nested
# fae34ea1182c6f709691ef1bab72f1796d073a2a | 3,633,799
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.