def dropbox_fileupload(dropbox, request):
""" accepts a single file upload and adds it to the dropbox as attachment"""
attachment = request.POST['attachment']
attached = dropbox.add_attachment(attachment)
return dict(
files=[dict(
name=attached,
type=attachment.type,
)]
)
|
84a89a8b8a21c3a78105d8ef911ee6aba71206f2
| 13,939
|
def series(expr, x, point=0, n=6, dir="+"):
"""Series expansion of expr around point `x=point`.
    See the docstring of Basic.series() for complete details of this wrapper.
"""
return expr.series(x, point, n, dir)
|
fee3fff7a29136813c7d6bf5d1cf7c576651368d
| 13,940
|
def process_transform_funcs(trans_funcs, func_args=None, func_kwargs=None):
"""Process input of the apply_transform_funcs function.
    :param iterable trans_funcs: functions to apply, specified as the function itself or a (function, args, kwargs) tuple
:param dict func_args: function positional arguments, specified as (function, arguments tuple) pairs
:param dict func_kwargs: function keyword arguments, specified as (function, kwargs dict) pairs
:returns: transformation functions for apply_transform_funcs function
:rtype: list
"""
# copy function arguments and make sure they are in dictionaries
func_args = dict(func_args) if func_args else {}
func_kwargs = dict(func_kwargs) if func_kwargs else {}
# loop over specified functions
proc_funcs = []
for func_spec in trans_funcs:
# expand (func, args, kwargs) combinations
try:
func, args, kwargs = (func_spec, None, None) if isinstance(func_spec, str) else func_spec
except TypeError:
func, args, kwargs = func_spec, None, None
except ValueError:
raise ValueError('expected (func, args, kwargs) combination (got {!s})'.format(func_spec))
# get positional arguments
args_ = func_args.pop(func, None)
if args is not None and args_ is not None:
raise RuntimeError('arguments for "{!s}" in both "trans_funcs" and "func_args"'.format(func))
args = tuple(args) if args is not None else tuple(args_) if args_ is not None else ()
# get keyword arguments
kwargs_ = func_kwargs.pop(func, None)
if kwargs is not None and kwargs_ is not None:
raise RuntimeError('keyword arguments for "{!s}" in both "trans_funcs" and "func_kwargs"'.format(func))
kwargs = dict(kwargs) if kwargs is not None else dict(kwargs_) if kwargs_ is not None else {}
# check if function is callable
if not (isinstance(func, str) or callable(func)):
raise TypeError('function "{!s}" is not callable'.format(func))
# append function specification
proc_funcs.append((func, args, kwargs))
# check if all specified arguments were used
if func_args:
raise ValueError('unused function arguments specified: {!s}'.format(func_args))
if func_kwargs:
raise ValueError('unused function keyword arguments specified: {!s}'.format(func_kwargs))
return proc_funcs
|
2e9fe4bec55f0a13a0644cfe27ba887f00e85c55
| 13,942
|
import math
def _get_fov(pillow_image) -> float:
"""Get the horizontal FOV of an image in radians."""
exif_data = pillow_image.getexif()
# 41989 is for 'FocalLengthIn35mmFilm'.
focal_length = exif_data[41989]
# FOV calculation, note 36 is the horizontal frame size for 35mm
# film.
return 2 * math.atan2(36, 2 * focal_length)
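# Illustrative usage (a sketch; "photo.jpg" is a hypothetical file whose EXIF
# carries tag 41989, FocalLengthIn35mmFilm):
# from PIL import Image
# img = Image.open("photo.jpg")
# print(math.degrees(_get_fov(img)))  # e.g. ~65.5 degrees for a 28mm focal length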
|
9b8101010a980079d950b6151a3d5280d5eedb73
| 13,944
|
import re
def validate_str(input, validate):
    r"""
    Returns True or False depending on whether the string passes regexp
    validation.
    Validate format:
    substrname: "^\w{5,10}$",
    Validates that the string matches the regexp.
    """
    # re.match returns None when there is no match
    return re.match(validate, input) is not None
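# Quick sanity checks (a sketch using the pattern quoted in the docstring):
assert validate_str("hello", r"^\w{5,10}$") is True
assert validate_str("hi", r"^\w{5,10}$") is False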
|
400fd1cd7b4fa297f5916e80a39d7ef668deab1c
| 13,945
|
def pieChartInfoPlus(trips):
"""
Calculates the total distance per activity mode
Parameters
----------
trips : dict - Semantic information (nested)
Returns
-------
list(data): list - labels of the activity modes
list(data.values()): list - distance per activity mode
"""
labels = ["IN_PASSENGER_VEHICLE","STILL","WALKING","IN_BUS","CYCLING","FLYING","RUNNING","IN_FERRY","IN_TRAIN","SKIING","SAILING","IN_SUBWAY","IN_TRAM","IN_VEHICLE"]
data = {}
for year in trips:
for month in trips[year]:
for event in trips[year][month]:
if list(event)[0] == 'activitySegment':
try:
dist = event['activitySegment']['distance']
for label in labels:
if label == event['activitySegment']['activityType']:
data[label] = data.get(label,0) + dist
                    except KeyError:
                        print('There is no distance!')
return list(data), list(data.values())
|
ad0306b2561b01a33c509b2c454f464e9c6ff8a3
| 13,946
|
import os
import subprocess
def build_flags(library, type_, path):
"""Return separated build flags from pkg-config output"""
pkg_config_path = [path]
if "PKG_CONFIG_PATH" in os.environ:
pkg_config_path.append(os.environ['PKG_CONFIG_PATH'])
if "LIB_DIR" in os.environ:
pkg_config_path.append(os.environ['LIB_DIR'])
pkg_config_path.append(os.path.join(os.environ['LIB_DIR'], "pkgconfig"))
options = ["--static", {'I': "--cflags-only-I", 'L': "--libs-only-L", 'l': "--libs-only-l"}[type_]]
    prefix = "-" + type_
    return [
        # drop the leading "-I"/"-L"/"-l" prefix; str.strip() would also eat
        # matching characters from the end of the flag
        flag[len(prefix):] if flag.startswith(prefix) else flag
        for flag in subprocess.check_output(
            ["pkg-config"] + options + [library], env=dict(os.environ, PKG_CONFIG_PATH=":".join(pkg_config_path))
        )
        .decode("UTF-8")
        .split()
    ]
|
2fc526d4cb2fe6ad12e47bc7abebbcd9dc32780b
| 13,947
|
import os
import json
def getG2Inputs(conf, mnet):
"""Get dictionaries C, F, and flow info (RTT and path) for each traffic flow.
Write these dictionaries to JSON files.
Args:
conf (ConfigHandler): ConfigHandler object containing user-specified configurations.
mnet (NetworkSimulator): The NetworkSimulator object.
Returns:
C (dict) [linkID] -> linkCapacity
F (dict) [flowID] -> [linkIDs]
flowInfo (dict) [flowID] -> {linkStr, links, rtt}
Note:
    -- All IDs are integers starting at 1, prefixed with 'l' or 'f' (for link or flow respectively).
-- C, F, and L are written to c_dict.json, f_dict.json, and l_dict.json files in the output directory (as specified in config).
"""
flows = conf.trace.jobs
topoConf = conf.topoData
paths = mnet.paths
topo = mnet.net.topo
# The capacity dictionary.
C = {}
# The flow dictionary.
# A flow is the list of links traversed by it.
F = {}
# linkID -> [linkStr]. Example, {'l1': 's1-s2'}.
L = conf.topoData['L']
# [linkStr] -> linkID. Example, {'s1-s2': 'l1'}.
reverseL = dict((v,k) for k,v in L.items())
# Dictionary to hold flow information and RTT.
flowInfo = {}
# Link specifications are provided by the user under either 'link_info' or 'default_link_info' parameters.
linkInfos = topoConf['linkInfos']
defaultBW = float(topoConf['defaultLinkInfo']['bw'].strip())
links = topo.iterLinks()
for n1, n2 in links:
# For C, consider only switch-switch links.
if topo.isSwitch(n1) and topo.isSwitch(n2):
linkStr = n1 + '-' + n2
linkBW = defaultBW
for lif in linkInfos:
if (n1 == lif['src'] and n2 == lif['dst']) or (n2 == lif['src'] and n1 == lif['dst']):
linkBW = float(lif['bw'].strip())
break
C[reverseL[linkStr]] = linkBW
# At this point, C and L dictionaries will have data for all the relevant links.
# Now prepare F and flowInfo
defaultDelay = float(topoConf['defaultLinkInfo']['delay'].strip('ms')) # remove 'ms' from end
for flow in flows:
flowID = 'f' + str(flow['id'])
flowInfo[flowID] = {}
src = flow['src']
dst = flow['dst']
flowStr = src + '-' + dst
flowInfo[flowID]['flowStr'] = flowStr
# Generate pairs of consecutive nodes on the path - we consider complete end-to-end path.
pathList = paths[src][dst]
flowLinks = [(x,y) for x,y in zip(pathList, pathList[1:])]
flowInfo[flowID]['links'] = flowLinks # all links, both host-switch and switch-switch.
# Add to F, the details for this flow, consider only switch-switch links for that.
F[flowID] = []
for (x,y) in flowLinks:
if topo.isSwitch(x) and topo.isSwitch(y):
if x+'-'+y in reverseL:
F[flowID].append(reverseL[x+'-'+y])
elif y+'-'+x in reverseL:
F[flowID].append(reverseL[y+'-'+x])
else:
# This would happen only when 'paths' do not correspond to the same network, e.g., due to (un)intentional data corruption.
F[flowID] = []
break
if not F[flowID]:
# Again, the extreme case of data corruption, as above.
F = {}
break
# RTT computation.
rtt = 0.0
for n1, n2 in flowLinks:
linkDelay = defaultDelay
for lif in linkInfos:
if (n1 == lif['src'] and n2 == lif['dst']) or (n2 == lif['src'] and n1 == lif['dst']):
linkDelay = float(lif['delay'].strip('ms'))
break
rtt += linkDelay
rtt *= 2.0
flowInfo[flowID]['rtt'] = rtt
# Write C, F, and L to files in output directory.
cfile = os.path.join(conf.outPath, "c_dict.json")
ffile = os.path.join(conf.outPath, "f_dict.json")
lfile = os.path.join(conf.outPath, "l_dict.json")
with open(cfile, "w") as write_file:
json.dump(C, write_file, indent=1)
with open(ffile, "w") as write_file:
json.dump(F, write_file, indent=1)
with open(lfile, "w") as write_file:
json.dump(L, write_file, indent=1)
return (C, F, flowInfo)
|
ca356119f3e797085529717c4ecfd46be3bd670a
| 13,948
|
def findCharacter(stringList, patternCharacter):
"""
Find the specific character from the list and return their indices
"""
    return [ind for ind, x in enumerate(stringList) if x == patternCharacter]
|
32cc8fb5970c6cd3cefd161b9e13e340f1645d13
| 13,949
|
def cell_trap_getter_generator(priv_attr):
""" Generates a getter function for the cell_trap property.
"""
def getter(self):
        if getattr(self, priv_attr) is None:
            data = (self.gfpffc_bulb_1 - self.gfpffc_bulb_bg) / self.gfpffc_bulb_bg
            setattr(self, priv_attr, data)
return getattr(self, priv_attr)
return getter
|
15217adbd96ce44b361444867e5d9c6d202440f4
| 13,951
|
import os
def docker_compose_files(in_docker_compose, pytestconfig):
"""
This fixture provides support for `cloudbuild`.
By passing the command line argument `--in-docker-compose=cloudbuild`,
uses `docker-compose.cloudbuild.yml`.
"""
dc_type = f".{in_docker_compose}" if in_docker_compose else ""
dc_file = f"docker-compose{dc_type}.yml"
return [os.path.join(os.path.dirname(__file__), dc_file)]
|
22aec9c7953955febc6ea52c5fec450b36a1d144
| 13,953
|
def bg_trim(im):
"""
Function to programmatically crop card to edge.
`im` is a PIL Image Object.
"""
# This initial crop is hacky and stupid (should just be able to set device
# options) but scanner isn't 'hearing' those settings.
# w,h = im.size
im = im.crop((443, 0, 1242, 1200))
# bg = Image.new(im.mode, im.size, im.getpixel((2, 2)))
# diff = ImageChops.difference(im, bg)
# diff = ImageChops.add(diff, diff, 2.0, -100)
# bbox = diff.getbbox()
return im
# if bbox:
# return im
# else:
# print("There's been a problem.")
|
b5b59059aa9823cd2be385ead5cc21b135a4e24b
| 13,954
|
def get_filtered_query(must_list=None, must_not_list=None):
"""Get the correct query string for a boolean filter. Accept must and
must_not lists. Use MatchList for generating the appropriate lists.
"""
bool_filter = {}
if must_list:
bool_filter['must'] = must_list
if must_not_list:
bool_filter['must_not'] = must_not_list
result = {
'query': {
'filtered': {
'filter': {
'bool': bool_filter
}
}
}
}
return result
|
2190456ad7e91239bb623f7ec3d2c460e521e36f
| 13,955
|
def convert_to_letter(grade):
"""Convert a decimal number to letter grade"""
grade = round(grade, 1)
if grade >= 82.5:
return 'A'
elif grade >= 65:
return 'B'
elif grade >= 55:
return 'C'
elif grade >= 50:
return 'D'
else:
return 'F'
|
13ce25275750e7a27e0699078a15ba551674a941
| 13,956
|
def time_coord(cube):
"""
Return the variable attached to time axis.
Examples
--------
>>> import iris
>>> url = ('http://omgsrv1.meas.ncsu.edu:8080/thredds/dodsC/'
... 'fmrc/us_east/US_East_Forecast_Model_Run_Collection_best.ncd')
>>> cube = iris.load_cube(url, 'sea_water_potential_temperature')
>>> timevar = time_coord(cube)
>>> timevar.name() # What is the time coordinate named?
'time'
>>> cube.coord_dims(timevar) # Is it the zeroth coordinate?
(0,)
"""
timevars = cube.coords(axis="T", dim_coords=True)
if not timevars:
timevars = [
coord for coord in cube.dim_coords if "time" in coord.name()
] # noqa
    if not timevars:
        raise ValueError(f'Could not find "time" in {repr(cube.dim_coords)}')
    if len(timevars) != 1:
        raise ValueError("Found more than one time coordinate!")
timevar = timevars[0]
return timevar
|
12a706f956846e8471d0d7f044367c77210c4486
| 13,957
|
import re
def oneliner(s) -> str:
"""Collapse any whitespace in stringified `s` into a single space. """
return re.sub(r"[\n ]+", " ", str(s).strip())
|
ed1d419b4fab8cb2deccdbc2944996ef7be28cc5
| 13,958
|
from collections import Counter
def diff_counts(values : list[int]) -> dict[int, int]:
"""Count the gaps between ordered elements in a list, by size."""
ordered = [0] + sorted(values) + [max(values) + 3]
return Counter(j - i for i, j in zip(ordered, ordered[1:]))
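# Worked example (a sketch): with values [1, 4, 5, 6] the padded ordering is
# [0, 1, 4, 5, 6, 9], giving gaps of 1, 3, 1, 1, 3.
assert diff_counts([1, 4, 5, 6]) == {1: 3, 3: 2}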
|
897cb7fdfed85b37bd8bd7031290e34199c57574
| 13,960
|
def get_one_prior_probability(state, segment_df, total_positions):
"""
Returns the number of genomics positions that are of the state
"""
state_df = segment_df[segment_df[3] == state]
num_pos_this_state = float(sum(state_df['num_occ']))
return num_pos_this_state / float(total_positions)
|
157666f84d9ba6b7d236bb0f10e331e9315d9efa
| 13,962
|
def convert_netdict_to_pydict(dict_in):
"""Convert a net dictionary to a Python dictionary.
Parameters
----------
dict_in : dict
Net dictionary to convert.
Returns
-------
dict
Dictionary converted to Python.
"""
pydict = {}
for key in dict_in.Keys:
pydict[key] = dict_in[key]
return pydict
|
6aa9bb5ac00ff92d23ff5a449d096caad0d01c9c
| 13,963
|
from typing import List
def binary_search(input_array: List[int], target: int):
"""
Given a sorted input array of integers, the function looks for a target or returns None
Returns:
Target index or None
"""
    if not input_array or target is None:
        return None
left_pointer = 0
right_pointer = len(input_array) - 1
while left_pointer <= right_pointer:
mid_pointer = (left_pointer + right_pointer) // 2
mid_value = input_array[mid_pointer]
if target > mid_value:
left_pointer = mid_pointer + 1
elif target < mid_value:
right_pointer = mid_pointer - 1
else:
return mid_pointer
return None
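# Usage sketch:
assert binary_search([1, 3, 5, 7, 9], 7) == 3
assert binary_search([1, 3, 5, 7, 9], 4) is None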
|
b9f389d1b31e5b95bb885bd638549f7775db207e
| 13,964
|
from datetime import datetime
def _sort_by_date(time_coord):
"""
    Private sorting function used by
    _sort_by_earliest_date() and sort_by_earliest_date().
Args:
time_coord: Cube time coordinate for each cube
to be sorted by.
Returns:
time_origin: The time origin to sort cubes
by, as a specific start date e.g 1850.
"""
time_origin = time_coord.units.num2date(0)
if not isinstance(time_origin, datetime):
if time_origin.datetime_compatible:
time_origin = time_origin._to_real_datetime()
else:
time_origin = datetime(time_origin.year,
time_origin.month,
time_origin.day)
return time_origin
|
89c52c40dd8dfa03cbe431a4db6f107b42b22e81
| 13,965
|
def is_superuser(view):
"""Allow access to the view if the requesting user is a superuser."""
return view.request.user.is_superuser
|
5a15433200634ca326c36bdc17acbc1ada4e6426
| 13,966
|
import os
def meta(file_dir, file_name=None, deep=1):
    """
    Return basic information about a file.
    :param file_dir: directory path
    :param file_name: file name
    :param deep: depth
    :return: file information
    """
    return {
        'dir': file_dir,
        'name': file_name,
        'path': file_dir if file_name is None else os.path.join(file_dir, file_name),
        'isdir': file_name is None,
        'deep': deep
    }
|
d68002a864e255151fe5176bfa20c01a2f554c78
| 13,967
|
def tag_row(tag):
"""Format a tag table row"""
return [tag.get('id'), tag.get('name'), tag.get('referenceCount', 0)]
|
4cd1d0991eae996a52288c6d1d9bb3bd1a368ea6
| 13,968
|
from typing import Any
import torch
import numbers
def python_scalar_to_tensor(data: Any, device: torch.device = torch.device("cpu")) -> Any:
""" Converts a Python scalar number to a torch tensor and places it on the given device. """
if isinstance(data, numbers.Number):
data = torch.tensor([data], device=device)
return data
|
83e4ac093a40225f9c5fe121d9b67424f258e039
| 13,969
|
def allpath(path):
"""
return paths
"""
paths = path.strip("/").split("/")
for i in range(1,len(paths)):
paths[i] = paths[i-1] + "/" + paths[i]
return paths
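# Worked example (a sketch; note that leading slashes are stripped):
assert allpath("a/b/c") == ["a", "a/b", "a/b/c"]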
|
8b76f70dbae41a1ae46462d0755dbb5c5c0eb4c1
| 13,970
|
def LIGHTNING(conf):
"""Get Lightning Color code from config"""
return conf.get_color("colors", "color_lghtn")
|
08781e469c7262228904e883594e475be634816b
| 13,971
|
def chrxor(st1, st2, strf1, ar1, strf2, ar2, fun1, fun2):
    """Takes two strings and applies a function to each character in the string.
    Function is some function that takes characters and turns them into integers.
    This could be something to turn them into ascii order, or simply map them to another range of integers.
    """
    st1 = strf1(st1, ar1)  # process into an iterable list of things
    st2 = strf2(st2, ar2)  # ditto
    l1 = len(st1) - 1
    l2 = len(st2) - 1
    ret = [None] * len(st1)
    if l1 <= l2:  # if our key is longer than the plain, we just encipher as much as the plain will allow
        for i in range(0, l1 + 1):
            ret[i] = fun1(st1[i]) ^ fun2(st2[i])
    else:  # otherwise you need to repeat the shorter key
        j = 0
        for i in range(0, l1 + 1):
            ret[i] = fun1(st1[i]) ^ fun2(st2[j])
            j += 1
            if j > l2:
                j = 0
    return ret
|
d463e21e55bf41ce9dee612d77810c2000210477
| 13,972
|
def is_exists(t):
"""Whether t is of the form ?x. P x."""
return t.is_comb() and t.fun.is_const() and \
t.fun.name == "exists" and t.arg.is_abs()
|
7f7c5f32685df636ec2181efd5a1b7dbc8313d4c
| 13,973
|
from typing import Tuple
def InterpolateValue(x: float, xy0: Tuple[float, float], xy1: Tuple[float, float]) -> float:
"""Get the position of x on the line between xy0 and xy1.
:type x: float
:type xy0: Tuple[float, float]
:type xy1: Tuple[float, float]
:return: y
:rtype: float
"""
if xy0[0] < xy1[0]:
min_xy = xy0
max_xy = xy1
else:
min_xy = xy1
max_xy = xy0
    a = (max_xy[1] - min_xy[1]) / (max_xy[0] - min_xy[0])
b = min_xy[1] - a * min_xy[0]
return a * x + b
|
9de4372b593fae65f772c19a8ac369315a8489d0
| 13,975
|
def get_subset_with_sum(a, n):
"""Returns a subset of a with sum equal to n (if exists)."""
    # dp[j] holds a subset of `a` summing to j, or [] if none found yet;
    # independent lists avoid the shared-reference pitfall of [[]] * (n + 1)
    dp = [[] for _ in range(n + 1)]
    for i in range(len(a)):
        # iterate j downwards so each element is used at most once
        for j in range(n, a[i] - 1, -1):
if j == a[i]:
dp[j] = [a[i]]
elif dp[j]:
continue
elif dp[j - a[i]]:
dp[j] = dp[j - a[i]] + [a[i]]
return dp[-1]
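# Usage sketch: the returned subset sums to the target when one exists.
subset = get_subset_with_sum([3, 34, 4, 12, 5, 2], 9)
assert sum(subset) == 9  # e.g. [4, 5]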
|
c9e2ba7b6ea3b33d839cdad3586ccd8717089ebf
| 13,976
|
def boolean(prompt=None, yes='y', no='n', default=None, sensitive=False,
partial=True):
"""Prompt for a yes/no response.
Parameters
----------
prompt : str, optional
Use an alternative prompt.
yes : str, optional
Response corresponding to 'yes'.
no : str, optional
        Response corresponding to 'no'.
default : bool, optional
The return value if user inputs empty response.
sensitive : bool, optional
If True, input is case sensitive.
partial : bool, optional
Can user type 'y' or 'ye' for 'yes' and 'n' for 'no'?
Returns
-------
bool
Either True (if user selects 'yes') or False (if user selects 'no')
"""
def norm(x):
return x if sensitive else str(x).lower()
def to_bool(c):
"""Business logic for converting input to boolean."""
if partial and len(c):
if norm(yes).startswith(norm(c)):
return True
elif norm(no).startswith(norm(c)):
return False
else:
if norm(yes) == norm(c):
return True
elif norm(no) == norm(c):
return False
raise ValueError
if prompt is None:
y = '[{}]'.format(yes) if default is True else yes
n = '[{}]'.format(no) if default is False else no
prompt = '{y}/{n}? '.format(y=y, n=n)
s = input(prompt)
if (default is not None) and not s:
return default
try:
return to_bool(s)
except ValueError:
return boolean(prompt=prompt, yes=yes, no=no, default=default,
sensitive=sensitive, partial=partial)
|
159c8be6004e4e9f2c5a271e36facd3610561b61
| 13,978
|
import torch
def compute_local_cost(pi, a, dx, b, dy, eps, rho, rho2, complete_cost=True):
"""Compute the local cost by averaging the distortion with the current
transport plan.
Parameters
----------
pi: torch.Tensor of size [Batch, size_X, size_Y]
transport plan used to compute local cost
a: torch.Tensor of size [Batch, size_X]
Input measure of the first mm-space.
dx: torch.Tensor of size [Batch, size_X, size_X]
Input metric of the first mm-space.
b: torch.Tensor of size [Batch, size_Y]
Input measure of the second mm-space.
dy: torch.Tensor of size [Batch, size_Y, size_Y]
Input metric of the second mm-space.
eps: float
Strength of entropic regularization.
rho: float
Strength of penalty on the first marginal of pi.
    rho2: float
        Strength of penalty on the second marginal of pi. If set to None it is
        equal to rho.
complete_cost: bool
If set to True, computes the full local cost, otherwise it computes the
cross-part on (X,Y) to reduce computational complexity.
Returns
----------
lcost: torch.Tensor of size [Batch, size_X, size_Y]
local cost depending on the current transport plan.
"""
distxy = torch.einsum(
"ij,kj->ik", dx, torch.einsum("kl,jl->kj", dy, pi)
)
kl_pi = torch.sum(
pi * (pi / (a[:, None] * b[None, :]) + 1e-10).log()
)
if not complete_cost:
return - 2 * distxy + eps * kl_pi
mu, nu = torch.sum(pi, dim=1), torch.sum(pi, dim=0)
distxx = torch.einsum("ij,j->i", dx ** 2, mu)
distyy = torch.einsum("kl,l->k", dy ** 2, nu)
lcost = (distxx[:, None] + distyy[None, :] - 2 * distxy) + eps * kl_pi
if rho < float("Inf"):
lcost = (
lcost
+ rho
* torch.sum(mu * (mu / a + 1e-10).log())
)
if rho2 < float("Inf"):
lcost = (
lcost
+ rho2
* torch.sum(nu * (nu / b + 1e-10).log())
)
return lcost
|
3d29e8ae5ef14ab30cd676eebeb6507e9cbfafca
| 13,981
|
def electron_binding_energy(charge_number):
"""Return the electron binding energy for a given number of protons (unit
is MeV). Expression is taken from [Lunney D., Pearson J. M., Thibault C.,
2003, Rev. Mod. Phys.,75, 1021]."""
return 1.44381e-5 * charge_number ** 2.39\
+ 1.55468e-12 * charge_number ** 5.35
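# Worked example (a rough check): for iron, Z = 26, this evaluates
# 1.44381e-5 * 26**2.39 + 1.55468e-12 * 26**5.35, roughly 0.035 MeV.
print(electron_binding_energy(26))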
|
6d5a845b1b11720b44b62500f979f0a621faca0a
| 13,985
|
def parse_generic(data, key):
"""
Returns a list of (potentially disabled) choices from a dictionary.
"""
choices = []
    for k, v in sorted(data[key].items(), key=lambda item: item[1]):
choices.append([v, k])
return choices
|
90d2f2188d5cca7adb53eebca80a80f2c46b04a7
| 13,986
|
def attrs_to_dict(attrs):
"""
Convert a list of tuples of (name, value) attributes to a single dict of attributes [name] -> value
:param attrs: List of attribute tuples
:return: Dict of attributes
"""
out = {}
for attr in attrs:
if out.get(attr[0]) is None:
out[attr[0]] = attr[1]
else:
if not isinstance(out[attr[0]], list):
out[attr[0]] = [out[attr[0]]]
out[attr[0]].append(attr[1])
return out
|
ccf9440d29de2f8556e694f4ab87e95f0bfd9e8a
| 13,987
|
def count(i):
"""List or text. Returns the length as an integer."""
return len(i)
|
d8daf7cd325ce1acfb382723759ff190becd785f
| 13,989
|
def score(hand):
"""
Compute the maximal score for a Yahtzee hand according to the
upper section of the Yahtzee score card.
hand: full yahtzee hand
Returns an integer score
"""
possible_scores = []
#Add each possible score to the list of possible scores
for num in list(hand):
poss_score = hand.count(num)*num
possible_scores.append(poss_score)
#Sort possible scores in ascending order
possible_scores.sort()
#Return the highest score
return possible_scores[-1]
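# Worked example (a sketch): the hand (2, 2, 2, 5) scores max(2*3, 5*1) = 6.
assert score([2, 2, 2, 5]) == 6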
|
da6ff8c5a0c640034ae063bc08a54351f4dd259e
| 13,990
|
import math
def PlateStiffener(b, t, hw, tw, bf, tf, L, fyp, fyw = 0, fyf = 0, E = 210000):
    """Compute cross-section properties of a plate stiffener: areas, neutral
    axis position, moments of inertia, section moduli, equivalent yield
    strength and slenderness ratios."""
# plate (flange) section
_b = b
_t = t
# web section
_hw = hw
_tw = tw
# flange section
_bf = bf
_tf = tf
# Fy plate
_Fyp = fyp
# Fy web
if fyw == 0: _Fyw = _Fyp
else: _Fyw = fyw
# fy flange
if fyf == 0 :
_Fyf = _Fyp
else:
_Fyf = fyf
# section length
_L = L
# Elatic modulus
_E = E
#
# Cross Sectional Area
# area plate (flange)
_Ap = _b * _t
# area web
_Aw = _hw * _tw
# area flange
_Af = _bf * _tf
# cross sectional area of stiffener
_As = _Aw + _Af
# total area
_A = _Ap + _Aw + _Af
#
#
_ef = _tw / 2.0
#
#
# Equivalent Yield Strength over
# the cross-section
_Fyeq = (_Ap * _Fyp + _Aw * _Fyw + _Af * _Fyf) / _A
#
# Distance from outer surface of attached
# plating to elastic horizontal neutral axis
#
Zp = (((0.50 * _b * _t**2) + _Aw * (_t + 0.50 * _hw)
+ _Af * (_t + _hw + 0.50 * _tf)) / _A)
#
    Ztf = (_t + _hw + _tf - Zp)
#
#print('Z0 ==> ',Zp, Ztf )
#
# Moment of Inertia
#
_I = ((_b * _t**3 / 12.0) + (_Ap * (Zp - _t / 2.0)**2)
+ (_hw**3 * _tw / 12.0) + _Aw * (Zp - _t - _hw / 2.0)**2
+ (_bf * _tf**3 / 12.0) + _Af * (_t + _hw + _tf / 2.0 - Zp)**2)
#
#print('Ixx =',_I)
#
# Section Modulus
#
_Wep = _I / Zp
#
_Wes = _I / Ztf
#
_W = min(_Wep, _Wes)
#
#print ('--->', _Wep, _Wes, _W)
#
# Radius of gyration
#
_r = math.sqrt(_I / _A)
#
#
# Column Slenderness Ratio
#
Lambda = (_L / (math.pi * _r) * math.sqrt(_Fyeq / _E))
#
#
# Torsional Moment of Inertia
Iz = (_Af * _bf**2 / 12.0) + (_ef**2 * _Af / float(1 + (_Af / _Aw)))
#
#print('Iz ===>',Iz)
#
print ('Af = ',_Af)
print ('Aw = ',_Aw)
print ('Iz = ',Iz)
print ('ef = ',_ef)
print ('bf = ',_bf)
# Plate Slenderness Ratio
try:
Beta = _b / _t * math.sqrt(_Fyp / _E)
except :
Beta = 0
#
return _A, _As, Zp, _I, Iz, _Wes, _Wep, _Fyeq
|
13321a27dd3580f99b0128daa859acfeb5dad5d7
| 13,993
|
def find_body(view):
"""
Find first package body declaration.
"""
return view.find(r'(?im)create\s+(or\s+replace\s+)?package\s+body\s+', 0)
|
b592199e4ef09d079645fc82ade8efbe3c92a895
| 13,994
|
import math
def roll(entropy, n):
"""From 0 to n - 1."""
# Minimum bit depth to cover the full range.
# Note that more bits would be more fair.
bit_depth = math.ceil(math.log2(n))
x = entropy(bit_depth)
# Scale from total range to desired range.
# Numbers with higher odds will be evenly distributed.
return math.floor(x * n / 2 ** bit_depth)
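# Usage sketch with a hypothetical entropy source backed by random.getrandbits:
# for n = 6, bit_depth = ceil(log2(6)) = 3, so x is drawn from 0..7 and scaled into 0..5.
import random
assert 0 <= roll(random.getrandbits, 6) < 6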
|
a714b1636cd6a97fb4d4ccc19e966795cadec76c
| 13,996
|
import subprocess
import os
def get_peatclsm_data_table(variable, expected_header):
"""Return a tabulated variable as parameterized in PEATCLSM
"""
output = subprocess.check_output(
['Rscript', '--vanilla',
os.path.join(
os.path.dirname(__file__),
'peatclsm_hydraulic_functions.R'),
variable
],
encoding='utf-8'
)
rows = [line.strip().split(',') for line in output.splitlines()]
assert rows.pop(0) == expected_header
return [tuple(float(value) for value in row) for row in rows]
|
3d21f05004c5deb77514c7021cd79abf20c325af
| 13,998
|
def get_num_objects(tree):
""" Get number of objects in an image.
Args:
tree: Tree element of xml file.
Returns: Number of objects.
"""
num_obj = 0
for e in tree.iter():
if e.tag == 'object':
num_obj = num_obj + 1
return num_obj
|
92f937cbbf2eabdc909ef6bc1f06ccb87e0148b7
| 13,999
|
def partition(n, g):
"""
    Returns the number of ways to partition a set of n
    elements with a partition of size g
"""
if n == 1:
return 1
elif n == g:
return 1
elif g>n or g<=0:
return 0
else:
        total = 0
        for i in range(g, n-g+1, 1):
            total += partition(n-g, i)
        return total
|
e0af35b413963c6ff600e87e5a55e8f4dbcc4b84
| 14,000
|
from typing import Union
def _get_tol_from_less_precise(check_less_precise: Union[bool, int]) -> float:
"""
Return the tolerance equivalent to the deprecated `check_less_precise`
parameter.
Parameters
----------
check_less_precise : bool or int
Returns
-------
float
Tolerance to be used as relative/absolute tolerance.
Examples
--------
>>> # Using check_less_precise as a bool:
>>> _get_tol_from_less_precise(False)
0.5e-5
>>> _get_tol_from_less_precise(True)
0.5e-3
>>> # Using check_less_precise as an int representing the decimal
>>> # tolerance intended:
>>> _get_tol_from_less_precise(2)
0.5e-2
>>> _get_tol_from_less_precise(8)
0.5e-8
"""
if isinstance(check_less_precise, bool):
if check_less_precise:
# 3-digit tolerance
return 0.5e-3
else:
# 5-digit tolerance
return 0.5e-5
else:
        # Equivalent to setting check_less_precise=<decimals>
return 0.5 * 10 ** -check_less_precise
|
375f7918a04fafb4a79f77abd3f0282cdc74e992
| 14,001
|
def get_arg(cmds):
"""Accepts a split string command and validates its size.
Args:
cmds: A split command line as a list of strings.
Returns:
The string argument to the command.
Raises:
ValueError: If there is no argument.
"""
if len(cmds) != 2:
raise ValueError('%s needs an argument.' % cmds[0])
return cmds[1]
|
a12b9402cb824748127c9a925850a10f6a9fe022
| 14,003
|
from pathlib import Path
def _resolve_ros_workspace(ros_workspace_input: str) -> Path:
"""Allow for relative paths to be passed in as a ros workspace dir."""
ros_workspace_dir = Path(ros_workspace_input).resolve()
if not (ros_workspace_dir / 'src').is_dir():
raise ValueError(
'specified workspace "{}" does not look like a colcon workspace '
'(there is no "src/" directory). cannot continue'.format(ros_workspace_dir))
return ros_workspace_dir
|
45c5843a2e34a79f077ae595b100f2d58f903bae
| 14,004
|
import os
import pickle
def _load_scaler_to_memory(path):
""" Load a pickle from disk to memory """
    if not os.path.exists(path):
        raise ValueError("File " + path + " does not exist")
    with open(path, 'rb') as f:
        return pickle.load(f)
|
cbc87a09ce22836ec20f971432d2aa91b584ce6b
| 14,005
|
def unscale_daily_temp(mean_temp, scaled_daily_temp, scale_params):
"""scale back the daily temp"""
    daily_temp = scaled_daily_temp * (scale_params[0] - scale_params[1] * mean_temp) + mean_temp
    return daily_temp
|
bbb66eabbfc929d1a9099a07e31ad1bd606d20e7
| 14,006
|
import os
def find_file(name, path):
"""File the first file within a given path.
Args:
name: Name of the file to search for.
path: Root of the path to search in.
Returns:
Full path of the given filename or None if not found.
"""
for root, dirs, files in os.walk(path):
if name in files:
return os.path.join(root, name)
return None
|
608727432f16725f4e334170e1412804175dacf3
| 14,007
|
def _magnitude_to_marker_size(v_mag):
"""Calculate the size of a matplotlib plot marker representing an object with
a given visual megnitude.
A third-degree polynomial was fit to a few hand-curated examples of
marker sizes for magnitudes, as follows:
>>> x = np.array([-1.44, -0.5, 0., 1., 2., 3., 4., 5.])
>>> y = np.array([120., 90., 60., 30., 15., 11., 6., 1.])
>>> coeffs = np.polyfit(x, y, 3)
This function is valid over the range -2.0 < v <= 6.0; v < -2.0 returns
size = 160. and v > 6.0 returns size = 1.
Args:
v_mag: A float representing the visual magnitude of a sky object.
Returns:
A float to be used as the size of a marker depicting the sky object in a
matplotlib.plt scatterplot.
"""
if v_mag < -2.0:
size = 160.0
elif v_mag > 6.0:
size = 1.0
else:
coeffs = [-0.39439046, 6.21313285, -33.09853387, 62.07732768]
size = coeffs[0] * v_mag**3. + coeffs[1] * v_mag**2. + coeffs[2] * v_mag + coeffs[3]
return size
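# Sanity sketch: sizes clamp at the stated limits and shrink with magnitude.
assert _magnitude_to_marker_size(-3.0) == 160.0
assert _magnitude_to_marker_size(7.0) == 1.0
assert _magnitude_to_marker_size(0.0) > _magnitude_to_marker_size(3.0)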
|
cb030993c5799da9f84e9bbadb9fe30680d74944
| 14,009
|
def upto(limit: str, text: str) -> str:
""" return all the text up to the limit string """
    idx = text.find(limit)
    # find() returns -1 when the limit is absent; return the whole text then
    return text if idx == -1 else text[:idx]
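# Usage sketch (with the guard above, a missing limit returns the whole text):
assert upto(",", "abc,def") == "abc"
assert upto("|", "abc") == "abc"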
|
0fbe1732954c225fe8d8714badb9126c8ab72a4d
| 14,010
|
import torch
def generate_part_labels(
vertices, faces, cam_t, neural_renderer, part_texture, K, R, part_bins
):
"""
:param vertices: (torch tensor NVx3) mesh vertices
:param faces: (torch tensor NFx3) mesh faces
:param cam_t: (Nx3) camera translation
:param neural_renderer: renderer
:param part_texture: (torch tensor 1xNx1x1x1x3)
:param K: (torch tensor 3x3) cam intrinsics
:param R: (torch tensor 3x3) cam rotation
:param part_bins: bins to discretize rendering into part labels
:return: parts (torch tensor Bx3xWxH) part segmentation labels,
:return: render_rgb (torch tensor Bx3xWxH) rendered RGB image
"""
batch_size = vertices.shape[0]
parts, depth, mask = neural_renderer(
vertices,
faces.expand(batch_size, -1, -1),
textures=part_texture.expand(batch_size, -1, -1, -1, -1, -1),
K=K.expand(batch_size, -1, -1),
R=R.expand(batch_size, -1, -1),
t=cam_t.unsqueeze(1),
)
render_rgb = parts.clone()
parts = parts.permute(0, 2, 3, 1)
parts *= 255.0 # multiply it with 255 to make labels distant
parts, _ = parts.max(-1) # reduce to single channel
parts = torch.bucketize(parts.detach(), part_bins, right=True)
parts = parts.long() + 1
parts = parts * mask.detach()
return parts.long(), render_rgb, depth.detach()
|
189adf5939bab026c197c84ac07bf8272abc5ca9
| 14,011
|
def check_occuring_variables(formula,variables_to_consider,allowed_variables) :
"""
Checks if the intersection of the variables in <formula> with the variables
in <variables_to_consider> is contained in <allowed_variables>
Parameters
----------
formula : list of list of integers
The formula to consider.
variables_to_consider : list of integers
Those variables in <formula> that shall be considered.
allowed_variables : list of integers
Must be contained in <variables_to_consider>.
Gives the subset of <variables_to_consider> that may occur in <formula>
Returns
-------
    A pair (ok, offending): ok is True if the intersection of the variables in
    <formula> with <variables_to_consider> is contained in <allowed_variables>;
    offending lists the variables violating this (empty when ok is True)
"""
variable_set=set(allowed_variables)
for clause in formula :
variables_in_clause = {abs(l) for l in clause if abs(l) in variables_to_consider}
if not variables_in_clause <= variable_set:
return False, [v for v in variables_in_clause if not v in variable_set]
return True, []
|
16faf544cc6f4993afb1cad356037820d54225ba
| 14,012
|
def classproperty(f):
"""decorator that registers a function as a read-only property of the class."""
class _ClassProperty:
def __get__(self, _, cls):
# python will call the __get__ magic function whenever the property is
# read from the class.
return f(cls)
return _ClassProperty()
|
de5586548b77e8c1c72176690b64df603edacf78
| 14,014
|
def _merge_config_dicts(dct1, dct2):
"""
Return new dict created by merging two dicts, giving dct1 priority over
dct2, but giving truthy values in dct2 priority over falsey values in dct1.
"""
return {str(key): dct1.get(key) or dct2.get(key)
for key in set(dct1) | set(dct2)}
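# Worked example (a sketch): dct1 wins, except where its value is falsey.
assert _merge_config_dicts({'a': 1, 'b': ''}, {'b': 2, 'c': 3}) == {'a': 1, 'b': 2, 'c': 3}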
|
9d66c10438027254fbac7d8cc91185a07dd9da65
| 14,015
|
def from_list(*args):
"""
Input:
args - variable number of integers represented as lists, e.g. from_list([1,0,2], [5])
Output:
new_lst - a Python array of integers represented as strings, e.g. ['102','5']
"""
new_lst = []
for lst in args:
new_string = ''
for digit in lst:
new_string += str(digit)
new_lst.append(new_string)
return new_lst
|
c3c0a2224433104a00ffabdadf612127d1b0ed3c
| 14,017
|
def perpetuity_present_value(
continuous_cash_payment: float,
interest_rate: float,
):
"""Returns the Present Value of Perpetuity Formula.
Parameters
----------
continuous_cash_payment : float
Amount of continuous cash payment.
interest_rate : float
Interest rate, yield or discount rate.
Returns
-------
float:
Present value of perpetuity
Example
-------
>>> pv = perpetuity_present_value(5, 0.15)
>>> round(pv, 2)
33.33
"""
return continuous_cash_payment / interest_rate
|
26eb398776ce4f74348b23920b99b0390c462ff9
| 14,018
|
def fatorial(valor,show=False):
""" -> Calcula valor Fatorial de n
Param valor: recebe valor para calculo de fatorial
Param show: (OPCIONAL) mostra como o calculo foi realizado
return: retorna o resultado do fatorial de n
"""
fat = 1
if show == True:
#print(fat,end=' x ')
for x in range(valor,0,-1):
if x == 1:
print(x,end=' = ')
break
print(x,end=' x ')
fat *= x
else:
for x in range(valor,0,-1):
fat *= x
return fat
|
e0dacf6102cc38c019f122dd51d25f591c1b047d
| 14,021
|
def __avg__(list_):
"""Return average of all elements in the list."""
return sum(list_) / len(list_)
|
3204d823e83bd43efccf9886acd3ae8b01e1d7a0
| 14,022
|
import os
import pickle
def load_file(path):
"""
Load a file
:param path: File path
:return: file
"""
fn = os.path.join(os.path.dirname(__file__), path)
return pickle.load(open(fn, 'rb'))
|
aa0b6e44466faf37eca38c8f55a81ac35e48c623
| 14,024
|
def combine(main_busytime_list, merging_list):
"""
given two lists of busytimes, returns list of busytimes that have events of the same day together
"""
    merged = [ ]
    days = { }
    # group the main list's busytimes by date; a membership check avoids
    # resetting a date's list when the same date appears non-consecutively
    for busytime in main_busytime_list:
        if busytime['date'] not in days:
            days[busytime['date']] = [ ]
        days[busytime['date']].append(busytime)
for busytime in merging_list:
if busytime['date'] in days:
days[busytime['date']].append(busytime)
else:
days[busytime['date']] = [ ]
days[busytime['date']].append(busytime)
for day in days:
merged += days[day]
return merged
|
978946353dd4d7a57b57e66693a5d396f6e8d7f6
| 14,025
|
def _make_final_gr(x):
"""Apply function to do graduation rates"""
race, sixyrgr, sixyrgraah, comments = x
first_gr = sixyrgraah if race in ["B", "H", "M", "I"] else sixyrgr
if comments == "Posse":
return (first_gr + 0.15) if first_gr < 0.7 else (1.0 - (1.0 - first_gr) / 2)
else:
return first_gr
|
600157433efe8a1689d81c2e17114c1578c43c21
| 14,026
|
import requests
def check_hash(h):
"""
Do the heavy lifting. Take the hash, poll the haveibeenpwned API, and check results.
:param h: The sha1 hash to check
:return: The number of times the password has been found (0 is good!)
"""
if len(h) != 40:
raise ValueError("A sha1 hash should be 30 characters.")
h = h.upper()
chk = h[:5]
r = requests.get("https://api.pwnedpasswords.com/range/%s" % chk)
if r.status_code != 200:
raise EnvironmentError("Unable to retrieve password hashes from server.")
matches = {m: int(v) for (m, v) in [ln.split(':') for ln in r.content.decode('utf-8').split("\r\n")]}
#print("Prefix search returned %d potential matches." % len(matches))
for m in matches.keys():
if m == h[5:]:
return matches[m]
return 0
|
965dd75b5da095bc24ce6a6d733b271d9ec7aa80
| 14,028
|
def get_field_hint(config, field):
"""Get the hint given by __field_hint__ or the field name if not defined."""
return getattr(config, '__{field}_hint__'.format(field=field), field)
|
0d374daf93646caf55fe436c8eb2913d22151bbc
| 14,030
|
def btc(value):
"""Format value as BTC."""
return f"{value:,.8f}"
|
1d883384a6052788e8fa2bedcddd723b8765f44f
| 14,032
|
def parse_sbd_devices(options):
"""Returns an array of all sbd devices.
Key arguments:
options -- options dictionary
Return Value:
devices -- array of device paths
"""
    devices = [dev.strip() for dev in options["--devices"].split(",")]
    return devices
|
d706efd6a456e2279b30492b634e018ebd8e873f
| 14,033
|
def pull_from_js(content, key):
"""Pull a value from some ES6."""
# we have to parse fucking ES6 because ES6 cannot natively import fucking JSON
return list(filter(lambda x: f"{key}:" in x, content.split("\n")))[0].split('"')[1]
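# Usage sketch with a hypothetical ES6 module snippet:
js = 'export default {\n  version: "1.2.3",\n}\n'
assert pull_from_js(js, "version") == "1.2.3"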
|
0b5f7fae68e1f5d376c22cd18a6799c4c76a71f7
| 14,034
|
import time
def nagios_from_file(results_file):
"""Returns a nagios-appropriate string and return code obtained by
parsing the desired file on disk. The file on disk should be of format
%s|%s % (timestamp, nagios_string)
This file is created by various nagios checking cron jobs such as
check-rabbitmq-queues and check-rabbitmq-consumers"""
    with open(results_file) as f:
        data = f.read().strip()
    pieces = data.split('|')
    if len(pieces) != 4:
state = 'UNKNOWN'
ret = 3
data = "Results file malformed"
else:
timestamp = int(pieces[0])
time_diff = time.time() - timestamp
if time_diff > 60 * 2:
ret = 3
state = 'UNKNOWN'
data = "Results file is stale"
else:
ret = int(pieces[1])
state = pieces[2]
data = pieces[3]
return (ret, "%s: %s" % (state, data))
|
02697105ad5e9d01dd0eb504e232314f4d15a6a9
| 14,035
|
import math
def _width2wing(width, x, min_wing=3):
"""Convert a fractional or absolute width to integer half-width ("wing").
"""
if 0 < width < 1:
wing = int(math.ceil(len(x) * width * 0.5))
elif width >= 2 and int(width) == width:
# Ensure window width <= len(x) to avoid TypeError
width = min(width, len(x) - 1)
wing = int(width // 2)
else:
raise ValueError("width must be either a fraction between 0 and 1 "
"or an integer greater than 1 (got %s)" % width)
wing = max(wing, min_wing)
wing = min(wing, len(x) - 1)
assert wing >= 1, "Wing must be at least 1 (got %s)" % wing
return wing
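# Worked example (a sketch): a fractional width of 0.2 over 30 points
# gives ceil(30 * 0.2 * 0.5) = 3.
assert _width2wing(0.2, list(range(30))) == 3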
|
38bdb809167b19b0ef5c7fad6858d2f7016ec310
| 14,036
|
def mb_bl_ind(tr1, tr2):
"""Returns the baseline index for given track indices.
By convention, tr1 < tr2. Otherwise, a warning is printed,
and same baseline returned.
"""
if tr1 == tr2:
print("ERROR: no baseline between same tracks")
return None
if tr1 > tr2:
print("WARNING: tr1 exepcted < than tr2")
mx = max(tr1, tr2)
bl = mx*(mx-1)/2 + min(tr1, tr2)
return bl.astype(int)
|
7d1bc958ca9928f54e51935510d62c45f7fc927f
| 14,037
|
from pathlib import Path
def check_installed(installed="/media/mmcblk0p1/installed"):
"""Check if LNCM-Box is installed"""
if Path(installed).is_file():
with open(installed, "r") as file:
lines = file.readlines()
for line in lines:
print(line)
return True
return False
|
31f73b8927e0a23c6d70ab13791ead9cf5d55b8c
| 14,038
|
def sliding_window(image_width,image_height, patch_w,patch_h,adj_overlay_x=0,adj_overlay_y=0):
"""get the subset windows of each patch
Args:
image_width: width of input image
image_height: height of input image
patch_w: the width of the expected patch
patch_h: the height of the expected patch
adj_overlay_x: the extended distance (in pixel of x direction) to adjacent patch,
make each patch has overlay with adjacent patch
adj_overlay_y: the extended distance (in pixel of y direction) to adjacent patch,
make each patch has overlay with adjacent patch
Returns: The list of boundary of each patch
"""
# output split information
f_obj = open('split_image_info.txt','w')
f_obj.writelines('### This file is created by split_image.py. mosaic_patches.py need it. Do not edit it\n')
f_obj.writelines('image_width: %d\n' % image_width)
f_obj.writelines('image_height: %d\n' % image_height)
f_obj.writelines('expected patch_w: %d\n' % patch_w)
f_obj.writelines('expected patch_h: %d\n' % patch_h)
f_obj.writelines('adj_overlay_x: %d\n' % adj_overlay_x)
f_obj.writelines('adj_overlay_y: %d\n' % adj_overlay_y)
count_x = (image_width - 2*adj_overlay_x) // patch_w
count_y = (image_height - 2*adj_overlay_y) // patch_h
patch_boundary = []
for i in range(0, count_x):
f_obj.write('column %d:' % i)
for j in range(0, count_y):
f_obj.write(' %d' % (i*count_y + j))
# extend the patch
xoff = i * patch_w + (image_width - count_x*patch_w - 2*adj_overlay_x)//2
yoff = j * patch_h + (image_height - count_y*patch_h - 2*adj_overlay_y)//2
xsize = patch_w + 2*adj_overlay_x
ysize = patch_h + 2*adj_overlay_y
new_patch = (xoff, yoff, xsize, ysize)
patch_boundary.append(new_patch)
f_obj.write('\n')
f_obj.close()
return patch_boundary
|
42af883927a72f313eb7804e9d99bd6ec37fa7f3
| 14,039
|
def datetime_format_to_js_datetime_format(format):
"""
Convert a Python datetime format to a time format suitable for use with
the datetime picker we use, http://www.malot.fr/bootstrap-datetimepicker/.
"""
converted = format
replacements = {
'%Y': 'yyyy',
'%y': 'yy',
'%m': 'mm',
'%d': 'dd',
'%H': 'hh',
'%I': 'HH',
'%M': 'ii',
'%S': 'ss',
}
for search, replace in replacements.items():
converted = converted.replace(search, replace)
return converted.strip()
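# Usage sketch:
assert datetime_format_to_js_datetime_format('%Y-%m-%d %H:%M') == 'yyyy-mm-dd hh:ii'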
|
a037146b17aae21831bc4c76d4500b12bc34feba
| 14,040
|
def make_data(current_data):
""" Formats the given data into the required form """
x = []
n = len(current_data)
for i in range(n - 1):
x.append(current_data[i])
x.append(1)
return x, current_data[n - 1]
|
04ff4ba93445895451c4c710e7aae59b9787a07d
| 14,042
|
def average_over_dictionary(mydict):
"""
Average over dictionary values.
"""
    ave = sum(mydict.values()) / len(mydict)
return ave
|
584e8cb073c298b3790a96e2649dbacfd5716987
| 14,043
|
def concat_complex(list_complex, width_in_bits, imreOrder=True):
"""
Concatenates the real and imaginary part into one integer.
The specifed width counts for both the real and imaginary part.
Real part is mapped on the LSB. Imaginary part is shifted to the MSB.
"""
    # mask keeps only the low `width_in_bits` bits of each component
    mask = 2**width_in_bits - 1
    if imreOrder:
        return [((int(i.imag) & mask) << width_in_bits) + (int(i.real) & mask) for i in list_complex]
    else:
        return [((int(i.real) & mask) << width_in_bits) + (int(i.imag) & mask) for i in list_complex]
|
831275da0695a477f9f75da1e238f231ea8d39df
| 14,044
|
import os
def get_best_model(dir_name):
"""Get the best model from model checkpoint folder. Normally it's the last
saved model in folder.
    Args:
        dir_name (String): Directory to get the best trained model from.
Returns:
str: name of best model
"""
h5_files = []
for _, _, files in os.walk(dir_name):
for file_name in files:
if ".h5" in file_name:
h5_files.append(file_name)
h5_files = sorted(h5_files, key=lambda x: float(x.split(":")[-1][:-3]))
return h5_files[0]
|
29603d001b28d0081c5128c375ba97e716b38eac
| 14,046
|
def treat_category(category: str) -> str:
""" Treats a list of string
Args:
category (str): the category of an url
Returns:
str: the category treated
"""
return category.lower().strip()
|
a4c08383bdfb40e5e64bbf576df53a7f46fc07da
| 14,047
|
def _get_fully_qualified_class_name(obj):
"""
Obtains the fully qualified class name of the given object.
"""
return obj.__class__.__module__ + "." + obj.__class__.__name__
|
e427991067254c2ac1963c5ae59468ec2c0835e2
| 14,049
|
import subprocess
def ping(hostname, n=3):
""" standard Windows ping """
output = subprocess.run(["ping", hostname, "-n", str(n)], stdout=subprocess.PIPE)
result = output.stdout.decode()
    # check the response: a reply quoting the hostname means success; when the
    # hostname is not a dotted IP, the reply quotes the resolved address instead
    if 'Reply from ' + hostname + ': bytes=' in result:
        return True
    elif hostname.count('.') != 3:
        return ': bytes=' in result and 'Reply from ' in result
    else:
        return False
|
5ad250bff210dac9c8a3571e9214f527e93f1851
| 14,050
|
import decimal
def new_decimal(value):
"""Builds a Decimal using the cleaner float `repr`"""
if isinstance(value, float):
value = repr(value)
return decimal.Decimal(value)
|
023809d3db3e863f66559913f125e61236520a6a
| 14,051
|
def add_frac(zaeler1, nenner1, zaeler2, nenner2):
    """Adds two fractions and reduces the result by their greatest common divisor (ggt)"""
    # set up variables
    zaeler = 0
    nennerg = 0
    ggt = 1
    # check that the input consists of whole numbers only
    if not(isinstance(zaeler1, int) and isinstance(nenner1, int) and isinstance(zaeler2, int) and isinstance(nenner2, int)):
        print("Please enter whole numbers only")
        return 0
    # compute the new numerator and denominator
    else:
        nennerg = nenner1 * nenner2
        zaeler1neu = zaeler1 * nenner2
        zaeler2neu = zaeler2 * nenner1
        zaeler = zaeler1neu + zaeler2neu
        maximum = min(zaeler, nennerg)
        # find the ggt by iterating downwards, starting at maximum itself
        for ggt in range(maximum, 0, -1):
            if (zaeler % ggt) == 0 and (nennerg % ggt) == 0:
                break
    # ggt divides both exactly, so integer division keeps the result integral
    return zaeler // ggt, nennerg // ggt
|
c77734b40f83121656e49a7029cfe73c073ee45d
| 14,053
|
import os
def make_docker_cmd(image, exe, flags='', **kwargs):
"""Create command list to pass to subprocess
Parameters
----------
image : str
Name of docker image
exe : str
path to executable in docker
workdir : str, optional
working directory in docker container
volumes : list of tuples, optional
env : dictionary, optional
Returns
-------
cmd : list
list of arguments which can be passed to subprocess
"""
    cmd = ['docker', 'run'] + ([flags] if flags else [])
# add workdir
try:
cmd += ['-w', kwargs['workdir']]
except KeyError:
pass
# add volumes
for src, dest in kwargs.get('volumes', []):
cmd += ['-v', os.path.abspath(src) + ':' + dest]
    # add environment variables (env is a dict of name -> value)
    for name, value in kwargs.get('env', {}).items():
        cmd += ['-e', name + '=' + value]
# add image and executable names
cmd += [image, exe]
# finally call
return cmd
|
e3ac438bb00fb83da31d0512da2b580daafe164c
| 14,054
|
def BFS_MIN(graph, start):
"""最短路径问题, 树的结构"""
queue = []
queue.append(start)
seen = set()
seen.add(start)
parent = {start:None}
    # start the BFS search
while len(queue) > 0:
vertex = queue.pop(0)
nodes = graph[vertex]
for w in nodes:
if w not in seen:
queue.append(w)
seen.add(w)
parent[w] = vertex
return parent
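# Usage sketch on a small adjacency-list graph: walking the parent map
# back from a node recovers the shortest path from the start.
graph = {'A': ['B', 'C'], 'B': ['A', 'D'], 'C': ['A'], 'D': ['B']}
parent = BFS_MIN(graph, 'A')
assert parent['D'] == 'B' and parent['B'] == 'A'  # path A -> B -> D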
|
91013649e813dd582b08d092f6f5c5c66e4cc92a
| 14,055
|
def sol(arr, n, p):
"""
If we have more chocolates than cost we just pay from the balance
otherwise we use all the balance and add the remaining to the total
cost also making bal 0
"""
bal = 0
res = 0
for i in range(n):
d = arr[i-1]-arr[i] if i > 0 else 0-arr[i]
# Careful about comparing with arr[-1] when i=0 !!!
if d > 0:
bal+=d
else:
d = abs(d)
if bal >= d:
bal -= d
else:
res += d-bal
bal = 0
return res*p
|
75592d2783eba2e4721721b03f84ea5cebffbc78
| 14,056
|
import subprocess
def get_stdout(*args):
"""Runs the subprocess and returns the stdout as a string"""
result = subprocess.run(args, stdout=subprocess.PIPE, universal_newlines=True)
result.check_returncode()
return result.stdout
|
e55b30a05caff148330f88aae961b2da20902d90
| 14,057
|
def ncol(self):
""" return the number of cols """
return len(self.columns)
|
723567269cf2779eef991c35f00814d7b541d75e
| 14,058
|
def insertionsort(arr):
"""
In a given iteration:
1. swap arr[lo] with each larger entry to the left.
2. Increment lo pointer
"""
lo = 0
hi = len(arr) - 1
while lo <= hi:
curr = lo
while curr - 1 >= 0:
if arr[curr] < arr[curr - 1]:
arr[curr], arr[curr - 1] = arr[curr - 1], arr[curr]
curr -= 1
lo += 1
return arr
|
64a44af7b563e708053f04da3970429dc05c4745
| 14,059
|
def add_method(cls):
"""Decorator. @add_method(cls) binds the following function to the class cls."""
def decorator(func):
#method = MethodType(func,cls)
#setattr(cls, func.__name__, method)
setattr(cls, func.__name__, func)
return func
return decorator
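# Usage sketch: bind a function to an existing class after its definition.
class Greeter:
    pass

@add_method(Greeter)
def hello(self):
    return "hello"

assert Greeter().hello() == "hello"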
|
b6f350651dddf90c1c9f88467d04fa7f53b8204f
| 14,060
|
import unicodedata
def unidecode(s: str) -> str:
"""
Return ``s`` with unicode diacritics removed.
"""
combining = unicodedata.combining
return "".join(
c for c in unicodedata.normalize("NFD", s) if not combining(c)
)
|
589dc0403c95e29f340070e3a9e81a6f14950c8e
| 14,061
|
def AutoUpdateUpgradeRepairMessage(value, flag_name):
"""Messaging for when auto-upgrades or node auto-repairs.
Args:
value: bool, value that the flag takes.
flag_name: str, the name of the flag. Must be either autoupgrade or
autorepair
Returns:
the formatted message string.
"""
action = 'enable' if value else 'disable'
plural = flag_name + 's'
link = 'node-auto-upgrades' if flag_name == 'autoupgrade' else 'node-auto-repair'
return ('This will {0} the {1} feature for nodes. Please see '
'https://cloud.google.com/kubernetes-engine/docs/'
'{2} for more '
'information on node {3}.').format(action, flag_name, link, plural)
|
7153b7aa44d6d798aa58d9328ac49097016c0879
| 14,062
|
import ast
import collections
import logging
def _get_ground_truth_detections(instances_file,
allowlist_file=None,
num_images=None):
"""Processes the annotations JSON file and returns ground truth data corresponding to allowlisted image IDs.
Args:
instances_file: COCO instances JSON file, usually named as
instances_val20xx.json.
allowlist_file: File containing COCO minival image IDs to allowlist for
evaluation, one per line.
num_images: Number of allowlisted images to pre-process. First num_images
are chosen based on sorted list of filenames. If None, all allowlisted
files are preprocessed.
Returns:
A dict mapping image id (int) to a per-image dict that contains:
'filename', 'image' & 'height' mapped to filename & image dimensions
respectively
AND
'detections' to a list of detection dicts, with each mapping:
'category_id' to COCO category id (starting with 1) &
'bbox' to a list of dimension-normalized [top, left, bottom, right]
bounding-box values.
"""
# Read JSON data into a dict.
with open(instances_file, 'r') as annotation_dump:
data_dict = ast.literal_eval(annotation_dump.readline())
image_data = collections.OrderedDict()
# Read allowlist.
if allowlist_file is not None:
with open(allowlist_file, 'r') as allowlist:
image_id_allowlist = set([int(x) for x in allowlist.readlines()])
else:
image_id_allowlist = [image['id'] for image in data_dict['images']]
# Get image names and dimensions.
for image_dict in data_dict['images']:
image_id = image_dict['id']
if image_id not in image_id_allowlist:
continue
image_data_dict = {}
image_data_dict['id'] = image_dict['id']
image_data_dict['file_name'] = image_dict['file_name']
image_data_dict['height'] = image_dict['height']
image_data_dict['width'] = image_dict['width']
image_data_dict['detections'] = []
image_data[image_id] = image_data_dict
shared_image_ids = set()
for annotation_dict in data_dict['annotations']:
image_id = annotation_dict['image_id']
if image_id in image_data:
shared_image_ids.add(image_id)
output_image_ids = sorted(shared_image_ids)
if num_images:
if num_images <= 0:
logging.warning(
          '--num_images is %d, hence outputting all annotated images.',
num_images)
elif num_images > len(shared_image_ids):
logging.warning(
'--num_images (%d) is larger than the number of annotated images.',
num_images)
else:
output_image_ids = output_image_ids[:num_images]
for image_id in list(image_data):
if image_id not in output_image_ids:
del image_data[image_id]
# Get detected object annotations per image.
for annotation_dict in data_dict['annotations']:
image_id = annotation_dict['image_id']
if image_id not in output_image_ids:
continue
image_data_dict = image_data[image_id]
bbox = annotation_dict['bbox']
# bbox format is [x, y, width, height]
# Refer: http://cocodataset.org/#format-data
top = bbox[1]
left = bbox[0]
bottom = top + bbox[3]
right = left + bbox[2]
if (top > image_data_dict['height'] or left > image_data_dict['width'] or
bottom > image_data_dict['height'] or right > image_data_dict['width']):
continue
object_d = {}
object_d['bbox'] = [
top / image_data_dict['height'], left / image_data_dict['width'],
bottom / image_data_dict['height'], right / image_data_dict['width']
]
object_d['category_id'] = annotation_dict['category_id']
image_data_dict['detections'].append(object_d)
return image_data
|
2650ff4ffeb13d0a7874164474fa47a82880d45d
| 14,063
|
import re
def combine_placeholders(string, placeholders, _placeholder_re=re.compile(r'%\(([^)]+)\)')):
"""Replace ``%(blah)`` placeholders in a string.
Each placeholder may contain other placeholders.
:param string: A string that may contain placeholders
:param placeholders: A dict containing placeholders and their
values.
"""
def _repl(m):
return placeholders[m.group(1)]
# ensure we don't have cycles that would cause an infinite loop
# resulting in massive memory use
for name, value in placeholders.items():
referenced = set()
while True:
matches = set(_placeholder_re.findall(value))
if not matches:
break
elif matches & referenced:
raise ValueError('placeholder leads to a cycle: ' + name)
referenced |= matches
value = _placeholder_re.sub(_repl, value)
# perform the actual replacements
while True:
string, n = _placeholder_re.subn(_repl, string)
if not n:
break
return string
|
f75dc1cd7b9c1c1d036dbdc60221b426bd56d012
| 14,065
|
def quadratic(a, b, c):
""" Always returns the smallest root as t0 """
discrim = b**2 - 4 * a * c
if discrim < 0:
return (False, 0, 0)
rootd = discrim**(1/2)
if b < 0:
q = -0.5*(b - rootd)
else:
q = -0.5*(b + rootd)
t0 = q / a
t1 = c / q
    # order the roots so that t0 is the smallest, as the docstring promises
    if t0 > t1:
        t0, t1 = t1, t0
    return (True, t0, t1)
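# Worked example (a sketch): x**2 - 5x + 6 = 0 has roots 2 and 3.
ok, t0, t1 = quadratic(1, -5, 6)
assert ok and (t0, t1) == (2.0, 3.0)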
|
3acafea305892bc6d303a3c090411d3eb4eb2204
| 14,066
|
def comparison_total(bst):
"""
-------------------------------------------------------
Sums the comparison values of all Letter objects in bst.
-------------------------------------------------------
Preconditions:
bst - a binary search tree of Letter objects (BST)
Postconditions:
returns
total - the total of all comparison fields in the bst
Letter objects (int)
-------------------------------------------------------
"""
total = 0
l = bst.inorder()
for letter in l:
total += letter.comparisons
return total
|
2fe199d5364c79fd969dbfb2a3dfb7efd62c599a
| 14,068
|
from os.path import dirname, join, abspath, isdir
def find_test_interop_dir():
"""Find the common tests directory relative to this script"""
f = dirname(dirname(dirname(dirname(abspath(__file__)))))
f = join(f, "tests", "interop")
if not isdir(f):
raise Exception("Cannot find tests/interop directory from "+__file__)
return f
|
46f41da24de5491543fde5b30fd86ad0758430c9
| 14,070
|
import os
def input_path(fp):
"""make fp an absolute path and checks it exist"""
cwd = os.path.abspath(os.getcwd())
fp = os.path.normpath(os.path.join(cwd, fp))
if not os.path.exists(fp):
raise ValueError("Invalid path !")
return fp
|
b439649b8b4603b76e46a9aded54fa08890c5e66
| 14,071
|
def aggregate_results(tweets, results):
"""
Aggregates results based on actual tweet labeling and the classified sentiment.
@param tweets: the list of labeled tweets.
@param results: the results form the classification.
@return:
"""
#print "INFO -- Aggregating results"
classification_results = []
# for all tweets
for i in range(len(tweets)):
# append the boolean value representing correct classification of the given tweet.
classification_results.append(tweets[i][1] == results[i])
# compiling the counts of correct and false classification
counts = dict((k, classification_results.count(k)) for k in set(classification_results))
# calculating the accuracy of the classifier
    # use .get so an all-correct (or all-wrong) run does not raise KeyError
    accuracy = (counts.get(True, 0) * 1.0) / len(classification_results)
return counts, accuracy
|
d51bc4b173ca1b4fec632fef68a0f1537a19d799
| 14,073
|
import os
import tempfile
def database_files_path(test_tmpdir, prefix="GALAXY"):
"""Create a mock database/ directory like in GALAXY_ROOT.
Use prefix to default this if TOOL_SHED_TEST_DBPATH or
GALAXY_TEST_DBPATH is set in the environment.
"""
environ_var = f"{prefix}_TEST_DBPATH"
if environ_var in os.environ:
db_path = os.environ[environ_var]
else:
tempdir = tempfile.mkdtemp(dir=test_tmpdir)
db_path = os.path.join(tempdir, 'database')
return db_path
|
415d671a177330a52fa275ac8cd28873130883a5
| 14,074
|