content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import copy
def fillMetadata (f, txid, extra):
  """
  Retrieves a transaction by id and constructs a dict that contains
  the given "extra" fields plus all the basic metadata (like inputs
  and outputs) as retrieved from Xaya Core.

  f: test fixture exposing env.createCoreRpc() -- presumably a Xaya
     test environment; confirm against caller.
  txid: id of the transaction to look up.
  extra: dict of extra fields; deep-copied, never mutated.
  """
  rpc = f.env.createCoreRpc ()
  # Fetch the raw hex of the wallet transaction and decode it to a dict.
  tx = rpc.gettransaction (txid)["hex"]
  data = rpc.decoderawtransaction (tx)
  # Deep copy so the caller's "extra" dict is left untouched.
  res = copy.deepcopy (extra)
  res["txid"] = txid
  res["btxid"] = data["btxid"]
  res["inputs"] = [
    {"txid": i["txid"], "vout": i["vout"]}
    for i in data["vin"]
  ]
  # Aggregate outputs by address and sum up burns to "g/game".
  outs = {}
  burn = 0
  for o in data["vout"]:
    spk = o["scriptPubKey"]
    # Skip name operations entirely; they are not payment outputs.
    if "nameOp" in spk:
      continue
    # The "burn" field holds hex-encoded burn data; b"g/game".hex ()
    # is the hex encoding of the game's burn marker.
    if "burn" in spk and spk["burn"] == b"g/game".hex ():
      burn += o["value"]
    if "address" in spk:
      if spk["address"] not in outs:
        outs[spk["address"]] = 0.0
      outs[spk["address"]] += o["value"]
  res["out"] = outs
  res["burnt"] = burn
  return res
|
d2ccb2203f4943da9877a85ca98349fee5b55883
| 80,654
|
def is_available(resource):
    """Return True when *resource* reports a "ready" status."""
    status = resource.get("status")
    return status == "ready"
|
997a3ab4667e31d3f58e250463ea62b90ef19d72
| 80,656
|
def elapsed(sec):
    """Format an elapsed time *sec* (seconds) for display.

    Under one minute: "12.3s".  One minute or more: "2:5.0m"
    (whole minutes, then seconds to one decimal place).
    """
    minutes = int(sec / 60)
    remainder = sec % 60
    if minutes > 0:
        return "%d:" %(minutes) + "%.1f" %(remainder) + "m"
    return "%.1f" %(remainder) + "s"
|
3cb61df21df5343473dadfbce80989407b3cdab4
| 80,661
|
def getLineAfterText(text, prefix, startPos=0, includeText=False):
    """Return the remainder of the line following *prefix* in *text*.

    The search begins at index *startPos*.  When *includeText* is True
    the prefix itself is kept in the result.  Raises Exception when the
    prefix is absent or nothing follows it on the line.
    """
    found = text.find(prefix, startPos)
    if found < 0:
        raise Exception('Desired text not found: ' + prefix)
    tail_start = found + len(prefix)
    line_end = text.find('\n', tail_start)
    if line_end == -1:
        # No trailing newline: the line runs to the end of the text.
        line_end = len(text)
    # Sanity-check the extracted bounds (empty remainder is an error).
    if (tail_start < startPos) or (line_end <= tail_start):
        raise Exception('Desired text not found: ' + prefix)
    begin = found if includeText else tail_start
    return text[begin:line_end]
|
83773442959bc25564240ca8e513bc2809737809
| 80,665
|
import json
def decode(params):
    """Convert SageMaker hyperparameters produced by ``encode(...)`` back
    into a nested, JSON-decodable dictionary.

    Dotted keys ("a.b.c") become nested dictionaries; every value is
    JSON-decoded.
    """
    decoded = {}
    for dotted_key, raw_value in params.items():
        *path, leaf = dotted_key.split(".")
        node = decoded
        for part in path:
            # Walk/create the nested dicts for all but the last key part.
            node = node.setdefault(part, {})
        node[leaf] = json.loads(raw_value)
    return decoded
|
1f613fabe4aaff4bdf9460e7712d317a13463c58
| 80,666
|
async def get_index(db, index_id_or_version, projection=None):
    """
    Get an index document by its ``id`` or ``version``.
    :param db: the application database client
    :type db: :class:`~motor.motor_asyncio.AsyncIOMotorClient`
    :param index_id_or_version: the id of the index
    :type index_id_or_version: Union[str, int]
    :param projection: a Mongo projection to apply to the result
    :type projection: Union[None, dict, list]
    :return: an index document
    :rtype: Union[None, dict]
    """
    by_version = True
    try:
        version = int(index_id_or_version)
    except ValueError:
        # Not numeric: treat the value as the index document id instead.
        by_version = False
    if by_version:
        return await db.indexes.find_one({"version": version}, projection=projection)
    return await db.indexes.find_one(index_id_or_version, projection=projection)
|
a41c6afaf63222d0dde1f7c19b99d981561d4ced
| 80,667
|
def criterion_mapping(crit):
    """Return the episode mapping tuple
    (table, patient, start, end, code, status, negation, value).

    table: table reference for the episode (the flattened qds_data_type)
    patient/start/end/code/status/negation/value: column-name expressions
    for the episode's patient ID, start and end date/time, coding, HQMF
    status, negation rationale and value respectively.
    """
    table = crit.data['qds_data_type'].flatten()
    columns = ('patient_id',
               'start_dt',
               'end_dt',
               'code',
               'status',
               'negation',
               'value')
    return (table,) + columns
|
9e41d14c28e081bfee0569d093b44d915598f8b3
| 80,669
|
from typing import Union
from typing import Tuple
from typing import List
def pack_version_number(version_buffer: Union[Tuple[int, int, int], List[int]]) -> str:
    """
    Packs the version number into a string.
    Args:
        version_buffer: (Union[Tuple[int, int, int], List[int]]) the version to be packed
    Returns: (str) the packed version number
    """
    major = version_buffer[0]
    minor = version_buffer[1]
    patch = version_buffer[2]
    return f"{major}.{minor}.{patch}"
|
1b71d43843876c4e07e799562fbc7a2a8270272a
| 80,675
|
def _to_bool(text):
"""Convert str value to bool.
Returns True if text is "True" or "1" and False if text is "False" or "0".
Args:
text: str value
Returns:
bool
"""
if text.title() in ("True", "1"):
result = True
elif text.title() in ("False", "0"):
result = False
else:
raise ValueError("Expected 'True', 'False', '1', '0'; got '%s'" % text)
return result
|
d84d523a71ac5550dba41b274dbcb3b1d08d74cd
| 80,684
|
def default(copysets, copyset):
    """Constraint check that always passes.

    build_copysets calls a check function to validate custom constraints
    (e.g. rack or tier awareness) against the *copysets* generated so far
    and the candidate *copyset*.  This default imposes no constraints.
    """
    return True
|
94a3fb774c8254689baaf93f690023244a2777b4
| 80,688
|
def line_line_intersection(x1,y1,x2,y2,x3,y3,x4,y4):
    """
    Return the intersection point (x, y) of two infinite lines.
    https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection
    Line 1 point 1 = (x1,y1)
    Line 1 point 2 = (x2,y2)
    Line 2 point 1 = (x3,y3)
    Line 2 point 2 = (x4,y4)

    Raises ZeroDivisionError when the lines are parallel (shared
    denominator is zero) -- same behaviour as the formula itself.
    """
    # The denominator and the two point-pair determinants appeared twice
    # each in the original expressions; compute them once.
    denom = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
    det1 = x1 * y2 - y1 * x2  # determinant of line 1's points
    det2 = x3 * y4 - y3 * x4  # determinant of line 2's points
    intersection_x = (det1 * (x3 - x4) - (x1 - x2) * det2) / denom
    intersection_y = (det1 * (y3 - y4) - (y1 - y2) * det2) / denom
    return intersection_x, intersection_y
|
1b4a8cf08c733cb5f2e24deb44bff2ff7603fec4
| 80,691
|
def compute_xdot(data, dt):
    """Compute the time derivative of a chunk of snapshots.

    Parameters
    ----------
    data : (Nfull, n_samples) ndarray
        The dataset of snapshot columns.
    dt : float
        The timestep size.

    Returns
    -------
    xdot : (Nfull, n_samples-4) ndarray
        Derivative via the 4th-order five-point stencil
        (https://en.wikipedia.org/wiki/Five-point_stencil); no derivative
        is produced for the first/last two timesteps.
    """
    # f'(t) ~ (f(t-2h) - 8 f(t-h) + 8 f(t+h) - f(t+2h)) / (12 h)
    left2 = data[:, 0:-4]
    left1 = data[:, 1:-3]
    right1 = data[:, 3:-1]
    right2 = data[:, 4:]
    return (left2 - 8 * left1 + 8 * right1 - right2) / (12 * dt)
|
be03d4862ba6eebc972e1396e3755f46edc4d341
| 80,695
|
from typing import Union
def non_negative(diff: Union[float, int]) -> Union[float, int]:
    """Clamp *diff* at zero: negative values become 0, others pass through."""
    if diff < 0:
        return 0
    return diff
|
c0ef7f7661633e48558e6066e0b1cac4c4e89f62
| 80,699
|
def mode(lst):
    """Return the most frequently occurring element of *lst*."""
    distinct_values = set(lst)
    frequency = lst.count
    return max(distinct_values, key=frequency)
|
6bf4393a3e8b3904d0c06fee483e7ca1338af12a
| 80,701
|
def plot_map(m, x, y, data, clevs, cmap, norm, mesh, filled, **map_kwargs):
    """
    Producing a map plot
    Parameters
    ----------
    m: Basemap object
        Handle for the map object returned from 'map_setup' function
    data: numpy array
        2D data array to plot
    x,y: numpy arrays
        Arrays of map coordinates; returned from 'map_setup' function
    clevs: List/array
        Contour levels
    cmap: string
        Color map. See http://matplotlib.org/users/colormaps.html for more
        information.
    norm: Boundary norm object
        Normalize data to [0,1] to use for mapping colors
    mesh: boolean
        Whether to plot data as mesh. If false (default), contour plot is
        made.
    filled: Boolean
        Whether to color fill between contours or not. Defaults to True
    **map_kwargs: keyword arguments
        arguments (key=value) that can be used in pyplot.contour(f)
        See
        http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.contour
    Returns
    -------
    cs: Contour plot object
    """
    if mesh:
        # Pseudocolor mesh: one colored cell per grid point, no contouring.
        cs = m.pcolormesh(x, y, data,
                          cmap=cmap,
                          norm=norm,
                          **map_kwargs)
    else:
        if filled:
            # Filled contour plot between the requested levels.
            cs = m.contourf(x, y, data,
                            levels=clevs,
                            norm=norm,
                            cmap=cmap,
                            **map_kwargs)
        else:
            if cmap is not None:
                # contour() applies cmap only when 'colors' is None, so
                # force it to override any user-supplied colors kwarg.
                map_kwargs['colors'] = None
                cs = m.contour(x, y, data,
                               levels=clevs,
                               norm=norm,
                               cmap=cmap,
                               **map_kwargs)
            else:
                cs = m.contour(x, y, data,
                               norm=norm,
                               levels=clevs,
                               **map_kwargs)
    # Overlay borders/coastlines best-effort: Basemap can raise ValueError
    # for some projections or extents, in which case skip the overlay.
    try:
        m.drawcountries(linewidth=.7, color="#424242")
    except ValueError:
        pass
    try:
        m.drawcoastlines(linewidth=1.0, color='#383838')
    except ValueError:
        pass
    return cs
|
e27edc9f8043925bf0a0de9da9dc18c5293f3585
| 80,704
|
def listtostring(listin):
    """
    Converts a simple list into a space separated sentence, effectively reversing str.split(" ")
    :param listin: the list to convert
    :return: the readable string
    """
    # str.join is linear; the old repeated "+" concatenation was quadratic.
    # The final strip() matches the old behaviour of trimming surrounding
    # whitespace (including any carried by the first/last elements).
    return " ".join(str(element) for element in listin).strip()
|
e27b407d28535254d3375687d17d1c92df34d4d8
| 80,705
|
def paths(bot, nb_name):
    """Return (input_path, output_path) for the notebook *nb_name*.

    The notebook directory and output prefix are read from the bot's
    papermill configuration.
    """
    nb_dir = bot.config.papermill.nb_dir
    if not nb_dir.endswith('/'):
        nb_dir += '/'  # make sure the directory ends with a separator
    path_in = nb_dir + nb_name
    path_out = nb_dir + bot.config.papermill.prefix + nb_name
    return (path_in, path_out)
|
775d7e3d3eb9cdab81fdfe5556470152dff7427e
| 80,706
|
from typing import Callable
def bisect(ok: int, ng: int, pred: Callable[[int], bool]) -> int:
    """Binary-search the boundary of a monotonic predicate.

    Args:
        ok: a known int value satisfying pred(ok).
        ng: a known int value satisfying not pred(ng).
        pred: a monotonic predicate (increasing or decreasing).
    Returns:
        The extreme x (largest or smallest, depending on direction)
        that still satisfies pred(x).
    """
    assert abs(ng - ok) >= 1
    assert pred(ok)
    assert not pred(ng)
    # Shrink the bracket [ok, ng) (or (ng, ok]) until adjacent.
    while abs(ng - ok) > 1:
        midpoint = (ok + ng) // 2
        ok, ng = (midpoint, ng) if pred(midpoint) else (ok, midpoint)
    assert abs(ng - ok) == 1
    return ok
|
821ae5923e648153ed1eb8a110021cb6fc0fd246
| 80,709
|
def get_coordinate_from_line(coordinate, line):
    """
    Return the value of *coordinate* parsed from a comma-separated *line*.

    Looks for a "<coordinate>=" token; "phi" values are read to the end
    of the token, other coordinates drop the token's final character.
    Returns None when the coordinate is absent.
    """
    marker = str(coordinate) + "="
    for token in line.split(","):
        if marker not in token:
            continue
        value_start = token.index("=") + 1
        if coordinate == "phi":
            return float(token[value_start:])
        return float(token[value_start:-1])
|
99b456d2ba78ae682514f1fcb884a5fc7b47879b
| 80,711
|
def max(x):
    """Find row-wise maxima and their indices (axis=1) of a 2-D array.

    Args:
        x: array-like with exactly two dimensions.
    Returns:
        (max_vals_along_axis, max_indices_along_array)
    Raises:
        ValueError: when x is not 2-dimensional.
    """
    if len(x.shape) != 2:
        raise ValueError('The size of x shape must be 2 dimension')
    return x.max(1), x.argmax(1)
|
11cea43653a600d65d074374fec3ca2228dea98b
| 80,715
|
from typing import Tuple
from typing import Union
import re
def parse_thread_url(url: str) -> Tuple[str, Union[str, None]]:
    """Extract (thread_id, post_id) from a thread URL.

    post_id is None when the URL carries no "#post-..." fragment.
    Raises ValueError when no thread id is present.
    """
    match = re.search(r"(t-[0-9]+)(?:.*#(post-[0-9]+))?", url)
    if match is None:
        raise ValueError("Thread URL does not match expected pattern")
    return match.group(1), match.group(2)
|
75588281b0d777543acaab15e59ac7b7e2b0791a
| 80,716
|
def primersOverlap(l, r):
    """
    Check whether two primer segments overlap.
    Inputs:
        l: left primer Segment
        r: right primer Segment
    Returns:
        Boolean -- True when the left primer's end coordinate passes the
        right primer's start coordinate.
    """
    left_end = l[3]['coords'][1]
    right_start = r[3]['coords'][0]
    return left_end > right_start
|
baa44503ac33f74eba4fda635f6247dd979a4dfb
| 80,718
|
def add_suffix(txt, suffix):
    """Prefix every element of a comma-separated list with *suffix*.

    A leading "-" on an element stays in front of the suffix.
    Returns None when txt is None.

    Example:
        add_suffix("hello,john", "ga:") -> "ga:hello,ga:john"
        add_suffix("-hello", "ga:") -> "-ga:hello"
    """
    if txt is None:
        return None
    prefixed = []
    for element in txt.split(","):
        # startswith() is safe for empty elements; the old element[0]
        # indexing raised IndexError on "".
        if element.startswith("-"):
            prefixed.append("-" + suffix + element[1:])
        else:
            prefixed.append(suffix + element)
    return ",".join(prefixed)
|
f11865c6f3ab836e5e58336db7471f264ea181b0
| 80,723
|
def do_not_recurse(value):
    """
    Identity wrapper marking an unpickled object as not-to-be-expanded.

    The instantiation parser recognizes and respects this symbol and
    leaves the wrapped object alone; at runtime it is a no-op.

    Parameters
    ----------
    value : object
        The value to be returned.

    Returns
    -------
    value : object
        The same object passed in as an argument.
    """
    return value
|
f11a907e0376fec00e3b4335717edf95395acbc2
| 80,729
|
def services_contain_id(services_data, service_id):
    """
    Test whether *service_id* is the id of one of the services.
    :param services_data: list of services
    :param service_id: id of service
    :return: True if some service carries that id, else False
    """
    return any(service['id'] == service_id for service in services_data)
|
e404d8b13842f5d1533a7503c633a70405612c8c
| 80,730
|
from typing import List
def ngrams(tokens: List, n: int):
    """
    Args:
        tokens: List of elements
        n: N-gram size
    Returns:
        List of ngrams (each a length-n slice of *tokens*)
    """
    window_count = len(tokens) - n + 1
    grams = []
    for start in range(window_count):
        grams.append(tokens[start : start + n])
    return grams
|
197bf76e6113eaf83589887e7ac35020a32ab1ab
| 80,738
|
import six
def get_model_instances(model_class, instances_or_ids, sep=',', ignore_invalid=True, raise_on_error=True):
    """
    Resolve a mix of model instances and primary keys to model instances.
    :param model_class: the Django model class to resolve against
    :param instances_or_ids: iterable of instances/pks, or a sep-joined string of pks
    :param sep: separator used when instances_or_ids is a string
    :param ignore_invalid: skip unresolvable entries entirely when True
    :param raise_on_error: only effective when ignore_invalid is False. if True, raise error; else add None to results.
    :return: list of resolved instances (possibly containing None)
    """
    if isinstance(instances_or_ids, six.string_types):
        instances_or_ids = instances_or_ids.split(sep)
    resolved = []
    for candidate in instances_or_ids:
        if isinstance(candidate, model_class):
            resolved.append(candidate)
            continue
        try:
            resolved.append(model_class.objects.get(pk=candidate))
        except Exception as exc:
            if ignore_invalid:
                continue  # silently drop unresolvable entries
            if raise_on_error:
                raise exc
            resolved.append(None)
    return resolved
|
93500dfd7f244a7173136e094c8e0676b43d4f66
| 80,741
|
def add_or_delete(old, new, add_fun, del_fun):
    """
    Diff the 'old' and 'new' lists and apply callbacks: del_fun is invoked
    for every element missing from 'new', then add_fun for every element
    missing from 'old'.

    Returns a tuple of (added elements, deleted elements) as sets.
    """
    to_add = set(new) - set(old)
    to_delete = set(old) - set(new)
    # Deletions first, mirroring the original call order.
    for gone in to_delete:
        del_fun(gone)
    for fresh in to_add:
        add_fun(fresh)
    return to_add, to_delete
|
d11052532953c2d50dff27078591b587edb3c5ba
| 80,743
|
import math
def PSNR(L2loss, I=2):
    """
    Calculate the PSNR metric according to eq. 3.
    L2loss: a float
    I: set to 2 since the paper assumes the HR and SR pixel values are between [-1,1]
    """
    ratio = I ** 2 / L2loss  # argument of the logarithm
    return 10 * math.log10(ratio)
|
fef7286356bea15b9dc4a875f76c989f322858fa
| 80,745
|
import base64
def get_session_key(request):
    """
    Extract and base64-decode the session key cookie from a request.
    Returns None when no session key was provided.
    """
    encoded = request.COOKIES.get('session_key', None)
    if encoded is None:
        return None
    return base64.b64decode(encoded)
|
4d40e33108f728ee47b82490f251b5002346ffb6
| 80,746
|
def check_pointing(timestamp, point_0, point_2, point_4, point_41):
    """Return which pointing (0, 2, 4 or 41) contains *timestamp*.

    Falls back to 41 when the timestamp is in none of the first three
    collections.
    """
    for label, group in ((0, point_0), (2, point_2), (4, point_4)):
        if timestamp in group:
            return label
    return 41
|
a339d267dca1129bdd7c4793749c85201460e07d
| 80,747
|
def query_periods(query_type=None, month_starts=[], month_ends=[]):
    """Generate a dictionary with consecutive monthly intervals to query where
    dates are formatted a little differently depending on the API to query.
    API date formatting:
    - AirNow API: Expects dates in format ``'YYYY-MM-DDTHH'``
      - Example: ``'2019-08-01T00'``
    - AQS API: Expects dates in format ``'YYYYMMDD'``
      - Example: ``'20190801'``
    Args:
        query_type (str):
            The name of the API to query (either 'AirNow' or 'AQS').
        month_starts (pandas datetimeindex):
            An array of monthly start dates generated by Date_Range_Selector
        month_ends (pandas datetimeindex):
            An array of monthly end dates generated by Date_Range_Selector
    Returns:
        monthly_periods (dict):
            Dictionary with monthly beginning and end dates formatted to the
            scheme expected by the API to be queried.
    Raises:
        ValueError: for any query_type other than 'AQS' or 'AirNow'.
    """
    monthly_periods = {}
    for start, end in zip(month_starts, month_ends):
        month_name = start.month_name()
        year_name = str(start.year)[-2:]
        # Zero-pad month and day so every date renders at fixed width.
        # (The old code hard-coded a '0' before the start day -- correct
        # only for single-digit days -- and never padded the end day.)
        s_yr = str(start.year)
        s_mo = str(start.month).zfill(2)
        s_day = str(start.day).zfill(2)
        e_yr = str(end.year)
        e_mo = str(end.month).zfill(2)
        e_day = str(end.day).zfill(2)
        if query_type == 'AQS':
            bdate = s_yr + s_mo + s_day
            edate = e_yr + e_mo + e_day
            monthly_periods.update({month_name + year_name: {'bdate': bdate,
                                                             'edate': edate}})
        elif query_type == 'AirNow':
            bdate = (s_yr + '-' + s_mo + '-' + s_day + 'T00')
            edate = (e_yr + '-' + e_mo + '-' + e_day + 'T23')
            monthly_periods.update({e_yr + s_mo: {'startDate': bdate,
                                                  'endDate': edate}})
        else:
            raise ValueError(f'Invalid value for query_type: {query_type}.'
                             ' Accepted values include "AQS" and "AirNow"')
    return monthly_periods
|
0d6d6a0c987fe05f91699597e84f6b2a867f1898
| 80,750
|
def set_offset(chan_obj):
    """Return a tuple of (offset, calibrate) values for a channel.

    Arguments:
        chan_obj (dict): Dictionary containing channel information with
            'physical_max'/'physical_min'/'digital_max'/'digital_min' keys.
    """
    gain = ((chan_obj['physical_max'] - chan_obj['physical_min'])
            / (chan_obj['digital_max'] - chan_obj['digital_min']))
    # Offset shifts the calibrated digital minimum onto the physical minimum.
    shift = chan_obj['physical_min'] - gain * chan_obj['digital_min']
    return (shift, gain)
|
06ab3aeafcb1ba26799d3ac2696b62909928310d
| 80,751
|
def invcalcbarycentric(pointuv, element_vertices):
    """
    Convert barycentric coordinates into 3d.
    https://en.wikipedia.org/wiki/Barycentric_coordinate_system
    https://math.stackexchange.com/questions/2292895/walking-on-the-surface-of-a-triangular-mesh
    :param pointuv: Point in barycentric coordinates (u, v)
    :param element_vertices: Vertices of current element
    :return: pointuv in 3d coordinates (x, y, z)
    """
    origin = element_vertices[0]
    edge_u = element_vertices[1] - origin
    edge_v = element_vertices[2] - origin
    return origin + pointuv[0] * edge_u + pointuv[1] * edge_v
|
9aebf9e0579321788b242653a8c51b20dcad2fea
| 80,752
|
def get_hosp_given_case_effect(ve_hospitalisation: float, ve_case: float) -> float:
    """
    Calculate the effect of vaccination on hospitalisation in cases.
    The hospitalisation effect must be at least the effect on developing
    symptomatic Covid, otherwise hospitalisation would be commoner in
    breakthrough cases than in unvaccinated cases.
    Args:
        ve_hospitalisation: Vaccine effectiveness against hospitalisation (given exposure only)
        ve_case: Vaccine effectiveness against symptomatic Covid (also given exposure)
    Returns:
        VE for hospitalisation in breakthrough symptomatic cases
    """
    msg = "Hospitalisation effect less than the effect on becoming a case"
    assert ve_hospitalisation >= ve_case, msg
    # Ratio of residual hospitalisation risk to residual case risk.
    breakthrough_risk_ratio = (1.0 - ve_hospitalisation) / (1.0 - ve_case)
    ve_hosp_given_case = 1.0 - breakthrough_risk_ratio
    # Should be impossible for the following assertion to fail, but anyway
    msg = f"Effect of vaccination on hospitalisation given case: {ve_hosp_given_case}"
    assert 0.0 <= ve_hosp_given_case <= 1.0, msg
    return ve_hosp_given_case
|
1f3058e1d30ee78b65ad0b46d13187c7efe7d055
| 80,771
|
def get_first_line(comment):
    """Return the first line of a comment. Convenience function.

    Parameters
    ----------
    comment : str
        A complete comment.

    Returns
    -------
    str
        The first line of the comment.
    """
    first_line, _, _ = comment.partition("\n")
    return first_line
|
ce474c0c59105f505943b85a1f1ef8205b929e5e
| 80,776
|
def get_doc(mod_obj):
    """
    Safely build an HTML link (or plain name) for a module's documentation.
    :param mod_obj: module object (mapping with 'document-name'/'reference')
    :return: an anchor tag, the bare document name, or 'N/A' when neither
        field is present.
    """
    try:
        doc_name = mod_obj.get('document-name')
        ref = mod_obj.get('reference')
        if ref:
            # Prefer the document name as the link label when available.
            label = doc_name if doc_name else ref
            return '<a href="' + ref + '">' + label + '</a>'
        if doc_name:
            return doc_name
    except Exception as e:
        raise Exception(e)
    return 'N/A'
|
13921ee2354385dbb3988dcd698552c606784602
| 80,777
|
def convert_args_to_latex(file: str) -> list:
    """
    Read the file produced when plotting figures with 'save=True' in
    quantum_HEOM and convert its argument lines into strings that render
    correctly in LaTeX.  The two matching lines correspond to 1) the
    QuantumSystem constructor arguments and 2) the arguments of its
    plot_time_evolution() call.

    Parameters
    ----------
    file : str
        The absolute path of the input file.
    """
    converted = []
    with open(file, 'r') as handle:
        for line in handle:
            if not (line.startswith('args') or line.startswith('plot_args')):
                continue
            # Normalize quotes, break entries onto new lines, and escape
            # LaTeX-special characters.
            line = line.replace('\'', '\"').replace(', "', ', \\newline "')
            line = line.replace('{', '\\{').replace('}', '\\}')
            line = line.replace('_', '\\_')
            converted.append(line)
    return converted
|
59abf521939aa85b7f2bd7b045b6f697a51dda46
| 80,778
|
def class_is_list(cls):
    """
    Return True when the class name *cls* denotes a list object
    (i.e. it begins with "of_list_").
    """
    return cls.startswith("of_list_")
|
4ff0b83efcef4cfcc5bd4a92fe5549923919972f
| 80,780
|
def unite(oEditor, partlist, KeepOriginals=False):
    """
    Unite the specified objects in the HFSS editor.

    Parameters
    ----------
    oEditor : pywin32 COMObject
        The HFSS editor in which the operation will be performed.
    partlist : list
        List of part name strings to be united.
    KeepOriginals : bool
        Whether to keep the original parts for subsequent operations.

    Returns
    -------
    objname : str
        Name of object created by the unite operation (the first part
        in *partlist*).

    Examples
    --------
    >>> import Hyphasis as hfss
    >>> [oAnsoftApp, oDesktop] = hfss.setup_interface()
    >>> oProject = hfss.new_project(oDesktop)
    >>> oDesign = hfss.insert_design(oProject, "HFSSDesign1", "DrivenModal")
    >>> oEditor = hfss.set_active_editor(oDesign, "3D Modeler")
    >>> tri1 = hfss.create_polyline(oEditor, [0, 1, 0], [0, 0, 1], [0, 0, 0])
    >>> tri2 = hfss.create_polyline(oEditor, [0, -1, 0], [0, 0, 1], [0, 0, 0])
    >>> tri3 = hfss.unite(oEditor, [tri1, tri2])
    """
    selections = ','.join(partlist)
    selections_arg = ["NAME:Selections", "Selections:=", selections]
    unite_params_arg = ["NAME:UniteParameters", "KeepOriginals:=", KeepOriginals]
    oEditor.Unite(selections_arg, unite_params_arg)
    # HFSS names the union after the first selected part.
    return partlist[0]
|
7f89ea41c65dfd7daca885cc5c138130db0f7bc0
| 80,783
|
def better_bottom_up_mscs(seq: list) -> tuple:
    """Return (sum, start, end) of the maximum contiguous subsequence.

    sum is the subsequence total; start and end are its inclusive
    indices.  Recurrence (Kadane): the best sum ending at position k is
    either seq[k] alone or the previous best extended by seq[k].
    Time complexity: O(n).  Space complexity: O(1)."""
    best = running = seq[0]
    run_start = 0
    start = end = 0
    for pos in range(1, len(seq)):
        # Extend the current run only while it contributes positively.
        if running > 0:
            running += seq[pos]
        else:
            running = seq[pos]
            run_start = pos
        if running > best:
            best = running
            start, end = run_start, pos
    return best, start, end
|
a41f2f8a772cba2cf9166c2db2e39abd30bb7361
| 80,787
|
def create_cubes(n):
    """Return the cubes of 0..n-1 as a list.

    Uses a list comprehension instead of the old append loop.  The whole
    list is still materialized in memory; a generator would be the lazy
    alternative, but the list return type is kept for compatibility.
    """
    return [x ** 3 for x in range(n)]
|
59055269162ba33407ea0ede9747b77b74e504db
| 80,788
|
import re
def get_device_number(device):
    """Extract the numeric part of a device name.

    The number starts at the first digit and runs to the end of the
    string, e.g. "D1000" -> "1000" and "X0x1A" -> "0x1A".
    Raises ValueError when *device* contains no digit.
    """
    matched = re.search(r"\d.*", device)
    if matched is None:
        raise ValueError("Invalid device number, {}".format(device))
    return matched.group(0)
|
4a62aae822ed931a12574c31feafa3108bf783e3
| 80,792
|
def extract_roi(image, bounding_box):
    """Extract the region of interest defined by *bounding_box* from *image*.

    Args:
        image: grayscale image as 2D numpy array of shape (height, width)
        bounding_box: (x, y, width, height) in image coordinates
    Returns:
        region of interest as 2D numpy array
    """
    x, y, width, height = bounding_box
    rows = slice(y, y + height)
    cols = slice(x, x + width)
    return image[rows, cols]
|
2a5639492d67bb173f4c68b3d755ed0fe0ab6e31
| 80,795
|
from typing import Callable
def filter_dict(d: dict, cond: Callable = bool) -> dict:
    """Filter a dict, keeping only entries whose value passes *cond*.

    Args:
        d: dict to filter.
        cond: callable applied to each value; defaults to a truthy check.
    Returns:
        A new dict with only the keys whose values satisfied *cond*.
    """
    kept = {}
    for key, value in d.items():
        if cond(value):
            kept[key] = value
    return kept
|
8b704db11baa7d07ef61a0ebf7db6c0dfc2baa88
| 80,799
|
from typing import Iterable
def simple_chunker(a: Iterable, chk_size: int):
    """Generate fixed sized non-overlapping tuple chunks of iterable ``a``.

    A trailing partial chunk is dropped.  Chunks are always tuples; use
    ``map`` to convert, e.g. ``map(list, chunker(...))``.

    >>> list(simple_chunker(range(7), 3))
    [(0, 1, 2), (3, 4, 5)]
    >>> list(simple_chunker(range(6), 2))
    [(0, 1), (2, 3), (4, 5)]
    >>> list(simple_chunker(range(6), 1))
    [(0,), (1,), (2,), (3,), (4,), (5,)]
    """
    # zip pulls chk_size items per chunk from one shared iterator.
    shared_iterator = iter(a)
    return zip(*[shared_iterator] * chk_size)
|
863b15cf69eda9aa097c6e13ef7798c83961feba
| 80,802
|
def message_get_signal(message, signal_name):
    """Find the requested signal inside a decoded message.

    Arguments:
        message: dict, the message provided by message_decode()
        signal_name: str, name of the signal (from DBC)
    Return:
        dict with the decoded signal's information, or None when absent.
    """
    signals = message.get('signals', [])
    matches = (s for s in signals if s.get('name') == signal_name)
    return next(matches, None)
|
6ec04e9d8229a32e85e1edc9ed679d89d6a12867
| 80,806
|
def find_brute(T, P):
    """Return the lowest index of T at which substring P begins (or else -1)."""
    pattern_len = len(P)
    # Try every candidate start; accept the first full character-by-character match.
    for start in range(len(T) - pattern_len + 1):
        if all(T[start + k] == P[k] for k in range(pattern_len)):
            return start
    return -1
|
914d736e3a801fd6b44bb6f10e58535cb348bcf4
| 80,813
|
def linear_decay(x0, alpha, T, t):
    """Compute the linearly decayed value of quantity x at time t.

    x(t) = x0 - (1-alpha) * x0 * t / T   if t <= T
    x(t) = alpha * x0                     if t > T

    Args:
        x0: Initial value
        alpha: Linear decay coefficient (alpha > 0)
        T: Time at which to stop decaying
        t: Current time
    """
    if t > T:
        return alpha * x0
    return x0 - (1 - alpha) * x0 * t / T
|
68effbff6a15d895d599d6f21c836f1dc12d6dd0
| 80,815
|
def parse(line):
    """Parse *line* into an `(action, value)` pair: first char + int rest."""
    action, raw_value = line[0], line[1:]
    return action, int(raw_value)
|
1b48e5b8979fafc40129e620c15b292591f09112
| 80,816
|
def sub(x, y):
    """Return the difference of the two values, x - y."""
    difference = x - y
    return difference
|
1ccfb2086bfb8bdc4d575fa1e78873d76a539d65
| 80,824
|
def indices_between_times(times, start, end):
    """
    Return (first, last) index bounds within *times* for the window
    [start, end): first is the first index whose time is >= start, and
    last is the index just before the first time >= end.  Either entry
    is -1 when no qualifying time exists.
    """
    indices = [-1, -1]
    for i, t in enumerate(times):
        # '== -1' replaces the old 'is -1': identity comparison against an
        # int literal is implementation-dependent and raises SyntaxWarning.
        if t >= start and indices[0] == -1:
            indices[0] = i
        if t >= end and indices[1] == -1:
            indices[1] = i - 1
    return tuple(indices)
|
25af6ee6cd0e026d0779dff5897aa3d2535d7996
| 80,831
|
def describe_pressure(pressure):
    """Convert pressure into barometer-type description."""
    # Guard-clause ladder: each threshold implies the previous ones failed.
    if pressure < 970:
        return "storm"
    if pressure < 990:
        return "rain"
    if pressure < 1010:
        return "change"
    if pressure < 1030:
        return "fair"
    if pressure >= 1030:
        return "dry"
    # Unreachable for ordinary numbers; kept for non-comparable values.
    return ""
|
ad43061cc5e715ac8450a746c4322401aa380b41
| 80,833
|
import torch
def Conv2dGroup(
    in_channels: int,
    out_channels: int,
    kernel_size: int = 3,
    stride: int = 1,
    padding: int = 0,
    bias: bool = True,
    num_groups=1,
    **kwargs,
):
    """A 2D convolution followed by a group norm and ReLU activation.

    NOTE(review): **kwargs is accepted but never forwarded anywhere --
    confirm whether extra arguments were meant to reach Conv2d.
    """
    conv = torch.nn.Conv2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        bias=bias,
    )
    norm = torch.nn.GroupNorm(num_groups, out_channels)
    activation = torch.nn.ReLU(inplace=True)
    return torch.nn.Sequential(conv, norm, activation)
|
eb723c3673a263c573022681cc5b30018c8647b6
| 80,836
|
def parseOptions(userOptions, jobOptions):
    """
    Verify that user supplied options fit the criteria for a set of
    job options.
    Args:
        userOptions: a set of user supplied options
        jobOptions: an option schema for a job
    Returns:
        a list of errors (can be empty)
        options to send to the job server
    """
    # Dispatch table for the supported option types; unknown types pass
    # the value through unchanged.
    coercers = {"string": str, "int": int, "float": float}
    sendOptions = {}
    errors = []
    for name, info in jobOptions.items():
        value = userOptions.get(name, None)
        if value is None:
            default = info.get("DEFAULT", None)
            if default is None:
                # Missing with no default: flag required options, skip
                # the option either way.
                if info["REQUIRED"]:
                    errors.append("The option '%s' is required" % name)
                continue
            value = default
        caster = coercers.get(info["TYPE"])
        sendOptions[name] = caster(value) if caster else value
    return errors, sendOptions
|
166ce216cbe511b5a2c25e6b928b94e0ffd1f66e
| 80,837
|
def _create_issue_search_results_json(issues, **kwargs):
"""Returns a minimal json object for Jira issue search results."""
return {
"startAt": kwargs.get("start_at", 0),
"maxResults": kwargs.get("max_results", 50),
"total": kwargs.get("total", len(issues)),
"issues": issues,
}
|
2bf918fc89a1ebf1bbedde2f65ae8527499272c2
| 80,840
|
def _full_qualified_name(obj):
""" Gets the full qualified name of an object """
klass = obj.__class__
module = klass.__module__
if module == 'builtins':
return klass.__qualname__ # avoid outputs like 'builtins.str'
return f'{module}.{klass.__qualname__}'
|
d062c119cea05b6a58a17ae85afa2505c210ea05
| 80,845
|
def get_yes_or_no_input(prompt):
    """
    Prompt the user for a y/n answer, repeating until one is given.
    :param prompt: (String)
        prompt for input.
    :return: (Boolean)
        True for Yes
        False for No
    """
    while True:
        answer = input(prompt + " [y/n]: ")
        if answer == 'y':
            return True
        if answer == 'n':
            return False
        print("Input not accepted, please try again\n")
|
f1796ee4cd648c2f3ccfd62ea0ad6463813c084f
| 80,846
|
def make_multi_simple(edge_list):
    """
    Take the edge list of a graph and return the simple graph obtained by
    removing loops and multiple edges.
    """
    # Canonicalize each edge and dedupe, then drop self-loops.
    normalized = {tuple(sorted(edge)) for edge in edge_list}
    return [edge for edge in sorted(normalized) if edge[0] != edge[1]]
|
e8b3c615b1378f8d534567449c0da5959562e2e7
| 80,847
|
def month2label(month):
    """
    Convert a month number to one of four season numbers.
    Months 1-3 -> 1, 4-6 -> 2, 7-9 -> 3, anything else -> 4.
    :param month:
    :return:
    """
    season_by_month = {1: 1, 2: 1, 3: 1,
                       4: 2, 5: 2, 6: 2,
                       7: 3, 8: 3, 9: 3}
    return season_by_month.get(month, 4)
|
ed1ba43094648b8ba2c96f5afd33fedc8624d9f9
| 80,853
|
import random
import string
def get_tmp_suffix(length=None):
    """
    Return a random filename extension ("." plus *length* uppercase
    letters/digits; *length* defaults to 10).
    """
    if length is None:
        length = 10
    alphabet = string.ascii_uppercase + string.digits
    return "." + "".join(random.choices(alphabet, k=length))
|
cd3c67ccadc375b34dbc97ed299182dfadc98328
| 80,854
|
from typing import Any
from typing import Iterable
def objectIsObjectIterable(o: Any) -> bool:
    """Decide whether o is an iterable of objects.

    Like typing.Iterable, but more restricted for serializability
    checking: strings are explicitly excluded.
    :return: True if o is an iterable of objects, but not a string
    :rtype: bool
    """
    if isinstance(o, str):
        return False
    return isinstance(o, Iterable)
|
8c29d7f96914be56e2ed645c37034f6fb265ea6b
| 80,857
|
def string_fraction(numerator, denominator):
    """
    Format a fraction as a simplified string (denominator 1 is omitted).
    """
    is_whole = denominator == 1
    return f"{numerator}" if is_whole else f"{numerator}/{denominator}"
|
c0449d66cb90246ef204f9aa512e31eed8f3989e
| 80,862
|
def _get_serial_bitrate(config):
    """
    Get the serial port bitrate to be used for the tests.

    :param config: object exposing `getoption` — presumably a pytest
        Config, given the `--serial-bitrate` CLI flag; confirm against
        the conftest that registers the option.
    :return: whatever was supplied for `--serial-bitrate` (type depends
        on how the option was registered).
    """
    return config.getoption("--serial-bitrate")
|
4efc298f19c16d2e7cda9eaf280127f8b642bd9d
| 80,863
|
def process_string(string):
    """Remove leading/trailing whitespace from a string.

    :param string: any string
    :return: the string with surrounding whitespace removed

    Note: the original chained `strip()`, `strip('\\n')`, `strip('\\t')`
    and `strip()` again — but a bare `str.strip()` already removes ALL
    leading/trailing whitespace, including newlines and tabs, so the
    four passes collapse to one with identical results.
    """
    return string.strip()
|
b519784fdc4ce7b2dbed320aada4e920ac0df0ff
| 80,867
|
def is_greyscale_palette(palette):
    """Return whether the palette is greyscale only.

    :param palette: flat sequence of 768 values — 256 (R, G, B) triples
    :return: True iff every triple satisfies R == G == B

    Bug fix: the original condition `palette[j] != palette[j+1] != palette[j+2]`
    is a chained comparison (R != G AND G != B), so triples like (5, 5, 9)
    — where two of the three channels happen to match — were wrongly
    accepted as greyscale.
    """
    for i in range(256):
        j = i * 3
        if not (palette[j] == palette[j + 1] == palette[j + 2]):
            return False
    return True
|
4a8473736a7ad77cb6a2330d09b344ce5ad4ba5d
| 80,870
|
def decimal_to_hexadecimal(decimal: int) -> str:
    """Convert a decimal integer to a '0x'-prefixed hexadecimal string.

    :param decimal: integer to convert (negative values keep a '-' sign)
    :return: e.g. 255 -> '0xff', -4096 -> '-0x1000', 0 -> '0x0'
    :raises TypeError: if `decimal` is not an int

    Improvements: the original shadowed the builtin `hex` with a local
    list and built digits via O(n^2) `list.insert(0, ...)`; `format(n, 'x')`
    produces the same lowercase digits directly (and yields '0' for 0,
    so no special case is needed).
    """
    if not isinstance(decimal, int):
        raise TypeError("You must enter integer value")
    sign = '-' if decimal < 0 else ''
    return f"{sign}0x{format(abs(decimal), 'x')}"
|
feb4c7e1f93527f140f3a037c535711d9d65df88
| 80,872
|
def search_services(query, services, quiet):
    """Search map services for the given query string.

    A service matches when the query names a featureclass or database it
    uses, or appears (case-insensitively) in its mxd path or name.

    :param query: string to search for
    :param services: list of MapService objects
    :param quiet: verbosity selector — 0: full repr, 1: name and url,
        2 (or more): url only
    :return: list of string representations of the matches (deduplicated;
        order unspecified, as the originals are collected in a set)
    """
    needle = query.upper()
    matches = set()
    for svc in services:
        hit = (svc.uses_feature(query) or svc.uses_database(query)
               or needle in svc.mxd.upper() or needle in svc.name.upper())
        if not hit:
            continue
        if quiet >= 2:
            matches.add(svc.veryquiet_repr())
        elif quiet == 1:
            matches.add(svc.quiet_repr())
        else:
            matches.add(repr(svc))
    return list(matches)
|
df92cb9a2eee9425efb8ceaa9ad6a25b34d00b12
| 80,873
|
def add_dicts(d1, d2):
    """Merge d2 into d1 in place, combining values of shared keys with +=.

    Keys only present in d2 are copied over unchanged (no implicit zero),
    so non-numeric values work as long as shared keys support `+`.

    :param d1: destination dict (mutated and returned)
    :param d2: source dict (may be empty or falsy)
    :return: d1
    """
    if not d2:
        return d1
    for key, value in d2.items():
        if key in d1:
            d1[key] += value
        else:
            d1[key] = value
    return d1
|
7d8b07fdfc5b84f873180bac7186c5c48be3747a
| 80,874
|
def select_nodes(batch_data, roots):
    """Run MCTS 'select' on a batch of root nodes.

    Each batch_data entry holds (game, state, player_1, player_2) in its
    first four slots; the player to move — player_1 when
    game.player(state) is truthy, player_2 otherwise — selects from the
    corresponding root.

    :param batch_data: per-game tuples/lists (game, state, player_1, player_2, ...)
    :param roots: one MCTS root node per batch entry
    :return: list of selected nodes, one per root

    Cleanup: the original re-assigned `root = roots[i]` even though
    enumerate() had already bound it, and unpacked via five separate
    index statements.
    """
    nodes = []
    for i, root in enumerate(roots):
        game, state, player_1, player_2 = batch_data[i][:4]
        player = player_1 if game.player(state) else player_2
        nodes.append(player.select(root))
    return nodes
|
bf6684814314b78200e115fdbb245c0a861fd74a
| 80,876
|
def ParentId(tpe, id):
    """Build a search criterion matching records by their parent's id.

    Typical uses: observables by case id, tasks by case id, logs by task
    id, jobs by observable id.

    :param tpe: class name of the parent (`case`, `case_task`,
        `case_artifact`, ...)
    :param id: the parent's id value
    :return: JSON-style dict of the criterion, e.g.
        ParentId('case', '1234545643') ->
        {'_parent': {'_type': 'case', '_id': '1234545643'}}
    """
    parent = {'_type': tpe, '_id': id}
    return {'_parent': parent}
|
ca94147e2c750b5e6b0a4b2d37c520d6682e90cb
| 80,878
|
def oct2hex(x):
    """Convert an octal string to a hexadecimal string (no '0x' prefix).

    For instance: '32' -> '1a'.

    :param x: octal digits, optionally signed, e.g. '32' or '-10'
    :return: lowercase hex digits, e.g. '1a' or '-8'

    Bug fix: the original `hex(int(x, 8))[2:]` assumed the '0x' prefix
    starts at index 0, so a negative input like '-10' produced 'x8'
    (slicing off '-0' instead of '0x'); format(..., 'x') keeps the sign.
    """
    return format(int(x, 8), 'x')
|
78dee661443be2ba2b2e39a21746a3da3cf6ec5c
| 80,882
|
def extract_data(df, filter_missing=True):
    """Extract the data series from an evalmetrics output frame.

    An evalmetrics output carries the data plus metadata columns; this
    keeps only the 'data' column, optionally dropping rows whose
    'missing' metadata equals 100 (fully missing).

    :param df: DataFrame with at least 'data' and 'missing' columns
    :param filter_missing: drop fully-missing rows when True
    :return: the 'data' column as a Series
    """
    if filter_missing:
        keep = df["missing"] != 100
        df = df[keep]
    return df["data"]
|
81697ab4bd906bac68f4755b8ff04cd511d7b480
| 80,886
|
def remove_query_string(url):
    """Strip the query string (everything from the first '?') off a URL.

    :param url: URL, with or without a query string
    :return: the URL up to but excluding the first '?'
    """
    base, _, _ = url.partition("?")
    return base
|
f99a607e68e9e086f3c0f3296806dba8979a45fd
| 80,897
|
def train_test_valid_split(ts, prediction_length):
    """Slice a time series into overlapping train / test / validation sets.

    * Train: from the start, truncated 2*prediction_length before the end.
    * Test: from the start, truncated prediction_length before the end
      (so it contains the whole train set plus prediction_length more).
    * Validation: the final prediction_length elements.

    :param ts: time series to split (must support slicing and .copy())
    :param prediction_length: horizon used to size the splits
    :return: (train, test, validation) copies

    Cleanup: the original computed an unused `valid_size` local.
    """
    train_size = int(len(ts) - prediction_length * 2)
    test_size = int(len(ts) - prediction_length)
    ts_train = ts[0:train_size].copy()
    ts_test = ts[0:test_size].copy()
    ts_valid = ts[test_size:].copy()
    return ts_train, ts_test, ts_valid
|
cdd49a9bcdf02f42c74cb80939f8b17f0ff85b24
| 80,899
|
def get_sail_angle(awa):
    """Compute the sail angle from an apparent wind angle.

    The sail is trimmed to half the wind angle, folded around 180° so
    port/starboard are symmetric.

    :param awa: apparent wind angle in degrees (int or float)
    :return: sail angle in degrees, rounded to 2 decimals
    """
    assert isinstance(awa, (float, int))
    folded = awa if awa <= 180 else 360 - awa
    return round(folded / 2, 2)
|
554dfe1f4c2be1b9781efcac13255ea04b8d02d0
| 80,915
|
def decimal_converter(amount: int = 0, decimals: int = 18) -> str:
    """Shift the decimal point of a raw token amount.

    Parameters
    ----------
    amount: int
        The amount of tokens in base units.
    decimals: int
        The decimals of the token.

    Returns
    -------
    str
        `amount / 10**decimals` rendered with thousands separators and
        18 fractional digits.
    """
    scaled = amount / 10 ** decimals
    return f"{scaled:,.18f}"
|
ca3f4eb15c9a59b41f371f4c639650e17b9934fb
| 80,918
|
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> int:
    """
    Convert molarity to normality.
    Volume is taken in litres.
    Wikipedia reference: https://en.wikipedia.org/wiki/Equivalent_concentration
    Wikipedia reference: https://en.wikipedia.org/wiki/Molar_concentration

    Normality = molarity (moles / volume) * n-factor.

    NOTE(review): the single-argument round() truncates the result to a
    whole number of equivalents and returns an int (annotation corrected
    from float accordingly) — confirm whole-number rounding is intended.
    Also raises ZeroDivisionError when volume == 0.
    """
    return round(moles / volume * nfactor)
|
977fecee48a6964b17b14448255f81b1ba177da3
| 80,919
|
def expand_2d_to_3d(file_data_2d, lens):
    """
    Restore 2D file data back to 3D by re-grouping rows per CL.

    Args:
        file_data_2d: 2D row data; sliced chunks are converted via
            .tolist(), so this is presumably a numpy array rather than a
            plain nested list (despite the original docstring) — confirm
            with callers.
        lens: number of files in each CL; consecutive row counts taken
            from file_data_2d.

    Returns:
        A 3D nested list, one inner list of rows per CL.
    """
    grouped = []
    offset = 0
    for count in lens:
        chunk = file_data_2d[offset:offset + count]
        grouped.append(chunk.tolist())
        offset += count
    return grouped
|
a03a2a1faed79b1433046e92de20bc801e2bdcba
| 80,920
|
from typing import Sequence
def decode_chunk_path(chunk_path: str) -> Sequence[int]:
    """Split a dot-separated chunk path into integer indices.

    :param chunk_path: e.g. '1.2.3'
    :return: list of ints, e.g. [1, 2, 3]
    """
    return [int(part) for part in chunk_path.split(".")]
|
4428ec3c8348d568b9bd326b5dcf7f3c25c807f9
| 80,924
|
def apply_headers(response):
    """Add headers to each response.

    Stamps the service version onto every outgoing response — presumably
    registered as a Flask `after_request` hook, given the signature;
    confirm at the registration site.

    :param response: response object with a mutable `headers` mapping
    :return: the same response object, mutated in place
    """
    response.headers["X-Thoth-Version"] = "v0.6.0-dev"
    return response
|
a7a603e470b1bff0937b2da467c5ed4a3daf2769
| 80,925
|
import textwrap
import base64
def _bytes_to_pem_str(der_bytes, pem_type):
"""
Utility function for creating PEM files
Args:
der_bytes: DER encoded bytes
pem_type: type of PEM, e.g Certificate, Private key, or RSA private key
Returns:
PEM String for a DER-encoded certificate or private key
"""
pem_str = ""
pem_str = pem_str + "-----BEGIN {}-----".format(pem_type) + "\n"
pem_str = pem_str + "\r\n".join(textwrap.wrap(base64.b64encode(der_bytes).decode('ascii'), 64)) + "\n"
pem_str = pem_str + "-----END {}-----".format(pem_type) + "\n"
return pem_str
|
75a6963f31c405b9ef38dd33be7e08e1f779ea3d
| 80,926
|
import time
def unixtime(msg):
    """Get the Unix timestamp (seconds) from a Spark message object.

    :param msg: message with a `created` attribute like
        '2020-01-01T00:00:00.000Z'
    :return: int Unix timestamp

    Bug fix: the trailing 'Z' marks the timestamp as UTC, but
    time.mktime interprets a struct_time in the *local* timezone, so the
    original result was skewed by the machine's UTC offset;
    calendar.timegm performs the correct UTC conversion.
    """
    import calendar  # local import keeps the module's import block untouched
    t = time.strptime(msg.created, '%Y-%m-%dT%H:%M:%S.%fZ')
    return calendar.timegm(t)
|
17ca1c2d8315a06837c329f6ea3f13523a090a2f
| 80,928
|
def mask_to_range(mask):
    """Convert a boolean mask into a 1-based range string.

    e.g. [True, True, True, False, True] -> '1-3,5'

    :param mask: sequence of booleans
    :return: comma-separated list of single positions and 'a-b' runs
    """
    runs = []            # list of (start, end) index pairs of True runs
    start = None
    for idx, flag in enumerate(mask):
        if flag and start is None:
            start = idx
        elif not flag and start is not None:
            runs.append((start, idx - 1))
            start = None
    if start is not None:
        runs.append((start, len(mask) - 1))
    # Render each run 1-based, collapsing single-element runs.
    return ','.join(
        f'{a + 1}-{b + 1}' if a != b else f'{a + 1}'
        for a, b in runs
    )
|
3357543bc0f7ecbd7db7f336c8c3a28afff31794
| 80,930
|
def parse_response_status(status: str) -> str:
    """Create a UI message from the response status data.

    :param status:
        Status of the operation.
    :return:
        Human-readable message for known statuses; unknown statuses are
        passed through unchanged.
    """
    messages = {
        'SUCCESS': "Face authentication successful",
        'NEW_USER': "Face signup successful",
        'USER_NOT_FOUND': "User not registered",
        'FAILED': "Face authentication failed",
    }
    return messages.get(status, status)
|
7eab8fa4b115d79c014070fd78d7d088011bf226
| 80,931
|
def supplement_flag_str(name, entry):
    """Generate the flag for a newly-assigned committee member.

    Arguments:
        name (str): Faculty name
        entry (dict): Student entry; may carry a 'supplement_committee'
            collection of names
    Returns:
        (str): '#' when the name is in the supplement committee, else ''
    """
    committee = entry.get("supplement_committee", set())
    return "#" if name in committee else ""
|
811d7106edb61717568ff96f205702e5b3a968bf
| 80,932
|
def readFile(fileName):
    """Read a file into a list of lines with trailing whitespace removed.

    :param fileName: path of the file to read
    :return: list of rstripped lines, or [] when the file can't be opened
    """
    try:
        with open(fileName) as fd:
            return [line.rstrip() for line in fd]
    except IOError:
        # Missing/unreadable file: deliberate best-effort empty result.
        return []
|
f44d8230990c9a1388161381e0cf5903dfb530c7
| 80,935
|
from typing import Dict
def json_to_selector(selectors: Dict[str, str]) -> str:
    """Convert a json dict into a selector string (k=v pairs, comma-joined)."""
    pairs = [f"{key}={value}" for key, value in selectors.items()]
    return ', '.join(pairs)
|
7e7f275e2f805cc968709a3c7825223482295d7d
| 80,936
|
def name_string_cleaner(record):
    """
    Pandas helper to clean the "name" column.

    Scans for known site markers (in priority order) and maps the record
    to a canonical label; anything unrecognised — including non-string
    records — is returned untouched.

    Args:
        record (str): value from the "name" column.
    Returns:
        str: canonical label, or the original record.
    """
    text = str(record)
    for marker, label in (("MIDAS", "midas"),
                          ("TMU", "tmu"),
                          ("TAME", "tame"),
                          ("Legacy Site", "Legacy Site")):
        if marker in text:
            return label
    return record
|
b6db22384517880e55524dfe585d90f5887e856a
| 80,942
|
import csv
def read_delim(path):
    """Load a tab-delimited file as a list of row lists.

    :param path: path of the TSV file
    :return: list of rows, each a list of field strings
    """
    with open(path) as handle:
        return list(csv.reader(handle, delimiter='\t'))
|
b487c6c070fe8b5094d0c9d14c2b3453a2e84f24
| 80,946
|
import re
def check_money_words(line):
    """
    Return True if this line contains words that reference money.

    :param line: the line to check
    :return: True when 'gem', 'orb' or 'bloodsilver' occurs anywhere in
        the line (substring match, so e.g. 'orbital' also matches)
    """
    return re.search('(gem|orb|bloodsilver)', line) is not None
|
e3d61783515cbdd28939489d77977d936a6d1520
| 80,949
|
import torch
def compute_class_weight(n_samples, n_classes, class_bincount):
    """
    Estimate class weights for unbalanced datasets.

    Each weight is n_samples / (n_classes * class_sample_count), so rare
    classes receive proportionally larger weights.

    :param n_samples: total number of samples
    :param n_classes: number of classes
    :param class_bincount: per-class sample counts (tensor/array that
        supports elementwise arithmetic)
    :return: torch.FloatTensor of per-class weights
    """
    weights = n_samples / (n_classes * class_bincount)
    return torch.FloatTensor(weights)
|
1f8f02bee178cfab957d939bce543ae015e39407
| 80,953
|
def inventory_report(products):
    """Print a summary of the list of products.

    :param products: list of objects with price, weight and flammability
        attributes (must be hashable for the unique-name count)
    :return: "No products!" for an empty list; otherwise None (the
        report is printed to stdout)
    """
    count = len(products)
    if count <= 0:
        return "No products!"
    avg_price = sum(p.price for p in products) / count
    avg_weight = sum(p.weight for p in products) / count
    avg_flam = sum(p.flammability for p in products) / count
    print("ACME CORPORATION OFFICIAL INVENTORY REPORT")
    print("Unique product names:", len(set(products)))
    print("Average price:", avg_price)
    print("Average weight:", avg_weight)
    print("Average flammability:", avg_flam)
|
35172b62de6f945de3fcd287bf268d703744a456
| 80,954
|
import csv
def write_spds_to_csv_file(spds,
                           path,
                           delimiter=',',
                           fields=None):
    """
    Writes the given spectral power distributions to given *CSV* file.
    Parameters
    ----------
    spds : dict
        Spectral power distributions to write.
    path : unicode
        Absolute *CSV* file path.
    delimiter : unicode, optional
        *CSV* file content delimiter.
    fields : array_like, optional
        *CSV* file spectral data fields names. If no value is provided the
        order of fields will be the one defined by the sorted spectral power
        distributions *dict*.
    Returns
    -------
    bool
        Definition success.
    Raises
    ------
    RuntimeError
        If the given spectral power distributions have different shapes.
    """
    # All distributions must share one shape so that every row has a value
    # for every field.
    shapes = [spd.shape for spd in spds.values()]
    if not all(shape == shapes[0] for shape in shapes):
        raise RuntimeError(('Cannot write spectral power distributions with '
                            'different shapes to "CSV" file!'))
    # Row index comes from an arbitrary distribution's wavelengths — safe
    # because the shapes were just checked to be identical.
    wavelengths = tuple(spds.values())[0].wavelengths
    with open(path, 'w') as csv_file:
        # Columns: leading 'wavelength' plus one per distribution, either
        # caller-supplied order or sorted dict keys.
        fields = list(fields) if fields is not None else sorted(spds.keys())
        writer = csv.DictWriter(csv_file,
                                delimiter=str(delimiter),
                                fieldnames=['wavelength'] + fields)
        # Python 2.7.x / 3.4.x only.
        # writer.writeheader()
        # Manual header row (name -> name) kept instead of writeheader()
        # for compatibility with the interpreters noted above.
        writer.writerow(dict((name, name) for name in writer.fieldnames))
        for wavelength in wavelengths:
            # One row per wavelength: the index value plus each spd sampled
            # at that wavelength via spd[wavelength].
            row = {'wavelength': wavelength}
            row.update(
                dict((field, spds[field][wavelength]) for field in fields))
            writer.writerow(row)
    return True
|
8ab38b2b91a2e81ca3b18eb5528e5050d795b32d
| 80,956
|
def df_fn(s, mobi_fn):
    """Derivative (element-wise) of the water fractional flow.

    Quotient rule applied to f_w = lamb_w / (lamb_w + lamb_o).

    Parameters
    ----------
    s : ndarray, shape (ny, nx) | (ny*nx,)
        Saturation
    mobi_fn : callable
        Returns (lamb_w, lamb_o, dlamb_w, dlamb_o) when called as
        mobi_fn(s, deriv=True): water/oil mobilities and their
        derivatives.
    """
    lamb_w, lamb_o, dlamb_w, dlamb_o = mobi_fn(s, deriv=True)
    total = lamb_w + lamb_o
    return dlamb_w / total - lamb_w * (dlamb_w + dlamb_o) / total ** 2
|
1658a604d2c531c371f7536002a1f8d074b2a842
| 80,957
|
def exclude_by_dict(text, known_words):
    """
    Determine whether text is not good enough and should be excluded.

    "Good enough" means every whitespace-separated word appears in the
    `known_words` collection; any unknown word triggers exclusion.
    (By De Morgan, not-all-known == any-unknown.)
    """
    return any(word not in known_words for word in text.split())
|
4129ea2430537f6b16fb4d987deff1257354fc1f
| 80,959
|
def _f3_matrix_multiplication(matrix, backend):
"""
Computes X @ X^H @ X and X^H @ X.
"""
matrix_dag_matrix = backend.matmul(
matrix.conj(), matrix, transpose_A=True, transpose_B=False)
matrix3 = backend.matmul(
matrix, matrix_dag_matrix, transpose_A=False, transpose_B=False)
return matrix3, matrix_dag_matrix
|
fccfad4e8732f22dbbbc3037268fc9e1e863f83a
| 80,960
|
def score_defaults(gold_labels):
    """Calculate the "all unrelated" baseline score and the max possible score.

    Parameters
    ----------
    gold_labels : Pandas DataFrame
        Reference GOLD stance labels, with a 'Stance' column.

    Returns
    -------
    null_score : float
        Score for labelling everything 'unrelated' (0.25 per unrelated row).
    max_score : float
        Maximum achievable score (baseline plus 1 per related row).
    """
    n_unrelated = sum(
        row.Stance == 'unrelated' for row in gold_labels.itertuples())
    null_score = 0.25 * n_unrelated
    max_score = null_score + (len(gold_labels) - n_unrelated)
    return null_score, max_score
|
876d47c60c53d6532f99632d4350fed9bbd83e92
| 80,964
|
def get_sequence_start(mmap, position):
    """
    Get start of sequence at position (after header)

    Scans forward from just past `position` for the next newline byte —
    presumably `position` points at a FASTA-style '>' header line, so the
    returned offset is the newline ending that header (confirm with
    callers); returns -1 if no newline is found.

    :param mmap: bytes-like buffer supporting .find (e.g. an mmap)
    :param position: offset of the header character to skip past
    """
    return mmap.find(b"\n", position + 1)
|
40c8e78c4aadc899fc5d46bdafb98c8aadcb2cd8
| 80,967
|
def merge_small_dims(var_shape, reshape_size):
    """Compute the shape of a variable for preconditioning.

    Adjacent dimensions are greedily multiplied together for as long as
    the running product stays within `reshape_size`; whenever the next
    dimension would overflow the budget, the accumulated product is
    emitted and the accumulation restarts from that dimension. E.g. a
    convolution (512, 5, 5, 1024) can become (512, 25, 1024).

    Args:
        var_shape: the shape of the variable
        reshape_size: maximum size of a reshaped dimension
    Returns:
        list of integers whose product equals the element count
        (size-1 products are skipped, so an all-ones shape yields []).
    """
    merged = []
    running = 1
    for dim in var_shape:
        if running * dim <= reshape_size:
            running *= dim
        else:
            # Budget exceeded: flush the accumulated product (if any)
            # and start a new group at this dimension.
            if running > 1:
                merged.append(running)
            running = dim
    if running > 1:
        merged.append(running)
    return merged
|
268e69cae2fc4b1aab78e519eb735453fd774f25
| 80,976
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.