content stringlengths 39 14.9k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def unpack_vlq(data):
    """Decode the first variable-length quantity (VLQ) in *data*.

    Each byte contributes its low 7 bits, most-significant group first;
    a set high bit marks continuation.

    Returns:
        (value, offset): the decoded integer and the number of bytes consumed.
    """
    value = 0
    offset = 0
    while True:
        byte = data[offset]
        offset += 1
        value = (value << 7) | (byte & 0x7F)
        if not (byte & 0x80):
            return value, offset
import time
import socket
def wait_for_service(host, port, timeout):
    """Waits for a service to start listening on a port.

    Polls roughly once per second by attempting a TCP connection.

    Args:
        host: Hostname where the service is hosted.
        port: Port where the service is expected to bind.
        timeout: Time in secs to wait for the service to be available.
    Returns:
        False, if the timeout expires and the service is unreachable, True otherwise.
    """
    deadline = time.time() + float(timeout)
    while True:
        try:
            s = socket.create_connection((host, int(port)), float(timeout))
            s.close()
            return True
        except socket.error:
            pass
        remaining = deadline - time.time()
        if remaining <= 0:
            # Was: slept a full second first, then checked, so the call could
            # overshoot the requested timeout by up to a second.
            return False
        time.sleep(min(1, remaining))
def policy_to_mitigation(s):
    """
    Convert a suppression-policy string to a mitigation display string.

    Suppression is defined with 1 = unmitigated, so the displayed
    mitigation is (1 - suppression).

    Parameters
    ----------
    s: str
        String to convert. Structure is e.g. suppression_policy__1.0

    Returns
    -------
    mitigation: str
        Mitigation display, e.g. '0% Mitigation'.
    """
    suppression = float(s.split("__")[1])
    mitigation_pct = 100 * (1 - suppression)
    return f'{mitigation_pct:.0f}% Mitigation'
from typing import Tuple
def rect_area(a: Tuple[int, int, int, int]) -> int:
    """Helper function to calculate a rectangle's area.

    Args:
        a: Tuple[int, int, int, int] rectangle; area is a[2] * a[3]
           (presumably (x, y, width, height) — only the last two are used)

    Returns:
        int: calculated rectangle area
    """
    width = a[2]
    height = a[3]
    return width * height
def cleanup_uri(uri):
    """Strip a single pair of enclosing angle brackets from a URI, if present."""
    has_brackets = uri.startswith('<') and uri.endswith('>')
    return uri[1:-1] if has_brackets else uri
def ndims(x):
    """
    Return the number of dimensions (rank) of a tensor-like object.

    Works with anything exposing a ``shape`` sequence.
    """
    shape = x.shape
    return len(shape)
def normal_percentile_to_label(percentile):
    """
    Assigns a descriptive term to the MMSE percentile score.

    Args:
        percentile: percentile score; fractional values are handled.

    Returns:
        str: descriptive label for the percentile band.
    """
    # Descending thresholds close the gaps the original integer ranges left:
    # e.g. 90.5 or 97.5 previously matched no branch and returned None.
    bands = (
        (98, 'Exceptionally High'),
        (91, 'Above Average'),
        (75, 'High Average'),
        (25, 'Average'),
        (9, 'Low Average'),
        (2, 'Below Average'),
    )
    for threshold, label in bands:
        if percentile >= threshold:
            return label
    return 'Exceptionally Low'
def total_seconds(delta):
    """Determine the total seconds of a timedelta, compatible with python < 2.7.

    Equivalent to timedelta.total_seconds(), which did not exist before 2.7.
    # http://stackoverflow.com/questions/3694835/python-2-6-5-divide-timedelta-with-timedelta
    """
    whole_seconds = delta.seconds + delta.days * 24 * 3600
    return (delta.microseconds + whole_seconds * 1e6) / 1e6
def determine_duration_and_scale_factor_from_parameters(chirp_mass):
    """Determine appropriate duration and roq scale factor from chirp mass

    Parameters
    ----------
    chirp_mass: float
        The chirp mass of the source (in solar masses)

    Returns
    -------
    duration: int
    roq_scale_factor: float
    """
    # (lower chirp-mass bound, duration, roq scale factor), highest mass first.
    bands = (
        (90, 4, 4),
        (35, 4, 2),
        (13.53, 4, 1),
        (8.73, 8, 1),
        (5.66, 16, 1),
        (3.68, 32, 1),
        (2.39, 64, 1),
        (1.43, 128, 1),
        (0.9, 128, 1 / 1.6),
    )
    duration, roq_scale_factor = 128, 1 / 2
    for bound, band_duration, band_scale in bands:
        if chirp_mass > bound:
            duration, roq_scale_factor = band_duration, band_scale
            break
    return duration, round(1 / roq_scale_factor, 1)
import mpmath
def sf(x, mu=0, sigma=1):
    """
    Log-normal distribution survival function.

    P(X > x) for X log-normal with parameters mu and sigma; 1 for x <= 0.
    """
    if x <= 0:
        return mpmath.mp.one
    # SF(x) = 1 - Phi((ln x - mu)/sigma) = Phi((-ln x - (-mu))/sigma)
    return mpmath.ncdf(-mpmath.log(x), -mu, sigma)
def draw(cards, deck):
    """Draw a single card from the top of the deck and add it to the hand.

    :param cards: current hand (tuple of cards)
    :param deck: deck to draw from; its last card is removed in place
    :return: cards plus the drawn card
    """
    top_card = deck.pop()
    cards += (top_card,)
    return cards
from typing import Tuple
def _split_sampling_rate_byte_11_2(sampling_rate_byte: int) -> Tuple[int, int]:
"""Separate sampling rate into its own byte."""
return sampling_rate_byte & 0x0F, sampling_rate_byte & 0xF0 | 126e64fe194802661e012c4d01f06a654ff96d0f | 109,879 |
def listToCol(lst):
    """
    Convert a flat list into a column vector (a list of one-element rows).
    """
    column = []
    for element in lst:
        column.append([element])
    return column
def dimension_to_bounds(x, y, width, height):
    """
    Convert a positioned rectangular section into bounding-box coordinates
    for the top-left and bottom-right corners.

    :param x: x position of rectangular section
    :param y: y position of rectangular section
    :param width: width of section
    :param height: height of section
    :return: a tuple (x1, y1, x2, y2) — top-left and bottom-right corners
    """
    right = x + width
    bottom = y + height
    return x, y, right, bottom
import binascii
def is_gz_file(file_path: str):
    """
    Test whether a file is gzipped by checking its two-byte magic number.
    Based on https://stackoverflow.com/questions/3703276/how-to-tell-if-a-file-is-gzip-compressed

    Returns
    -------
    True if it is gzipped, False otherwise.
    """
    with open(file_path, 'rb') as handle:
        magic = handle.read(2)
    return binascii.hexlify(magic) == b'1f8b'
def Lobify(data, group, formants):
    """
    Apply Lobanov (z-score) normalization to vowels by a grouping variable.

    Adds one column per formant to the original dataframe, named with a
    ``zsc`` prefix.
    --
    Required parameters:
    data = dataframe containing data to transform
    group = string matching the name of column to normalize by, for example "speaker"
    formants = list of strings matching the names of columns to be normalized
    """
    def _zscore(series):
        return (series - series.mean()) / series.std()

    for formant in formants:
        normalized = data.groupby([group])[formant].transform(_zscore)
        data.insert(len(data.columns), "zsc" + formant, normalized)
    return data
def dfunc(variable_x):
    """
    First derivative (gradient) of the objective function f(x) = x**2.

    args:
        variable_x: point at which to evaluate the derivative
    return:
        2 * variable_x: value of the first derivative
    """
    return variable_x * 2
import re
def remove_trailing_spaces(code):
    """
    Remove space characters from the end of every line.

    For example, remove_trailing_spaces('hello \nworld \n') -> 'hello\nworld\n'.
    Note: only spaces are stripped, not tabs.
    """
    trailing = re.compile(' +$', flags=re.MULTILINE)
    return trailing.sub('', code)
def get_cities() -> list:
    """
    Read the Craigslist cities list from ``src/craigslist_cities.txt``.

    Args:
        None
    Return: list — one stripped city name per line of the file.
    """
    with open('src/craigslist_cities.txt', 'r', encoding='utf8') as file:
        return [line.strip() for line in file]
def wpcsys(self, wn="", kcn="", **kwargs):
    """Defines the working plane location based on a coordinate system.

    APDL Command: WPCSYS

    Parameters
    ----------
    wn
        Window number whose viewing direction will be modified to be normal
        to the working plane (defaults to 1). A negative value leaves the
        viewing direction unmodified.
    kcn
        Coordinate system number. KCN may be 0, 1, 2 or any previously
        defined local coordinate system number (defaults to the active
        system).

    Notes
    -----
    Defines a working plane location and orientation based on an existing
    coordinate system. A Cartesian base system (KCN) yields a Cartesian
    working plane in the X-Y plane of the base system; a cylindrical,
    spherical, or toroidal base system yields a polar working plane in the
    R-θ plane of the base system.

    If working plane tracking has been activated (CSYS,WP or CSYS,4), the
    updated active coordinate system will be of a similar type, except that
    a toroidal system will be updated to a cylindrical system. See the
    Modeling and Meshing Guide for more information on working plane
    tracking.

    This command is valid in any processor. Some primitive generation
    commands will not honor R-theta transformations for non-cartesian
    coordinate systems; refer to the primitive commands table.
    """
    return self.run(f"WPCSYS,{wn},{kcn}", **kwargs)
import socket
import errno
def checksocketfamily(name, port=20058):
    """return true if we can listen on localhost using family=name

    name should be either 'AF_INET', or 'AF_INET6'.
    port being used is okay - EADDRINUSE is considered as successful.
    """
    family = getattr(socket, name, None)
    if family is None:
        return False
    s = None
    try:
        s = socket.socket(family, socket.SOCK_STREAM)
        s.bind(("localhost", port))
        return True
    except socket.error as exc:
        if exc.errno == errno.EADDRINUSE:
            return True
        elif exc.errno in (errno.EADDRNOTAVAIL, errno.EPROTONOSUPPORT):
            return False
        else:
            raise
    finally:
        # The original leaked the socket whenever bind() failed; always close.
        if s is not None:
            s.close()
def mean(feature_vector):
    """
    :param feature_vector: List of integer/float/double..
    :return: Arithmetic mean of the feature vector.
    """
    total = sum(feature_vector)
    return total / len(feature_vector)
def dict_removeSafe(dict, key):
    """Try to remove `key` from the mapping; return its value, or None if absent."""
    # NOTE: parameter name shadows the builtin `dict` — kept for API compatibility.
    return dict.pop(key, None)
def bytedata(a):
    """Return the raw data corresponding to a.

    Bytes-like objects are returned as-is; otherwise the object's ``data``
    attribute is used. Raises ValueError for anything else.
    """
    if isinstance(a, (bytearray, bytes, memoryview)):
        return a
    if hasattr(a, "data"):
        return a.data
    raise ValueError(a, "cannot return bytedata")
def kib_to_gib(size: float) -> float:
    """Convert disk space from KiB to GiB, rounded to 2 decimal places."""
    gib = size / (1024 * 1024)
    return round(gib, 2)
def get_attr_cols(df, non_attr_cols):
    """
    :param df: A data frame of model results
    :param non_attr_cols: Names of columns not associated with attributes
    :return: List of columns associated with sample attributes
    :raises ValueError: if no attribute columns remain
    """
    # index of the columns that are associated with attributes
    attr_cols = df.columns[~df.columns.isin(non_attr_cols)]
    if attr_cols.empty:
        # The original raised a bare ValueError; include a message for context.
        raise ValueError("No attribute columns found in the data frame")
    return attr_cols.tolist()
def quaternion_conjugate(q):
    """
    Compute the quaternion conjugate: [w, -x, -y, -z].

    :param q: Quaternion as a sequence [w, x, y, z]
    :return: conjugated quaternion as a list
    """
    return [q[0]] + [-component for component in q[1:4]]
def _cbrange_user_defined(cbrange, cbrange_user):
"""Set user-specified colorbar range.
Parameters:
cbrange (list):
Input colorbar range.
cbrange_user (list):
User-specified colorbar range. If a value is None, then use the
previous value.
Returns:
list: Colorbar range.
"""
for i in range(2):
if cbrange_user[i] is not None:
cbrange[i] = cbrange_user[i]
return cbrange | b9aeb3074c2a85f0e4c4859d1ffd799cceed5394 | 109,929 |
def _is_overlap(i, j, used_spans):
"""
Returns whether the span (i, j) overlaps with the interval(s) described by the items in `used_spans`
"""
minval = min([i] + [s[0] for s in used_spans]) # smallest start index among spans
maxval = max([j] + [s[1] for s in used_spans]) # largest end index among spans
used_words = [0]*(maxval - minval + 1)
for (span_start, span_end) in used_spans:
assert used_words[span_start-minval: span_end-minval+1] == [0]*(span_end-span_start+1) # check that no span has covered these words, which should be the case because spans aren't allowed to overlap
used_words[span_start-minval: span_end-minval+1] = [1]*(span_end-span_start+1) # (inclusive) interval has now been covered by a span
return sum(used_words[i-minval: j-minval+1]) > 0 | 883fb70917d31d0c222f630440af235dfcc12c2f | 109,934 |
import math
def distance(x1, y1, x2, y2):
    """
    Given two ordered pairs, calculate the distance between them,
    rounded to two decimal places.
    """
    dx = x2 - x1
    dy = y2 - y1
    return round(math.sqrt(math.pow(dx, 2) + math.pow(dy, 2) * 1.0), 2)
def clamp(number: int, max_value: int, min_value: int) -> int:
    """Clamp *number* into the inclusive range [min_value, max_value].

    Note the argument order: max before min (kept for API compatibility).
    """
    capped = number if number < max_value else max_value
    return capped if capped > min_value else min_value
def get_year_filename(file):
    """Get the year from the date portion of a filename.

    Expects names like ``prefix.YYYY-MM-DD.ext``; returns YYYY as an int.
    """
    datestr = file.split('.')[-2]
    parts = [int(piece) for piece in datestr.split('-')]
    return parts[0]
def format_bytes(count):
    """
    Format a byte count in human-readable binary units (KiB, MiB, ...).
    """
    units = ('B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB')
    value = count
    for unit in units:
        if abs(value) < 1024.0:
            return f"{value:3.1f} {unit}"
        value /= 1024.0
    return f"{value:.1f} YiB"
def get_accuracy(output, targets):
    """Calculate accuracy (percent) from model output and targets.

    output: scores with classes on the last dimension — argmax picks the
        prediction; detached so no gradients flow.
    targets: ground-truth class indices.
    """
    predictions = output.detach().argmax(-1)
    n_correct = (predictions == targets).sum().item()
    return n_correct / output.size(0) * 100
from socket import inet_aton
from struct import unpack
def ip2long(ip_addr):
    """Convert a dotted-quad IPv4 address string to an integer."""
    packed = inet_aton(ip_addr)
    (as_long,) = unpack('!L', packed)
    return as_long
from typing import Iterable
from typing import Dict
def get_tag(tags: Iterable[Dict[str, str]], key: str) -> str:
    """Return the 'Value' of the tag whose 'Key' equals *key*, or '' if absent."""
    if not tags:
        return ''
    matches = (tag['Value'] for tag in tags if tag['Key'] == key)
    return next(matches, '')
def dim_comparator(val1, val2, name1, name2, dim_string, tolerance=0):
    """
    Get the string representation of the comparative relation.

    Args:
        val1: dimensional value of object 1 (int).
        val2: dimensional value of object 2 (int).
        name1: name of object 1 (str).
        name2: name of object 2 (str).
        dim_string: dimension being compared (str).
        tolerance: level of tolerance for the comparator (int).

    Return:
        A string representing the correct comparative relation between the
        objects in the dimension: 's_' (same within tolerance), 'm_'
        (val1 greater) or 'l_' (val1 smaller).
    """
    diff = val1 - val2
    if abs(diff) <= tolerance:
        prefix = 's_'
    elif diff > tolerance:
        prefix = 'm_'
    else:
        prefix = 'l_'
    return prefix + dim_string + '(' + str(name1) + ',' + str(name2) + ')'
def ER(radius):
    """
    Return the equivalent radius (ER), which is the radius itself.
    """
    equivalent_radius = radius
    return equivalent_radius
def get_subcoll_obj_paths(coll):
    """Return paths to all files within a collection and its subcollections,
    recursively (subcollection contents listed before the collection's own
    data objects)."""
    paths = []
    for child in coll.subcollections:
        paths.extend(get_subcoll_obj_paths(child))
    paths.extend(obj.path for obj in coll.data_objects)
    return paths
def _is_hdf5_filepath(filepath):
"""Predicate the filepath is a h5 file."""
return (filepath.endswith('.h5') or filepath.endswith('.hdf5') or
filepath.endswith('.keras')) | 2be4518b2a83d93642373d0ec8663cba73df69cd | 109,965 |
def termlists_are_equal(terms1, terms2):
    """Check whether two given lists of terms are (syntactic-wise) equal,
    element by element."""
    if len(terms1) != len(terms2):
        return False
    return all(a.is_syntactically_equal(b) for a, b in zip(terms1, terms2))
from functools import reduce
def class_recall(queried_dataset, top_indices, query_image_class):
    """Computes the recall for the results of the given top distances query.

    Args:
        queried_dataset: the dataset that was queried. It must have the exact
            same classes as the query dataset. It is used to retrieve the
            classes (item index 1) for the given `top_indices`.
        top_indices: a list with the indices, in the `queried_dataset`, of the
            most similar images to the query image.
        query_image_class: the class index (not name) of the query image.

    Returns:
        the class recall: the fraction of the retrieved results whose class
        matches the query image's class.
    """
    # BUG FIX: the original reduce() lambda `part + ds[i][1] == cls` parsed as
    # `(part + ds[i][1]) == cls` due to operator precedence, so the accumulator
    # collapsed to a bool after the first step. Count the matches directly.
    matches = sum(1 for i in top_indices if queried_dataset[i][1] == query_image_class)
    return matches / len(top_indices)
def evaluate_var(**context):
    """Evaluate a comparison between two templated values and return one of
    two result values depending on the outcome.

    Inputs (via **context):
        templates_dict:
            var_in   -- value 1 to evaluate
            var_eval -- value 2 of the condition to evaluate
        expr     -- one of <, >, =, !=, <=, >= (the comparison to apply)
        id_true  -- return value if the comparison holds
        id_false -- return value otherwise

    Returns:
        id_true or id_false; None for an unrecognized operator.

    NOTE(review): the original docstring referred to op_kwargs keys
    ``if_true``/``if_false``, but the code reads ``id_true``/``id_false``
    directly from context — the code's keys are preserved here; confirm
    which was intended against the callers.
    """
    var_in = context['templates_dict']['var_in']
    var_eval = context['templates_dict']['var_eval']
    if_true = context['id_true']
    if_false = context['id_false']
    comparators = {
        '=': lambda a, b: a == b,
        '>=': lambda a, b: a >= b,
        '>': lambda a, b: a > b,
        '<=': lambda a, b: a <= b,
        '<': lambda a, b: a < b,
        '!=': lambda a, b: a != b,
    }
    compare = comparators.get(context['expr'])
    if compare is None:
        return None
    return if_true if compare(var_in, var_eval) else if_false
def yDP2LP(dpY, dptZero, lPix = 1.0):
    """Convert a device y-coordinate into a logical y-coordinate.

    dpY - y device coordinate
    dptZero - device coordinates of the logical (0, 0) point
    lPix - zoom value: number of logical points per device point (pixel)
    return: the y coordinate in logical units
    """
    device_delta = dptZero.y - dpY
    return device_delta * lPix
def is_box_section(title):
    """
    Box sections have specific titles: "Box N | Title goes here".
    A title belongs to a box section when it contains '|' and the part
    before the first '|' contains "Box".
    """
    head, separator, _ = title.partition('|')
    return bool(separator) and 'Box' in head
def email_to_uni(email):
    """Parse an email address to retrieve the UNI (the local part before '@')."""
    uni, _, _ = email.partition('@')
    return uni
def create_fmt(field_widths, keep_fields):
    """Build a struct format string from field widths and keep flags.

    Given two lists: 1) the field widths 2) a parallel list with a 1 or 0
    indicating whether or not to keep a field, create a fmt string where
    kept fields are 's' (bytes) and dropped fields are 'x' (pad bytes).

    Field Widths - https://docs.python.org/3.4/library/struct.html
    Format  C Type  Python Type  Standard Size
    x       pad byte    no value
    c       char        bytes of length 1   1
    s       char[]      bytes
    """
    # Mark dropped fields with a negative width so they can be told apart.
    signed_widths = [fw if keep else -fw
                     for fw, keep in zip(field_widths, keep_fields)]
    # BUG FIX: the original tested `fw == 0` instead of `fw < 0`, so dropped
    # (negative-width) fields were still emitted as 's' and got unpacked;
    # they must become pad bytes ('x') so struct.unpack skips them.
    return ''.join('{}{}'.format(abs(fw), 'x' if fw < 0 else 's')
                   for fw in signed_widths)
def weekday_name(day_of_week):
    """Return name of weekday.

    >>> weekday_name(1)
    'Sunday'
    >>> weekday_name(7)
    'Saturday'

    For days not between 1 and 7, return None

    >>> weekday_name(9)
    >>> weekday_name(0)
    """
    names = {
        1: "Sunday",
        2: "Monday",
        3: "Tuesday",
        4: "Wednesday",
        5: "Thursday",
        6: "Friday",
        7: "Saturday",
    }
    return names.get(day_of_week)
import configparser
def _read_config_file_section(section, config_file_path, items):
"""Read a configuration file section
Arguments
---------
section : str
Name of the section to read
config_file_path : str
Absolute path to the config file
items : list of str
A list of names of the config items to read
Returns
-------
dict
Keys are the item names
Raises
------
ValueError if the configuration item does not exist in the file
"""
data_paths = {}
config = configparser.ConfigParser()
config.read(config_file_path)
for item, item_type in items:
try:
if item_type == 'str':
data_paths[item] = config.get(section, item)
elif item_type == 'bool':
data_paths[item] = config.getboolean(section, item)
elif item_type == 'int':
data_paths[item] = config.getint(section, item)
except configparser.NoOptionError:
msg = "Option '{}' doesn't exist in section '{}' in '{}'"
raise ValueError(msg.format(item, section, config_file_path))
except configparser.NoSectionError:
msg = "Section '{}' doesn't exist in '{}'"
raise ValueError(msg.format(section, config_file_path))
return data_paths | ad5c89c0c84f213dfc572c6a709a9460ffa98bf8 | 109,988 |
def longest_common_prefix(s1: str, s2: str) -> str:
    """
    Find the longest common prefix (substring) of two strings.

    s1: First string to compare
    s2: Second string to compare

    Returns:
        Longest common prefix between s1 and s2

    >>> longest_common_prefix("ACTA", "GCCT")
    ''
    >>> longest_common_prefix("ACTA", "ACT")
    'ACT'
    >>> longest_common_prefix("ACT", "ACTA")
    'ACT'
    >>> longest_common_prefix("GATA", "GAAT")
    'GA'
    >>> longest_common_prefix("ATGA", "")
    ''
    >>> longest_common_prefix("", "GCCT")
    ''
    >>> longest_common_prefix("GCCT", "GCCT")
    'GCCT'
    """
    prefix_len = 0
    for left, right in zip(s1, s2):
        if left != right:
            break
        prefix_len += 1
    return s1[:prefix_len]
import torch
def no_masker(batch_size, mask_shape):
    """Return an all-ones byte mask of shape (batch_size, *mask_shape)."""
    full_shape = (batch_size,) + tuple(mask_shape)
    return torch.ones(full_shape).byte()
def wants_spousal_support(responses, derived):
    """Return whether or not the user wants an order for spousal support."""
    orders = derived['orders_wanted']
    return 'Spousal support' in orders
def zigbee2mqtt_device_name(topic, data, srv=None):
    """Return a dict with the device name: the last segment of the MQTT topic."""
    device = topic.rsplit("/", 1)[-1]
    return {"name": device}
def num_params(model, count_fixed=False, display_all_modules=False,
               print_stats=False):
    """Count the number of parameters and layers in the model.

    Parameter names containing an entry of ``_exclude_from_layer_count``
    (shortcut layers, for example) are not counted as layers.

    Args:
        model (nn.Module):
        count_fixed (bool): if True, modules with fixed params
            (.requires_grad=False) are counted toward total_num_params too
        display_all_modules (bool): if True, prints info on all of the modules
        print_stats (bool): if True, prints a summary at the end

    Returns:
        total_num_params, total_num_layers
    """
    _exclude_from_layer_count = ['shortcut']
    total_num_params = 0
    total_num_layers = 0
    for name, param in model.named_parameters():
        if not (param.requires_grad or count_fixed):
            # Fixed params contribute nothing unless count_fixed is set.
            if print_stats and display_all_modules:
                print("{}: {}".format(name, 0))
            continue
        n_elems = 1
        for dim in param.shape:
            n_elems *= dim
        # Only multi-dimensional tensors (weights) count as layers.
        if len(param.data.size()) > 1:
            excluded = any(token in name for token in _exclude_from_layer_count)
            if not excluded:
                total_num_layers += 1
        if print_stats and display_all_modules:
            print("{}: {}".format(name, n_elems))
        total_num_params += n_elems
    if print_stats:
        print("-" * 50)
        print("Total number of parameters: {:.2e}".format(total_num_params))
        print("-" * 50)
        print("Total number of layers: {}".format(total_num_layers))
    return total_num_params, total_num_layers
from typing import Any
from typing import MutableMapping
from typing import MutableSequence
def is_adjacency_list(item: Any) -> bool:
    """Returns whether 'item' is an adjacency list: a mutable mapping whose
    values are all mutable sequences."""
    if not isinstance(item, MutableMapping):
        return False
    return all(isinstance(value, MutableSequence) for value in item.values())
def realcar(np_raster, np_filtrada):
    """Helper that adds a high-pass filtered image back onto the original
    image to enhance (sharpen) it.

    Args:
        np_raster (np.ndarray): original image
        np_filtrada (np.ndarray): high-pass filtered image

    Returns:
        (np.ndarray): enhanced image
    """
    enhanced = np_raster + np_filtrada
    return enhanced
import torch
def one_hot_add(inputs, shift):
    """Shift one-hot vectors modularly in the one-hot space.

    Args:
        inputs: Tensor of shape `[..., vocab_size]`. Typically a soft/hard
            one-hot Tensor.
        shift: Tensor of shape `[..., vocab_size]`. Typically a soft/hard
            one-hot Tensor specifying how much to shift the corresponding
            one-hot vector in inputs. Soft values perform a "weighted shift":
            e.g. shift=[0.2, 0.3, 0.5] linearly combines shifting by zero,
            one and two.
        NOTE(review): for hard one-hots this computes (inputs + shift)
            % vocab_size; the original docstring said "inputs - shift" —
            confirm against callers.

    Returns:
        Tensor of same shape and dtype as inputs.
    """
    shift = shift.type(inputs.dtype)
    vocab_size = inputs.shape[-1]
    # Build a [..., vocab_size, vocab_size] matrix whose columns are cyclic
    # rolls of `shift`; vector-matrix multiplying each batch element of
    # `inputs` by it applies the corresponding shift.
    rolled = [torch.roll(shift, offset, dims=-1) for offset in range(vocab_size)]
    shift_matrix = torch.stack(rolled, dim=-2)
    shift_matrix = torch.transpose(shift_matrix, -1, -2)
    return torch.einsum('...v,...uv->...u', inputs, shift_matrix)
def get_variables_used(string, variable_dict):
    """Return the variable names (keys of variable_dict) that occur in *string*.

    Keys are tried in dict order, and each matched key's occurrences are
    removed before testing later keys, so overlapping names are reported
    only once.
    """
    used = []
    remaining = string
    for key in variable_dict:
        stripped = remaining.replace(key, "")
        if stripped != remaining:
            used.append(key)
            remaining = stripped
    return used
def if_(condition_func, validation_func):
    """Generate a validator that conditionally runs a validation function.

    Args:
        condition_func: a function that evaluates to a boolean
        validation_func: a validation function to apply

    Returns:
        A validator ``(obj, prop) -> result`` that short-circuits to
        ``(True, None)`` when the condition does not hold.
    """
    def conditional_validator(obj, prop):
        if condition_func(obj):
            return validation_func(obj, prop)
        return (True, None)
    return conditional_validator
def _is_surf(config):
"""Returns True iff we are on the surface"""
return "surface_file" in config and config["surface_file"] | 5d88b36dc14f5b293af17123f211da3082f87788 | 110,015 |
def is_int(value):
    """Check if value is an int.

    NOTE(review): bools are a subclass of int in Python, so is_int(True)
    is True here — behavior preserved from the original.
    """
    result = isinstance(value, int)
    return result
def constructor_is_method_name(constructor: str) -> bool:
    """Decide whether the given constructor string is a method name.

    "Cipher" is not a method name; "Cipher.getInstance" and "getS" are:
    the character after the first '.' (or the first character when there
    is no dot) must exist and be lowercase.
    """
    start = constructor.find(".") + 1
    if len(constructor) <= start:
        return False
    return constructor[start].islower()
def mk_residue(name, num, restype='X', chain=None, label=None, mol='',
               color='magenta', style=None):
    """Generate the PyMol code to display a residue."""
    fields = {
        'label': label or ("%s%s-%s" % (restype, num, mol)),
        'name': name,
        'chain': ('and chain %s' % chain) if chain else '',
        'bbone': "c,n,o" if restype != 'P' else "c,o",
        'num': num,
        'color': color,
        'style': style or ('sticks' if restype != 'G' else 'spheres'),
    }
    return """
    create %(label)s, %(name)s and resi %(num)s %(chain)s and not (name %(bbone)s)
    show %(style)s, %(label)s
    color %(color)s, %(label)s
    util.cnc %(label)s
    """ % fields
import random
def large_integers(n):
    """Return a sequence of N large random integers.

    Each value is a random multiple of one million in [-50e6, 50e6] plus
    random noise in [0, 10000].
    """
    result = []
    for _ in range(n):
        base = random.randint(-50, 50) * 1000000
        noise = random.randint(0, 10000)
        result.append(base + noise)
    return result
def find_project_dir(runpath):
    """
    Scan up the directory tree until the project folder is found.

    :param runpath: pathlib.Path, where summit_core is called from.
    :return: pathlib.Path, the base project directory (named "Summit" or
        "summit_master")
    """
    runpath = runpath.resolve()
    if runpath.name in ("Summit", "summit_master"):
        return runpath
    # Recurse into the parent; the resolve() at the top normalizes the '..'.
    return find_project_dir(runpath / "..")
def basis(x):
    """
    Return the fixed basis of ``x`` (delegates to the object's basis()).

    EXAMPLES::

        sage: V = VectorSpace(QQ,3)
        sage: S = V.subspace([[1,2,0],[2,2,-1]])
        sage: basis(S)
        [
        (1, 0, -1),
        (0, 1, 1/2)
        ]
    """
    fixed_basis = x.basis()
    return fixed_basis
import re
def is_function_decl(index, lines):
    """Check that lines[index] looks like a C function declaration.

    Heuristic: the line must open a parenthesis, contain no template/label
    punctuation, not be a preprocessor directive, nested call or assignment,
    and either start with a known return type or follow a line that is one.
    Returns a truthy regex match (or False/None), as the original did.
    """
    line = lines[index]
    previous_line = lines[index - 1]
    c_types = ["char", "signed char", "unsigned char", "string", "short", "short int",
               "signed short", "signed short int", "unsigned short", "unsigned short int",
               "int", "signed", "signed int", "unsigned", "unsigned int", "long", "long int",
               "signed long", "signed long int", "unsigned long", "unsigned long int", "long long",
               "long long int", "signed long long", "signed long long int", "unsigned long long",
               "unsigned long long int", "float", "double", "long double", "struct", "bool", "void"]
    return_types = "|".join(r"(?<=%s\s)" % t for t in c_types)
    previous_types = "|".join(c_types)
    return (re.search(r"\(", line) and
            re.search(r"^[^<;>]+$", line) and
            re.search(r"^[^<:>]+$", line) and
            not re.search(r"^\#", line) and
            not re.search(r"\(.*?\(", line) and
            not re.search(r"\=", line) and
            (re.search(r"(?<=%s).*?(?=\s?\()" % return_types, line) or
             re.search(r"((%s)\s*)$" % previous_types, previous_line)))
def verse(bottle):
    """Sing one verse of the bottles-of-beer song, accounting for plurality.

    Args:
        bottle: current bottle count (int >= 1).

    Returns:
        str: the four-line verse; the final line says 'No more' after the
        last bottle.
    """
    next_bottle = bottle - 1
    s1 = '' if bottle == 1 else 's'
    s2 = '' if next_bottle == 1 else 's'
    num_text = 'No more' if bottle == 1 else next_bottle
    # The commented-out first-attempt implementation was removed: dead code.
    return '\n'.join([
        f'{bottle} bottle{s1} of beer on the wall,',
        f'{bottle} bottle{s1} of beer,',
        'Take one down, pass it around,',
        f'{num_text} bottle{s2} of beer on the wall!'
    ])
def pressure_to_elevation(pressure: float, p0: float) -> float:
    """
    Calculate the elevation for a given pressure.

    Args:
        pressure: The pressure, in hPa (mbars)
        p0: P0, the pressure at sea level.
    Returns: Elevation in meters
    """
    ratio = pressure / p0
    elevation_feet = (1 - ratio ** 0.190284) * 145366.45
    return elevation_feet * 0.3048  # feet -> meters
import cmath # Can return complex numbers from square roots
def quad_roots(a=1.0, b=2.0, c=0.0):
    """Returns the roots of a quadratic equation: ax^2 + bx + c = 0.

    INPUTS
    =======
    a: float, optional, default value is 1
       Coefficient of quadratic term
    b: float, optional, default value is 2
       Coefficient of linear term
    c: float, optional, default value is 0
       Constant term

    RETURNS
    ========
    roots: 2-tuple of complex floats
       Has the form (root1, root2) unless a = 0
       in which case a ValueError exception is raised

    EXAMPLES
    =========
    >>> quad_roots(1.0, 1.0, -12.0)
    ((3+0j), (-4+0j))
    """
    if a == 0:
        raise ValueError("The quadratic coefficient is zero. This is not a quadratic equation.")
    discriminant_root = cmath.sqrt(b * b - 4.0 * a * c)
    denominator = 2.0 * a
    return ((-b + discriminant_root) / denominator,
            (-b - discriminant_root) / denominator)
def is_tf(df):
    """
    Determine elementwise whether entries are 'boolean strings':
    't', 'f', 'True', or 'False'.

    Args:
        df (pandas.DataFrame): A dataframe that contains strings.

    Returns:
        pandas.DataFrame: booleans, True where the entry is any of the
        valid 'boolean string' values.
    """
    return df.isin(['t', 'f', 'True', 'False'])
def AdaptReadableDate(date_obj):
    """Adapt a datetime.date object to its ISO-8601 notation (YYYY-MM-DD)."""
    iso_text = date_obj.isoformat()
    return iso_text
def get_contract(path: str) -> str:
    """
    Acquire contract content from storage.

    Currently only supports file storage.

    Args:
        path (str): File path
    Returns:
        str: The full text of the file at ``path``.
    """
    # File storage path: read the entire contract in one go.
    with open(path, "r") as handle:
        return handle.read()
def rexists(sftp_client, path):
    """
    Return True if the remote directory or file under ``path`` exists.

    Args:
        sftp_client: An object exposing ``stat(path)`` (e.g. a paramiko
            ``SFTPClient``) that raises ``IOError`` with ``errno.ENOENT``
            when the path is missing.
        path: Remote path; converted with ``str()`` before the stat call.

    Returns:
        bool: True if the path exists, False if it does not.

    Raises:
        IOError: Re-raised for any stat failure other than "not found"
            (e.g. permission denied), so real errors are not masked.
    """
    import errno  # local import keeps this block self-contained

    try:
        sftp_client.stat(str(path))
    except IOError as err:
        # Use the symbolic constant instead of the magic number 2.
        if err.errno == errno.ENOENT:
            return False
        raise
    return True
import re
def _parse_gcs_path(gcs_path):
"""Parse GCS path into bucket and path.
Args:
gcs_path: a GCS path, e.g. gs://bucket/foo/bar
Returns:
(bucket, path), e.g. ('bucket', 'foo/bar') for gs://bucket/foo/bar'.
Raises:
ValueError: if gcs_path is not a GCS path.
"""
m = re.match("^gs://([^/]+)(?:/(.*))?$", gcs_path)
if m is None:
raise ValueError('Not a gcs path: "{}"'.format(gcs_path))
return (m.group(1), m.group(2) or "") | 9fae5cbd3aa787350c90523416984f1da875c60d | 110,061 |
import requests
def check_pwn(email):
    """Check whether an email address appears in a Haveibeenpwned breach.

    Args:
        email (str): Email address to look up.

    Returns:
        The parsed JSON breach data, or None when the response body is
        not valid JSON (the API returns an empty body for un-pwned
        addresses), which makes "not pwned" testable by the caller.
    """
    headers = {'api-version': '2', 'User-Agent': 'Pwnbak-checker-Linux'}
    req = requests.get(
        "https://haveibeenpwned.com/api/v2/breachedaccount/%s" % email,
        headers=headers)
    try:
        return req.json()
    except ValueError:
        # req.json() raises ValueError (JSONDecodeError) on an empty or
        # non-JSON body; the previous bare ``except`` also swallowed
        # unrelated errors such as KeyboardInterrupt.
        return None
def signIn(command):
    """Check if command is to Sign In ('s' | 'signin'), case/space-insensitively."""
    # Normalize once instead of stripping/lowercasing per comparison.
    normalized = command.strip().lower()
    return normalized in ('s', 'signin')
def inverse_dict(original):
    """Return a dictionary that is the inverse of the original.

    Given the pair original[key] = value, the returned dictionary gives
    ret[value] = key. Keeping two separate dictionaries avoids key/value
    collisions: if several keys share a value, the last one wins.

    :original: Dictionary.
    :returns: Inverse dictionary of `original`.
    """
    # Later duplicates overwrite earlier ones, matching a manual loop.
    return {value: key for key, value in original.items()}
import json
def json_output(input):
    """Pretty-print *input* as sorted, indented JSON.

    Args:
        input: Any object, ideally JSON-serializable.

    Returns:
        str: The formatted JSON text, or the original object unchanged
        when it cannot be serialized.
    """
    try:
        return json.dumps(input, indent=2, sort_keys=True)
    except (TypeError, ValueError):
        # json.dumps signals unserializable objects with TypeError (plus
        # ValueError for cases like circular references); the previous
        # code caught only ValueError, so TypeError escaped to callers.
        return input
def _sparse_ftrs_indices0(ftr_name):
"""Returns the name of the 0th axis indices for `ftr_name`"""
return f"{ftr_name}_indices0" | c78c44a69439377008398ba16ca93d924ab10a76 | 110,073 |
import random
def generate_unique_randomized_list(list_size=None):
    """
    Purpose:
        Generate a randomized list of unique ints in [0, 10000] of the
        requested size.
    Args:
        list_size (Int): Size of list to generate. When None (or 0), a
            random size in [1, 50] is chosen.
    Returns:
        randomized_list (List): Unsorted, randomized list of unique ints.
    """
    if not list_size:
        list_size = random.randint(1, 50)
    # random.sample guarantees both uniqueness AND the exact requested
    # size; the previous list(set(...)) could silently return fewer
    # elements when random.randint produced duplicates, and iterating a
    # set of small ints partially sorts them, defeating randomization.
    return random.sample(range(0, 10001), list_size)
from collections import defaultdict
def convert_idsw_to_heatmap_format(filepath, dest_file):
    """Convert idsw format to heatmap formats
    idsw: <frame> <id1_gt> <id1> <bb1_left> <bb1_top> <bb1_width> <bb1_height> <id2_gt> <id2> <bb2_left> <bb2_top> <bb2_width> ...
    heatmap: <frame> <bb1_left> <bb1_top> <bb1_width> <bb1_height> <bb2_left> <bb2_top> <bb2_width> <bb2_height> ...
    Args:
        filepath ([str]): idsw path
        dest_file : save file path
    """
    # Objects grouped by ground-truth id (obj[1] is <id_gt>).
    ids_group = defaultdict(list)
    # Flat list of [frame, id_gt, id, left, top, width, height] records.
    obj_infos = []
    with open(filepath, "r") as f:
        for line in f:
            # <frame> <id1_gt> <id1> <bb1_left> <bb1_top> <bb1_width> <bb1_height> <id2_gt> <id2> <bb2_left> <bb2_top> <bb2_width>
            p = line.rstrip().split(" ")
            p = list(map(int, p))
            # get number of objects in current frame (6 fields per object
            # after the leading frame number)
            num_obj = int((len(p) - 1) / 6)
            for idx in range(num_obj):
                # <frame> <id1_gt> <id1> <bb1_left> <bb1_top> <bb1_width> <bb1_height>\n <frame> <id2_gt> <id2> <bb2_left> <bb2_top> <bb2_width>
                # Prepend the shared frame number to each object's 6 fields.
                obj_infos.append([p[0]] + p[1 + 6 * idx: 7 + 6 * idx])
    for obj in obj_infos:
        ids_group[obj[1]].append(obj)
    # print(ids_group)
    with open(dest_file, 'w') as f:
        for _, objs in ids_group.items():
            # Sort each gt-id's records chronologically by frame.
            objs = sorted(objs, key=lambda x: x[0])
            # NOTE(review): only even-sized groups are written — presumably
            # ID switches come in before/after pairs; odd groups are
            # silently skipped. Confirm this is intentional.
            if len(objs) % 2 == 0:
                # Write only the second record of each pair:
                # <frame> <left> <top> <width> <height>
                for i in range(1, len(objs), 2):
                    tmp = list(map(str, objs[i]))
                    # print(tmp)
                    line = tmp[0] + " " + tmp[3] + " " + tmp[4] + " " + tmp[5] + " " + tmp[6] + "\n"
                    f.write(line)
    # Always returns 1 (success flag).
    return 1
def add_to_inventory(inventory, added_items):
    """Combine a list of loot with an inventory.

    Mutates ``inventory`` in place, incrementing the count for each item
    in ``added_items``, and returns the same dictionary.
    """
    for loot in added_items:
        inventory[loot] = inventory.get(loot, 0) + 1
    return inventory
def gtVcf(ref, alt, gt):
    """ Get genotype in VCF style string: '0' for ref, '1' for alt, '.' otherwise."""
    return "0" if gt == ref else ("1" if gt == alt else ".")
import socket
import time
def find_devices(find_first=False, sock_timeout=3, search_timeout=3):
    """Finds spectrometers broadcasting on the local network.

    Args:
        find_first (bool): whether to return only the first spectrometer found
        sock_timeout (int): UDP socket timeout in seconds
        search_timeout (int): How long to look for systems in seconds
    Returns:
        A set of spectrometers with the following format:
            {((hostname, port), interface, serial), ...}
        Interface is typically wlan0 or eth0, depending on how your
        spectrometer is set up. Serial is a 64-bit identifier unique to
        each spectrometer. The set may be empty if nothing announced
        itself before the timeouts expired.
    """
    multicast_port = 12345
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    spectrometers = set()
    try:
        sock.bind(("", multicast_port))
        sock.settimeout(sock_timeout)
        tstart = time.time()
        while (time.time() - tstart) < search_timeout:
            try:
                data, address = sock.recvfrom(33)
            except socket.timeout:
                # Nothing announced itself within sock_timeout; the old
                # code let this exception escape and leaked the socket.
                break
            # Announcements start with the 7-byte literal b'msp1000'.
            if data.startswith(b'msp1000'):
                iface, serial = data.split(b',')[:2]
                # Strip the b'msp1000' prefix from the interface field.
                spectrometers.add((address, iface[7:], serial))
                if find_first is True:
                    break
    finally:
        # Always release the socket, even on unexpected errors.
        sock.close()
    return spectrometers
def get_geo_coordinates(tweet):
    """
    Get the user's geo coordinates, if they are included in the payload
    (otherwise return None)

    Args:
        tweet (Tweet or dict): A Tweet object or dictionary

    Returns:
        dict: dictionary with the keys "latitude" and "longitude"
        or, if unavailable, None

    Example:
        >>> from tweet_parser.getter_methods.tweet_geo import get_geo_coordinates
        >>> tweet_geo = {"geo": {"coordinates": [1,-1]}}
        >>> get_geo_coordinates(tweet_geo)
        {'latitude': 1, 'longitude': -1}

        >>> tweet_no_geo = {"geo": {}}
        >>> get_geo_coordinates(tweet_no_geo) #returns None
    """
    # Guard clauses replace the nested-if pyramid; behavior is unchanged.
    if "geo" not in tweet:
        return None
    geo = tweet["geo"]
    if geo is None or "coordinates" not in geo:
        return None
    latitude, longitude = geo["coordinates"]
    return {"latitude": latitude, "longitude": longitude}
def copy_seats(seats):
    """Return a two-level (shallow row) copy of a seats list of lists."""
    return [list(row) for row in seats]
import torch
def noncausal_denominator(qs, ks):
    """Computes FAVOR normalizer in noncausal attention.

    Args:
        qs: query_prime tensor of the shape [L,B,H,M].
        ks: key_prime tensor of the shape [L,B,H,M].

    Returns:
        FAVOR normalizer in noncausal attention, shape [L,B,H].
    """
    # Summing ks over the sequence axis is mathematically identical to
    # the previous einsum("lbhm,l->bhm", ks, ones) but avoids allocating
    # an all-ones tensor and the extra device transfer for it.
    ks_sum = ks.sum(dim=0)
    return torch.einsum("lbhm,bhm->lbh", qs, ks_sum)
import ipaddress
def cidr_stix_pattern_producer(data):
    """Convert a CIDR from TC to a STIX pattern."""
    summary = data.get('summary')
    # Decide the address family once, then emit the matching pattern.
    network = ipaddress.ip_network(summary, strict=False)
    if isinstance(network, ipaddress.IPv6Network):
        return f"[ipv6-addr:value = '{summary}']"
    return f"[ipv4-addr:value = '{summary}']"
def processLine(line):
    """ Given a string like this: '01. anime_name---------: Naruto'
    This function returns a list: ['01. anime_name', 'Naruto']
    which is later used as key-value pairs in our data dictionary.
    """
    sep_index = line.find('-:')
    if sep_index == -1:
        err_msg = 'Index Error. Can\'t find the index of "-:"'
        err_msg += 'in the following input line.\n"' + line + '"\n'
        raise ValueError(err_msg)
    # The key is everything before the dash padding; stripping trailing
    # '-' characters replaces the original backward index scan.
    key = line[:sep_index].rstrip('-')
    # The value starts after '-:' plus the single space that follows it.
    value = line[sep_index + 3:]
    return [key, value]
def subdict(d, expected_dict):
    """Return a new dict containing only the items of `d` whose keys also
    occur in `expected_dict`.
    """
    return {key: d[key] for key in d if key in expected_dict}
def gen_Datastream(temperature, position, drone_id):
    """Build a Datastream object for the given sensor readings.

    Args:
        temperature: Temperature reading to embed.
        position: Position reading to embed.
        drone_id: Identifier of the reporting drone.

    Returns:
        dict: Datastream payload tagged with an ``@type`` marker.
    """
    datastream = {"@type": "Datastream"}
    datastream["Temperature"] = temperature
    datastream["Position"] = position
    datastream["DroneID"] = drone_id
    return datastream
def round_df_scores(df_scores, score_types):
    """Round scores to the precision set in each score type.

    Parameters
    ----------
    df_scores : pd.DataFrame
        the score dataframe
    score_types : list of score types
        objects exposing a ``precision`` attribute, one per column

    Returns
    -------
    pd.DataFrame : a copy of the dataframe with rounded scores
    """
    rounded = df_scores.copy()
    # Pair each column with its score type and round column-wise; the
    # original dataframe is left untouched.
    for column, score_type in zip(rounded, score_types):
        precision = score_type.precision
        rounded[column] = [round(value, precision) for value in rounded[column]]
    return rounded
import torch
def get_gae(value, reward, args, is_normalize=False, eps=1e-8):
    """Compute the generalized advantage estimator (GAE).

    Args:
        value (list): Per-timestep value estimates across trajectories
        reward (list): Per-timestep rewards across trajectories for a
            specific agent
        args (argparse): Arguments carrying traj_batch_size, ep_horizon,
            discount, and lambda_
        is_normalize (bool): Normalize the advantage if True. Default: False
        eps (float): Epsilon for numerical stability. Default: 1e-8

    Returns:
        torch.Tensor: Estimated generalized advantage, shape
        (traj_batch_size, ep_horizon)

    References:
        https://github.com/dgriff777/rl_a3c_pytorch/blob/master/train.py
    """
    values = torch.stack(value, dim=1)
    assert values.shape == (args.traj_batch_size, args.ep_horizon), \
        "Shape must be: (batch, ep_horizon)"
    # Append a zero bootstrap column so values[:, t + 1] exists at the
    # final timestep.
    values = torch.cat((values, torch.zeros(values.shape[0], 1)), dim=1)

    rewards = torch.stack(reward, dim=1)
    assert rewards.shape == (args.traj_batch_size, args.ep_horizon), \
        "Shape must be: (batch, ep_horizon)"

    # Backward recursion: gae_t = delta_t + (discount * lambda) * gae_{t+1}
    running_gae = 0.
    per_step = []
    for t in range(args.ep_horizon - 1, -1, -1):
        td_error = (rewards[:, t] + args.discount * values[:, t + 1]) - values[:, t]
        running_gae = running_gae * args.discount * args.lambda_ + td_error
        per_step.insert(0, running_gae)
    advantage = torch.stack(per_step, dim=1)
    assert rewards.shape == advantage.shape

    if is_normalize:
        # Center, then divide by the RMS of the centered values.
        advantage = advantage - torch.mean(advantage)
        std = torch.sqrt(torch.mean(advantage ** 2))
        advantage.div_(std + eps)
    return advantage
from typing import List
import click
def output_result(proc_packages: int, errors: List[str]) -> bool:
    """Print the number of packages processed plus any errors.

    Returns True when no errors were found, False otherwise.
    """
    click.echo(f"Checked {proc_packages} package(s).")
    if not errors:
        click.echo("No errors detected.")
        return True
    click.echo(f"{len(errors)} errors found:\n" + "\n".join(errors))
    return False
def prob(factor, *entry):
    """
    Look up p(entry) in a factor.

    argument
    `factor`, a dictionary with a 'table' mapping value tuples to
    probabilities (one value per variable, in the factor's domain order),
    `entry`, the values for each variable.
    Returns p(entry)
    """
    table = factor['table']
    return table[entry]
from typing import Union
def topic_to_bytes(topic: Union[bytes, str]) -> bytes:
    """
    Return the passed topic as a :class:`bytes` object.

    ``str`` input is UTF-8 encoded; ``bytes`` input is returned as-is.
    """
    return topic.encode() if isinstance(topic, str) else topic
def _extract_first_field(data):
"""Extract first field from a list of fields."""
return list(next(iter(zip(*data)))) | 1d9ab5619caef894963cc4a00626274731f792af | 110,138 |
def get_game_recap(content_feed):
    """ Searches the content feed for the game recap.

    Args:
        content_feed (dict): NHL Content Feed
    Returns:
        recap (dict): Dictionary of the full recap event
        nhl_video_url (string): URL pointing to the NHL Video Recap
    """
    epg_entries = content_feed["media"]["epg"]
    # Grab the first EPG entry titled "Recap"; raises StopIteration if
    # none exists.
    recap = next(entry for entry in epg_entries if entry["title"] == "Recap")
    video_id = recap["items"][0]["id"]
    return recap, f"https://www.nhl.com/video/c-{video_id}?tcid=tw_video_content_id"
def add_sky(config):
    """Add sky temperature model, EoR, and foregrounds, using defaults.

    Args:
        config: Existing configuration text (YAML-style string).

    Returns:
        str: ``config`` with the default ``sky``/``eor``/``foregrounds``
        sections appended.
    """
    # NOTE(review): the indentation inside this literal is significant
    # YAML and must line up with the rest of the generated config —
    # confirm nesting before changing it.
    sky_config = """
    sky:
      Tsky_mdl: !Tsky
        datafile: HERA_Tsky_Reformatted.npz
        interp_kwargs:
          pol: xx
    eor:
      noiselike_eor: {}
    foregrounds:
      diffuse_foreground: {}
      pntsrc_foreground: {}
    """
    # [1:] drops the leading newline that follows the opening quotes.
    return config + sky_config[1:]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.