content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
|---|---|---|
def poly_lines_to_lines(polyline_list):
    """Flatten a set of polylines into simple line segments.

    Arguments:
        polyline_list {list} -- Set of polylines (each a sequence of points)

    Returns:
        list -- Segments as [start_point, end_point] pairs
    """
    segments = []
    for polyline in polyline_list:
        # each consecutive pair of points forms one segment
        for first, second in zip(polyline, polyline[1:]):
            segments.append([first, second])
    return segments
|
fbe408e0f4eb2bfff56cd2ecc128953a254a781f
| 134,508
|
import re
def wikify(value):
    """Convert value to a wikipedia-"style" URL slug.

    Strips non-word characters (keeping whitespace and hyphens), then
    collapses runs of hyphens/whitespace into single underscores. The
    case of value is preserved.

    (Previous docstring claimed spaces became hyphens; the code has
    always produced underscores.)
    """
    value = re.sub(r'[^\w\s-]', '', value).strip()
    return re.sub(r'[-\s]+', '_', value)
|
dc4504ea6eb7905b5e18a1d1f473a4f337697b26
| 707,192
|
def check_type(obj, accept_types):
    """Validate that an object's type is one of a set of accepted types.

    Args:
        obj (type ambiguous): The object whose type will be assessed.
        accept_types (list): Types the object is allowed to have.

    Raises:
        TypeError: If type(obj) is not among accept_types.

    Returns:
        obj_type (type): The type of the object.
    """
    actual = type(obj)
    if actual in accept_types:
        return actual
    raise TypeError('Invalid object type. Received '
                    'type {0} but expected one of the following '
                    'types: {1}'.format(actual, accept_types))
|
b8cf91e2ca22ef1d9530e362a2f1e79db2b849d7
| 235,561
|
def hex_to_rgb(value):
    """Convert a hex-formatted color to rgb, ignoring alpha values."""
    digits = value.lstrip("#")
    # parse the three 2-digit channel fields
    return [int(digits[pos:pos + 2], 16) for pos in (0, 2, 4)]
|
c33dd902baa84c822b085ce35bc7b7284c03c460
| 225,734
|
def unique(S, start, stop):
    """Return True if there are no duplicate elements in slice S[start: stop]"""
    # NOTE(review): despite the slice notation in the docstring, S[stop] is
    # read below, so the checked range is effectively inclusive of `stop`.
    # The triple recursion makes this O(2^n) — worksheet/teaching code; the
    # print() calls are intentional trace output.
    print("start = {}, stop = {}".format(start, stop))
    if stop - start <= 1:  # at most 1 item
        return True
    elif not unique(S, start, stop-1):  # first part has duplicate
        print("checking uniqueness in (S, {}, {})".format(start, stop-1))
        return False
    elif not unique(S, start+1, stop):  # second part has duplicate
        print("checking uniqueness in (S, {}, {})".format(start+1, stop))
        return False
    else:
        print("Check uniqueness of 1st and last element for start = {} and stop = {}"
              .format(start, stop))  # do first and last differ
        return S[start] != S[stop]  # do first and last differ
|
daf738f83cb7ccc1b33978a25e022638bbfc63fe
| 43,434
|
from typing import Union
def duplicate(nested_list: Union[list, int]) -> list:
    """Return a new nested list with all numbers in <nested_list> duplicated.

    Each integer in <nested_list> appears twice *consecutively* in the
    output nested list; the nesting structure is preserved. If
    <nested_list> is an int, return a list with two copies of it.

    >>> duplicate(1)
    [1, 1]
    >>> duplicate([])
    []
    >>> duplicate([1, 2])
    [1, 1, 2, 2]
    >>> duplicate([1, [2, 3]])  # NOT [1, 1, [2, 2, 3, 3], [2, 2, 3, 3]]
    [1, 1, [2, 2, 3, 3]]
    """
    # base case: a bare int becomes a two-element list
    if isinstance(nested_list, int):
        return [nested_list, nested_list]
    result = []
    for element in nested_list:
        if isinstance(element, int):
            # ints are doubled in place
            result += [element, element]
        else:
            # sublists keep their own bracket level
            result.append(duplicate(element))
    return result
|
8000a255bd57cc2a8eda2650f62b2a0b924882f5
| 405,466
|
def key_prefix_replace(d, prefix, new_prefix=""):
    """
    replaces the list of prefix in keys of a flattened dict

    :param d: the flattened dict
    :param prefix: a list of prefixes that are replaced with a new prefix. Typically this will be ""
    :type prefix: list of str
    :param new_prefix: The new prefix. By default it is set to ""
    :return: the dict with the keys replaced as specified
    """
    items = []
    for k, v in d.items():
        new_key = k
        for p in prefix:
            # NOTE(review): str.replace matches the first occurrence anywhere
            # in the key, not only at its start — a "prefix" occurring
            # mid-key is also replaced. Confirm this is intended.
            new_key = new_key.replace(p, new_prefix, 1)
        items.append((new_key, v))
    return dict(items)
|
f2d3e3eb5172d9933ebebbb57402003e43177f12
| 562,882
|
def find_all_indexes(text, pattern):
    """Return a list of starting indexes of all occurrences of pattern in
    text, or an empty list if not found.

    Naive character-by-character scan: worst case O(len(text) *
    len(pattern)). An empty pattern matches at every index of text.
    Overlapping occurrences are all reported.
    """
    if pattern == '':
        return list(range(0, len(text)))
    matches = []
    start = 0                  # candidate match start in text
    offset = 0                 # position within pattern being compared
    last_start = len(text) - len(pattern)
    while start <= last_start:
        if text[start + offset] != pattern[offset]:
            # mismatch: restart comparison at the next text index
            offset = 0
            start += 1
        elif offset == len(pattern) - 1:
            # full match ends here; record it and move on
            matches.append(start)
            offset = 0
            start += 1
        else:
            offset += 1
    return matches
|
a10590d54a23854ae5126c56e88debd0fb0dae93
| 62,695
|
def get_ccd_geometry(header):
    """Get basic geometry of CCD.

    Args:
        header (:class:`astropy.io.fits.header`): FITS header.

    Returns:
        tuple: A tuple of **(x1, x2, xbin, ybin)**, where
            **(0, x1)** is the columns of the prescan region,
            **(x1, x2)** is the columns of the science region,
            **(x2, )** is the columns of the overscan region,
            and **(xbin, ybin)** are the CCD binning along X and Y axes.

    Raises:
        ValueError: If the header values are internally inconsistent
            (chip/output sizes disagree, unexpected pre/overscan layout,
            or the unbinned science area is not 2048 x 4096 pixels).
    """
    # CDELT1/CDELT2 carry the binning factors along X and Y here.
    cdelt1 = int(round(header['CDELT1']))
    cdelt2 = int(round(header['CDELT2']))
    # check nx: chip and output widths must agree
    if header['ESO DET CHIP1 NX'] == header['ESO DET OUT1 NX']:
        nx = header['ESO DET OUT1 NX']
    else:
        raise ValueError
    # check ny: chip and output heights must agree
    if header['ESO DET CHIP1 NY'] == header['ESO DET OUT1 NY']:
        ny = header['ESO DET OUT1 NY']
    else:
        raise ValueError
    # pre/overscan extents along X and Y
    prex = header['HIERARCH ESO DET OUT1 PRSCX']
    prey = header['HIERARCH ESO DET OUT1 PRSCY']
    ovrx = header['HIERARCH ESO DET OUT1 OVSCX']
    ovry = header['HIERARCH ESO DET OUT1 OVSCY']
    # only horizontal (X) pre/overscan regions are supported
    if prey != 0 or ovry != 0:
        raise ValueError
    # prescan + science + overscan columns must span the full frame
    if prex + nx + ovrx != header['NAXIS1']:
        raise ValueError
    if prey + ny + ovry != header['NAXIS2']:
        raise ValueError
    # unbinned science area must be 2048 x 4096 (detector-specific check)
    if cdelt1*nx != 2048:
        raise ValueError
    if cdelt2*ny != 4096:
        raise ValueError
    x1 = prex        # science region starts after the prescan columns
    x2 = prex + nx   # and ends where the overscan begins
    binx = cdelt1
    biny = cdelt2
    return x1, x2, binx, biny
|
952ac4fbaf9d9a9e5b60175a210a1caa4dbd81e2
| 385,104
|
import shutil
from pathlib import Path
def locate_program(name):
    """Locate a program's path by name.

    Args:
        name (str): Name of executable

    Returns:
        pathlib.Path: path-like object pointing at the program
        NoneType: if no suitable path is found
    """
    found = shutil.which(name)
    return Path(found) if found else None
|
68823f2eb18a075db22507aae1c0f88ee3b160ef
| 216,666
|
def get_key(dict, value):
    """
    Get the first key mapped to the given value in the given dictionary
    (None when the value is absent).

    :param dict:dict
    :param value:str
    :return:str
    """
    # NB: the parameter shadows the builtin `dict`; kept for interface
    # compatibility with existing callers.
    for key, mapped in dict.items():
        if mapped == value:
            return key
|
632ce093957a4f6de6807e721bbe2676610a97c9
| 69,255
|
import math
def tile_resolution(zoom, latitude):
    """Compute the ground resolution of a slippy-map tile at a position.

    Notes:
        https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames

    Args:
        zoom (int): zoom level
        latitude (float): latitude in degrees

    Returns:
        (float): [m.pix-1] number of meters per pixel in the tile
    """
    # meters per pixel at zoom 0 on the equator (per the OSM wiki formula)
    equator_resolution = 156543.034
    return equator_resolution * math.cos(math.radians(latitude)) / (2 ** zoom)
|
e5ba787b7f5db86740b63192f24c3799b18d5987
| 386,168
|
def count_steps(splitting="OVRVO"):
    """Computes the number of O, R, V steps in the splitting string"""
    # one count per letter, in (O, R, V) order
    return tuple(sum(1 for step in splitting if step == letter)
                 for letter in "ORV")
|
ed70482017c2f123d7f9992f9e681b6f3f651910
| 199,479
|
def get_region(chrom, start, end, region_trees):
    """Check which regions a position overlaps.

    Arguments:
        chrom (str): The chromosome
        start (int): The start position for the feature
        end (int): The stop position for the feature
        region_trees (dict): chromosomes as keys, interval trees as values

    Returns:
        regions (set): The regions that the variant overlaps
    """
    if chrom not in region_trees:
        return set()
    tree = region_trees[chrom]
    # interval-tree slice lookup yields the overlapping intervals
    return {interval.data for interval in tree[start:end]}
|
1ef390f0df7734efbdd379c48bd09f1c96645d6e
| 244,019
|
import textwrap
def shorten(text, width=70, placeholder='...'):
"""Shortens text to a max length.
Shortens text to a max length using some optional placeholder
with textwrap (if Python > 3.3) or a custom method.
:param text: The text to shorten
:type text: str
:param width: The max width, defaults to 70
:type width: number, optional
:param placeholder: The placeholder to truncate with, defaults to '...'
:type placeholder: str, optional
:returns: The shortened text with placeholder
:rtype: {str}
"""
try:
textwrap.indent
except AttributeError: # function wasn't added until Python 3.3
return (text
if len(text) <= width
else text[:width - len(placeholder)] + placeholder)
else:
return textwrap.shorten(text, width=width, placeholder=placeholder)
|
359ce44ece03e1eca9ac46ef71f77d6cdc241247
| 101,377
|
def get_value(kwargs, key_str, default_val=None):
    """
    Returns kwargs[key_str] if present. Otherwise returns default_val if
    one was given. Else aborts.

    Parameters
    ----------
    kwargs : dict[str, float]
    key_str : str
    default_val : float

    Returns
    -------
    float
    """
    if key_str in kwargs:
        return kwargs[key_str]
    # Compare against None explicitly so falsy defaults (0, 0.0) are
    # honored; the previous truthiness test wrongly treated them as
    # "no default given" and aborted.
    if default_val is not None:
        return default_val
    assert False, "must pass-in keyword " + key_str +\
        ' in ' + str(kwargs)
|
731addd0b4fe2d2dfb3d06d4d251a39c22249f1f
| 360,327
|
def pop_target(unpopped_data):
    """
    Remove the target column from a data frame.

    Args:
        unpopped_data: Data frame with target column.

    Returns:
        data: Data frame without target column (the input is left intact).
        target: Target column.
    """
    frame = unpopped_data.copy()
    target = frame.pop('CONTROL_STATUS')
    return frame, target
|
d2f6dee0480a473c749c53b7759c909e20bbcc29
| 89,840
|
def build_get_meta_side_effects(metas):
    """Build a side-effect callable for mocking ``frappe.get_meta`` in unit
    tests.

    The returned function resolves a doctype among the user-supplied metas,
    replacing database-backed doctype metas to keep unit tests isolated.
    Pass it to a unittest.mock.MagicMock instance via ``side_effect``
    (patch/mock), e.g.::

        get_meta.side_effect = build_get_meta_side_effects(
            mock_meta("Quotation", fields=[...])
        )

    Params:
        metas: list -> Meta instances created with mock_meta()
    """
    def side_effect(doctype, cached=False):
        # return the first meta whose name matches the requested doctype
        matching = [meta for meta in metas if meta.name == doctype]
        if matching:
            return matching[0]
        raise Exception("Unexpected get_meta doctype: {}".format(doctype))
    return side_effect
|
5912034ce93c7bc6047735109f8bcce24329b029
| 252,279
|
import math
def to_pi_mpi_range(angle):
    """
    Put an angle (radians) into the [-pi, pi] range.

    Fix: wrapping must shift by 2*pi, not pi — the previous version mapped
    e.g. 1.5*pi to 0.5*pi instead of the equivalent angle -0.5*pi.
    Like the original, this handles a single wrap (inputs within
    (-3*pi, 3*pi)); angles already in range are returned unchanged.
    """
    if angle > math.pi:
        return angle - 2.0 * math.pi
    elif angle < -math.pi:
        return angle + 2.0 * math.pi
    return angle
|
9e456b7ca414d60f4738363cbab8af3d1da21a21
| 184,244
|
import re
def has_2x_in_row(chars):
    """
    Return True when at least one word character appears twice in a row,
    like xx, abcdde (dd), or aabbccdd (aa, bb, cc, or dd).
    """
    # backreference \1 matches an immediate repeat of the captured char
    return re.search(r'(\w)\1+', chars) is not None
|
6530e4379e5fb4284f7f0e6f3fc6ecce99daf2fd
| 292,258
|
def autolabel(dropdown):
    """Automatically set Dropdown label on_click"""
    def callback(event):
        # mirror the label of the matching menu entry onto the dropdown
        # (last match wins, as with the original linear scan)
        labels = [label for label, value in dropdown.menu
                  if event.item == value]
        if labels:
            dropdown.label = labels[-1]
    dropdown.on_click(callback)
    return callback
|
69c228ccefe15474d30b8d42314f2d76f5964314
| 623,340
|
def Convert(string):
    """converts string to list (splitting on single spaces)"""
    # str.split already returns a list; splitting on ' ' keeps empty
    # fields for consecutive spaces, matching the historical behavior
    return string.split(" ")
|
a446d46be5d7c2df7139460a461e0825784f5e89
| 16,102
|
import torch
def rot_z(gamma):
    """
    Build the 3x3 rotation matrix around the Z axis for angle gamma.

    Accepts a tensor or a plain number (converted to a tensor of the
    default dtype).
    """
    if not torch.is_tensor(gamma):
        gamma = torch.tensor(gamma, dtype=torch.get_default_dtype())
    c = gamma.cos()
    s = gamma.sin()
    return gamma.new_tensor([
        [c, -s, 0],
        [s, c, 0],
        [0, 0, 1]
    ])
|
ce2a05f27484ff7fe6cb7d0cfbc777f4eaf2b442
| 663,222
|
def count_consonants(string):
    """Return the count of all consonants in the string "string".

    Falsy input (empty string, None) yields 0.
    """
    # NB: 'y' is absent from this set — presumably deliberate; confirm.
    consonants = "bcdfghjklmnpqrstvwxz"
    if not string:
        return 0
    return sum(1 for ch in string.lower() if ch in consonants)
|
4a852b3ec9f8f660d71dde547cbffb0c25b1e209
| 10,548
|
def remove_trailing_whitespace(line):
    """Remove trailing whitespace, preserving any trailing newlines."""
    # peel off the run of trailing '\n' characters, strip whatever
    # whitespace precedes them, then restore the newlines
    without_newlines = line.rstrip("\n")
    newline_count = len(line) - len(without_newlines)
    return without_newlines.rstrip() + "\n" * newline_count
|
2c5f7b3a35152b89cda6645911569c9d2815f47e
| 703,559
|
def set_view_timeout(config):
    """Return the timeout (from config["settings"]) for generating static
    images, defaulting to 60."""
    settings = config["settings"]
    return settings.get("view_timeout", 60)
|
327824f8c3a7120326bb659b8d8d203420a4ae6f
| 313,617
|
def handle_tensorboard_timeout(e):
    """Handle exception: TensorBoard does not respond (HTTP 503 body)."""
    body = "Tensorboard does not respond. Sorry."
    return body, 503
|
e7a6077f1628bb3cf71e6609e93a73a5a6282ca2
| 279,375
|
def dimensions(rotor_lst):
    """Return a tuple with the length of each rotor."""
    return tuple(map(len, rotor_lst))
|
8233d342c18165c6d30f3e732714b75d7f262ad8
| 424,668
|
def _process_field_value(field):
    """Return a CSV-friendly representation of a field's value.

    Empty strings become "[Empty]"; boolean fields become the literal
    strings "True"/"False"; everything else is passed through.
    """
    value = field.value
    if value == "":
        return "[Empty]"
    if field.type == "boolean":
        return "True" if value else "False"
    return value
|
21179ede601c15b80b04ef4373a66e1893d12058
| 384,057
|
from functools import reduce
import operator
def calculate_nmea_checksum(nmea_line):
    """
    Given the complete nmea line (including starting '$' and ending
    checksum '*##'), calculate the checksum from the body of the line.

    The checksum is the xor of every character between the leading '$'
    and the trailing '*##'. NOTE: this does not check structural
    correctness — verify '$' and '*##' are present (and that the stated
    checksum matches) before calling.
    """
    checksum = 0
    # skip the '$' prefix and the 3-character '*##' suffix
    for character in nmea_line[1:-3]:
        checksum ^= ord(character)
    return checksum
|
919f03a254cd630285600028735b1412bb3bc106
| 280,650
|
import re
def has_problem_form(word):
    """
    has_problem_form()

    Purpose: Check whether a word has "problem form" (ends in -ic or -is).

    @param word. A string
    @return the match object if it has problem form, otherwise None.

    >>> has_problem_form('prognosis') is not None
    True
    >>> has_problem_form('diagnosis') is not None
    True
    >>> has_problem_form('diagnostic') is not None
    True
    >>> has_problem_form('arachnophobic') is not None
    True
    >>> has_problem_form('test') is not None
    False
    >>> has_problem_form('ice') is not None
    False
    """
    return re.search(r".*(ic|is)$", word)
|
ac5a0da900feb75272a8f998d1a819a005c15fdf
| 394,551
|
def __require_tab_separator(section):
    """
    Given a section name, return True iff only tab separators should be
    permitted in that section of the project config.

    This exception was initially introduced to allow slightly different
    syntax for the [labels] section than others.
    """
    return section == "labels"
|
dff3ad1f6df2af223033067d5868f7c2578f229a
| 495,086
|
def _store_chunks_in_order(seen: str, unseen: str, seen_first: bool) -> list:
    """
    Return the 'seen' and 'unseen' strings as a two-element list, with the
    'seen' string first when seen_first is True.

    @param seen: The 'seen' string
    @param unseen: The 'unseen' string
    @return: [seen, unseen] if seen_first else [unseen, seen].
    """
    ordered = [seen, unseen]
    return ordered if seen_first else ordered[::-1]
|
7fbdfe90f8bd5a2a14853483c541c6e1926244b3
| 479,882
|
import calendar
def dt2ts(dt):
    """Convert a datetime object to a UTC timestamp.

    A naive datetime is treated as UTC.
    """
    utc_struct = dt.utctimetuple()
    return calendar.timegm(utc_struct)
|
35e17cfc419424eea503b6594113347ec7a67edc
| 554,532
|
import re
def file_is_of_type(file_name, extension):
    """
    Return whether a file is of a certain type.

    Args:
        file_name    the name of the file to test.
        extension    the part of the name after the . which will be checked
                     with a regular expression.

    Returns:
        True if file_name ends with extension.
    """
    # Anchor at end-of-string: previously '.js' also matched 'foo.json'
    # because re.search matched anywhere in the name, contradicting the
    # documented "ends with" contract.
    type_re = re.compile(r'\.%s$' % extension)
    return type_re.search(file_name) is not None
|
28a1c10624c39b53e317dc04c09188bae8055d24
| 189,562
|
def jaccard_similarity(b1, b2):
    """Jaccard similarity between two sets b1 and b2.

    :param b1: set of index if there is a rate for business b1
    :param b2: set of index if there is a rate for business b2
    :return: |intersection| / |union| of the two sets
    """
    common = b1.intersection(b2)
    combined = b1.union(b2)
    return len(common) / len(combined)
|
a1da6b361573e6b3ab34322a6782eeec3d34f482
| 67,433
|
from typing import Tuple
import re
def get_line_property_key_value(text: str, reversed_key: bool) -> Tuple[str, str]:
    """Returns the unformatted key and formatted value for a given line.

    Examples:
        reversed_key = False
            key= value 123 -> key, value
        reversed_key = True
            value 123= key asdf -> key, value

    Args:
        text (str): Text from where to extract key and value.
        reversed_key (bool): Whether the key comes before or after the value.

    Returns:
        Tuple[str, str]: Unformatted key and formatted value. Lines that
        split into a single field yield ("", "").
    """
    # Pick which split field holds the key vs. the value.
    key_idx, value_idx = (-1, 0) if reversed_key else (0, -1)
    # Fields are separated at ':' or '=' characters.
    text_parts = re.split(":|=", text.strip())
    key_part = ""
    if len(text_parts) > 1:
        # Only consider a value if there is more than one field.
        key_part = text_parts[key_idx].strip()
        value_part = text_parts[value_idx].strip()
        if value_part:
            # If there was a value in the first field, clean it:
            # keep only the first space-separated token.
            return key_part, re.split(" ", value_part)[0].strip()
    return key_part, ""
|
3a679a689cf078016a1dc302b1f59761e5013195
| 501,564
|
import hashlib
def calculate_digest(body):
    """Calculate the sha256 hex digest for some body (bytes)."""
    return hashlib.sha256(body).hexdigest()
|
11d6c4e9d331bc7ffb97118e707143bdee59bb04
| 672,589
|
def has_params(data, *args):
    """
    Validate that required parameters are present and non-blank in an object.

    :param data: mapping to validate (falsy data fails validation)
    :param args: required parameter names
    :return: True when every parameter exists with a truthy, non-blank value
    """
    if not data:
        return False
    for name in args:
        if name not in data:
            return False
        value = data[name]
        if not value:
            return False
        # Guard the whitespace check: truthy non-string values (ints,
        # lists, ...) have no isspace() and previously raised
        # AttributeError instead of validating.
        if isinstance(value, str) and value.isspace():
            return False
    return True
|
aeff14dcec72dec81945dcff4798ff981ff45ccf
| 584,110
|
def create_labels(dcr_root):
    """
    Collect the label ids of all label mappings in a DCR graph.

    :param dcr_root: The Etree root element of a dcr graph
    :return: list of the 'labelId' attribute of every <labelMapping>
        element (NOTE(review): despite the original wording, this is a
        flat list of ids, not a name -> label dict)
    """
    mappings = []
    for mapping in dcr_root.iter('labelMapping'):
        mappings.append(mapping.get('labelId'))
    return mappings
|
e01eab4bd17e66a04d2db5cb387220fcdd15dea1
| 308,892
|
def calc_angle(main_vector, second_vector):
    """
    Calculate the signed angle between two vectors.

    :param main_vector: DB.XYZ
    :param second_vector: DB.XYZ
    :return: Angle with sign (negative when the cross product's Z is below 0)
    :rtype: float
    """
    magnitude = main_vector.AngleTo(second_vector)
    if main_vector.CrossProduct(second_vector).Z >= 0:
        return magnitude
    return -magnitude
|
f78cc96239e2c84bf75f011a8cac483f9a19896e
| 536,931
|
def IntegrateLambdaOrRho(terms):
    """
    IntegrateLambdaOrRho(terms):

    terms: A hash table of lambda or rho coefficients keyed by index; e.g.
        if lambda(x) = .4 x^2 + .6 x^3 the input is {2: .4, 3: .6}.
    Returns the sum of coefficient/index over all entries (the "integral
    of the argument from 0 to 1" in the LDPC degree-distribution sense,
    where each key i divides its coefficient directly).
    Coefficients are asserted to sum to 1 (within 1e-4).
    """
    coefficient_total = 0.0
    integral = 0.0
    for index, coefficient in terms.items():
        coefficient_total += coefficient
        integral += coefficient / float(index)
    assert abs(coefficient_total - 1.0) < .0001
    return integral
|
59641f8ffaf7ac513f7259e44a4fd69adbb09992
| 471,117
|
def get_issue_assigned_names(repo_issues) -> list:
    """Collect assignee usernames from the issues of the
    no-verified-domain-email-repo.

    Args:
        repo_issues([type]): The list of repo issues in the repo

    Returns:
        list: the login of the *first* assignee of every OPEN issue that
        has at least one assignee
    """
    names = []
    for issue in repo_issues:
        edges = issue["assignees"]["edges"]
        if edges and issue["state"] == "OPEN":
            names.append(edges[0]["node"]["login"])
    return names
|
11ffc6b81f054f0f2e0ef38541451a8ea7a5edcc
| 356,446
|
import re
def picard_friendly(umi):
    """
    Convert a UMI string into a format that Picard Tools can understand,
    replacing every non-base character with '-'.

    >>> picard_friendly("ACGT+ACGT")
    'ACGT-ACGT'
    >>> picard_friendly("ACGT_ACGT")
    'ACGT-ACGT'
    >>> picard_friendly("TTTAA")
    'TTTAA'
    """
    non_base = "[^actgnACTGN]"
    return re.sub(non_base, "-", umi)
|
3a6f16cd88758d66d4f0e8bbed007627cc6da86f
| 187,434
|
def is_js_file(filename):
    """
    Return true if the filename ends in .js and is not a packed or
    minified file (no '.pack' or '.min' in the filename)

    >>> is_js_file('jquery.min.js')
    False
    >>> is_js_file('foo.json')
    False
    >>> is_js_file('ui.combobox.js')
    True
    """
    if not filename.endswith('.js'):
        return False
    return '.pack' not in filename and '.min' not in filename
|
1d2395cd46ffc3f31c1e051b8fde3fa0a0faa146
| 494,585
|
from pathlib import Path
def is_file(file_obj):
    """Check that a path exists on the system and is a regular file.

    Examples
    --------
    ::

        >>> from pyutil.sys_checks import is_file
        >>> if is_file('path/to/file'):
        >>>     pass
    """
    return Path(file_obj).is_file()
|
a2acf2c6d53c6de91a033bcb16629219e438e5db
| 617,910
|
def pascal_to_torr(pascal):
    """Convert Pascal to Torr (760 Torr = 101325 Pa)."""
    atmospheres = pascal / 101325.0
    return atmospheres * 760.0
|
4aff589dea5d0b24030bd3bbc227ef3ecb1ace19
| 331,416
|
def mutate_DataFrame(df, **kwargs):
    """Verb: add columns to a DataFrame defined by kwargs.

    Parameters
    ----------
    kwargs : scalar, pd.Series, callable, dict
        * scalar, pd.Series -> assign column
        * callable - call callable(df) and assign result
        * dict (None: column) - result of itergroups on a non-grouped DF,
          kept for parity with mutate_DataFrameGroupBy

    Examples
    --------
    add a rank for one column::

        dp(mtcars).mutate(hp_rank = X.hp.rank)

    one rank per group using a callback::

        dp(mtcars).group_by('cyl').mutate(rank = lambda X: X['hp'].rank()).pd
    """
    to_assign = {}
    for column_name, value in kwargs.items():
        if isinstance(value, dict):
            # itergroups-style wrapper: must be exactly {None: column}
            if len(value) != 1:
                raise KeyError("Expected dict with single key: None")
            value = value[None]
        to_assign[column_name] = value
    return df.assign(**to_assign)
|
e62d1314ba9268e6575a621543bf724bc0670432
| 407,297
|
import base64
def b2s(bs: bytes) -> str:
    """Bytes to b64 string."""
    encoded = base64.b64encode(bs)
    return encoded.decode('utf-8')
|
98d73442c390e8aacbaba8229009730e49d0a88f
| 87,291
|
def user_upload_directory(instance, filename):
    """Return the upload directory for a given File (should have an owner)."""
    owner_id = instance.owner.id
    return 'user_{0}/{1}'.format(owner_id, filename)
|
3357bd3fa6e55af0519c77759c9c661b72a08247
| 53,320
|
from typing import Dict
from typing import List
import pkg_resources
import json
def load_weat() -> Dict[str, List[str]]:
    """Load the word sets used in the paper *Semantics Derived Automatically*
    *From Language Corpora Contain Human-Like Biases*.

    Includes gender (male, female), ethnicity (black, white) and
    pleasant/unpleasant word sets, among others.

    References
    ----------
    Semantics derived automatically from language corpora contain
    human-like biases.
    Caliskan, A., Bryson, J. J., & Narayanan, A. (2017).
    Science, 356(6334), 183-186.

    Returns
    -------
    word_sets_dict : dict
        A dictionary with the word sets.
    """
    # the JSON ships inside this package under data/WEAT.json
    resource_path = "/".join(("data", "WEAT.json"))
    raw = pkg_resources.resource_string(__name__, resource_path)
    return json.loads(raw.decode())
|
ce48f86c1d7b30f84cc3758742c59a186fe68a15
| 521,077
|
from typing import List
def get_nodes_for_homek8s_group(inventory, group_name) -> List[str]:
"""Return the nodes' names of the given group from the inventory as a list."""
hosts_dict = inventory['all']['children']['homek8s']['children'][group_name]['hosts']
if hosts_dict:
return list(hosts_dict.keys())
else:
return []
|
806394259816ec4311e69dcd46e7b111c7ca0652
| 4,475
|
def key_generator(value):
    """Simple key generator that maps all values to lower case."""
    lowered = value.lower()
    return lowered
|
daabfd06cb709e6020c68ca6baa05add4375d58d
| 375,743
|
import string
def remove_stop_words(words):
    """
    Remove stop words and single-letter words from WORDS, in place.

    Stop words are read from '../stop_words.txt' (comma separated); every
    single lowercase letter is also treated as a stop word. Returns the
    same (mutated) list.
    """
    with open('../stop_words.txt') as stop_words_file:
        stop_words = stop_words_file.read().split(',')
    # single-letter words are also stop_words
    stop_words.extend(string.ascii_lowercase)
    # pop from the end so earlier indexes remain valid
    for position in reversed(range(len(words))):
        if words[position] in stop_words:
            words.pop(position)
    return words
|
254419717e352ab497cd037be50d79814b613acf
| 331,324
|
def _prefix_with_swift_module(path, resource_info):
    """Prepends a path with the resource info's Swift module, if set.

    Args:
        path: The path to prepend.
        resource_info: The resource info struct.

    Returns: "<module>-<path>" when resource_info.swift_module is set,
        otherwise the path unchanged.
    """
    module = resource_info.swift_module
    return module + "-" + path if module else path
|
f2a12f59a3c30c09fa20d65b806779ad47f49b90
| 705,471
|
def get_raw_path(request) -> str:
    """
    Return the raw path carried by the request.

    Werkzeug/Flask requests expose it as request.environ['RAW_URI'];
    Quart requests as request.scope['raw_path']. Either falls back to
    request.path when the raw value is absent.

    :param request: the request object
    :return: the raw path if any
    """
    # probe werkzeug/flask first, then quart
    for attr, key in (("environ", "RAW_URI"), ("scope", "raw_path")):
        if hasattr(request, attr):
            return getattr(request, attr).get(key, request.path)
    raise ValueError("cannot extract raw path from request object %s" % request)
|
0e02876b0b87e2cd195e5d11e71541328d64ad6f
| 324,644
|
def ParseGitInfoOutput(output):
    """Given a git log, determine the latest corresponding svn revision.

    Scans for the first 'git-svn-id:' line and returns the revision after
    the '@' in its URL token, or None when no such line exists.
    """
    for line in output.split('\n'):
        tokens = line.split()
        if tokens and tokens[0] == 'git-svn-id:':
            url_and_revision = tokens[1]
            return url_and_revision.split('@')[1]
    return None
|
79634b7abcd6d993a6d598fb3bed3a4eae2a4f07
| 335,334
|
def import_dotted_name(name):
    """Get an object by its "dotted name", a string representing its import
    location. The last dot can also be a colon instead.

    .. versionadded:: 0.6
    """
    name = str(name)
    if ':' in name:
        module_name, _, attr = name.partition(':')
    elif '.' in name:
        module_name, _, attr = name.rpartition('.')
    else:
        # plain module name: import and return the module itself
        return __import__(name, level=0)
    module = __import__(module_name, fromlist=[attr], level=0)
    return getattr(module, attr)
|
5dcb954863b98714bb820c19bcaa62ccfbe1e31d
| 424,381
|
def parse_healing_and_source(line, is_lifetap, is_absorb):
    """Helper that finds the healing amount and the character providing it.

    Lifetap lines carry the amount at token 3 and absorbs at token 5, with
    no source. Otherwise the source is the tokens between position 5 and
    the word 'for', with the amount right after 'for'; any 'the ' in the
    source is stripped.
    """
    tokens = line.split()
    if is_lifetap:
        return [int(tokens[3]), '']
    if is_absorb:
        return [int(tokens[5]), '']
    for_position = tokens.index('for')
    healer = ' '.join(tokens[5:for_position]).replace('the ', '')
    return [int(tokens[for_position + 1]), healer]
|
37a04f878ba559b1b9d9c96f87181c5801fa3273
| 124,933
|
import re
def make_test_name(description):
    """
    Generate a test name from a plain-english description.

    :param description: plain english description of a test
    :type description: string
    :return: 'test_' plus the lower-cased description with whitespace runs
        replaced by underscores
    :rtype: string
    """
    normalized = description.strip().lower()
    return 'test_' + re.sub(r'\s+', '_', normalized)
|
6cc03887b277926ebdd8380c48df9def0830227f
| 229,846
|
def reformat(formula):
    """Tokenize a formula: pad parens and negation with spaces, then split."""
    padded = []
    for ch in formula:
        padded.append(f' {ch} ' if ch in '~()' else ch)
    return ''.join(padded).split()
|
661327cc35faa2fe85c6fd0e38013f2ae4b55482
| 693,450
|
def jac(a, b):
    """Return the Jaccard similarity of two sets (non-set iterables are
    coerced to sets first)."""
    set_a = a if type(a) == set else set(a)
    set_b = b if type(b) == set else set(b)
    overlap = len(set_a.intersection(set_b))
    # |A ∪ B| = |A| + |B| - |A ∩ B|
    return overlap / float(len(set_a) + len(set_b) - overlap)
|
ca8778e0cd6ee90389b6a103f3d7644c958ad7f1
| 505,415
|
from pathlib import Path
def create_flag_file(filepath: str) -> str:
    """
    Create a flag file in order to avoid concurrent builds of the same
    previews.

    :param filepath: file to protect
    :return: flag file path
    """
    flag_path = "{}_flag".format(filepath)
    Path(flag_path).touch()
    return flag_path
|
80ad8e181574600fcb1b9ded6e5c64c3c0d5b457
| 27,996
|
def listlike(var):
    """
    Wrap a variable in a list unless it already is one.

    :param var: variable
    :return: List
    """
    return var if isinstance(var, list) else [var]
|
67798c75b845f15444a0d66f5559cdb2f40bbe43
| 302,088
|
def copy_df(df):
    """
    DESCRIPTION
    -----------
    Deep copy a pandas dataframe.

    PARAMETERS
    ----------
    df : pd.DataFrame
        A pandas dataframe instance

    RETURNS
    -------
    A deep copy of the given pandas dataframe
    """
    duplicate = df.copy(deep=True)
    return duplicate
|
a130830820a9aef0c2419580a2493bb1f14111df
| 49,447
|
def is_user_active(user_obj):
    """
    Tell whether a user is active.

    :param user_obj: user queryset of User model
    :return: True when user_obj.is_active is truthy, else False
    """
    return bool(user_obj.is_active)
|
1008ab560600803988b32b40ca9c861df0b15ce3
| 691,297
|
def _cmp(x, y):
    """
    Replacement for the built-in cmp removed in Python 3.

    Compare x and y and return an integer: negative if x < y, zero if
    x == y, and strictly positive if x > y.
    """
    greater = x > y
    lesser = x < y
    # bool arithmetic yields -1 / 0 / 1
    return greater - lesser
|
fcb2eb6c35463fcace8f84b5d4c9995b1dda4ada
| 281,560
|
def get_agency_url(operator_code):
    """Get the url for an operator code, or "NA" when unknown."""
    operator_urls = {
        'OId_LUL': "https://tfl.gov.uk/maps/track/tube",
        'OId_DLR': "https://tfl.gov.uk/modes/dlr/",
        'OId_TRS': "https://www.thamesriverservices.co.uk/",
        'OId_CCR': "https://www.citycruises.com/",
        'OId_CV': "https://www.thamesclippers.com/",
        'OId_WFF': "https://tfl.gov.uk/modes/river/woolwich-ferry",
        'OId_TCL': "https://tfl.gov.uk/modes/trams/",
        'OId_EAL': "https://www.emiratesairline.co.uk/",
    }
    return operator_urls.get(operator_code, "NA")
|
9a9c4c9abb0a3f88c8d58fc03c5fc3d5aede71d3
| 327,394
|
from typing import List
def list_truncated_nums(n: int) -> List[int]:
    """
    Return a list of all left and right truncated numbers of n

    >>> list_truncated_nums(927628)
    [927628, 27628, 92762, 7628, 9276, 628, 927, 28, 92, 8, 9]
    >>> list_truncated_nums(467)
    [467, 67, 46, 7, 4]
    >>> list_truncated_nums(58)
    [58, 8, 5]
    """
    digits = str(n)
    results = [n]
    # at each cut depth, add the left-truncation then the right-truncation
    for cut in range(1, len(digits)):
        results.extend((int(digits[cut:]), int(digits[:-cut])))
    return results
|
ec349e55e60e3164373702d2d020a9cd791ec039
| 632,128
|
def calculate_total_bill(subtotal):
    """
    (float) -> float
    subtotal is passed through as an input
    HST_RATE variable in this function is multiplied by inputted variable
    Function returns the resulting variable "total", rounded and formatted to 2 decimal points.
    Variable "total" is then rounded to the nearest 5 cents using the following nickel rounding scheme standard rules in Canada:
    0.01 to 0.02 will round down to 0.00. 0. 03 to 0.04 will round up to 0.05. 0.06 to 0.07 will round down to 0.05.
    0.08 to 0.09 will round up to 0.10

    >>> calculate_total_bill(3.0)
    3.40
    >>> calculate_total_bill(6.67)
    7.55
    >>> calculate_total_bill(2.05)
    2.30
    """
    # 13% Harmonized Sales Tax multiplier.
    HST_RATE = 1.13
    total_bill = subtotal * HST_RATE
    # Snap to the nearest nickel, then render with 2 decimals.
    # NOTE(review): format() makes this return a *string*, although the
    # docstring and doctests describe a float — confirm the intended return
    # type. Also, round() uses banker's rounding, so exact .025 boundaries
    # may not follow the "round up" rule described above.
    return format(round(0.05 * round(float(total_bill)/0.05), 2), '.2f')
|
6335c9e85e37e6d897eaa48ad09557b5d77d2e1b
| 24,066
|
def xr_to_np(S):
    """Return the underlying numpy array of an xarray object.

    Objects without a ``.values`` attribute pass through unchanged.
    """
    return S.values if hasattr(S, "values") else S
|
3ad9a76a974c473cb14134ffceb6cef4a2104448
| 522,851
|
import json
def parseBQNumeric(s):
    """
    Parse a response-JSON string and return the BQNumeric responses as
    a list (one entry per record).

    :param s: JSON string with a top-level "Response" list whose items
              each carry a single "Response" value
    :return: list of response values, or None when the input cannot be
             parsed or does not have the expected structure
    """
    try:
        resp = json.loads(s)
        # One "Response" value per record.
        return [record["Response"] for record in resp["Response"]]
    except (ValueError, KeyError, TypeError):
        # Narrowed from a bare ``except:`` — malformed JSON raises
        # ValueError (JSONDecodeError), a missing key KeyError, and a
        # non-dict/non-iterable structure TypeError. Anything else
        # (e.g. KeyboardInterrupt) now propagates instead of being
        # silently swallowed.
        return None
|
65a36114d9a5d38948246796cb84c1b1a1444970
| 548,309
|
def _pairs_to_compute(rids, cids):
"""Determine the pairs of samples to compute distances between
Parameters
----------
rids : Iterable
The row IDs in the partial pairwise computation.
cids : Iterable
The column IDs in the partial pairwise computation.
Raises
------
ValueError
When determining ID pairs for blocks that fall outside of the diagonal
of the resulting distance matrix, if a pair corresponds to the lower
triangle, complain loudly.
Returns
-------
list of tuple
The ID pairs to compute distances between.
"""
# if identical, gather the upper triangle
if len(rids) == len(cids) and (rids == cids).all():
return [(i, j) for idx, i in enumerate(rids) for j in rids[idx+1:]]
# otherwise, grab pairwise combinations disregarding the diagonal
else:
if set(rids).intersection(set(cids)):
raise ValueError("Attempting to compute a lower triangle")
return [(i, j) for i in rids for j in cids if i != j]
|
ef2970275ed4dce29a01fb53da493a221589defb
| 319,688
|
def avg(numbers: list) -> float:
    """Return the arithmetic mean of ``numbers``.

    numbers is a list of numbers. Returns 0 for an empty list instead
    of raising ZeroDivisionError (preserving the original contract).
    """
    if not numbers:  # truthiness check instead of len(...) != 0
        return 0
    # Builtin sum replaces the manual accumulator loop.
    return sum(numbers) / len(numbers)
|
9538a80456de9500b5bcd58e8d0de7cf0cb2b920
| 467,107
|
import torch
def deriv_tanh(x):
    """Derivative of tanh: d/dx tanh(x) = 1 - tanh(x)^2."""
    th = torch.tanh(x)
    return 1.0 - th * th
|
bf02250c67b0ec7ae56a77f51d8ceee3cdc211c8
| 519,011
|
from typing import Any
from typing import MutableSequence
def ensure_list(value: Any, convert_csv: bool = False, delimiter: str = ',') -> MutableSequence[Any]:
    """Coerce *value* into a list.

    Falsy input yields []. A dict carrying a 'results' key is unwrapped
    first. With ``convert_csv``, a string containing the delimiter is
    split and each piece stripped. Mutable sequences pass through
    unchanged, tuples and sets are converted, and anything else is
    wrapped in a one-item list.
    """
    if not value:
        return []
    if isinstance(value, dict) and 'results' in value:
        value = value['results']
    elif convert_csv and isinstance(value, str) and delimiter in value:
        return [piece.strip() for piece in value.split(delimiter)]
    if isinstance(value, MutableSequence):
        return value
    if isinstance(value, (tuple, set)):
        return list(value)
    return [value]
|
76262a0620e4988071eac3a28bd504801b938915
| 659,758
|
def get_imphash(pefile_pe):
    """Return the imphash of a PE file, or None when unavailable.

    ``pefile`` reports an empty string when it cannot compute an
    imphash; normalise that sentinel to None.
    """
    imphash = pefile_pe.get_imphash()
    return None if imphash == '' else imphash
|
46bf01254081254db86b4ae2336dc54e9756e2a9
| 271,408
|
def mu_na(n: float, a: float) -> float:
    """
    Standard gravitational parameter from Kepler's third law:

        mu = n^2 * a^3

    (The previous docstring stated ``mu = n^2 / a^3``, which matched
    neither the implementation nor the physics: n = sqrt(mu / a^3).)

    :param n: mean motion (angular rate). NOTE(review): for mu to come
              out in consistent units this must be radians per unit
              time, not degrees — verify at call sites.
    :type n: float
    :param a: semi-major axis
    :type a: float
    :return: mu = n^2 * a^3
    :rtype: float
    """
    return n * n * a * a * a
|
5301959b886d4a299dad69d62974df7621ea8dc3
| 95,469
|
import re
def unescape(string: str) -> str:
    """Remove backslash escaping from *string*.

    Every two-character sequence ``\\X`` collapses to ``X`` alone —
    typically used on attribute values.

    Args:
        string: string to be unescaped

    Returns:
        The unescaped string.
    """
    escaped_char = re.compile(r"\\(.)")
    return escaped_char.sub(r"\1", string)
|
23f28151b2bb936f82e7c24badb7a09c7ea62564
| 521,535
|
def _to_image(data, width):
"""Convert data to an image.
Parameters
----------
data : array_like
Values of the pixels
width : int
Width of the image in pixels
Returns
-------
ndarray
Data reshaped into an ndarray with width `width`
"""
return data.reshape(len(data) // width, width)
|
7359a848cc82bbc6ce43bca3625b4b22a0e8342f
| 605,081
|
def get_rounded_number(
    number: float,
    number_of_digits_after_separator: int = 0,
) -> float:
    """
    Round *number* half-up to the given number of decimal digits.

    Unlike builtin ``round`` (banker's rounding), this rounds the
    magnitude half-up: a first discarded digit of 5-9 rounds away from
    zero, so e.g. the magnitude of -X rounds the same way as +X.

    Parameters
    ----------
    number : float
        Number to be rounded.
    number_of_digits_after_separator : int
        Number of digits to keep after the decimal separator.

    Returns
    -------
    float
        Rounded float number.

    Raises
    ------
    ValueError
        If ``number_of_digits_after_separator`` is negative.
    """
    if number_of_digits_after_separator < 0:
        raise ValueError(
            '`number_of_digits_after_separator` '
            'must to be positive integer number.'
        )
    # Work on the magnitude and reapply the sign at the end, so the
    # half-up rule is symmetric around zero.
    sign = -1.0 if number < 0 else 1.0
    _number = abs(number)
    # 10 ** digits, built via a digit string: e.g. 2 digits -> 100.
    _multiplier = int('1' + '0' * number_of_digits_after_separator)
    _number_without_separator = _number * _multiplier
    _integer_part = int(_number_without_separator)
    # The first digit beyond the kept precision decides the rounding.
    # NOTE(review): the float multiplication above can misrepresent
    # this digit for some inputs (binary-float artefacts) — confirm
    # that is acceptable for the call sites.
    _first_discarded_digit = int(
        (_number_without_separator - _integer_part) * 10
    )
    if _first_discarded_digit >= 5:
        _integer_part += 1
    result = _integer_part / _multiplier * sign
    return result
|
1a259398e50ca84c4d741b1f25c3229adfd38ad9
| 481,840
|
from functools import cmp_to_key
def sort(array, comparison=None, key=None, reverse=False):
    """Sort `array` using optional `comparison`, `key`, and `reverse` options
    and return sorted `array`.
    Note:
        Python 3 removed the option to pass a custom comparison function and
        instead only allows a key function. Therefore, if a comparison
        function is passed in, it will be converted to a key function
        automatically using ``functools.cmp_to_key``.
    Args:
        array (list): List to sort.
        comparison (callable, optional): A custom comparison function used to
            sort the list. Function should accept two arguments and return a
            negative, zero, or positive number depending on whether the first
            argument is considered smaller than, equal to, or larger than the
            second argument. Defaults to ``None``. This argument is mutually
            exclusive with `key`.
        key (callback, optional): A function of one argument used to extract
            a comparison key from each list element. Defaults to ``None``.
            This argument is mutually exclusive with `comparison`.
        reverse (bool, optional): Whether to reverse the sort. Defaults to
            ``False``.
    Returns:
        list: Sorted list.
    Raises:
        ValueError: If both `comparison` and `key` are provided.
    Warning:
        `array` is modified in place.
    Example:
        >>> sort([2, 1, 4, 3])
        [1, 2, 3, 4]
        >>> sort([2, 1, 4, 3], reverse=True)
        [4, 3, 2, 1]
    .. versionadded:: 2.2.0
    """
    # pylint: disable=redefined-outer-name
    if comparison and key:
        # ValueError (a subclass of Exception, so existing handlers
        # still catch it) replaces the previous generic Exception.
        raise ValueError(
            'The "comparison" and "key" arguments are mutually exclusive')
    if comparison:
        key = cmp_to_key(comparison)
    array.sort(key=key, reverse=reverse)
    return array
|
1dacad94e4392f9cf5f2a331e6bf8a02d6006f41
| 564,136
|
def Chunkify(list, chunk_size):
    """Split *list* into consecutive chunks of at most *chunk_size* items.

    The final chunk may be shorter than *chunk_size*.
    NOTE(review): the parameter name shadows the builtin ``list``;
    kept unchanged for interface compatibility.
    """
    chunks = []
    start = 0
    while start < len(list):
        chunks.append(list[start:start + chunk_size])
        start += chunk_size
    return chunks
|
9bf6d1a67af957847b7af585c619a5b86ded1b59
| 150,484
|
import json
import tempfile
def _modify_json_with(json_filename, modifier_func):
"""
Given a modifier_func which modifies the data in the given file,
updates the json file and returns a Tempfile holding the new data
"""
# Update the sidecar data
with open(json_filename, 'r+', encoding='utf-8') as file_obj:
data = json.load(file_obj)
modifier_func(data)
# Write it to a tempfile
modified_json_tempfile = tempfile.NamedTemporaryFile() # pylint: disable=consider-using-with
with open(modified_json_tempfile.name, 'w', encoding='utf-8') as file_obj:
json.dump(data, file_obj)
return modified_json_tempfile
|
1daec729dc526131df9637f834ddb21407619124
| 583,098
|
from typing import Union
import torch
def check_data_types(data) -> Union[list, torch.Tensor]:
    """
    Ensure *data* is in tensor form.

    Tensors pass through untouched. Other inputs are converted to a
    single float tensor; ragged inputs that cannot form one tensor are
    converted element-wise into a list of float tensors instead.

    :param data: data to be checked (iterable or single structure)
    :return: tensor, or list of tensors for ragged input
    """
    if torch.is_tensor(data):
        return data
    try:
        converted = torch.tensor(data, dtype=torch.float)
    except ValueError:
        converted = [torch.tensor(item, dtype=torch.float) for item in data]
    return converted
|
a5b0ae79eecb59fecd33431b1a10a52f74c8dbf2
| 201,833
|
import torch
def max_shape(data):
    """Element-wise maximum shape over a list of tensors.

    Returns an int tensor whose d-th entry is the largest size any
    input tensor has along dimension d (inputs must share rank).
    """
    shape_matrix = torch.Tensor([list(t.shape) for t in data])
    # Max over the tensor axis gives the per-dimension maximum.
    return shape_matrix.max(dim=0)[0].int()
|
accc61c9b3a137257457ed4c9e5aa0888529261c
| 270,959
|
def get_str_from_list(message_list: list, cc: str = "and", punct: bool = True) -> str:
    """Format *message_list* as a natural-language listing for speech.

    message list: [list] of the components to be joined.
    cc: [str] coordinating conjunction placed before the final item.
    punct: bool - indicates if should include punctuation at end of list.

    An empty list yields ''. A single item is returned as-is (plus the
    optional period). Longer lists come out as ``'a, b,  and c.'`` —
    each non-final item is followed by ', ' and the conjunction is
    wrapped in single spaces (the historical double space before the
    conjunction is preserved for output compatibility).
    """
    if not message_list:
        return ''
    if len(message_list) == 1:
        message = str(message_list[0])
        if punct:
            message += "."
        return message
    speech_list = []
    last_index = len(message_list) - 1
    for i, item in enumerate(message_list):
        if i == last_index:
            # BUGFIX: the conjunction was previously hard-coded to
            # 'and', silently ignoring the ``cc`` argument. Default
            # output is unchanged (' and ' == ' ' + 'and' + ' ').
            speech_list.append(' ' + cc + ' ')
        speech_list.append(str(item))
        if i != last_index:
            speech_list.append(', ')
    if punct:
        speech_list.append('.')
    return ''.join(speech_list)
|
224821fe0e0ca6b30001f467816df97bad37237a
| 47,990
|
import torch
def get_torch_zeros(shape, device='cuda'):
    """
    Create a float32 zero tensor of the given shape on *device*.

    :param shape: desired tensor shape
    :param device: target device (defaults to 'cuda')
    :return: zero tensor of the given shape
    """
    zeros = torch.zeros(shape, dtype=torch.float32)
    return zeros.to(device)
|
8260b2e0cc11f870d518348403a5381a25ca2165
| 327,616
|
def flatten_list(messy_list):
    """
    Flatten one level of nesting from a list of lists.
    Cf. <http://stackoverflow.com/a/952914>
    """
    flat = []
    for sublist in messy_list:
        flat.extend(sublist)
    return flat
|
47b01b1e724e192dbab115cad97b915f17a53155
| 326,338
|
def Switch(node):
    """
    Root of switch/case branch
    Args:
        node (Switch): Current position in node-tree
    Return:
        str : Translation of current node.
    Children:
        Expression Case+ Otherwise?
    Expression:
        Test-expression
    Case:
        Case-block
    Otherwise:
        Otherwise-block
    Examples:
        >>> print(matlab2cpp.qscript("a=1; switch a; case b; c; otherwise; d"))
        a = 1 ;
        if (b == a)
        {
          c ;
        }
        else
        {
          d ;
        }
    """
    # Switching on a plain variable: no temporary needed, the Case
    # children compare against the variable directly.
    if node[0].cls == "Var":
        out = ""
    # create switch variable
    else:
        # Switching on an arbitrary expression: materialise it once in
        # a typed temporary (_var_<type>) so every Case compares
        # against the same evaluated value.
        node.type = node[0].type
        out = "%(type)s _var_%(type)s = %(0)s ;\n"
    # Children after the test expression (node[1:]) are the translated
    # Case/Otherwise blocks, joined line by line.
    return out + "\n".join(map(str, node[1:]))
|
538658b3ac8e913a897722f18c3fcfd45cb486b4
| 654,018
|
def _number_convert(match):
"""
Convert number with an explicit base
to a decimal integer value:
- 0x0000 -> hexadecimal
- 16'h0000 -> hexadecimal
- 0b0000 -> binary
- 3'b000 -> binary
- otherwise -> decimal
"""
prefix, base, number = match.groups()
if prefix is not None:
return str(match.group(0))
if base in "xh":
return str(int(number, 16))
if base == "b":
return str(int(number, 2))
return str(int(number, 10))
|
adef8f8f80342fbcd79c461068eb04f99427f88c
| 702,549
|
def parse_utf(s):
    """Strip whitespace from byte string *s* and decode it as UTF-8.

    Returns None when nothing remains after stripping.
    """
    decoded = s.strip().decode('utf-8')
    return decoded if decoded else None
|
933319627f7b0905af2b2b7a79080fbff4d4b033
| 285,917
|
def get_fix_range_value(tot_elms, index):
    """Mid-bucket percentage of *index* among *tot_elms* elements.

    The [0, 1] interval is split into ``tot_elms`` equal buckets and
    the midpoint of bucket *index* (1-based) is returned:

        0.0                 1.0
         |-----|-----|-----|
            ^     ^     ^

    Params:
        tot_elms (int): number of elements in the set
        index (int): 1-based index of the current element
    Return:
        float: percentage with dot notation, from 0.0 to 1.0
    """
    half_bucket = (1. / tot_elms) / 2.
    return index / tot_elms - half_bucket
|
92e1f5f2e71f7afac5252a12594e390d803cfdbd
| 379,037
|
def get_total(lines):
    """
    Sum the first whitespace-separated field of each data line.

    Parameters:
    -----------
    lines : list
        Data lines whose first token is a numeric value for a given
        year and tech.

    Returns:
    --------
    total : float
        Sum of the first field across all lines (0.0 for no lines).
    """
    # Explicit 0.0 start keeps the empty-input result a float.
    return sum((float(line.split()[0]) for line in lines), 0.0)
|
284f8061f3659999ae7e4df104c86d0077b384da
| 705,377
|
from pathlib import Path
def _check_cache(arg_hash):
""" Check whether a cache file exists for the given arg_hash. """
path = Path(f"./{arg_hash}.pt")
return path.is_file()
|
6b9fa03421d3b2a6df5bd8501237817bc54bcb34
| 137,645
|
import requests
import json
from typing import OrderedDict
def get_model(uri):
    """
    Fetch a capture model and return it as an order-preserving JSON
    object.

    :param uri: URI for the capture model (falsy values short-circuit)
    :return: OrderedDict-based JSON object, or None when the URI is
             missing or the server does not answer 200 OK
    """
    if not uri:
        return None
    response = requests.get(uri)
    if response.status_code != requests.codes.ok:
        return None
    return json.loads(response.text, object_pairs_hook=OrderedDict)
|
0c57c3a3aabfdd2e0dbbc229fee1cf39c63b6dd7
| 58,821
|
def test_pkgrepo_with_architectures(pkgrepo, grains, sources_list_file, subtests):
    """
    Test managing a repo with architectures specified.

    Walks the repo through three configurations — no architecture,
    ``[arch=amd64]``, and back to none — and for each one runs the
    state twice with ``test=True`` and twice for real, asserting the
    standard test/apply/idempotence cycle every time.

    Fixtures (provided by the test suite, presumably a Salt states
    suite — pkgrepo wraps the ``pkgrepo.managed`` state, grains gives
    the target's ``oscodename``, sources_list_file is the apt sources
    file under management, subtests is pytest-subtests):
    """
    # Double-braced {{arch}} survives the first .format() so the arch
    # marker can be substituted per-run below.
    name = "deb {{arch}}http://foo.com/bar/latest {oscodename} main".format(
        oscodename=grains["oscodename"]
    )
    def _get_arch(arch):
        # '[arch=amd64] ' fragment, or '' when no architecture is set.
        return "[arch={}] ".format(arch) if arch else ""
    def _run(arch=None, test=False):
        # Apply the pkgrepo.managed state with the line built for the
        # requested architecture.
        return pkgrepo.managed(
            name=name.format(arch=_get_arch(arch)),
            file=sources_list_file,
            refresh=False,
            test=test,
        )
    with subtests.test("test=True"):
        # Run with test=True
        ret = _run(test=True)
        assert ret.changes == {"repo": name.format(arch="")}
        assert "would be" in ret.comment
        assert ret.result is None
    with subtests.test("test=False"):
        # Run for real
        ret = _run()
        assert ret.changes == {"repo": name.format(arch="")}
        assert ret.comment.startswith("Configured")
        assert ret.result is True
    with subtests.test("test=True repeat"):
        # Run again with test=True, should exit with no changes and a True
        # result.
        ret = _run(test=True)
        assert not ret.changes
        assert "already" in ret.comment
        assert ret.result is True
    with subtests.test("test=False repeat"):
        # Run for real again, results should be the same as above (i.e. we
        # should never get to the point where we exit with a None result).
        ret = _run()
        assert not ret.changes
        assert "already" in ret.comment
        assert ret.result is True
    # Expected diff when switching from no architecture to amd64.
    expected_changes = {
        "line": {
            "new": name.format(arch=_get_arch("amd64")),
            "old": name.format(arch=""),
        },
        "architectures": {"new": ["amd64"], "old": []},
    }
    with subtests.test("test=True arch=amd64"):
        # Run with test=True and the architecture set. We should get a None
        # result with some expected changes.
        ret = _run(arch="amd64", test=True)
        assert ret.changes == expected_changes
        assert "would be" in ret.comment
        assert ret.result is None
    with subtests.test("test=False arch=amd64"):
        # Run for real, with the architecture set. We should get a True
        # result with the same changes.
        ret = _run(arch="amd64")
        assert ret.changes == expected_changes
        assert ret.comment.startswith("Configured")
        assert ret.result is True
    with subtests.test("test=True arch=amd64 repeat"):
        # Run again with test=True, should exit with no changes and a True
        # result.
        ret = _run(arch="amd64", test=True)
        assert not ret.changes
        assert "already" in ret.comment
        assert ret.result is True
    with subtests.test("test=False arch=amd64 repeat"):
        # Run for real again, results should be the same as above (i.e. we
        # should never get to the point where we exit with a None result).
        ret = _run(arch="amd64")
        assert not ret.changes
        assert "already" in ret.comment
        assert ret.result is True
    # Expected diff when dropping the amd64 architecture again.
    expected_changes = {
        "line": {
            "new": name.format(arch=""),
            "old": name.format(arch=_get_arch("amd64")),
        },
        "architectures": {"new": [], "old": ["amd64"]},
    }
    with subtests.test("test=True arch=None"):
        # Run with test=True and the architecture set back to the original
        # value. We should get a None result with some expected changes.
        ret = _run(test=True)
        assert ret.changes == expected_changes
        assert "would be" in ret.comment
        assert ret.result is None
    with subtests.test("test=False arch=None"):
        # Run for real, with the architecture set. We should get a True
        # result with the same changes.
        ret = _run()
        assert ret.changes == expected_changes
        assert ret.comment.startswith("Configured")
        assert ret.result is True
    with subtests.test("test=True arch=None repeat"):
        # Run again with test=True, should exit with no changes and a True
        # result.
        ret = _run(test=True)
        assert not ret.changes
        assert "already" in ret.comment
        assert ret.result is True
    with subtests.test("test=False arch=None repeat"):
        # Run for real again, results should be the same as above (i.e. we
        # should never get to the point where we exit with a None result).
        ret = _run()
        assert not ret.changes
        assert "already" in ret.comment
        assert ret.result is True
|
9e6ae74c792e2f5df05ac936624c62d096e483a4
| 72,942
|
import torch
def transform_verts(verts, T):
    """
    Apply per-batch 4x4 transforms to batches of vertices.

    Inputs:
    - verts: FloatTensor of shape (N, V, 3) giving a batch of vertex
      positions.
    - T: FloatTensor of shape (N, 4, 4) giving transformation matrices.

    Outputs:
    - FloatTensor of shape (N, V, 4) of homogeneous positions
      (x, y, z, w), where batch i is verts[i] transformed by T[i].
    """
    batch, n_verts = verts.shape[0], verts.shape[1]
    # Append w=1 to each vertex so the full 4x4 matrix (including its
    # translation column) applies in one batched matmul. A 4x3
    # submatrix plus a vector add would avoid this allocation, but the
    # performance difference is unclear.
    w_column = torch.ones(batch, n_verts, 1,
                          dtype=verts.dtype, device=verts.device)
    verts_hom = torch.cat([verts, w_column], dim=2)
    # Right-multiplying row vectors by T^T equals left-multiplying
    # column vectors by T.
    return torch.bmm(verts_hom, T.transpose(1, 2))
|
4c4d0bcb4c4feb56dc2b885ae353405c59e9fae0
| 93,209
|
def to_fahrenheit(temp):
    """
    Convert a temperature in celsius to fahrenheit.
    """
    scaled = temp * 1.8
    return scaled + 32
|
11e06263f472e79096656373cf190fc86ac1acf9
| 626,789
|
def cant_adyacentes(life, f, c):
    """
    Count the live cells adjacent to the cell at row `f`, column `c`.

    Important: the board is treated as "infinite" (toroidal): the
    left-edge cells connect on the left with the right-edge cells and
    vice versa; the top-edge cells connect upward with the bottom-edge
    cells and vice versa. Neighbour coordinates therefore wrap with
    modular arithmetic.
    """
    rows = len(life)
    cols = len(life[0])
    count = 0
    for j in range(c - 1, c + 2):
        for i in range(f - 1, f + 2):
            wi, wj = i % rows, j % cols
            # Skip the cell itself (after wrapping); count live cells.
            if (wi, wj) != (f, c) and life[wi][wj]:
                count += 1
    return count
|
2ee543333088ffc55c3227ce5f1568adefff9623
| 118,713
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.