content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import torch
def matrix_to_rotation_6d(matrix: torch.Tensor) -> torch.Tensor:
    """Convert rotation matrices to the 6D rotation representation.

    The 6D representation (Zhou et al., CVPR 2019) keeps only the first
    two rows of each 3x3 rotation matrix, flattened. Note that the 6D
    representation is not unique.

    Args:
        matrix: batch of rotation matrices of size (*, 3, 3)

    Returns:
        6D rotation representation, of size (*, 6)

    Reference: http://arxiv.org/abs/1812.07035
    """
    batch_shape = matrix.size()[:-2]
    # Keep rows 0 and 1; clone so the result does not share storage with the input view.
    top_two_rows = matrix[..., :2, :].clone()
    return top_two_rows.reshape(*batch_shape, 6)
|
d07acc00fb89bc3786a412a7baf0911a5cdd9fb2
| 78,077
|
from collections import defaultdict
import netaddr
def build_reverse_mappings_from_anm_input(anm):
    """Builds reverse mappings from ANM input graph,
    assumes addresses have already been allocated onto input graph,
    either externally or by previous run"""
    g_in = anm['input']
    rev_map = {"loopbacks": {}, "infra_interfaces": {}, "subnets": {}}
    members_by_subnet = defaultdict(list)
    for node in g_in:
        # Loopback addresses map straight back to their owning node.
        rev_map["loopbacks"][str(node.loopback_v4)] = node
        for interface in node.physical_interfaces:
            rev_map["infra_interfaces"][str(interface.ipv4_address)] = interface
            # Normalize the host address to its containing network before grouping.
            prefixlen = interface.ipv4_prefixlen
            host_net = netaddr.IPNetwork("%s/%s" % (interface.ipv4_address, prefixlen))
            subnet = netaddr.IPNetwork("%s/%s" % (host_net.network, prefixlen))
            members_by_subnet[subnet].append(interface)
    for subnet, members in members_by_subnet.items():
        # Each subnet is labelled with the joined names of its attached nodes.
        rev_map["subnets"][str(subnet)] = "_".join(str(i.node) for i in members)
    return rev_map
|
efd43d8594889b15cce0b00be3e2278cf46906ba
| 78,080
|
from typing import List
def extend(layout: List[List[str]]) -> List[List[str]]:
    """
    Extend a layout by surrounding it with a border of empty seats ('.').

    Unlike the previous version, the input layout is NOT mutated: each
    row is copied before the border cells are added, and the top and
    bottom border rows are distinct list objects.

    :param layout: initial layout (non-empty, rows of equal length)
    :return: new layout with a border of empty seats
    """
    width = len(layout[0]) + 2
    bordered = [['.'] * width]  # top border row
    for row in layout:
        # Copy the row with a '.' on each side instead of insert()-ing
        # into the caller's list.
        bordered.append(['.'] + list(row) + ['.'])
    bordered.append(['.'] * width)  # bottom border row
    return bordered
|
0a14b8a551aa36674c47703c2f820b0f98c8f048
| 78,081
|
import re
def get_fine_type(attributes):
    """ Compute fine-grained mention type.
    Args:
        attributes (dict(str, object)): Attributes of the mention, must contain
            values for "type", "tokens" and "pos".
    Returns:
        str: The fine-grained mention type, one of
            - DEF (definite noun phrase),
            - INDEF (indefinite noun phrase),
            - PERS_NOM (personal pronoun, nominative case),
            - PERS_ACC (personal pronoun, accusative),
            - REFL (reflexive pronoun),
            - POSS (possessive pronoun),
            - POSS_ADJ (possessive adjective) or
            - None.
    """
    coarse_type = attributes["type"]
    start_token = attributes["tokens"][0].lower()
    start_pos = attributes["pos"][0]
    # Word sets replace the original anchored regexes; set membership is
    # equivalent to a full-string alternation match and fixes the stray
    # empty alternative ("...|theirs|)$") that let the POSS pattern match
    # an empty token.
    definite = {"the", "this", "that", "these", "those",
                "my", "your", "his", "her", "its", "our", "their"}
    pers_nom = {"i", "you", "he", "she", "it", "we", "they"}
    pers_acc = {"me", "you", "him", "her", "it", "us", "them"}
    reflexive = {"myself", "yourself", "yourselves", "himself", "herself",
                 "itself", "ourselves", "themselves"}
    possessive = {"mine", "yours", "his", "hers", "its", "ours", "theirs"}
    poss_adj = {"my", "your", "his", "her", "its", "our", "their"}
    if coarse_type == "NOM":
        if start_token in definite:
            return "DEF"
        if start_pos == "NNP":  # exact tag match, as the anchored regex did
            return "DEF"
        return "INDEF"
    if coarse_type == "PRO":
        if start_token in pers_nom:
            return "PERS_NOM"
        if start_token in pers_acc:
            return "PERS_ACC"
        if start_token in reflexive:
            return "REFL"
        if start_pos == "PRP" and start_token in possessive:
            return "POSS"
        if start_pos == "PRP$" and start_token in poss_adj:
            return "POSS_ADJ"
    # Neither NOM nor PRO, or an unrecognized pronoun: no fine type.
    return None
|
2ac80d7a48614c33844754aee72d7fcc4ae21460
| 78,082
|
from pathlib import Path
from datetime import datetime
def _age(path: Path) -> int:
"""
Calculate age of log file at `path` in days.
The creation timestamp is extracted from the ISO filename.
"""
created_at = datetime.fromisoformat(path.stem)
difference = datetime.utcnow() - created_at
return difference.days
|
6f7c3f3275f3458a514f5a21cb0fee879c4ef1c2
| 78,083
|
def make_terms_table(terms_dict, n = None):
    """
    Given a dictionary of terms and corresponding values, reformats in tabular (2-D list) format.
    terms_dict - a dictionary of {term: value}.
    n - optionally, the number of (top) records considered.
    >> Default: None (all records).
    Returns a 2-D list of sorted (term, value) in descending order.
    """
    # Test against None explicitly: the old `if not n` also triggered for
    # n == 0, silently returning the whole table instead of an empty one.
    if n is None:
        n = len(terms_dict)
    return sorted(terms_dict.items(), key = lambda x: x[1], reverse = True)[:n]
|
35c011a235b72f072f44162dbfcab8367b5387cf
| 78,086
|
import random
def gen_random_string(length):
    """ Generates a random digit string.
    Parameter:
        length -- The length of the string.
    Return:
        The random string.
    """
    # One randrange(0, 10) draw per character, joined in a single pass.
    return ''.join(str(random.randrange(0, 10)) for _ in range(length))
|
62469e37022cb09805b729f50282a102cbea7498
| 78,092
|
import multiprocessing
def _set_resources(parallel, config):
"""Set resource availability for programs, downsizing to local runs.
"""
for program in ["gatk", "novoalign"]:
if not config["resources"].has_key(program):
config["resources"][program] = {}
if parallel["type"] == "local":
cores = min(parallel["cores"], multiprocessing.cpu_count())
config["resources"][program]["cores"] = cores
return config
|
21140d6519f627b2016b07e7504f7fc435ebef3d
| 78,093
|
import requests
def get_url_content(url: str) -> bytes:
    """Fetch ``url`` and return the raw response body.

    :raises requests.HTTPError: when the response status is not 200 OK.
    """
    response = requests.get(url)
    if response.status_code != requests.codes.ok:
        raise requests.HTTPError(f"Unable to access {url}")
    return response.content
|
1f78f988f60448f1c54861de79b999eb4ba1da9c
| 78,094
|
def get_child_list(item):
    """
    Collect an XML node's direct children together with their tags and texts.
    :param item: Node to be queried.
    :type item: Node
    :return: tags, texts and child elements, in that order
    :rtype: List, List, List
    """
    children = list(item.iterfind('*'))
    tags = [child.tag for child in children]
    texts = [child.text for child in children]
    return tags, texts, children
|
060bbdf8ef8bca8db484a92472e685ba5ae27930
| 78,096
|
def filter_pairs(pairs, max_len):
    """
    Filter out pairs where either sentence has more than max_len tokens.
    ==============
    Params:
    ==============
    pairs (list of tuples): each tuple is a src-target sentence pair
    max_len (Int): Max allowable sentence length
    """
    # The docstring promises "either of the sentences", but the old code
    # only measured the source (pair[0]); the target is now checked too.
    return [
        pair for pair in pairs
        if len(pair[0].split()) <= max_len and len(pair[1].split()) <= max_len
    ]
|
cf43ecfbacbdf8a0ee90ac09a3f48f3a0f6f4828
| 78,099
|
from typing import Tuple
import random
def generate_color() -> Tuple[int, int, int]:
    """
    Generates a random RGB color.
    Returns
    -------
    Tuple[int, int, int] : color in (r,g,b) format, each channel in [0, 255]
    """
    # One uniform draw per channel.
    return tuple(random.randint(0, 255) for _ in range(3))
|
0a3d223ebdc9b624a6a7918a218bff625d841a08
| 78,109
|
def calc_total_crab_fuel_complex(input_positions: list) -> int:
    """
    Determine the fuel required for each crab to move to each candidate
    position and return the fuel for the minimum-consumption position.
    Args:
        input_positions: list of input crab submarine positions
    Returns:
        total fuel required for all crabs to move to the lowest total fuel
        position, where moving distance d costs 1 + 2 + ... + d
    """
    best = None
    # Candidate positions run from 0 through max(input_positions) INCLUSIVE;
    # the previous range stopped one short of the right-most crab.
    for pos in range(max(input_positions) + 1):
        total = 0
        for crab in input_positions:
            dist = abs(crab - pos)
            # Triangular-number cost; the product is always even so // is exact.
            total += dist * (dist + 1) // 2
        if best is None or total < best:
            best = total
    return best
|
377a6d53c2cfefe4cab7b91afcb4245c10eef0d1
| 78,116
|
from typing import Dict
from typing import List
def set_user_defined_floats(
    sia: Dict[str, float], floats: List[float]
) -> Dict[str, float]:
    """Set the user-defined float values for the user-defined calculations.
    :param sia: the similar item assessment dict.
    :param list floats: the list of float values.
    :return: sia; the similar item assessment dict with updated float values.
    :rtype: dict
    """
    # Positions 11..15 of the (insertion-ordered) key list receive the values.
    target_keys = list(sia.keys())[11:16]
    for offset, key in enumerate(target_keys):
        try:
            sia[key] = float(floats[offset])
        except IndexError:
            # Fewer floats supplied than slots: pad the remainder with 0.0.
            sia[key] = 0.0
    return sia
|
fe2cc1ce5fbe997e9774c155c11dcd37a59edca4
| 78,117
|
import json
def get_json_from_file(file_path):
    """
    Load a JSON file and return its contents as Python objects (usually a dict).
    :param file_path: path to the json file
    :return: parsed JSON data from the file
    """
    # `with` guarantees the handle is closed even if json.load raises,
    # which the previous open/close sequence did not.
    with open(file_path, 'r') as json_file:
        return json.load(json_file)
|
e41289f4f23ff1940c7b7d269eac9cef7d9ef407
| 78,118
|
from typing import Tuple
def get_main_version_part(version: Tuple[int, int, int, str, int]) -> str:
    """Retrieve the main part of the version information: the major, minor
    and patch components, with a ".0" patch omitted.
    Parameters
    ----------
    version
        Version information tuple.
    Returns
    -------
    str
        The main version part, e.g. "1.2.3" or "1.2" when the patch is 0.
    """
    # Drop a zero patch component entirely.
    if version[2] == 0:
        components = version[:2]
    else:
        components = version[:3]
    return '.'.join(map(str, components))
|
1dfffd9b9190f0f0c1125f99feab26e43fde3f16
| 78,120
|
import csv
def import_trials_duration(path):
    """
    Import an existing local CSV of times.
    Parameters
    ----------
    path : string
        the path the file is located at
    Returns
    -------
    list
        one list of floats per CSV row
    """
    with open(path, "r") as times_file:
        # Every cell is parsed as a float, row by row.
        return [[float(cell) for cell in row] for row in csv.reader(times_file)]
|
567ea262acbf8b758bf8a6dcc4620fdc659f8e94
| 78,121
|
def count_ngram(hyps_resp, n):
    """
    Count the number of unique n-grams over a list of tokenized responses.
    :param hyps_resp: list, a list of responses (each a list of tokens)
    :param n: int, n-gram order
    :return: the number of unique n-grams in hyps_resp, or None on bad input
    """
    if not hyps_resp:
        print("ERROR, eval_distinct get empty input")
        return
    if type(hyps_resp[0]) != list:
        print("ERROR, eval_distinct takes in a list of <class 'list'>, get a list of {} instead".format(
            type(hyps_resp[0])))
        return
    unique_ngrams = set()
    for resp in hyps_resp:
        # Responses shorter than n yield an empty range and contribute nothing.
        for start in range(len(resp) - n + 1):
            unique_ngrams.add(' '.join(resp[start:start + n]))
    return len(unique_ngrams)
|
4ab063744e48812c360dbb79421a51b003f79700
| 78,122
|
def find_shape(bottom_lines, max_len):
    """
    Finds a shape of lowest horizontal lines with step=1
    :param bottom_lines: iterable of lines; line[0]/line[2] bound the covered
        columns (1-based start, exclusive end) and line[1] is the level (row)
    :param max_len: number of columns in the shape
    :return: list of levels (row values), list indexes are columns
    """
    levels = []
    for col in range(max_len):
        level = 1  # default when no line covers this column
        for line in bottom_lines:
            if line[0] <= col + 1 < line[2]:
                # First covering line wins.
                level = line[1]
                break
        levels.append(level)
    return levels
|
a2ff03a955e368eba7195aadb8107e78e29b52e6
| 78,126
|
import re
def camel2snake(string1):
    """ Convert a camelCase style string to snake_case style.
    Args:
        string1 (str): camelCase style string (or None)
    Returns:
        snake_case style string, or None when the input is None
    """
    if string1 is None:
        return None
    # Two passes: first break Word-boundaries like "aWord", then any
    # remaining lower/digit-to-upper transitions, before lowercasing.
    for pattern in ('(.)([A-Z][a-z]+)', '([a-z0-9])([A-Z])'):
        string1 = re.sub(pattern, r'\1_\2', string1)
    return string1.lower()
|
6c26c7571c65e86e42110f0554df4256818b9a5a
| 78,131
|
def load(fp):
    """Deserialize our domain-specific configuration file."""
    result = {}
    for raw in fp:
        stripped = raw.strip()
        # Skip blank lines and '#' comments.
        if not stripped or stripped.startswith('#'):
            continue
        # Key is everything before the first space; a line without a
        # space becomes a key with an empty value.
        key, _, value = stripped.partition(' ')
        key = key.strip()
        value = value.strip()
        # Integer-looking values are stored as ints, others as strings.
        try:
            result[key] = int(value)
        except ValueError:
            result[key] = value
    return result
|
f57aaea8b9946d2eafb46fc87a9c6f3f595d05a9
| 78,134
|
import re
def to_kebab(s):
"""Convert string to kebab case."""
s = re.sub(r'[^a-zA-Z0-9]+', ' ', s)
s = re.sub(r'\s+', ' ', s)
s = s.replace(' ', '-')
return s.lower()
|
bb75b7bf4763dbe24e2dd74dd9b664a436eabe52
| 78,150
|
def _version_format(version):
"""Return version in dotted string format."""
return '.'.join(str(x) for x in version)
|
62306f7c374f1a7d7cce5b3fc45009312514bf6e
| 78,155
|
def svn_repo(repo):
    """
    Tests if a repo URL is a svn repo, then returns the repo url.
    """
    # Only known svn providers are recognized.
    known_prefixes = (
        'svn://',
        'https://svn.code.sf.net/p/',
        'http://svn.savannah.gnu.org/svn/',
        'https://svn.icculus.org/',
        'http://svn.icculus.org/',
        'http://svn.uktrainsim.com/svn/',
        'https://rpg.hamsterrepublic.com/source/wip',
    )
    # str.startswith accepts a tuple of prefixes directly.
    if repo.startswith(known_prefixes):
        return repo
    # not svn
    return None
|
c19f6a4a874d2aa2191682f4bc461dd38f2440d3
| 78,157
|
def split_text_by_delims(text):
    """Replace a fixed set of delimiter characters with spaces and split
    on single spaces.

    Note: adjacent delimiters produce empty strings in the result, since
    the split is on single spaces rather than whitespace runs.
    """
    delims = ":./'\"()\\[],\t\n*-"
    # One C-level pass instead of chained .replace() calls.
    table = str.maketrans({ch: " " for ch in delims})
    return text.translate(table).split(" ")
|
da1cf1772184c84fd4b6f06500f6701c25686b86
| 78,161
|
def enclose_periods_in_braces(value):
    """
    Perform sanitization by enclosing any periods in square braces.
    Example: "domain.com" becomes "domain[.]com"
    :param value: The value to be sanitized
    :return: The sanitized value
    """
    # Split on '.' and rejoin with the defanged form.
    return '[.]'.join(value.split('.'))
|
8c0e585fc97dbc934966df2dc12c25bfe921992f
| 78,163
|
def read_uint(file):
    """Read a variable-length unsigned integer from a file.

    Each byte contributes its low 7 bits, least-significant group first;
    a set high bit (0x80) marks a continuation byte.

    Raises EOFError when the stream ends mid-value.
    """
    result = 0
    shift = 0
    while True:
        data = file.read(1)
        try:
            # ord() handles both bytes and single-character str reads;
            # at end of stream read(1) returns an empty value and ord raises.
            byte = ord(data)
        except TypeError:
            raise EOFError
        if not byte & 0x80:
            # Final byte: high bit clear.
            return result | (byte << shift)
        # Continuation: accumulate the low 7 bits and advance the shift.
        result |= (byte & 0x7F) << shift
        shift += 7
|
ed74180b7a22720adb6255df39cd1e21682f8f6e
| 78,164
|
def _normalize_data(data, mean, std):
    """Util function to normalized data based on learned mean and std.

    For mutable array-like inputs the augmented assignments operate IN
    PLACE, so the caller's object is modified; the same object is also
    returned for convenience. Immutable inputs (plain numbers) are only
    rebound locally, so callers must use the return value.
    """
    # NOTE(review): assumes std is nonzero and that mean/std broadcast
    # against data — confirm at call sites.
    data -= mean
    data /= std
    return data
|
5b411f39197caf41d28a7435a6d522821c3eb9a6
| 78,166
|
import requests
import ast
def prepare_api_request(request):
    """Takes a dict of the form:
    {"type": "POST",
    "url": "www.xyz.com/api/v1.0/order",
    "params: {"param_1": "start_time=1"},
    "headers": {"header_1": "header"},
    "data": {"data_1": "some_data"},
    "json": {"json_data": {"json_stuff": "data"}}
    }
    Only working with GET or POST currently

    Returns the requests.PreparedRequest built from those fields; the
    request is prepared but NOT sent.
    """
    # "type" is the HTTP method name ("GET"/"POST").
    req = requests.Request(request["type"])
    req.url = request["url"]
    req.headers = request["headers"] if "headers" in request else {}
    # NOTE(review): ast.literal_eval expects a *string* containing a Python
    # literal, yet the docstring above shows plain dicts for "data"/"params"/
    # "json" (literal_eval would raise on a dict) — confirm whether callers
    # actually pass string-encoded literals here.
    # NOTE(review): the default for data is [] while params/json default to
    # {} — looks inconsistent; verify which requests expects.
    req.data = ast.literal_eval(request["data"]) if "data" in request else []
    req.params = ast.literal_eval(request["params"]) if "params" in request else {}
    req.json = ast.literal_eval(request["json"]) if "json" in request else {}
    prepped = req.prepare()
    return prepped
|
0c7aabced68e3ad8533ee673bf1768b6a80dfcef
| 78,171
|
import math
def average_length_in_sentences(groundtruth):
    """
    Return the average length of all daily summaries (in sentences).

    The average is first computed per timeline over all of its daily
    summaries, then the mean of those per-timeline averages is taken and
    floored to an integer.

    Params:
        groundtruth (Groundtruth): Reference timelines.
    Returns:
        Average daily summary length (floored).
    """
    per_timeline_avgs = [
        sum(len(summary) for summary in tl.dates_to_summaries.values()) / len(tl.get_dates())
        for tl in groundtruth.timelines
    ]
    return math.floor(sum(per_timeline_avgs) / len(per_timeline_avgs))
|
cfdc7c8e6a9a29f1f1a7d4ce39198c781c543d90
| 78,177
|
import torch
def init_shift1d_nfold(shift, nfold=8, noise=1e-3):
    """
    Mimic TSM's hard-coded shift initialization.

    The first 1/nfold of channels shift one time step to the past (+1),
    the next 1/nfold shift one step to the future (-1), and the remaining
    channels get tiny uniform noise so they start near, but not exactly
    at, zero.
    """
    dim, channels = shift.size()
    assert dim == 1, "only works with rubiks1d"
    group = channels // nfold
    with torch.no_grad():
        shift[:, :group] = 1                    # shift to past
        shift[:, group : 2 * group] = -1        # shift to future
        # perturb the rest to stay away from zero
        shift[:, 2 * group :].uniform_(-noise, noise)
    return shift
|
960c8ba263c2157898550a9d0340c32a8461c2d8
| 78,179
|
def determine_special_faces(graph, dist):
    """Determine the special faces: those nodes whose 'distance' attribute
    is at least dist.
    Args:
        graph (Gerrychain Graph): graph to determine special faces of
        dist (numeric): threshold; nodes with 'distance' >= dist are special
    Returns:
        list: list of nodes which are special
    """
    special = []
    for node in graph.nodes():
        if graph.nodes[node]['distance'] >= dist:
            special.append(node)
    return special
|
c09f8c6cdefaca05e04576ad75decc81466ba57a
| 78,181
|
def pad_sequence(sequence, pad_tok=0, max_length=None):
    """Pad a batched dataset with shape (batch_size, variable seq_length).
    :param sequence: input sequences (list of token lists)
    :param pad_tok: padding token, default is 0
    :param max_length: max length of padded sequence; defaults to the
        longest sequence in the batch
    :return: (padded sequences, original lengths clipped to max_length)
    """
    if max_length is None:
        max_length = max(len(seq) for seq in sequence)
    padded, lengths = [], []
    for seq in sequence:
        # Truncate first, then right-pad up to max_length.
        truncated = seq[:max_length]
        padded.append(truncated + [pad_tok] * (max_length - len(truncated)))
        lengths.append(len(truncated))
    return padded, lengths
|
472d6ce3979be6acd6cf370bf5ef7b2b71c9f3b0
| 78,183
|
def get_anion_neighbors(site, structure, radius, anions, get_distance=False):
    """
    Get the neighboring anions of a site.
    Args:
        :param site: (Site) target site to find anion neighbors of
        :param structure: (Structure) structure that contains the target site
        :param radius: (float) search radius for neighbors
        :param anions: (List of Strings) species considered anions
        :param get_distance: (boolean) whether to return distances as well
        :return: list of neighbor Sites, or of (Site, distance) entries when
            get_distance is True
    """
    results = []
    for neighbor in structure.get_neighbors(site, radius):
        neighbor_site, distance = neighbor[0], neighbor[1]
        # Keep only anion species strictly inside the radius.
        if neighbor_site.species_string in anions and distance < radius:
            results.append(neighbor if get_distance else neighbor_site)
    return results
|
e9473f77ee2b5006e79503ebb14890c0e85904cf
| 78,193
|
def camelcase(text):
    """Convert text to camel case.
    Notes:
        The first letter of each whitespace-separated word is uppercased;
        existing uppercase letters are left unchanged.
    Args:
        text: string, text to convert (may be None or empty)
    Returns:
        string, converted text; None in, None out.
    """
    if text is None:
        return
    if not text:
        return text
    # str.split() never yields empty strings, so no extra filtering needed.
    return ''.join(word[0].upper() + word[1:] for word in text.split())
|
e27ccce9c9fffe62e73a552270a9358f248b759a
| 78,197
|
def do_remove_first(s, remove):
    """
    Remove only the first occurrence of ``remove`` from ``s``.
    Mirrors Liquid's remove_first standard filter:
    https://github.com/Shopify/liquid/blob/b2feeacbce8e4a718bde9bc9fa9d00e44ab32351/lib/liquid/standardfilters.rb#L218
    """
    # Non-string arguments are stringified first, as Liquid does.
    target = str(remove)
    return s.replace(target, '', 1)
|
286498e8cc8149620a188d98cccf9a360e7367bc
| 78,204
|
def _high_bit(value: int) -> int:
"""Return index of the highest bit, and -1 if value is 0."""
return value.bit_length() - 1
|
d01e575f36b1644a91eb9c08dc21bf3b099959be
| 78,206
|
def is_iterable(something):
    """
    Test whether *something* is an iterable other than a string.

    Returns
    -------
    bool
        True if an iterable other than str, False otherwise.
    """
    try:
        iter(something)
    except TypeError:
        return False
    # The old `True and not isinstance(...)` conjunction was redundant.
    return not isinstance(something, str)
|
58257390972ff9887676fe3d938cfe25bb2321cf
| 78,207
|
def select_range(df, window, range_column="t", include_end=True):
    """
    Select a range of data for one or more Pandas DataFrames at once.
    Designed for use with the output of the bout methods.
    Parameters
    ----------
    df : pandas.DataFrame/list
        The (list of) DataFrame to use
    window : list
        2-list with the minimum and maximum value of the range to select
    range_column : str
        Column used for the range test (usually a timestamp)
    include_end : bool
        Whether the maximum value itself is part of the range
    Returns
    -------
    list(pandas.DataFrame)
        A list of DataFrame selections
    """
    frames = df if isinstance(df, list) else [df]
    low, high = window[0], window[1]
    selected = []
    for frame in frames:
        col = frame[range_column]
        # Upper bound is inclusive or exclusive depending on include_end.
        upper_mask = (col <= high) if include_end else (col < high)
        selected.append(frame[(col >= low) & upper_mask])
    return selected
|
7850d2a1dc2c19b923aae1cba6f1f71dae568e0e
| 78,208
|
def loop(clip, n=None, duration=None):
    """
    Returns a clip that plays the current clip in an infinite loop.
    Ideal for clips coming from GIFs.
    Parameters
    ----------
    n
        Number of times the clip should be played. If `None` the
        clip will loop indefinitely (i.e. with no set duration).
    duration
        Total duration of the clip. Can be specified instead of n.
        When both are given, n takes precedence.
    """
    previous_duration = clip.duration
    # Wrap playback time with a modulo so the position cycles over the
    # source length; the same transform is applied to mask and audio.
    clip = clip.time_transform(
        lambda t: t % previous_duration, apply_to=["mask", "audio"]
    )
    # An explicit repeat count n overwrites any passed duration.
    if n:
        duration = n * previous_duration
    if duration:
        clip = clip.with_duration(duration)
    return clip
|
60c193f6c0d8a974cef33ec80e21400bf5269096
| 78,211
|
import json
def json_response(response):
    """Decode a UTF-8 JSON response body into Python objects."""
    body = response.data.decode('utf8')
    return json.loads(body)
|
7908d7a3b749313ac664b83c8f89871535c521cd
| 78,213
|
def sort_idx(m, reverse=False):
    """Return the indices of m in sorted order (default: ascending order)"""
    # Each index is keyed by the element it points at.
    return sorted(range(len(m)), key=m.__getitem__, reverse=reverse)
|
abaf52bd00f7f540ade2512c1c750d348e761b44
| 78,214
|
def fahr_to_celsius(temp_fahr):
    """Convert a temperature from Fahrenheit to Celsius.
    Parameters
    --------
    temp_fahr: int | float
        Input temperature in Fahrenheit (should be a number)
    Returns
    ------
    Temperature in Celsius (float)
    """
    # C = (F - 32) / 1.8
    return (temp_fahr - 32) / 1.8
|
f8c98f6b80e6b235715190f7e8e73180d1227857
| 78,215
|
def remove_first_line(string):
    """
    Return a copy of the string without its first line (needed for
    descriptions which differ in one line).
    :param string:
    :return: copy of string minus everything up to and including the first newline.
    """
    # partition keeps everything after the first '\n'; a string with no
    # newline yields the empty string.
    _, _, rest = string.partition('\n')
    return rest
|
2cd0c57801cf4f86c53ac9a61bc5ab0874338253
| 78,218
|
def getList(listFile):
    """
    Import a text file line by line into a list.

    Each entry keeps its trailing newline character, matching the old
    readline loop's behavior.
    """
    # `with` closes the handle even on error (the old version never
    # closed it); readlines() replaces the manual readline loop.
    with open(listFile) as handle:
        return handle.readlines()
|
b00ccf82774502a837755796466f0aaf38588ebd
| 78,224
|
def argument_set_new(arg_i, arg_history):
    """
    Return the index of the first entry in arg_history equal to arg_i
    (the existing branch number), or -1 to indicate a new branch.
    """
    # next() with a default collapses the explicit scan loop; an empty
    # history naturally yields -1.
    return next((idx for idx, known in enumerate(arg_history) if arg_i == known), -1)
|
d62d0ebdf264094730ea9e429ca9f34df178ea09
| 78,227
|
def _read_Btw_item(data,control):
    """Read a Btw item from the start of DATA.
    Returns a tuple containing the item read (as a string) and what is
    left of DATA.
    The order of octets is:
    "B" MSOF (most significant octet first)
    "b" LSOF (least significant octet first)
    (as discussed in 6.4.3.3 h))
    In the case of a "b" format, we reorder the octets into MSOF
    """
    # We have a Btw or btw item
    # If it is complex, it is double the size
    # (control.control[1] == "5" marks the complex variant)
    if control.control[1] == "5":
        size = control.size * 2
    else:
        size = control.size
    # First off, we can extract the relevant number of octets
    item = data[:size]
    data = data[size:]
    # And now interpret them
    if control.control[0] == "b":
        # LSOF form - reverse it by copying octets back-to-front.
        # NOTE(review): the loop bound is control.size, not the doubled
        # `size`, so for complex ("5") items only the first half of the
        # octets is reversed and `item` is truncated to half its length —
        # confirm whether that is intended.
        thing = ""
        for count in range(control.size-1,-1,-1):
            thing = thing + item[count]
        item = thing
    # So we've now got the MSOF form, regardless of the format
    # As of yet, we don't do anything with it...
    # NOTE(review): `data` here is indexed and concatenated as str —
    # presumably str (not bytes) input; verify against callers.
    return (item,data)
|
7bb6b546894a644d1e454264124275f64aa77a43
| 78,235
|
from typing import Any
from typing import get_args
def is_type(tp: Any) -> bool:
    """isinstance is not enough because in py39: isinstance(list[int], type) == True"""
    if not isinstance(tp, type):
        return False
    # Parameterized generics (e.g. list[int]) have non-empty get_args().
    return not get_args(tp)
|
07817aec17ce3dbacf3d96bcfb5a3d6f408eef51
| 78,240
|
import torch
def grad(x, y, model, params, criterion):
    """Compute gradients of the task loss with the computation graph intact.
    Arguments:
        x (torch.Tensor): input tensor.
        y (torch.Tensor): target tensor.
        model (Warp): warped model.
        params (list): parameters to differentiate the loss with respect to.
        criterion (fun): task loss criterion.
    """
    # Move inputs onto whatever device the model lives on.
    device = next(model.parameters()).device
    inputs = x.to(device, non_blocking=True)
    targets = y.to(device, non_blocking=True)
    predictions = model(inputs, cache_parameters=False)
    task_loss = criterion(predictions, targets)
    # create_graph=True keeps the graph so higher-order grads remain possible.
    return torch.autograd.grad(task_loss, params, create_graph=True)
|
aaf7eb1eb4a3c3b5405d0350020c5893b1118f89
| 78,244
|
def polygonal_number(n, k):
    """
    Return the kth n-gonal number P(n, k), given by the general formula:
        P(n, k) = [(n - 2)k^2 - (n - 4)k] / 2
    (The previous docstring misstated the second term as (k - 4)n; the
    code below matches the standard formula.)
    """
    # The numerator is always even, so floor division is exact and avoids
    # float rounding for large k (int(x / 2) went through a float).
    return ((n - 2) * k ** 2 - (n - 4) * k) // 2
|
df67d20f9b3d4bea30ffdbb390a7a863a3a1467a
| 78,248
|
def dumb_factor(x, primeset):
    """ If x can be factored over the primeset, return the
    set of pairs (p_i, a_i) such that x is the product
    of p_i to the power of a_i.
    If not, return []
    """
    pairs = []
    remaining = x
    for prime in primeset:
        power = 0
        # Divide out this prime completely before moving on.
        while remaining % prime == 0:
            remaining //= prime
            power += 1
        if power:
            pairs.append((prime, power))
    # Any leftover factor means x is not smooth over primeset.
    return pairs if remaining == 1 else []
|
d01a1faa90a62521b0e9881e2a62791b69482087
| 78,249
|
import re
def format_input(text):
    """
    Format text for storage: collapse whitespace runs to single spaces
    and trim both ends.
    """
    collapsed = re.sub(r"\s+", " ", text)
    return collapsed.strip()
|
0ee870734664375c842a4aba6a983f28f25b52a6
| 78,259
|
def is_eresource_callno(callno: str) -> bool:
    """
    Check whether a call number denotes an electronic resource.
    Args:
        callno: call number string (non-strings yield False)
    Returns:
        bool
    """
    try:
        normalized = callno.lower()
    except AttributeError:
        # Non-string input (e.g. None) is never an e-resource call number.
        return False
    # NYPL call numbers start with "enypl"; BPL uses fixed labels.
    return normalized.startswith("enypl") or normalized in ("ebook", "eaudio", "evideo")
|
88092c0a7fbf5b95937b91687191f79fc5bc5e53
| 78,265
|
def convert_endianness(array: bytes):
    """
    Switch between big-endian and little-endian byte order.
    Bitcoin stores bytes in little-endian order while humans prefer
    big-endian, so values are converted before display.
    Returns a new bytes object (bytes are immutable).
    """
    assert isinstance(array, bytes)
    return bytes(reversed(array))
|
a7dc068b749e24742f54a41ce159572bd96f3b2c
| 78,272
|
def workerRunner(worker):
    """
    Run a worker by delegating to its run() method.
    :return: Finished worker (whatever run() returns).
    """
    result = worker.run()
    return result
|
d5af901106628c6b3927fcf1f63ac92e6f9ee18b
| 78,273
|
import torch
import math
def cosine_cutoff(input: torch.Tensor, cutoff: torch.Tensor):
    r""" Behler-style cosine cutoff.
    .. math::
       f(r) = \begin{cases}
        0.5 \times \left[1 + \cos\left(\frac{\pi r}{r_\text{cutoff}}\right)\right]
          & r < r_\text{cutoff} \\
        0 & r \geqslant r_\text{cutoff} \\
        \end{cases}
    Args:
        input: interatomic distances.
        cutoff (float, optional): cutoff radius.
    """
    # The docstring is now a raw string: the LaTeX backslashes previously
    # produced invalid escape sequences (\l, \c, ...), which newer Python
    # versions warn about and will eventually reject.
    # Compute values of cutoff function
    input_cut = 0.5 * (torch.cos(input * math.pi / cutoff) + 1.0)
    # Remove contributions beyond the cutoff radius
    input_cut *= (input < cutoff).float()
    return input_cut
|
8c10baa10cd26856cfb8338c4921d11f94e131af
| 78,275
|
import torch
def get_xy(dls):
    """Grabs data as tensors from a DataLoaders instance."""
    xs, ys = [], []
    for batch in dls.train:
        items = list(batch)
        # Multi-input batches get their feature tensors concatenated
        # along the last dimension; the final item is always the target.
        if len(items) > 2:
            xs.append(torch.cat(items[:-1], dim=-1))
        else:
            xs.append(items[0])
        ys.append(items[-1])
    return torch.cat(xs, dim=0), torch.cat(ys, dim=0)
|
8380d86262f8ba25d09134da780264febd3f534e
| 78,285
|
def generate_makefile(input_dsf_file, output_dgb_file, output_trace_file, max_n, dgasm_line, dgsim_line):
    """
    Generates a very simple Makefile with trace, dgb, html and clean targets.

    Note: max_n is accepted for interface compatibility but is not used
    in the generated Makefile text.
    """
    rules = [
        f"{output_trace_file}:{output_dgb_file}\n\t{dgsim_line}\n",
        f"{output_dgb_file}:{input_dsf_file}\n\t{dgasm_line}\n",
        f"html:{output_trace_file}\n\txdg-open {output_trace_file}\n",
        "clean:\n\t rm *.dgb\n\t rm *.html\n\t rm *.css\n",
    ]
    # Joining rules (each ending in '\n') with '\n' reproduces the
    # blank line between targets.
    return "\n".join(rules)
|
e374bd727c12479af39996846fef715cee1e5147
| 78,286
|
def get_outputs(db, job_id):
    """
    :param db:
        a :class:`openquake.server.dbapi.Db` instance
    :param job_id:
        ID of a calculation.
    :returns:
        A sequence of :class:`openquake.server.db.models.Output` objects
    """
    query = 'SELECT * FROM output WHERE oq_job_id=?x'
    return db(query, job_id)
|
e4a60c0adf71f8bb65e82ea61b78aea6d5afa99a
| 78,290
|
def points_bounds(points):
    """Return bounding rect of 2D point list (as 4-tuple of min X, min Y,
    max X, max Y)."""
    # Seed all four extremes from the first point, then fold in the rest.
    min_x = max_x = points[0][0]
    min_y = max_y = points[0][1]
    for x, y in points[1:]:
        min_x = min(min_x, x)
        min_y = min(min_y, y)
        max_x = max(max_x, x)
        max_y = max(max_y, y)
    return (min_x, min_y, max_x, max_y)
|
4c0cbd47bef32fe5d3c1787789d806016d0db4ff
| 78,291
|
def get_filename(name):
    """
    Build the conventional test file name of the form test_<name>.py.
    :param name: the name of the test
    """
    return f"test_{name}.py"
|
171b8069fcf893bb85c4046cceae89340ef93925
| 78,292
|
def read_network(file):
    """
    Read a (switched) Boolean network from a text file:
    Line 1: number of state variables
    Line 2: number of control inputs
    Line 3: number of sub-networks
    Lines 4..: one transition matrix per sub-network (linear
    representation of a logical matrix, whitespace-separated)
    :param file: a text file
    :return: (n, m, w, Ls), where
        n: number of state variables
        m: number of control inputs
        w: number of sub-systems
        Ls: a list of transition matrices, one per sub-system
    """
    with open(file, 'r') as fh:
        n = int(fh.readline().strip())
        m = int(fh.readline().strip())
        w = int(fh.readline().strip())
        # State space has 2^n points; control space 2^m.
        N = 2 ** n
        M = 2 ** m
        Ls = []
        for _ in range(w):
            row = fh.readline().strip()
            assert row, f'{w} transition matrices must be provided!'
            entries = row.split()
            assert len(entries) == M * N, f'The transition matrix must have {M * N} columns'
            Ls.append([int(entry) for entry in entries])
    return n, m, w, Ls
|
76907cb8342e3f96b88f519cb09a2c28bc6e137e
| 78,297
|
import re
def has_parentheses(formula):
    """
    Check whether the formula still contains parentheses.
    :param formula: str
    :return: boolean
    """
    return re.search(r"[()]", formula) is not None
|
8f194b182d38bcd903be1a917b1947575718722f
| 78,298
|
from pathlib import Path
def get_tile_id(tile_path):
    """
    Given the path to an SRTM1 or SRTM3 tile, return the tile ID (a string),
    e.g. "S36E174" for "bingo/S36E174.SRTMGL1.hgt.zip".
    Per the SRTM convention the tile ID is the first dot-separated part of
    the file name.
    """
    stem = Path(tile_path).stem
    # Everything before the first remaining dot is the tile ID.
    return stem.partition('.')[0]
|
1a4b07b3059915d821438ec4e5d45d93265335b6
| 78,300
|
def create_experiment_on_server(experiment, uploader):
    """
    :type experiment: mytardis_models.Experiment
    :type uploader: mytardis_uploader.MyTardisUploader
    :return: The url path of the experiment created.
    :rtype: str
    """
    packaged = experiment.package()
    return uploader.create_experiment(packaged)
|
3dc7482d50be634479840a0a522e8d9acfaf8064
| 78,302
|
def get_maximum_time(note_tracks):
    """
    Return the largest end_time among all notes; required to know when
    the video should end.
    """
    maximum_time = -999999.9  # sentinel returned when no notes exist
    for track in note_tracks:
        for pitch_list in track:
            if pitch_list:
                # Only the final note of each pitch list is inspected
                # (notes presumably ordered by time — matches original logic).
                candidate = pitch_list[-1].end_time
                if candidate > maximum_time:
                    maximum_time = candidate
    return maximum_time
|
7c6c65df9b0a99591297e7a6633e662b695027e5
| 78,305
|
def get_parentdirs(path):
    """Generate a list of all parent directories for a path.
    :param path: Path to find parent directories for.
    :return: List of all parent directories for the path (including the path).
    """
    # Split path into a list for each level, appending '/' to each piece,
    # e.g. "/a/b/c/" -> ['/', 'a/', 'b/', 'c/'].
    # NOTE(review): the [:-1] slice drops the final split component, so a
    # path that does NOT end in '/' loses its last segment and does not
    # appear in its own result, despite the docstring. Confirm whether
    # callers always pass '/'-terminated paths.
    path_split = [x + '/' for x in path.split('/')[:-1]]
    # Build up all parent directories and return as a list
    parentdirs = []
    # Walk from the deepest level upward, re-joining the shrinking prefix
    # each iteration (deepest directory first in the result).
    for path in path_split[1:]:
        parentdirs.append(''.join(path_split))
        path_split = path_split[:-1]
    return parentdirs
|
c138b12e14ea96827bfdea7a5081f35131863fd5
| 78,313
|
import logging
def fpiterate(func, _x0, tol, maxits=100):
    """
    Fixed-point iteration: repeatedly apply func starting from _x0 until
    successive iterates differ by at most tol or maxits is reached.
    """
    previous = _x0
    current = _x0
    iteration = 0
    # First pass always runs (iteration == 0 short-circuits the tolerance test).
    while iteration < maxits and (iteration == 0 or abs(current - previous) > tol):
        previous = current
        current = func(current)
        iteration += 1
        logging.info('k = {:2d}, x = {:15.8e}'.format(iteration, current))
    logging.info('terminated after %s iterations', iteration)
    logging.info('x = %s f(x) = %s', current, func(current))
    return current
|
ab04a0031b69c0ebb4fece1101eb0c73eeecf590
| 78,314
|
def norm_lon(x):
    """ Normalize longitude x into range [0,360]. """
    # 360 maps to itself (not 0); everything else wraps modulo 360.
    return 360 if x == 360 else x % 360
|
806ba03353b29ca26e4e469d85e739d5d416ddac
| 78,316
|
import click
def cb_bbox(ctx, param, value):
    """
    Click callback to handle ``--bbox`` syntax and validation.
    Parameters
    ----------
    ctx : click.Context
        Ignored.
    param : click.Parameter
        Ignored.
    value : tuple
        x_min, y_min, x_max, y_max
    Raises
    ------
    click.BadParameter
        When either max is smaller than its min.
    Returns
    -------
    tuple
        (x_min, y_min, x_max, y_max), or None for an empty value
    """
    if not value:
        return None
    x_min, y_min, x_max, y_max = value
    if x_max < x_min or y_max < y_min:
        # NOTE: ' '.join expects string elements; numeric bbox values
        # would raise TypeError here (behavior preserved from original).
        raise click.BadParameter('min exceeds max for one or more dimensions: {0}'.format(' '.join(value)))
    return value
|
3b4ba822ccb1d53b91618c2cb864d838c80876cb
| 78,318
|
from string import punctuation
import re
def normalize_text(text, method='str'):
    """
    Lower-case *text* and strip digits plus punctuation/special characters.

    Parameters
    ----------
    text : str
    method : {'str', 'regex'}, default 'str'
        str: cleans digits and puntuations only ('!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~')
        regex : clean digits and all special characters
    Returns
    -------
    text : str
    """
    # Convert to lower case
    text = text.lower()
    if method == 'str':
        # Remove digits
        text = ''.join(c for c in text if not c.isdigit())
        # Remove punctuation
        text = ''.join(c for c in text if c not in punctuation)
        # Remove extra spaces
        text = " ".join(text.split())
    elif method == 'regex':
        # Remove digits
        text = re.sub(r'\d+', ' ', text)
        # Remove all special characters
        text = re.sub(r'\W+', ' ', text)
        # FIX: raw string -- '\s' in a plain literal is an invalid escape
        # sequence (DeprecationWarning today, an error in future Python).
        text = re.sub(r'\s\s+', ' ', text)
    return text
|
6baae1a8e352b4d2ec608fe3091d3f11a20f5d5d
| 78,325
|
def hello(bot, args):
    """Return a fixed salute message.

    Deliberately left undecorated (per the original note) to observe the
    framework's behaviour without registration; ``bot`` and ``args`` are
    accepted but unused.
    """
    greeting = "Hello sir, welcome"
    return greeting
|
a364a100ae637fe683a9eda716cc6fa958a054e3
| 78,327
|
def listToIndex2item(L):
    """converts list to dict of list index->list item"""
    return dict(enumerate(L))
|
bc350045d72e5f7d7338a433782f7a56e4f0db58
| 78,329
|
def max_sequence(arr):
    """Find the largest sum of any contiguous subarray (Kadane's algorithm).

    The empty subarray (sum 0) is always allowed, so the result is never
    negative.
    """
    best = 0
    running = 0
    for value in arr:
        running += value
        if running < 0:
            running = 0
        if running > best:
            best = running
    return best
|
d17f69534eda79649236438df7a1576ebbea789c
| 78,330
|
def iterate_json_keys_for_value(jsonObj, key, value):
    """Return True if *jsonObj* contains a toplevel *key* set to *value*."""
    # any() short-circuits on the first match, like the original loop.
    return any(entry[key] == value for entry in jsonObj)
|
f43acdbfcb0ebc5bd68e39180458cf89f5e1cd04
| 78,334
|
def copy_attribute(name, src, dst):
    """
    Copy the value stored under ``name`` from the source object to the
    destination object, when the source is non-empty and holds that key.
    @param name: field name
    @param src: source object
    @param dst: destination object
    @return: destination object
    """
    present = bool(src) and name in src
    if present:
        dst[name] = src[name]
    return dst
|
f74587764727728f9ed216b01b13cb14b04b655a
| 78,338
|
def _occupied_cols(piece):
    """ Returns a list of which cols are occupied in a piece.
    Ex: >>> list(map(_occupied_cols, PIECE_I))
    [[0, 1, 2, 3], [2], [0, 1, 2, 3], [1]]
    """
    occupied = []
    # zip(*piece) transposes the grid so each tuple is one column.
    for col_index, column in enumerate(zip(*piece)):
        if any(column):
            occupied.append(col_index)
    return occupied
|
c647966c9c96f783148c2d30edcb20c309f2de61
| 78,341
|
import glob
def _parse_batch_params(params):
    """Transform batch parameter strings into lists of tuples.
    Parameters
    ----------
    params : list of str
        The string should have the form "key=val1,val2,val3".
    Returns
    -------
    A generator that, for each key, yields a list of key-value tuple pairs.
    """
    seen_keys = set()
    for spec in params:
        if "=" not in spec:
            raise ValueError(
                "param value should be formatted as 'key=value,...'")
        key, _, raw_values = spec.partition("=")
        if key in seen_keys:
            raise ValueError("Key '{}' was given more than once".format(key))
        seen_keys.add(key)
        pairs = []
        for candidate in raw_values.split(","):
            # Expand shell-style wildcards only when the value contains any.
            expanded = glob.glob(candidate) if glob.has_magic(candidate) else [candidate]
            pairs.extend((key, match) for match in expanded)
        yield pairs
|
8d67d19cc6cc55b81c289d5be7a7bd93bbde5b92
| 78,344
|
def vanilla_grad_desc(para, grad_para, lr):
    """ Update function for the vanilla gradient descent: w = w - learningRate * grad_w
    :param para: Parameter to be updated
    :param grad_para: Gradient at the parameter
    :param lr: learning rate
    :return: updated parameter
    """
    step = lr * grad_para
    return para - step
|
66ecd06ed70c5c95bde2048dc001d282d6ab424c
| 78,345
|
from typing import List
def get_module_parents(full_module_name: str) -> List[str]:
    """
    Returns a list module's parent packages and subpackages.
    Example: package1.subpackage1.module1 -> [package1, package1.subpackage1]
    """
    # Drop the module itself; remaining parts are the package chain.
    ancestors = full_module_name.split('.')[:-1]
    # Build every prefix, shortest first.
    return ['.'.join(ancestors[:depth]) for depth in range(1, len(ancestors) + 1)]
|
8f2c6c7ae79a999a7c12c8f9208268a47c5b207c
| 78,356
|
def __prep_importance(
        df_vim_structured, s_vim_importance, style="tail", shadow="null",
        num_features=50):
    """Given a DataFrame containing the structured VIM scores and a Series
    containing mean VIMs, finds the n head or tail features (and the top
    shadow, if it's not among the n), and returns the columns of
    df_vim_structured for those features.
    """
    s_sorted = s_vim_importance.copy()
    s_sorted.sort_values(inplace=True)
    # subset the top or bottom features
    s_truncated = None
    # BUG FIX: the original used ``style is 'tail'`` -- identity comparison
    # against a string literal, which is fragile (interning-dependent) and
    # a SyntaxWarning on modern Python. Compare by equality instead.
    if style == 'tail':
        # gives us the names of the n best features
        s_truncated = s_sorted.tail(num_features)
    else:
        # gives us the names of the n worst features
        s_truncated = s_sorted.head(num_features)
    # ensure that the passed shadow feature is included
    if shadow and shadow not in s_truncated:
        s_truncated[shadow] = s_sorted[shadow]
    return df_vim_structured[s_truncated.index]
|
b97c5b7ab29c9897b6829600a27b4bea0efcd7b8
| 78,367
|
def xor(data, key):
    """
    XOR ``data`` with ``key`` byte-by-byte, truncated to the shorter input.
    :param data: bytes-like input
    :param key: bytes-like key
    :return: bytearray of XORed bytes
    """
    result = bytearray()
    for left, right in zip(bytearray(data), bytearray(key)):
        result.append(left ^ right)
    return result
|
490b0d441f2e7590bb55276c6bc3621b4aca1cb0
| 78,374
|
from typing import List
import math
def generate_tt(num_variables: int) -> List[List[bool]]:
    """
    Generates a truth table for all boolean combinations
    of the possible variables.

    Each row is the zero-padded binary encoding of a row index, with a
    "0" bit mapped to True (matching the original encoding).
    """
    # FIX: use an exact integer power (bit shift) instead of math.pow,
    # which round-trips through floating point and loses precision for
    # large exponents.
    row_count = 1 << num_variables
    tt = []
    for i in range(row_count):
        bits = format(i, '0{}b'.format(num_variables))
        tt.append([bit == "0" for bit in bits])
    return tt
|
f08711dcf35bdb0e6b1df5352aec90307d88b6d5
| 78,383
|
def replace_at_idx(tup, i, val):
    """Replaces a value at index *i* of a tuple *tup* with value *val*
    :param tup: tuple to be updated
    :param i: index at which the value should be replaced
    :type i: integer
    :param val: new value at index i
    :type val: value
    :return: new tuple with replaced value
    """
    # Go via a mutable list so negative indices work exactly like list
    # assignment would.
    cells = list(tup)
    cells[i] = val
    return tuple(cells)
|
db4a8bd4d36eec5a393e7f45f7059b38f8c19715
| 78,384
|
def M_from_t(t, n, M0, t0):
    """
    Mean anomaly at time ``t``: M = M0 + n * (t - t0).
    Parameters
    ----------
    t
        time to find M for
    n
        mean motion
    M0
        reference mean anomaly at time t0
    t0
        reference time t0
    Note that M = 0 at pericentre so if t0 = pericentre passage time then M0 = 0
    """
    elapsed = t - t0
    return M0 + n * elapsed
|
3f40608013c92fe6309618165ac7e991aa833daa
| 78,387
|
def read_unique_lines(data_file):
    """Loads the data from the given files as lines (unique),
    in preparation for training or testing.

    Note: like the original, the returned order is unspecified (set order).
    """
    with open(data_file) as handle:
        return list(set(handle))
|
bc72e195b38bcab2d158be9c4d5da7279886a955
| 78,390
|
def node_type(node) -> str:
    """
    Get the type of the node.
    This is the Python equivalent of the
    [`nodeType`](https://github.com/stencila/schema/blob/bd90c808d14136c8489ce8bb945b2bb6085b9356/ts/util/nodeType.ts)
    function.
    """
    if node is None:
        return "Null"
    # Ordered checks: bool must precede the numeric test because bool is
    # a subclass of int.
    for kinds, label in (
        (bool, "Boolean"),
        ((int, float), "Number"),
        (str, "Text"),
        ((list, tuple), "Array"),
    ):
        if isinstance(node, kinds):
            return label
    if isinstance(node, dict):
        declared = node.get("type")
        if declared is not None:
            return declared
    return "Object"
|
4b04f4c94eaec8c9746fe7db5174b045e1f9a14b
| 78,401
|
from typing import List
def int_to_rgba(v: int) -> List[float]:
    """Get rgba (0-1) e.g. (1, 0.5, 0, 1) from integer.
    >>> print(int_to_rgba(0))
    [0.0, 0.0, 0.0, 0.0]
    >>> print([round(x, 3) for x in int_to_rgba(100100)])
    [0.0, 0.004, 0.529, 0.016]
    """
    raw = v.to_bytes(4, signed=True, byteorder="big")
    return [channel / 255 for channel in raw]
|
0d7ae444420d9336bce10d781dafa805fd21e39f
| 78,404
|
def determine_set_y_logscale(cfg, metadata):
    """
    Determine whether to use a log scale y axis.
    Parameters
    ----------
    cfg: dict
        the opened global config dictionairy, passed by ESMValTool.
    metadata: dict
        The metadata dictionairy for a specific model.
    Returns
    ----------
    bool:
        Boolean to flag whether to plot as a log scale.
    """
    # Precedence: model metadata overrides the global config; default True.
    return metadata.get('set_y_logscale',
                        cfg.get('set_y_logscale', True))
|
c6def09b9b7468d9e5aec073d73ab9ad29146ea5
| 78,406
|
import re
def vet_pdb_id(pdbid):
    """PDB ID must be exactly four characters long, alphanumeric, and
    the first character must be an integer.

    Returns True when *pdbid* is a valid PDB identifier, else False.
    """
    # fullmatch anchors both ends without re.match's '$'-matches-before-
    # trailing-newline quirk, making the original's separate length and
    # isalnum() guards redundant.
    return re.fullmatch(r'[0-9][A-Za-z0-9]{3}', pdbid) is not None
|
c661855de4852312450f6bf4502b83e77bae0acd
| 78,407
|
from datetime import datetime
def get_course_dates(course_dates_df):
    """
    Get the start and end dates for the course
    """
    def parse(col_name):
        """Parse row 0 of *col_name* as a '%Y-%m-%d' datetime."""
        return datetime.strptime(course_dates_df[col_name][0], '%Y-%m-%d')
    return (parse('CourseRunStartDate'), parse('CourseRunEndDate'))
|
a665f172c6653e0af44c2f9b0d298c28867e98e5
| 78,413
|
from pathlib import Path
from typing import Union
from typing import Dict
from typing import Any
from typing import List
import json
def read_json_file(
    src_path: Path
) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
    """Reads json file and returns either python dict or python list."""
    if src_path.exists():
        return json.loads(src_path.read_text())
    raise ValueError(
        'Provided json file does not exist!'
    )
|
dd3d5bfe8eaeb6986c41d9a542cbaab7c5ec7d49
| 78,414
|
def create_params(options, base_params, keys=None):
    """
    Given a dictionary of parameters and possible values,
    creates a list of the cross products of all parameter values.
    Note that b has [3] as an element. The cross product is shallow
    so [3] will be a value in the output.
    Example:
        options = { a: [1,2], b: [[3],4] }
        returns: [ {a:1, b:[3]}, {a:1, b:4}, {a:2, b:[3]}, {a:2, b:4} ]
    :param options: dict mapping parameter name -> value or list of values
    :param base_params: dict of fixed parameters included in every result
    :param keys: remaining keys to expand (used internally by the recursion)
    :return: list of dicts, one per combination
    """
    if keys is None:
        # BUG FIX: dict.keys() returns a view on Python 3, which does not
        # support indexing/slicing (keys[0], keys[1:]) -- materialize it.
        keys = list(options.keys())
    if len(keys) == 0:
        return [dict(base_params)]
    ret = []
    key = keys[0]
    vals = options[key]
    if not isinstance(vals, list):
        vals = [vals]
    for val in vals:
        param = dict(base_params)
        param[key] = val
        ret.extend(create_params(options, param, keys[1:]))
    return ret
|
075743f723d7fc81b9192730d77dd95d5e285bb6
| 78,418
|
def Sn(x, y):
    """ Calculate Sn (sensitivity: TP / (TP + FN))
    Parameters:
    -----------
    'x' (numpy array): predictions
    'y' (numpy array): observations
    Returns:
    --------
    'Sn'
    """
    true_pos = float(sum(x + y == 2))
    false_neg = float(sum(x - y == -1))
    denom = true_pos + false_neg
    if denom == 0:
        return 0.
    return true_pos / denom
|
0aaf87fd778b9e1bcde710906da5862568112266
| 78,424
|
def selection_smallest_first(lst):
    """Selection sort: repeatedly move the smallest remaining element to
    the front of the unsorted region. Sorts *lst* in place and returns it.
    """
    n = len(lst)
    for front in range(n):
        # First index of the minimum in the unsorted tail (matches the
        # original's strict '<' comparison).
        best = min(range(front, n), key=lst.__getitem__)
        if best != front:
            lst[front], lst[best] = lst[best], lst[front]
    return lst
|
137baeb1fc8f63227028a61f22b23a56e2976495
| 78,426
|
def msb_32(val) -> int:
    """Returns the MSB (bit 31) of a 32 bit value."""
    return 1 if val & (1 << 31) else 0
|
177fad51198945ab7eae40a010761078108fb62f
| 78,429
|
def train_model(model, data, epochs=1000):
    """Training Routine: compile the model (MSE loss, Adam optimizer, MAE
    metric) and fit it on the supplied train/test arrays.

    :param model: Model exposing Keras-style ``compile``/``fit`` methods
    :type model: tf.keras.Model object
    :param data: Mapping holding the arrays under the keys 'x_train',
        'y_train', 'x_test' and 'y_test' (indexed as a dict, not a tuple)
    :type data: dict
    :param epochs: Number of Epochs, defaults to 1000
    :type epochs: int, optional
    :return: History of training, as returned by ``model.fit``
    :rtype: History object (its ``.history`` attribute holds the metrics dict)
    """
    # Unpack the train/test split from the data mapping.
    x_train = data['x_train']
    y_train = data['y_train']
    x_test = data['x_test']
    y_test = data['y_test']
    # Compile with a fixed regression setup.
    model.compile(
        loss='mse',
        optimizer='adam',
        metrics=['mae'],
    )
    # Train silently (verbose=0); the test split doubles as validation data.
    history = model.fit(
        x=x_train,
        y=y_train,
        batch_size=20,
        epochs=epochs,
        validation_data=(x_test, y_test),
        # callbacks=[TqdmCallback()],
        verbose=0,
    )
    return history
|
8c825d3572c4143aed5cb0edf146fc3661f724a7
| 78,430
|
def select_desired_features(full_features, desired_features, feature_dict):
    """
    Reduce a feature matrix down to only the requested features.

    Feature names in ``desired_features`` must match keys of
    ``feature_dict``, which maps each name to its column index in
    ``full_features``.
    """
    columns = [feature_dict[name] for name in desired_features]
    return full_features[:, columns]
|
64720e093ceccd0035b9a7ba8d05c7e6f547c467
| 78,436
|
def getTimeString(seconds):
    """
    Get a pretty time string, using hours, minutes, seconds and milliseconds as required.
    :param seconds: The desired time span, given in seconds. Can be an int or a float.
    :return: A string representing the desired time span, given in hours, minutes, seconds and milliseconds.
    """
    units = list()
    msecs = (seconds % 1) * 1000
    if msecs >= 1:
        # BUG FIX: milliseconds range over [0, 1000); the original's
        # ``% 60`` mangled any value >= 60 ms (e.g. 500 ms shown as "20ms").
        units.append('{0}ms'.format(int(msecs)))
    units.append('{0}s'.format(int(seconds % 60)))
    minutes = seconds / 60
    if minutes >= 1:
        units.append('{0}m'.format(int(minutes % 60)))
    hours = minutes / 60
    if hours >= 1:
        # BUG FIX: hours is the largest unit, so it must not wrap at 60
        # (the original displayed 61 hours as "1h").
        units.append('{0}h'.format(int(hours)))
    return ' '.join(units[::-1])
|
383871f7d79e19742ea64788397bca5f13a9f3ee
| 78,438
|
def tail_swap(word_list):
    """
    Swap the parts after the colon between the two strings in *word_list*.
    :param word_list: The word list with 2 strings which contain exactly 1 colon
    :return: a list with the tails of each string swapped
    :rtype: list
    """
    first, second = word_list
    head_a, tail_a = first.split(':')
    head_b, tail_b = second.split(':')
    return ['{}:{}'.format(head_a, tail_b), '{}:{}'.format(head_b, tail_a)]
|
092508637f024fe3271d5d553104501be93f5bc0
| 78,439
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.