content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def copy_model_instance(obj):
    """Copy a Django model instance into a plain dictionary.

    Automatically created fields (auto-generated primary-key sequences,
    auto-created many-to-one reverse relations) are skipped.

    :param obj: Django model object
    :return: copy of the model instance as a dictionary
    """
    model_meta = getattr(obj, '_meta')  # indirect access keeps IDE/linters quiet
    copied = {}
    for field in model_meta.get_fields(include_parents=False):
        if field.auto_created:
            continue  # skip auto PKs and auto-created reverse relations
        copied[field.name] = getattr(obj, field.name)
    return copied
|
fb7c905c5140b5024cc4c5b650776bc95d4de9ee
| 63,380
|
import calendar
def get_day(date_obj):
    """Return the weekday name (e.g. 'Monday') for *date_obj*."""
    weekday_index = date_obj.weekday()  # Monday == 0
    return calendar.day_name[weekday_index]
|
f872fbc1fb3166272effc2e97e7b24c8434ef4cf
| 63,382
|
def is_pkcs7(bs, block_size):
    """
    Determine whether a byte sequence carries valid PKCS#7 padding.

    Fixes two defects of the original: indexing ``bytes`` on Python 3 yields
    ints, so ``ord(bs[-1])`` raised TypeError; and a full block of padding
    (pad value == block_size) is legal PKCS#7 but was rejected by the
    ``last > block_size - 1`` check.

    :param bs: the padded message (``bytes`` or ``str``)
    :param block_size: cipher block size in bytes (e.g. 16 for AES)
    :return: True if ``bs`` ends with valid PKCS#7 padding
    """
    # Length must be a non-zero multiple of the block size.
    if not bs or len(bs) % block_size != 0:
        return False
    # Normalise every element to an int so bytes and str both work.
    as_ints = [b if isinstance(b, int) else ord(b) for b in bs]
    last = as_ints[-1]
    # Valid pad values are 1..block_size inclusive (RFC 5652 section 6.3).
    if last < 1 or last > block_size or last > len(bs):
        return False
    # Every padding byte must equal the pad length.
    return all(b == last for b in as_ints[-last:])
|
e83e6ae5563e1532d4565fd7a48dc0034ff5e52c
| 63,389
|
def update_output(n_clicks, current_team_list, current_name, original_team_list):
    """Return the status text for the current point in the picker workflow.

    Flow: initial message -> picked name -> end message -> "still clicking".

    Args:
        n_clicks (int): number of times the button has been clicked
        current_team_list (list): team list with already-picked names removed
        current_name (str): random name drawn from the team list
        original_team_list (list): the full team list chosen from the dropdown
    Returns:
        str: text matching the current workflow stage
    """
    total_people = len(original_team_list)
    if n_clicks == 0:
        return f"We've got {len(current_team_list)} people to get through today"
    if n_clicks == total_people + 1:
        return f"All done and dusted, have a lovely day!"
    if n_clicks > total_people:
        return f"You're keen, we've already been!"
    return f"{current_name}, you're up!"
|
7178ce2966c98fdc2b9a78b273bd4dd2ea5b4ce0
| 63,390
|
def total_exs(dataset):
    """
    Count the (context, question, answer) triples in data read from a
    SQuAD-format JSON file.
    """
    # Each paragraph contributes one triple per entry in its 'qas' list.
    return sum(len(paragraph['qas'])
               for article in dataset['data']
               for paragraph in article['paragraphs'])
|
46f516da61c1e11f0d9b4270a3b7cb839c166f30
| 63,393
|
from typing import Iterable
from typing import List
from typing import Counter
def compute_union(x: Iterable, y: Iterable) -> List:
    """Return the multiset union of `x` and `y`.

    Items must be hashable; each item appears max(count in x, count in y)
    times. For example:
        x = ['a', 'a', 'b', 'c']
        y = ['a', 'c', 'c', 'd']
    then their union is ['a', 'a', 'b', 'c', 'c', 'd'].
    **DOES NOT GUARANTEE ORDER OF THE OUTPUT LIST**
    """
    counts_x = Counter(x)
    counts_y = Counter(y)
    merged = []
    # Counter lookups default to 0 for missing keys.
    for item in counts_x.keys() | counts_y.keys():
        merged += [item] * max(counts_x[item], counts_y[item])
    return merged
|
9113f2b7ed90ae05528f0f17658efc8b99bfc23c
| 63,394
|
def onetrue(seq):
    """
    Return *True* if at least one element of *seq* is truthy; an empty
    *seq* yields *False*. *seq* must support len().
    """
    if len(seq) == 0:
        return False
    return any(seq)
|
69b8bf7fa83a79c34b5ed563ef9f623311be4b2c
| 63,398
|
def create_idx_to_cat(cat_to_idx):
    """Invert a category->index mapping into index->category.

    The classifier reports internal index numbers; the inverted dict maps
    them back to the actual flower category labels.

    Args:
        cat_to_idx: Dictionary mapping flower categories to classifier indexes.
    Returns:
        Dictionary mapping classifier indexes to flower categories.
    """
    inverted = {}
    for category, index in cat_to_idx.items():
        inverted[index] = category
    return inverted
|
6839cfd9417d7969e3c74abe4c2355371fd5a644
| 63,402
|
def trunc(X, high, low):
    """Clamp X into the interval [low, high] (assumes low <= high)."""
    floored = max(X, low)                        # raise X up to at least `low`
    return high if floored > high else floored   # then cap at `high`
|
ecccb971bd7cca3d03162a973aeb1a75ae8cab0d
| 63,404
|
import calendar
import time
def iso2ts(date):
    """Convert an ISO 8601 UTC date string to seconds since the epoch.

    A None input maps to timestamp 0.
    """
    if date is None:
        return 0
    parsed = time.strptime(date, "%Y-%m-%dT%H:%M:%SZ")
    return calendar.timegm(parsed)
|
7046298444a6a80ee715bff09ab6f95f4e211379
| 63,405
|
def de_itemparser(line):
    """Parse a DE item line into {OfficalName: str, Synonyms: [str,]}.

    The description always starts with the proposed official name of the
    protein; synonyms follow in parentheses, e.g.
    'Annexin A5 (Annexin V) (Lipocortin V) (Endonexin II)'
    """
    pieces = [chunk.strip(") ") for chunk in line.split("(")]
    # pieces[1:] is [] when the line contains no parenthesised synonyms
    return {"OfficalName": pieces[0], "Synonyms": pieces[1:]}
|
b5417b7d047d16462051c889117bf0b0177157a8
| 63,407
|
def minmax_normalize(array):
    """Rescale *array* linearly so its values lie between 0 and 1.

    Parameters
    ----------
    array : np.ndarray
        Array to be normalized

    Returns
    -------
    np.ndarray
        Normalized array
    """
    lowest = array.min()
    value_span = array.max() - lowest
    return (array - lowest) / value_span
|
39bc7e0e568d503f9f552ef994e7daed199105ff
| 63,408
|
import re
def get_enewsno(html_string):
    """
    Extract the TüNews E-News number ('tun' + 8 digits) from an HTML string.

    Returns the matched string, or 0 when no number is present.
    """
    match = re.search("tun[0-9]{8}", html_string)
    return match.group(0) if match else 0
|
9341f02c620ee86ffc1ce65f15a4345f914715b9
| 63,411
|
def get_decoded_chunks(decoded: str, event_key='', array_separator_key='') -> list:
    """
    Strip *event_key* from a decoded event and split it on
    *array_separator_key*.

    NOTE(review): the default separator '' makes str.split raise ValueError;
    callers appear to be expected to pass a real separator — confirm.
    """
    stripped = decoded.replace(event_key, '')
    return stripped.split(array_separator_key)
|
c2843f002155ff5ed6eeb27ce9a398596938ebf6
| 63,412
|
def _clean_timings(raw_timings: list, start_time: int) -> list:
"""
Converts a list of raw times into clean times.
Each raw time is divided by 5000 to convert it to seconds. Also, the time list is adjusted to the
time the fMRI began scanning.
"""
clean_timings = []
for raw_time in raw_timings:
time = (raw_time - start_time) / 5000
clean_timings.append(time)
return clean_timings
|
37eb239ae6372620c7fbacc4be1d979f408ce7e6
| 63,413
|
def add_line_breaks_to_sequence(sequence, line_length):
    """
    Wrap *sequence* to lines of *line_length* characters; every resulting
    line (including the last) ends in a line break. An empty sequence
    yields a single newline.
    """
    if not sequence:
        return '\n'
    chunks = [sequence[start:start + line_length]
              for start in range(0, len(sequence), line_length)]
    return '\n'.join(chunks) + '\n'
|
6cb9101690b173604c0dac5f0b9300f3a6db4e1a
| 63,414
|
def get_whereclause(params):
    """Build a partial SQL query from a dict {key1: val1, key2: val2}:
        WHERE key1 = 'val1'
        AND key2 = 'val2'
        ...

    WARNING: values are interpolated directly into the SQL string; never
    use this with untrusted input (SQL injection risk) — prefer
    parameterized queries where possible.
    """
    clause_parts = []
    for position, (column, value) in enumerate(params.items()):
        keyword = "WHERE" if position == 0 else "AND"
        clause_parts.append("%s %s = '%s'" % (keyword, column, value))
    return " ".join(clause_parts)
|
a9d2198ae3308be862eecdb148c484e0d63ccfac
| 63,417
|
def token_to_char_offset(e, candidate_idx, token_idx):
    """Convert a token index to the char offset within the candidate."""
    candidate = e["long_answer_candidates"][candidate_idx]
    offset = 0
    for doc_token in e["document_tokens"][candidate["start_token"]:token_idx]:
        if doc_token["html_token"]:
            continue  # HTML markup tokens contribute no visible characters
        visible = doc_token["token"].replace(" ", "")
        offset += len(visible) + 1  # + 1 is for the space
    return offset
|
d378346662aa6766842d2ef9e344afe0dfe01d8a
| 63,421
|
def patch_name_parts_limit(name_str, space_replacer=None):
    """ Usage: par_name = patch_name_parts_limit(name_str, <space_replacer>)

    Clean up name_str so it can serve as a valid file name and be decoded
    with patch_name_to_dict.

    Args:
        name_str: string representation for case_id or class_label or file_extension
        space_replacer: python str used to replace spaces
    Returns:
        part_name: name_str with spaces removed, reserved characters removed
            and underscores replaced with hyphens
    """
    # Substitute spaces first when a valid (string) replacer was supplied.
    if isinstance(space_replacer, str):
        name_str = name_str.replace(' ', space_replacer)
    # Drop any spaces that remain.
    name_str = name_str.replace(' ', '')
    # Strip filesystem-reserved characters in one translate pass.
    part_name = name_str.translate(str.maketrans('', '', '/\\?%*:|"<>'))
    # Underscores become hyphens so x/y locations can still be decoded.
    return part_name.replace('_', '-')
|
3ad579cc15b6105cb3c7c1882775266bfa00d694
| 63,425
|
def is_multicast(ip):
    """
    Tell whether the specified ip is an IPv4 multicast address.

    Multicast (class D) spans 224.0.0.0 through 239.255.255.255, i.e. a
    first octet of 224..239 inclusive. The previous `range(224, 239)`
    check wrongly excluded 239 (administratively scoped multicast).

    :param ip: an IPv4 address in dotted-quad string format, for example
    192.168.2.3
    """
    first_octet = int(ip.split('.')[0])
    return 224 <= first_octet <= 239
|
2da871d29b8b3e6042430fb3f776c6216e7d2aa8
| 63,426
|
import pytz
def time_str(date):
    """
    Return *date* in ISO 8601 format, forcing UTC onto naive datetimes.

    Datetimes aren't timezone aware by default and everything must clearly
    be returned in UTC.

    Args:
        date:
    Returns:
        str
    """
    aware = date if date.tzinfo is not None else date.replace(tzinfo=pytz.utc)
    return aware.isoformat()
|
a62338d1811eef98e4ed1e97e57ec14dac6f8eba
| 63,429
|
def neutral_mass_from_mz_charge(mz: float, charge: int) -> float:
    """
    Calculate the neutral mass of an ion given its m/z and charge.

    Parameters
    ----------
    mz : float
        The ion's m/z.
    charge : int
        The ion's charge.

    Returns
    -------
    float
        The ion's neutral mass.
    """
    HYDROGEN_MASS = 1.00794  # average atomic mass of hydrogen, in Da
    return (mz - HYDROGEN_MASS) * charge
|
1212fffcf7b924768cc9c50079197ed294d937b3
| 63,430
|
def lowest_common_ancestor(root, a, b):
    """ Find the lowest common ancestor of values a and b in a BST. """
    node = root
    while True:
        if node.data > max(a, b):
            node = node.left      # both values lie in the left subtree
        elif node.data < min(a, b):
            node = node.right     # both values lie in the right subtree
        else:
            return node           # split point: this node is the LCA
|
a6d43274864c4f443540f864f246bdaa452f5459
| 63,433
|
import re
def remove_punc(rawtext, ignore_case=False, remove_numbers=False, sub_numbers=True,
                names_list=set(), locations_list=set(), is_mhddata=False):
    """
    Normalise transcript text: strip/substitute punctuation and numbers,
    split contractions, and replace names/locations with placeholder tokens
    (-name-, -location-, -num-, -laugh-).

    NOTE(review): names_list/locations_list are mutable default arguments;
    they are never mutated here, but frozenset() defaults would be safer.

    Parameters
    ----------
    rawtext : str
    ignore_case: bool
        Lower-cases the text if True.
    remove_numbers: bool
        Removes all numbers if True
    sub_numbers: bool
        Substitutes all numbers to a token -num-.
        if 'remove_numbers' is True, it will have no effect.
    names_list: set
        Tokens to replace with '-name-'.
    locations_list: set
        Unigrams/bigrams to replace with '-location-'.
    is_mhddata: bool
        True if the data is MHD
        False if it's from some other dataset
    Returns
    -------
    str
        Space-joined, cleaned token string.
    """
    # from NLTK
    # ending quotes: split trailing quote marks and clitics ('s, 'll, n't...)
    ENDING_QUOTES = [
        (re.compile(r'"'), " '' "),
        (re.compile(r'(\S)(\'\')'), r'\1 \2 '),
        (re.compile(r"([^' ])('[sS]|'[mM]|'[dD]|') "), r"\1 \2 "),
        (re.compile(r"([^' ])('ll|'LL|'re|'RE|'ve|'VE|n't|N'T) "), r"\1 \2 "),
    ]
    # List of contractions adapted from Robert MacIntyre's tokenizer.
    CONTRACTIONS2 = [re.compile(r"(?i)\b(can)(not)\b"),
                     re.compile(r"(?i)\b(d)('ye)\b"),
                     re.compile(r"(?i)\b(gim)(me)\b"),
                     re.compile(r"(?i)\b(gon)(na)\b"),
                     re.compile(r"(?i)\b(got)(ta)\b"),
                     re.compile(r"(?i)\b(lem)(me)\b"),
                     re.compile(r"(?i)\b(mor)('n)\b"),
                     re.compile(r"(?i)\b(wan)(na) ")]
    CONTRACTIONS3 = [re.compile(r"(?i) ('t)(is)\b"),
                     re.compile(r"(?i) ('t)(was)\b")]
    # '[patient name]' (any bracket style) becomes the -name- token.
    txt = re.sub(r'[\[\{\(\<]patient name[\]\}\)\>]', ' -name- ', txt_src := rawtext) if False else re.sub(r'[\[\{\(\<]patient name[\]\}\)\>]', ' -name- ', rawtext)
    # For proper names and location.
    # Couldn't use string match since it might be part of other words therefore using regex
    # Takes forever!! we use dict at the end
    # for lc in locations_list:
    #     txt = re.sub(r"\b" + re.escape(lc) + r"\b", "-location-", txt)
    # for n in names_list:
    #     txt = re.sub(r"\b" + re.escape(n) + r"\b", "-name-", txt)
    # Laugh
    txt = re.sub(r'[\[\{\(\<]laugh[ter]*[\]\}\)\>]', ' -laugh- ', txt)
    # Delete brackets (also contents inside the brackets)
    txt = re.sub(r'[\[\{\<][^\[\{\<]*[\]\}\>]', ' ', txt)
    # Remove parentheses, while leaving the contents inside.
    txt = re.sub(r'[\(\)]', ' ', txt)
    if remove_numbers:
        # remove everything except for alpha characters and ', -, ?, !, .
        txt = re.sub(r'[^\.,A-Za-z\'\-\?\! ]', ' ', txt)
    else:
        # remove everything except for alphanumeric characters and ', -, ?, !, .
        txt = re.sub(r'[^\.,A-Za-z0-9\'\-\?\! ]', ' ', txt)
    # split numbers
    txt = re.sub(r"([0-9]+)", r" \1 ", txt)
    if sub_numbers:
        txt = re.sub(r'[0-9]+', '-num-', txt)
    if is_mhddata:
        # Replace non-ascii characters (for now just replace one)
        txt = re.sub("\x92", "'", txt)
        txt = re.sub("í", "'", txt)
        # Remove all NAs
        txt = re.sub(r"\bNA\b", "", txt)
    if ignore_case:
        txt = txt.lower()
    # space out the periods and commas
    txt = re.sub(r"\.", " . ", txt)
    txt = re.sub(r",", " , ", txt)
    # split ! and ?
    txt = re.sub(r'\!', ' ! ', txt)
    txt = re.sub(r'\?', ' ? ', txt)
    # add extra space to make things easier
    txt = " " + txt + " "
    # remove dashes that are used alone -- but this might be useful later
    # for now we're just removing them
    txt = re.sub(r' \-\-* ', ' ', txt)
    # remove -- these (consecutive dashes)
    txt = re.sub(r'\-\-+', ' ', txt)
    # remove dashes in the words that start with a dash or end with a dash
    # (Note: words that start AND end with single dashes are meaningful tokens.)
    txt = re.sub(r"\-([A-Za-z\']+)\s", r"\1 ", txt)
    txt = re.sub(r"\s([A-Za-z\']+)\-", r" \1", txt)
    # add extra space to make things easier
    txt = " " + txt + " "
    # Added these two to find a match with the pre-trained words
    # txt = re.sub(r'[A-Za-z]\-$', '<PARTIALWORD>', txt)  # add this? probably not at the moment.
    # txt = re.sub(r'\-', ' ', txt)
    txt = re.sub(r"''", "'", txt)
    txt = re.sub(r"\s+'\s+", " ", txt)
    for regexp, substitution in ENDING_QUOTES:
        txt = regexp.sub(substitution, txt)
    for regexp in CONTRACTIONS2:
        txt = regexp.sub(r' \1 \2 ', txt)
    for regexp in CONTRACTIONS3:
        txt = regexp.sub(r' \1 \2 ', txt)
    txt = re.sub(r'\s+', ' ', txt)  # make multiple white spaces into 1
    tokenized = txt.strip().split()  # tokenized temporarily to check proper nouns
    for i, w in enumerate(tokenized):
        if w in names_list:
            tokenized[i] = '-name-'
        # considers up to bigrams. First looks up if there is a match in bigrams. if not, checks unigram
        if i < len(tokenized)-1 and ' '.join(tokenized[i:i+2]) in locations_list:
            tokenized[i] = '-location-'
            del(tokenized[i+1])
        elif w in locations_list:
            tokenized[i] = '-location-'
    return ' '.join(tokenized)
|
fa0c0f40f710498eaa30b8fdc70422e4724c7609
| 63,441
|
def cube(num):
    """
    Check if a number is a perfect cube.

    Fixes two defects: a negative base raised to a fractional power yields a
    complex number on Python 3 (so cube(-8) crashed), and float cube roots
    can land one integer off for large values, so candidate roots are
    verified with exact integer arithmetic.

    :type num: number
    :param num: The number to check.
    >>> cube(8)
    True
    """
    magnitude = abs(num)
    # Float estimate of the cube root; may be off by one for large inputs.
    root = int(round(magnitude ** (1 / 3)))
    for candidate in (root - 1, root, root + 1):
        # candidate**3 == magnitude implies (-candidate)**3 == num for num < 0
        if candidate >= 0 and candidate ** 3 == magnitude:
            return True
    return False
|
ded3dc359613d8f0a062571640b6a27fd746331a
| 63,444
|
def _reformat_host_networks(networks):
"""Reformat networks from list to dict.
The key in the dict is the value of the key 'interface'
in each network.
Example: networks = [{'interface': 'eth0', 'ip': '10.1.1.1'}]
is reformatted to {
'eth0': {'interface': 'eth0', 'ip': '10.1.1.1'}
}
Usage: The networks got from db api is a list of network,
For better parsing in json frontend, we converted the
format into dict to easy reference.
"""
network_mapping = {}
for network in networks:
if 'interface' in network:
network_mapping[network['interface']] = network
return network_mapping
|
de9bf105abb300aee041fe0c77261cc4afffaa07
| 63,447
|
def splitValues(txt, sep=",", lq='"<', rq='">'):
    """
    Helper function returns list of delimited values in a string,
    where delimiters in quotes are protected.

    sep is string of separator characters (any one of them splits)
    lq is string of opening quotes for strings within which separators are not recognized
    rq is string of corresponding closing quotes (matched by index into lq)

    Backslash inside a quoted span escapes the next character.
    The final segment is always appended, so the result has one more
    element than the number of unquoted separators found.
    """
    result = []
    cursor = 0
    begseg = cursor  # start index of the segment currently being scanned
    while cursor < len(txt):
        if txt[cursor] in lq:
            # Skip quoted or bracketed string
            eq = rq[lq.index(txt[cursor])]  # End quote/bracket character
            cursor += 1
            while cursor < len(txt) and txt[cursor] != eq:
                if txt[cursor] == '\\': cursor += 1  # skip '\' quoted-pair
                cursor += 1
            if cursor < len(txt):
                cursor += 1  # Skip closing quote/bracket
        elif txt[cursor] in sep:
            # Unquoted separator: emit the segment and start a new one.
            result.append(txt[begseg:cursor])
            cursor += 1
            begseg = cursor
        else:
            cursor += 1
    # append final segment
    result.append(txt[begseg:cursor])
    return result
|
78e383d1a80910e9849767e992bf54c4cf72630a
| 63,448
|
def _unwrap(cdw):
"""Return list of objects within a CoreDataWrapper"""
if not cdw:
return []
values = []
for i in range(cdw.count()):
values.append(cdw.valueAtIndex_(i))
return values
|
f14b341e0894e85c81c27216fe0e88dc38bff46a
| 63,449
|
def power_modulo(a: int, b: int, n: int) -> int:
    """ Compute a ** b mod n by binary (square-and-multiply) exponentiation. """
    accumulator = 1
    base = a
    exponent = b
    # Walk the binary digits of the exponent, squaring the base each step.
    while exponent != 0:
        if exponent & 1:  # current bit set -> multiply this power in
            accumulator = (accumulator * base) % n
        base = (base * base) % n
        exponent >>= 1
    return accumulator
|
d628c4d10f870d056a8d5527f646c82df103e10a
| 63,452
|
from itertools import product
def spiral_XYs(min_X, max_X, min_Y, max_Y):
    """Return all integer grid points of the rectangle as an inward spiral.

    The spiral is built outside-in (peeling right, bottom, left, top edges
    in turn) into `res_reverse`, then reversed so the returned list runs
    from the centre outwards. Raises ValueError on an empty rectangle.

    >>> spiral_XYs(1, 3, 1, 3)
    [(2, 2), (2, 3), (1, 3), (1, 2), (1, 1), (2, 1), (3, 1), (3, 2), (3, 3)]
    >>> spiral_XYs(1, 2, 1, 2)
    [(1, 2), (1, 1), (2, 1), (2, 2)]
    >>> spiral_XYs(1, 1, 1, 1)
    [(1, 1)]
    >>> spiral_XYs(1, 4, 1, 1)
    [(1, 1), (2, 1), (3, 1), (4, 1)]
    >>> spiral_XYs(1, 1, 1, 3)
    [(1, 1), (1, 2), (1, 3)]
    """
    if (min_X > max_X) or (min_Y > max_Y):
        raise ValueError
    # All remaining (unvisited) grid points.
    XYs = list(product(range(min_X, max_X + 1), range(min_Y, max_Y + 1)))
    res_reverse = []
    while XYs:
        # Go down right side
        app = [(X, Y) for X, Y in XYs if X == max_X and min_Y <= Y <= max_Y]
        XYs = [(X, Y) for X, Y in XYs if not (X, Y) in app]
        app = sorted(app, key=lambda x: x[1], reverse=True)
        res_reverse += app
        max_X -= 1  # shrink the rectangle: right edge consumed
        # Go left lower side
        app = [(X, Y) for X, Y in XYs if Y == min_Y and min_X <= X <= max_X]
        XYs = [(X, Y) for X, Y in XYs if not (X, Y) in app]
        app = sorted(app, key=lambda x: x[0], reverse=True)
        res_reverse += app
        min_Y += 1  # bottom edge consumed
        # Go up left side
        app = [(X, Y) for X, Y in XYs if X == min_X and min_Y <= Y <= max_Y]
        XYs = [(X, Y) for X, Y in XYs if not (X, Y) in app]
        app = sorted(app, key=lambda x: x[1])
        res_reverse += app
        min_X += 1  # left edge consumed
        # Go right upper side
        app = [(X, Y) for X, Y in XYs if Y == max_Y and min_X <= X <= max_X]
        XYs = [(X, Y) for X, Y in XYs if not (X, Y) in app]
        app = sorted(app, key=lambda x: x[0])
        res_reverse += app
        max_Y -= 1  # top edge consumed
    # The peel order runs outside-in; reverse it to spiral from the centre out.
    res = list(reversed(res_reverse))
    return res
|
5e794b783c7443fe6d497732611417a323cdb75d
| 63,453
|
import torch
def _loss_fn(logits, labels, weights):
"""
Cross-entropy loss
"""
loss = torch.nn.CrossEntropyLoss(reduction='none')(logits, labels)
return torch.sum(loss * weights)
|
346328075001415a49f905491101105564ff476f
| 63,455
|
def is_partitioned_line_blank(indent: str, text: str) -> bool:
    """Report whether an indent-partitioned line is blank.

    Args:
        indent: The leading indent of a line. May be empty.
        text: Text following the leading indent. May be empty.
    Returns:
        True if no text follows the indent (the indent itself is ignored).
    """
    return not text
|
a1b9b5ed5a1852bd3cf1dc6b550214c38f34a856
| 63,457
|
def sample_ionosphere(sim, sigma, l):
    """Generate an ionosphere model, I(sigma, l).

    sim : SimulatedTec object (non reentrant)
    sigma : float log_electron variance
    l : float length scale
    Returns the generated model as an ndarray.
    """
    sim.generate_model(sigma, l)
    return sim.model
|
db96cfc7d4603d203fce7a71b5a806ccceba3dfd
| 63,463
|
def get_insert_indices(my_timestamps, existing_timestamps):
    """
    Given new timestamps and an existing series of timestamps, find the
    index range [my_offset, my_end) at which the new data overlays the
    existing dataset.

    Both series must be regularly spaced with the same timestep and contain
    at least two entries.

    :raises ValueError: if the two series have different timesteps
        (ValueError subclasses Exception, so existing callers that caught
        Exception still work; %s instead of %d keeps the message valid for
        float/timedelta steps)
    """
    existing_timestep = existing_timestamps[1] - existing_timestamps[0]
    my_timestep = my_timestamps[1] - my_timestamps[0]
    # make sure the time delta is ok
    if existing_timestep != my_timestep:
        raise ValueError("Existing dataset has different timestep (mine=%s, existing=%s)"
                         % (my_timestep, existing_timestep))
    # integer position of our first timestamp within the existing series
    my_offset = (my_timestamps[0] - existing_timestamps[0]) // existing_timestep
    my_end = my_offset + len(my_timestamps)
    return my_offset, my_end
|
d71198c04228d0d63669d3ce1115d2ad08b08d7c
| 63,472
|
def search_min_diff_element(arr: list[int], val: int) -> int:
    """Binary search for the element of ``arr`` with minimum |element - val|.

    Complexity:
        Time: O(log n), Space: O(1)

    Args:
        arr: array of numbers sorted in ascending order
        val: value for which to find the closest companion element

    Returns: element of arr that has the minimum difference with ``val``

    Examples:
        >>> search_min_diff_element([4, 6, 10],7)
        6
        >>> search_min_diff_element([4, 6, 10],4)
        4
        >>> search_min_diff_element([1, 3, 8, 10, 15],12)
        10
        >>> search_min_diff_element([4, 6, 10],17)
        10
        >>> search_min_diff_element([1],0)
        1
    """
    # Out-of-range values clamp to the nearest end of the array.
    if val < arr[0]:
        return arr[0]
    if val > arr[-1]:
        return arr[-1]
    lo, hi = 0, len(arr) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if arr[mid] == val:
            return arr[mid]  # exact hit: zero difference
        if arr[mid] > val:
            hi = mid - 1
        else:
            lo = mid + 1
    # Loop exit leaves arr[hi] < val < arr[lo] (immediate predecessor and
    # successor); ties resolve to the successor via min()'s first-arg bias.
    return min(arr[lo], arr[hi], key=lambda candidate: abs(val - candidate))
|
79c15b141948e146e86db0f7d2d6d8f120a900c2
| 63,473
|
def create(collection, region):
    """
    Create a Sentinel 1 quality mosaic.

    Args:
        collection: Sentinel 1 ee.ImageCollection with at least 'VV', 'VH', and 'quality' bands.
        region: The region to clip the mosaic to.
    Returns:
        A clipped ee.Image with all bands of the provided collection plus
        'ratio_VV_VH' - the ratio between VV and VH.
    """
    mosaic = collection.qualityMosaic('quality')
    # VV/VH "ratio" is computed as a band difference (dB-scale data).
    ratio_band = mosaic.select('VV').subtract(mosaic.select('VH')).rename('ratio_VV_VH')
    mosaic = mosaic.addBands([ratio_band])
    return mosaic.float().clip(region)
|
5a2beaba566ea7a79481f81d01ae251b43ded181
| 63,482
|
def prepare_dataset(argv) -> int:
    """
    Read user names and hobbies from two files and write the merged data to
    a new file, one line per pair in the form '<full name>: <hobby data>'.

    :param argv: command-line arguments: either just the program name
        (default file names are used) or program name plus exactly three
        paths - users file, hobby file and the resulting pairs file
    :return: exit status (0 - success, 1 - error)
    """
    if len(argv) == 1:
        # Nothing passed as parameters - fall back to the default file names.
        path_users_file, path_hobby_file, users_hobby_file = 'users.csv', 'hobby.csv', 'users_hobby.txt'
    elif not len(argv) == 4:
        print(f'Передано неверное количество параметров!')
        return 1
    else:
        program, path_users_file, path_hobby_file, users_hobby_file, *_ = argv
    # 'with' guarantees all three files are closed even if an error occurs
    # mid-write (the original leaked the two input handles in that case).
    with open(path_users_file, 'r', encoding='utf-8') as users_file, \
            open(path_hobby_file, 'r', encoding='utf-8') as hobby_file, \
            open(users_hobby_file, 'a', encoding='utf-8') as users_hobby:
        while True:
            users_line = users_file.readline().strip()
            hobby_line = hobby_file.readline().strip()
            if not (users_line or hobby_line):
                # Both files are exhausted - stop writing.
                # Mismatched line counts could be handled here if needed.
                break
            users_hobby.write(f'{users_line}: {hobby_line}\n')
    return 0
|
d4742f1e56c853c211224d11a98294bd7401cb54
| 63,490
|
def split_text(text, n=100, character=" "):
    """Split the text every ``n``-th occurrence of ``character``."""
    pieces = text.split(character)
    chunks = []
    for start in range(0, len(pieces), n):
        chunks.append(character.join(pieces[start:start + n]).strip())
    return chunks
|
0efc59020a8fed15960346649dc8de861a64eb58
| 63,491
|
def load_directions(filename):
    """Load direction tokens from the first line of *filename*.

    Tokens are whitespace-separated; surrounding commas are stripped.
    """
    with open(filename) as handle:
        tokens = handle.readline().strip().split()
    return [token.strip().strip(',') for token in tokens]
|
0b2fea3e6f67a0ec517601b14b1719dcb4c600f6
| 63,492
|
def to_string(data):
    """Convert data to string.

    Works for both Python2 & Python3. Wrapping *data* in a one-element
    tuple fixes the original's crash on tuple input ('%s' % a_tuple tries
    to use the tuple as format arguments).
    """
    return '%s' % (data,)
|
c22fe85a57740b0e772a2b9c57df39c7d473a265
| 63,496
|
def format_url(host, resource):
    """
    Return a usable URL joining *host* and *resource* with exactly one
    slash between them and no trailing slash.
    """
    trimmed_host = host.rstrip("/")
    trimmed_resource = resource.strip("/")
    return f"{trimmed_host}/{trimmed_resource}"
|
46c57603dd2d6c6b50b893476c1b1fa7f0f30799
| 63,499
|
def get_platform_and_table(view_name: str, connection: str, sql_table_name: str):
    """
    Map a Looker connection and sql_table_name to (platform, table_name).

    This depends on what database connections are configured in Looker.
    For SpotHero there were two: "redshift_test" (a redshift database) and
    "presto". Presto queries across multiple catalogs ("redshift", "hive",
    "hive_emr"), so the underlying platform is inferred from the presto
    catalog name; a missing catalog falls back to the configured default
    (hive_emr).

    :raises Exception: for an unknown catalog or connection.
    """
    if connection == "redshift_test":
        platform = "redshift"
        table_name = sql_table_name
        return platform, table_name
    elif connection == "presto":
        parts = sql_table_name.split(".")
        catalog = parts[0]
        if catalog == "hive":
            platform = "hive"
        elif catalog == "hive_emr":
            platform = "hive_emr"
        elif catalog == "redshift":
            platform = "redshift"
        else:
            # Looker lets you exclude a catalog and use a configured default, the default we have configured is to use hive_emr
            if sql_table_name.count(".") != 1:
                # BUG FIX: the message was missing its f-prefix, so the
                # placeholders were emitted literally instead of the values.
                raise Exception(f"Unknown catalog for sql_table_name: {sql_table_name} for view_name: {view_name}")
            platform = "hive_emr"
            return platform, sql_table_name
        # Strip the catalog prefix for known catalogs.
        table_name = ".".join(parts[1:])
        return platform, table_name
    else:
        raise Exception(f"Could not find a platform for looker view with connection: {connection}")
|
569f13d5ef8b8f5b4f81e693b61c078452a40e97
| 63,507
|
def test_setting_mixin(mixin, **attrs):
    """Create a testable version of the setting mixin.

    The setting mixin passes through kwargs to an expected `super`
    object. When that object doesn't exist it causes errors. This
    function takes the mixin in question and adds that `super` object
    so that the mixin can be tested.

    :param mixin: the mixin class under test
    :param attrs: extra class attributes for the generated subclass
    :return: a new class subclassing (mixin, parent) with *attrs* applied
    """
    # The __init__ method for the parent object, so that it will accept
    # kwargs and store them so they can be tested.
    def __init__(self, **kwargs):
        self.kwargs = kwargs
    # Create the parent object for testing the setting mixin and return
    # the subclass of the mixin and the parent. MRO places the mixin
    # first, so its cooperative super().__init__ lands on the parent.
    parent = type('TestSettingMixin', (), {'__init__': __init__})
    return type('SettingMixin', (mixin, parent), attrs)
|
3aeabc21e31c57001693abda253381d651f6d7d8
| 63,509
|
import zlib
def dehex_and_decompress(value):
    """
    Decompress and dehex a PV value.

    Args:
        value: hex-encoded, zlib-compressed payload (str or bytes).
    Returns:
        The dehexed, decompressed value as a str.
    """
    # bytes input is first decoded to a hex string; str lacks .decode and
    # passes through unchanged.
    try:
        hex_text = value.decode('utf-8')
    except AttributeError:
        hex_text = value
    return zlib.decompress(bytes.fromhex(hex_text)).decode("utf-8")
|
b8462ca695d9405340189aa0cc2affc51faa830a
| 63,511
|
from functools import reduce
def combine_game_stats(games):
    """
    Combine a list of games into one big player sequence containing game
    level statistics.

    Useful for building GamePlayerStats objects spanning an entire week,
    several weeks, or a whole season. None entries are skipped.
    """
    player_seqs = (game.players for game in games if game is not None)
    return reduce(lambda merged, players: merged + players, player_seqs)
|
5912fe1725e7571cc2a4040b14eaec789e7fe07c
| 63,513
|
def invert_mapping(kvp):
    """Invert the mapping given by a dictionary.

    :param kvp: mapping to be inverted
    :returns: inverted mapping (value -> key); duplicate values keep the
        last key encountered
    :rtype: dictionary
    """
    return {value: key for key, value in kvp.items()}
|
62c16608b83d9a10fe30930add87b8d73ba5e1cd
| 63,516
|
import torch
def expand_tens(tens, nb_new, device="cpu"):
    """Expand a tensor with zero-initialised scores for new videos.

    tens (tensor): a detached tensor
    nb_new (int): number of parameters to add
    device (str): device used (cpu/gpu)

    Returns:
        (tensor): expanded tensor requiring gradients
    """
    padding = torch.zeros(nb_new, device=device)
    grown = torch.cat([tens, padding])
    grown.requires_grad = True
    return grown
|
2c2ef13f0148a20e8d8f40d325e2cfb2049f4383
| 63,518
|
def generateAtomKey(mol, atom):
    """Return the 'mol;chain;resi;name' key representation of *atom*."""
    return f"{mol};{atom.chain};{atom.resi};{atom.name}"
|
ed2f2ea697da0f7adfcf70d51e324ead3b13a636
| 63,520
|
def get_repo(repo):
    """Parse the owner and name out of a repository fullname.

    Args:
        repo (str): Full name of the repository (owner/name format)
    Returns:
        tuple: (owner, name)
    Raises:
        ValueError: if *repo* is not exactly one 'owner/name' pair.
    """
    owner, name = repo.split("/")
    return (owner, name)
|
0c40344678be24ce0c5b324c39c1f9c20d012a85
| 63,521
|
def above_threshold(direction: str, correlation_value: float, threshold: float):
    """
    Compare a correlation value with a threshold.

    'correlation' tests value > threshold, 'anti-correlation' tests
    value < threshold, 'both' compares absolute magnitudes; any other
    direction falls back to the plain greater-than test.
    """
    if direction == 'anti-correlation':
        return correlation_value < threshold
    if direction == 'both':
        return abs(correlation_value) > abs(threshold)
    # 'correlation' and unknown directions share the plain comparison.
    return correlation_value > threshold
|
8c2d608775e5bfcf683d90a54ebb68df225bddf5
| 63,525
|
from typing import Iterable
from typing import Union
from typing import Any
def get(
    iterable: Iterable,
    **kwargs
) -> Union[None, Any]:
    """
    Return the first entry of *iterable* whose attributes match every
    keyword filter, or None when nothing matches.

    Parameters
    ----------
    iterable: Iterable
        The collection to search through.
    **kwargs: Any
        attribute=value pairs each candidate entry must satisfy.

    Examples
    --------
    Simple Example:
    ```python
    data = get(iterable, id=1234, title='This')
    ```
    """
    def matches(entry):
        return all(getattr(entry, name) == wanted for name, wanted in kwargs.items())
    return next((entry for entry in iterable if matches(entry)), None)
|
a9e6fd0cbce7b9e54a229311bb18633f287f620c
| 63,527
|
def get_dotted_val_in_dict(d, keys):
    """Search dict *d* for the element addressed by dotted path *keys*.

    Args:
        d (dict): Dictionary to search
        keys (str): Dotted path of the element to search for
    Returns:
        Value found at the specified path if present, else None. Falsy
        values (0, '', False, ...) are returned correctly; the previous
        truthiness check silently turned them into None.
    Examples:
        Search for the value of foo['bar'] in {'foo': {'bar': 1}}
        >>> get_dotted_val_in_dict({'foo': {'bar': 1}}, 'foo.bar')
        1
        Search for the value of foo['baz'] in {'foo': {'bar': 1}}
        >>> get_dotted_val_in_dict({'foo': {'bar': 1}}, 'foo.baz')
    """
    if "." in keys:
        key, rest = keys.split(".", 1)
        nested = d.get(key, {})
        if isinstance(nested, dict):
            return get_dotted_val_in_dict(nested, rest)
        return None  # path continues, but the intermediate value is not a dict
    # Membership test (not truthiness) so falsy stored values are returned.
    if keys in d:
        return d[keys]
    return None
|
37c388308f1727ef7832cf0e52b24488d37f6ee9
| 63,528
|
import io
def _fp_from_image(image):
"""Returns a BytesIO fp from the given image."""
fp = io.BytesIO()
image.save(fp, format='png')
fp.seek(0)
return fp
|
4c2d38906d3a44053219e2783f39c7df2b651b99
| 63,532
|
import string
import random
def id_generator(size=10, chars=string.digits):
    """Generate a random ID string (default: 10 digits).

    Pass 1) the length and 2) the characters to choose from to customise
    the output. h/t to the internet.
    """
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)
|
a0e0ce2195226aee146d16b30164e73ae9f515b8
| 63,535
|
def bdev_nvme_remove_error_injection(client, name, opc, cmd_type):
    """Remove error injection via the RPC client.

    Args:
        name: Name of the operating NVMe controller
        opc: Opcode of the NVMe command
        cmd_type: Type of NVMe command. Valid values are: admin, io
    Returns:
        True on success, RPC error otherwise
    """
    return client.call('bdev_nvme_remove_error_injection', {
        'name': name,
        'opc': opc,
        'cmd_type': cmd_type,
    })
|
7cd8c89fd9b561b1d6ce4585ba984ea808803e11
| 63,536
|
import base64
def _pem_encode_csr(csr):
"""Encodes the CSR in PEM format"""
b64_csr = base64.b64encode(csr).decode('ascii')
b64rn_csr = '\r\n'.join(b64_csr[pos:pos+64] for pos in range(0, len(b64_csr), 64))
pem_csr = '-----BEGIN NEW CERTIFICATE REQUEST-----\r\n'
pem_csr += b64rn_csr
pem_csr += '\r\n-----END NEW CERTIFICATE REQUEST-----'
return pem_csr
|
d19eea98db25ac03e4a0a57db3219ce6125d5e98
| 63,538
|
import fnmatch
def match_filter(rfilter, title):
    """
    Match *title* against a list of fnmatch patterns.

    An empty or None filter list matches everything.
    """
    if not rfilter:
        return True
    return any(fnmatch.fnmatch(title, pattern) for pattern in rfilter)
|
575333daf93f6a7211375b52ca9945a0eca7b30d
| 63,542
|
import re
def extract_coords(some_str: str) -> list:
    """Utility function to extract coordinates from a (WKT) string

    :param some_str: A string providing coordinates
    :type some_str: str
    :return: A list with the coordinate strings (sign preserved if present)
    :rtype: list
    """
    # `[-+]?` makes the sign optional. The previous class `[+|-]` required
    # a sign character (so unsigned WKT coords like "POINT (30 10)" were
    # missed) and wrongly accepted a literal '|'.
    regex = r"[-+]?\d+(?:\.\d*)?"
    matches = re.findall(regex, some_str, re.MULTILINE)
    return matches
|
6f5847dd31bf23647598fc63bf0cc50785b35776
| 63,547
|
def bin_compensate(bin_arr):
    """
    Left-pad a binary string with '0' until its length is a multiple of 8.

    Bug fix: the original always prepended ``8 - len % 8`` zeros, so a
    string whose length was already a multiple of 8 gained 8 spurious
    zeros.  ``-len % 8`` yields 0 in that case.

    :param bin_arr: str of binary digits
    :return: str whose length is a multiple of 8
    """
    return '0' * (-len(bin_arr) % 8) + bin_arr
|
efb08cc92acadd78f916cefa6b54af07fea0a034
| 63,551
|
def _massage_school_name(school_name: str) -> str:
"""Given a school name, massage the text for various peculiarities.
* Replace the trailing dot of school names like Boise St.
* Handle "Tourney Mode" where school names have their seed in the name.
"""
# Replace the trailing dot in `Boise St.` so right-justified text looks better.
# ... trust me, it makes a difference.
school_name = school_name.replace(".", "")
# Who knew! During the NCAA tourney season KenPom puts the tourney seed into
# the school name. So, while text_items[1] will be "Gonzaga" most of the year,
# during (and after, till start of next season) text_items[1] will be
# "Gonzaga 1" since they're a #1 seed.
# convert "NC State 1" to ["NC", "State", "1"]
name_candidate = school_name.split(" ")
try:
# Is the last element a number?
int(name_candidate[-1])
# Convert back to a string, minus the trailing number
school_name = " ".join(name_candidate[:-1])
except ValueError:
pass
return school_name
|
8372b85970752e35c2f040d53c6a09fe5219a602
| 63,554
|
def duplicate(reader):
    """
    Duplicate the quora question pairs, since each sample holds 2 questions.

    Input: reader, a callable yielding (question1, question2, label)
    Output: a callable yielding each sample twice, once as given and once
    with the questions swapped: (q1, q2, label) then (q2, q1, label)
    """
    def mirrored_reader():
        for q1, q2, label in reader():
            yield q1, q2, label
            yield q2, q1, label
    return mirrored_reader
|
9e598394b113d7903901931859600ee4e81952dd
| 63,556
|
def is_weakreferable(object_):
    """
    Returns whether the given object can be the target of a weak reference.

    An object qualifies if its type declares '__weakref__' in __slots__,
    or if the instance carries a __dict__.

    Parameters
    ----------
    object_ : `Any`
        The object to check.

    Returns
    -------
    is_weakreferable : `bool`
    """
    cls_slots = getattr(type(object_), '__slots__', None)
    if cls_slots is not None and '__weakref__' in cls_slots:
        return True
    return hasattr(object_, '__dict__')
|
c5b4705184ef81b9d96f4a3647386d8604ef9dc3
| 63,561
|
def get_daily_rate(target_count, cycle_minutes):
    """Return the daily rate of emails sent: target_count spread over a
    cycle of cycle_minutes expressed in days."""
    days_in_cycle = cycle_minutes / (60 * 24)
    return target_count / days_in_cycle
|
e58c20ef53885032b89fab1fa2205b4a046a50a7
| 63,563
|
def parse_logfile_string(s):
    """
    Parses a logfile string according to 10_Logfile_challenge.ipynb

    Expected layout: a line reading "header", then comma-separated
    KEY,VALUE parameter lines, then a line reading "measurements",
    a comma-separated column-name line, and finally comma-separated
    data rows (raises KeyError if either marker line is absent).

    Parameters:
        s : str
            logfile string
    Returns:
        dictionary : dict
            containing "params", "names", "data"
    """
    # split the input string on "\n" new line
    lines = s.split("\n")
    # create a look-up table of sections and line numbers
    idxs = dict()
    for lineNo, line in enumerate(lines):
        if line in ['measurements', "header"]:
            idxs[line] = lineNo
    idxs["names"] = idxs["measurements"] + 1
    idxs["params_begin"] = idxs["header"] + 1
    # NOTE(review): the line just above "measurements" is excluded from
    # params — presumably a blank separator line; confirm against format
    idxs["params_end"] = idxs["measurements"] - 1
    idxs["data"] = idxs["names"] + 1
    # parse the column
    names = lines[idxs["names"]].split(",")
    # parse the params_lines list(str) into params dict{param: value}
    params = dict()
    for line in lines[idxs["params_begin"] : idxs["params_end"]]:
        key, value = line.split(",")
        params[key] = value
    # converts str to float incl. "Ohms" removal
    def string_to_float(s):
        # e.g. "1.5 mOhms" -> 0.0015: the character before "Ohms" is the
        # SI prefix (space = none, 'm' = milli)
        idx = s.find("Ohms")
        if idx > 0:
            number = s.split(" ")[0]
            prefix = s[idx-1]
            return float(number) * {" ": 1, "m": 0.001}[prefix]
        return float(s)
    # parse data_lines list(str) into data list(list(floats))
    data = list()
    for data_line in lines[idxs["data"] :]:
        row = list()
        for item in data_line.split(","):
            row.append(string_to_float(item))
        data.append(row)
    return {"params": params, "names": names, "data":data}
|
8dffa2fb7a827066b6ba579df06f551ea84af847
| 63,568
|
from typing import OrderedDict
def ascii_encode_dict(item):
    """
    Recursively convert JSON-derived data to ascii bytes, since unicode
    identifiers, in particular, can cause problems.

    Strings become ascii-encoded bytes, lists are converted element-wise,
    dicts become OrderedDicts keyed in sorted order, and anything else is
    returned unchanged.
    """
    if isinstance(item, str):
        return item.encode('ascii')
    if isinstance(item, list):
        return [ascii_encode_dict(element) for element in item]
    if isinstance(item, dict):
        return OrderedDict(
            (ascii_encode_dict(key), ascii_encode_dict(item[key]))
            for key in sorted(item.keys()))
    return item
|
6130e78ccddc8123e1cfcc85309791cbf88540ad
| 63,569
|
def construct_existor_expr(attrs):
    """Construct an exists-or LDAP search expression.

    :param attrs: List of attributes on which we want to create the search
        expression.
    :return: A string such as "(|(a=*)(b=*))"; an empty string when attrs
        is empty.
    """
    if len(attrs) == 0:
        return ""
    clauses = "".join("(%s=*)" % att for att in attrs)
    return "(|%s)" % clauses
|
0f03f67778af11f004cb6a419fc0d7e626ac8721
| 63,574
|
def build_kd_tree(catalog, depth=0):
"""
Recursive algorithm to build a k-d tree from catalog
K-d tree is stored in a nested dictionary.
Parameters
----------
catalog: 2D numpy array. Ex: catalog[i] = [RA, DEC, ID]. See cross_matching_tools.load_bss, load_cosmos
depth: int, for recursion.
"""
n = len(catalog)
if n <= 0:
return None
coord = depth % 2
sorted_data = catalog[catalog[:,coord].argsort()] # will sort numpy array by column instead of by axis
return {
'star': sorted_data[n // 2],
'left': build_kd_tree(sorted_data[:n // 2], depth + 1),
'right': build_kd_tree(sorted_data[n // 2 + 1:], depth + 1)
}
|
d7a89f51adf32fe3c29255bcb5b361116f5c0d4b
| 63,575
|
def sort_cards_by_type(card):
    """Sorting key for card types (as added by add_card_types): the card's
    first type, or '' when it has none."""
    return card.types[0] if card.types else ''
|
ff9ac52e21807fc0dfc0b238f3ff4f428df5111f
| 63,586
|
from typing import List
def check_polygon(nums: List) -> bool:
    """
    Takes list of possible side lengths and determines whether a
    two-dimensional polygon with such side lengths can exist.
    Returns a boolean value for the < comparison
    of the largest side length with sum of the rest.
    Fix: the input list is no longer mutated (the original sorted and
    popped it in place, surprising callers).
    Wiki: https://en.wikipedia.org/wiki/Triangle_inequality
    >>> check_polygon([6, 10, 5])
    True
    >>> check_polygon([3, 7, 13, 2])
    False
    >>> check_polygon([])
    Traceback (most recent call last):
        ...
    ValueError: List is invalid
    """
    if not nums:
        raise ValueError("List is invalid")
    sides = sorted(nums)
    return sides[-1] < sum(sides[:-1])
|
37beb62faf06f84bd69a51b60d98f9398b597cd1
| 63,588
|
def guess_age_group(swimmer_age: int) -> tuple[int, int]:
    """Guess the age group from a swimmer's age.

    Args:
        swimmer_age (int): The swimmer's age.

    Returns:
        tuple[int, int]: The age group as (age_min, age_max); ages above
        14 fall into the open group (0, 109).
    """
    # Standard age brackets, checked in ascending order.
    for age_min, age_max in ((0, 8), (9, 10), (11, 12), (13, 14)):
        if swimmer_age <= age_max:
            return age_min, age_max
    # Probably open
    return 0, 109
|
8f44ad7217ab2d4d273860e732bc68cb2218838c
| 63,593
|
import pytz
def _convert_to_utc(date, default_tz):
    """Convert date to naive UTC, using default_tz if no time zone is set.

    Bug fix: the original called ``date.astimezone(pytz.utc)`` but threw
    the result away, so the returned naive datetime still carried the
    original wall-clock time instead of UTC.

    :param date: datetime (naive or aware) or None
    :param default_tz: pytz timezone applied when ``date`` is naive
    :return: naive datetime expressed in UTC, or None if date is None
    """
    if date is None:
        return date
    if date.tzinfo is None:
        date = default_tz.localize(date)
    date = date.astimezone(pytz.utc)
    # strip timezone info
    return date.replace(tzinfo=None)
|
c80dd03d1838114be7d998690445d2cbc3cbfa5c
| 63,596
|
import math
def round_away_from_zero(number, digits=0):
    """
    Round numbers using the 'away from zero' strategy as opposed to the
    'Banker's rounding strategy.' The strategy refers to how we round when
    a number is half way between two numbers. eg. 0.5, 1.5, etc. In python 2
    positive numbers in this category would be rounded up and negative numbers
    would be rounded down. ie. away from zero. In python 3 numbers round
    towards even. So 0.5 would round to 0 but 1.5 would round to 2.
    See here for more on floating point rounding strategies:
    https://en.wikipedia.org/wiki/IEEE_754#Rounding_rules
    We want to continue to round away from zero so that student grades remain
    consistent and don't suddenly change.
    """
    scale = 10.0 ** digits
    # Round the magnitude half-up, then restore the sign: equivalent to
    # floor(x*p + 0.5) for x >= 0 and ceil(x*p - 0.5) for x < 0.
    magnitude = float(math.floor(abs(number) * scale + 0.5)) / scale
    return magnitude if number >= 0 else -magnitude
|
61fe5e9ca086add8dcbf72e16bc5d59902949ce7
| 63,598
|
def clean_vbs(s):
    """
    Strip the leading "VBS" token that LeCroy scope responses carry.

    Splits on the first space and returns everything after it, discarding
    the "VBS" prefix.
    """
    return s.split(' ', 1)[1]
|
d0cbff1ffb85c6e6c46be6797744aede7abe05f5
| 63,599
|
def mtx_smoothing(matrix, epsilon):
    """Add epsilon to every entry of a nested-dict matrix, in place.

    Returns the (mutated) matrix together with the sum of all smoothed
    entries.
    """
    total = 0
    for row in matrix.values():
        for key in row:
            row[key] += epsilon
            total += row[key]
    return matrix, total
|
b0c0cd22d976432bcff83cfbb0b3da31a1f98a87
| 63,600
|
def gettext_getfunc( lang ):
    """
    Returns a function used to translate to a specific catalog.
    """
    # Note: a real implementation would load the gettext catalog here and
    # capture it in the closure; this stub just tags the string with the
    # language code to illustrate the idea.
    def tr(text):
        return '[%s] %s' % (lang, text)
    return tr
|
c303505812581a8101511bc23abfe8be8c390088
| 63,603
|
import copy
def correct_Q(action, state, reward, old_Q, next_Q):
    """
    Produce a corrected Q(s,a) estimate according to:
    Q(s,a) = R + gamma*Q(s+1,a+1)

    Only the entry for ``action`` (index 0, 1 or 2 into the Q-function)
    is updated; other entries are copied from ``old_Q``.  ``state`` is
    unused but kept for interface compatibility.
    """
    gamma = 0.5  # weights importance of future reward
    corrected = copy.copy(old_Q)
    corrected[action] = reward + gamma * next_Q[action]
    return corrected
|
10cf7bedb4d8bdd67b63eb9bc199ca60b9fd119e
| 63,607
|
def plcfreq(self, spec="", sectbeg="", sectend="", **kwargs):
    """Plots the frequency response for the given CYCSPEC specification.
    APDL Command: PLCFREQ

    Parameters
    ----------
    spec
        CYCSPEC specification number (ordered 1 to N in the order input;
        use CYCSPEC,LIST to view the current list order). Defaults to 1.
    sectbeg
        Beginning sector number to plot. Defaults to 1.
    sectend
        Ending sector number to plot. Defaults to the total number of
        sectors expanded (/CYCEXPAND).

    Notes
    -----
    Following a cyclic mode-superposition harmonic analysis, this command
    plots the result item given by a CYCSPEC specification versus the
    harmonic frequency, one curve for each of the specified sectors. A
    CYCCALC command must have been issued prior to this command.
    """
    command = "PLCFREQ,{},{},{}".format(spec, sectbeg, sectend)
    return self.run(command, **kwargs)
|
c54d1a13481d9b8e57dbd266bd55cb7cf362ab7b
| 63,610
|
def m2ft(meters):
    """Convert a length in meters to feet (1 ft = 0.3048 m)."""
    METERS_PER_FOOT = 0.3048
    return meters / METERS_PER_FOOT
|
ccb442ec9ff2b045d00333aaf10b082d3fcdf597
| 63,611
|
def get_tablename(string_identifier):
    """
    Return the table name from a dotted identifier.

    Works for both 3-part "schema.table.columnname" and 4-part
    "db.schema.table.columnname" conventions: the second part from the
    right is always the table name.  A bare single name is returned as-is.
    """
    segments = string_identifier.split(".")
    return segments[-2] if len(segments) >= 2 else segments[-1]
|
7037a51a1b02f1b7baf68cd2c5eaf61a52bc2c2a
| 63,615
|
def map_ip_address_info(address, nmap_store):
    """Copy IP address details from an nmap address mapping into the record.

    Sets the "ip_address" and "ip_type" keys on nmap_store and returns it.
    """
    nmap_store.update(
        ip_address=address.get("addr"),
        ip_type=address.get("addrtype"),
    )
    return nmap_store
|
bfb98f18f9728b31a6040a62deec1047d4102971
| 63,618
|
import math
def distance(orig, dest):
    """Great-circle distance between two (lat, lon) points, in kilometres.

    Uses the haversine formula.  The original comment claimed the radius
    was in meters, but 6371 is Earth's mean radius in kilometres, so the
    result is km.  The lambda assignment was also replaced with a proper
    inner def (PEP 8 E731); behaviour is unchanged.
    """
    radius = 6371  # mean Earth radius in kilometres

    def haversine(x):
        return math.sin(x / 2) ** 2

    lat1, lon1 = orig
    lat2, lon2 = dest
    dphi = math.radians(lat2 - lat1)
    dlambda = math.radians(lon2 - lon1)
    phi1, phi2 = math.radians(lat1), math.radians(lat2)
    axr = haversine(dphi) + math.cos(phi1) * math.cos(phi2) * haversine(dlambda)
    return 2 * radius * math.atan2(math.sqrt(axr), math.sqrt(1 - axr))
|
416d4297fb42de7a55fed07d9a2a5a9bb2187677
| 63,624
|
def rsa_blind(message, randint, exponent, modulus):
    """
    Return message RSA-blinded with integer randint for a keypair
    with the provided public exponent and modulus:
    (message * randint^exponent) mod modulus.
    """
    blinding_factor = pow(randint, exponent, modulus)
    return (message * blinding_factor) % modulus
|
d05996261aa55f5ba1f98bc0c3589a4f491d5b1e
| 63,628
|
from typing import Union
from typing import Type
from typing import Callable
def fqn(obj: Union[Type, Callable]) -> str:
    """Return the fully qualified name of an object, str() as fallback."""
    try:
        prefix = obj.__module__ + '.'
    except AttributeError:
        prefix = ''
    try:
        name = obj.__qualname__
    except AttributeError:
        name = str(obj)
    return prefix + name
|
07b059c695de919e596bd6b6cd98faaaacf04359
| 63,631
|
def draw_line(a1, a2, b, out_grid, colour, vert):
    """
    Helper function for solve_5c2c9af4, used by draw_concentric_squares.
    Paints the cells between columns a1 and a2 (inclusive) of row b with
    colour.  When vert is True the grid is transposed first, so the same
    loop draws a vertical line, and the result is transposed back.
    """
    canvas = out_grid.T if vert else out_grid
    for col in range(a1, a2 + 1):
        canvas[b][col] = colour
    return canvas.T if vert else out_grid
|
998bf8afeb398e0236f47916105dff1c253a0fb8
| 63,632
|
def str_to_list(value):
    """
    Wrap a bare string in a list; pass anything else through unchanged.

    @param value: The value to convert to a list
    @return: [value] if value is a string, otherwise value itself
    """
    return [value] if isinstance(value, str) else value
|
10ad40423a979ee4795841daa32b2e62d57fa69f
| 63,635
|
def sqliterow_to_dict(row) -> dict:
    """Convert a sqlite3.Row instance to a plain dict (column name -> value)."""
    return {key: row[key] for key in row.keys()}
|
5e4353a7c1ea20229068085f126657ce631b1143
| 63,638
|
import torch
def gather_positions(sequence, positions):
    """Gathers the vectors at the specific positions over a minibatch.

    Args:
        sequence: A [batch_size, seq_length] or
            [batch_size, seq_length, depth] tensor of values
        positions: A [batch_size, n_positions] tensor of indices

    Returns: A [batch_size, n_positions] or
        [batch_size, n_positions, depth] tensor of the values at the indices
    """
    dims = sequence.size()
    has_depth = len(dims) == 3
    if has_depth:
        batch, length, depth = dims
    else:
        batch, length = dims
        depth = 1
    # Offset each row's indices so they address the flattened [B*L, D] view.
    offsets = torch.unsqueeze(length * torch.arange(batch, device=positions.device), -1)
    flat_idx = torch.reshape(positions + offsets, [-1])
    flat_seq = torch.reshape(sequence, [batch * length, depth])
    picked = flat_seq[flat_idx]
    if has_depth:
        return torch.reshape(picked, [batch, -1, depth])
    return torch.reshape(picked, [batch, -1])
|
308c12080a49956d223f0c1da8f3a43c25ed8b3f
| 63,640
|
import time, requests
from datetime import datetime
def checkExonoraTor(ip, posix_time):
    """Check if an IP was a TOR exit node at a particular time by querying ExonoraTor.

    Sends the request via https and reads the content returned for a string
    indicating that the IP was in fact an exit node.

    Bug fix: the query string previously contained the mojibake
    "×tamp=" (an HTML-entity-mangled "&timestamp="), so the timestamp
    parameter was malformed.

    Parameters:
        ip: The IP address being examined.
        posix_time: A POSIX/UNIX timestamp (seconds from epoch)
    Returns:
        True if the response contains 'Result is positive', False if it
        contains 'Result is negative'; otherwise prints a notice and
        raises UserWarning.
    """
    date = datetime.fromtimestamp(posix_time).strftime('%Y-%m-%d')
    url = ('https://exonerator.torproject.org/?ip=' + str(ip)
           + '&timestamp=' + date + '&lang=en')
    response = requests.get(url).content
    if str(response).find('Result is positive') > 0:
        return True
    elif str(response).find('Result is negative') > 0:
        return False
    print('Could not determine if {} is an exit node.'.format(ip))
    raise UserWarning
|
e90413013e5f9f631dbc238de468a18a3f1fd238
| 63,641
|
import uuid
def uuid_from_string(uuid_str):
    """Returns a uuid object for the given uuid string; hyphens are ignored.

    arguments:
        uuid_str (string): the hexadecimal representation of the 128 bit uuid
            integer; hyphens are ignored

    returns:
        uuid.UUID object, or None when the input is missing, too short, or
        unparseable

    notes:
        if a uuid.UUID object is passed by accident, it is returned;
        if the string starts with an underscore, the underscore is skipped
        (to cater for a fesapi quirk);
        any tail beyond the uuid string is ignored
    """
    if uuid_str is None:
        return None
    if isinstance(uuid_str, uuid.UUID):
        # resilience to accidentally passing a uuid object
        return uuid_str
    try:
        # skip the leading underscore fesapi sometimes adds
        start = 1 if uuid_str[0] == '_' else 0
        candidate = uuid_str[start:start + 36]
        if len(candidate) < 36:
            return None
        return uuid.UUID(candidate)
    except Exception:
        # unparseable input: mirror the original's silent None
        return None
|
a5d4ba647f8bafd97f3be0971d6d3025757d6aa2
| 63,642
|
def LoadVersions(filepath):
    """Load version data from specified filepath into a dictionary.

    Blank lines and lines starting with '#' are skipped; every other line
    must look like KEY=VALUE (split on the first '=' only).

    Arguments:
        filepath: path to the file which will be loaded.
    Returns:
        A dictionary of KEY:VALUE pairs.
    Raises:
        RuntimeError: if a non-comment, non-blank line contains no '='.
    """
    versions = {}
    # with-statement closes the handle; the original leaked the open file
    with open(filepath, 'r') as fh:
        version_lines = fh.readlines()
    for line_num, line in enumerate(version_lines, start=1):
        line = line.strip()
        if line.startswith('#'):
            continue
        if line == '':
            continue
        if '=' not in line:
            raise RuntimeError('Expecting KEY=VALUE in line %d:\n\t>>%s<<' %
                               (line_num, line))
        key, val = line.split('=', 1)
        versions[key] = val
    return versions
|
1d7a3571fca001b5ca1ddc1981d7cbd7295a11ec
| 63,645
|
import re
def _sanitize_platform(platform_name):
"""Platform names must only be alphanumeric with underscores"""
return re.sub('[^a-z0-9_]', '_', platform_name.lower())
|
7624117e0a5f3fad9d13c2ed47f72ec2f0349790
| 63,646
|
def time_to_int(time):
    """Encode a time-of-day as the integer HHMM (e.g. 09:05 -> 905)."""
    return int('%02d%02d' % (time.hour, time.minute))
|
a3c7713fa37a1c40eb0168bd4ac97d6223495cc2
| 63,647
|
import json
def file_to_json(mapfile):
    """Return the dictionary parsed from a JSON mapping.

    ``mapfile`` may be a filename string (opened and read here) or an
    already-open file-like object.
    """
    if not isinstance(mapfile, str):
        return json.load(mapfile)
    with open(mapfile, 'r') as filehandle:
        return json.load(filehandle)
|
3c7a8c78d99b00a6d2e5d1d39d9c2f15310090d8
| 63,649
|
def get_twindb_config_dir(client, container_id):
    """Read hostconfig of a container and return directory on a host server
    that is mounted as /etc/twindb in the container

    Removed the stray debug ``print(bind_pair)`` the original left in.

    :param client: Docker client class instance
    :type client: APIClient
    :param container_id: container id. can be something like c870459a6724 or
        container name like builder_xtrabackup
    :return: directory on the host machine
    :rtype: str
    :raise RuntimeError: if no bind mounts /etc/twindb
    """
    host_config = client.api.inspect_container(container_id)["HostConfig"]
    # each entry looks like "/host/dir:/guest/dir"
    for bind_pair in host_config["Binds"]:
        bind = bind_pair.split(":")
        if bind[1] == "/etc/twindb":
            return bind[0]
    raise RuntimeError("Could not find binding for /etc/twindb")
|
fb7659bb86a93b15dd0046c6f7cbb59d02c6eece
| 63,650
|
def trainable_parameters(net):
    """Return the model's parameters that require gradients, as a list."""
    return [param for param in net.parameters() if param.requires_grad]
|
99775c73e58cc0b5deaf5de11ac3865269fef3cc
| 63,652
|
def remove_outliers(data, bounds):
    """ Return indices of rows whose second column lies outside bounds.

    :param data: numpy array (or sequence of indexable rows)
    :param bounds: (lower, upper) open interval of accepted values
    :return: list of row indices that need to be removed
    """
    lower, upper = bounds
    return [idx for idx, row in enumerate(data)
            if not lower < row[1] < upper]
|
d1e572c1131e602a39823f3a4a6cd510b0d19e0b
| 63,654
|
def categorise(functions, unique=False):
    """categorise(functions, unique=False)(sequence) - Split up a sequence according to functions.
    :: ([a -> bool], bool) -> [a] -> [[a]]
    functions: A list of predicates, one for each list you want as output.
    sequence: The sequence you want to categorise.
    Returns a list of lists, one per predicate, plus a final list for
    items that no predicate captured.  An item can appear in several
    lists when more than one predicate returns True for it, unless
    ``unique`` is set, in which case only the first match claims it.
    >>> even = lambda x: x % 2 == 0
    >>> multiple_of_3 = lambda x : x % 3 == 0
    >>> nums = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    >>> categorise([even, multiple_of_3])(nums)
    [[2, 4, 6, 8, 10], [3, 6, 9], [1, 5, 7]]
    >>> categorise([even, multiple_of_3], unique=True)(nums)
    [[2, 4, 6, 8, 10], [3, 9], [1, 5, 7]]
    >>> categorise([multiple_of_3, even], unique=True)(nums)
    [[3, 6, 9], [2, 4, 8, 10], [1, 5, 7]]
    """
    def categorise_functions(sequence):
        # one bucket per predicate, plus a final catch-all bucket
        buckets = [[] for _ in range(len(functions) + 1)]
        for element in sequence:
            matched = False
            for bucket, predicate in zip(buckets, functions):
                if predicate(element):
                    bucket.append(element)
                    matched = True
                    if unique:
                        break
            if not matched:
                buckets[-1].append(element)
        return buckets
    return categorise_functions
|
8c5a2d883e094da458858f063fa2127309b50e7b
| 63,655
|
def fortran_int(s, blank_value = 0):
    """Returns int of a string written by Fortran.

    (The original docstring claimed a float was returned; the function has
    always returned an int.)  Its behaviour differs from the built-in
    int() in the following ways:
    - a blank string will return the specified blank value (default zero)
    - embedded spaces are ignored
    If any other errors are encountered, None is returned.
    """
    try:
        return int(s)
    except ValueError:
        s = s.strip()
        if not s:
            return blank_value
        try:
            # drop embedded spaces, e.g. Fortran's ' 1 234'
            return int(s.replace(' ', ''))
        except ValueError:
            # narrowed from a bare except: int() on a str raises ValueError
            return None
|
5ebbd2098e17f8b67eee33997c1c70e388a46432
| 63,658
|
def calcEccentricityEarthOrbit( t ):
    """ Calculate the eccentricity of earth's orbit (unitless).

    NOTE(review): ``t`` is presumably time in Julian centuries since a
    reference epoch -- confirm against the caller.
    """
    return 0.016708634 - t * (0.000042037 + 0.0000001267 * t)
|
223825b8422dd350de43a0e945dde32dacb0ce3c
| 63,659
|
def italics(msg: str):
    """Wrap the message in single asterisks (markdown italics)."""
    return '*' + msg + '*'
|
a303d99bdf2d114081f8e3b9c72cb5be2396bfab
| 63,661
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.