content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def interpolate_position_embeddings(model, layer, param):
    """Interpolate pretrained position embeddings to a new resolution.

    Fine-tuning at a different resolution than that which a model was
    pretrained at requires interpolating the learned position embeddings.

    :param model: model whose ``trunk`` may expose ``interpolate_position_embedding``
    :param layer: destination parameter; its ``.shape`` is the target shape
    :param param: pretrained parameter to (possibly) interpolate
    :returns: ``param``, interpolated when the shapes differ and the trunk
        supports interpolation; otherwise unchanged
    :raises RuntimeError: if interpolation was attempted but failed
    """
    if (
        hasattr(model.trunk, "interpolate_position_embedding")
        and layer.shape != param.shape
    ):
        interp = model.trunk.interpolate_position_embedding
        if callable(interp):
            try:
                param = interp(param)
            except Exception as exc:
                # Chain the original error instead of discarding it, and stop
                # catching BaseException (which swallowed KeyboardInterrupt).
                raise RuntimeError(
                    "Unable to interpolate position embeddings"
                ) from exc
    return param
|
8c8c9c11015b93473a62899baf8301ccd4cdaad0
| 60,380
|
def is_oppo_dispossessed(event_list, team):
    """Return True when the first event is an opposition dispossession.

    NOTE(review): only ``event_list[:1]`` (the first event) is inspected;
    presumably intentional — confirm with callers.
    """
    for event in event_list[:1]:
        if event.type_id == 50 and event.team != team:
            return True
    return False
|
24b39dd984e660fdad565576f865c5995fd83bac
| 60,382
|
import math
def _norm_pdf(x): # pragma: no cover
    """
    Returns the probability density function value of a standard normal
    Gaussian distribution (mean 0, variance 1) evaluated at ``x``.
    """
    return math.exp(-x**2/2) / math.sqrt(2 * math.pi)
|
e468e1b2ec674a062b9f4b3c8af4a9e2aa3169ef
| 60,384
|
def growing_plant(upSpeed, downSpeed, desiredHeight) -> int:
    """Return the day on which the plant first reaches desiredHeight.

    Each day the plant grows by upSpeed meters; each night it shrinks by
    downSpeed meters.  The plant starts at 0 meters at the beginning of a
    day, and the height is checked at the end of each day (before the
    nightly shrink).

    :param upSpeed: daytime growth in meters
    :param downSpeed: nightly shrink in meters
    :param desiredHeight: the target height in meters
    :return: 1-based day count on which the target is reached
    """
    current = 0
    day = 0
    while current <= desiredHeight:
        current += upSpeed
        day += 1
        if current >= desiredHeight:
            break
        current -= downSpeed
    return day
|
106b50e11709d75fe5ef71e0d01292133a5866cf
| 60,387
|
def parse_key_value_list(kv_string_list, error_fmt, error_func):
    """Parse a list of strings like ``KEY=VALUE`` into a dictionary.

    :param kv_string_list: list of ``KEY=VALUE`` strings; only the first
        ``=`` splits, so values may themselves contain ``=``
    :type kv_string_list: [str]
    :param error_fmt: format string accepting one ``%s`` argument which is the
        malformed (i.e. not ``KEY=VALUE``) string
    :type error_fmt: str
    :param error_func: function to call when a malformed string is encountered
    :type error_func: function(str)
    :returns: dict mapping keys to values; malformed entries are skipped
    """
    parsed = {}
    for entry in kv_string_list:
        key, sep, val = entry.partition('=')
        if not sep:
            # No '=' at all: report and move on, matching the old behavior.
            error_func(error_fmt % (entry,))
            continue
        parsed[key] = val
    return parsed
|
0aa2df08d75efbc5981ffa3f92d815f6cdd84816
| 60,388
|
def concat_rfc_lines(lines):
    """
    Given a list of lines where a same RFC is described on multiple lines,
    concatenate the lines describing the same RFC.

    A new RFC starts at every line beginning with 'RFC'; subsequent lines are
    treated as continuations and appended to it.

    Fixes two defects of the previous version: the final RFC was never
    appended to the result, and a spurious empty first entry was emitted.

    :param lines: iterable of text lines
    :returns: list of concatenated per-RFC strings
    """
    rfc_lines = []
    current_rfc = ''
    for line in lines:
        if line.startswith('RFC'):
            if current_rfc:
                # End of previous RFC, append it to the list.
                rfc_lines.append(current_rfc)
            current_rfc = line  # Beginning of a new RFC.
        else:
            current_rfc += line
    if current_rfc:
        # Don't lose the last RFC.
        rfc_lines.append(current_rfc)
    return rfc_lines
|
fd7c10e0bb68e1391d5e95a466b1b6e80a14ed85
| 60,395
|
def get_attention_map(self):
    """Return the attention map currently stored in ``self.medcam_dict``."""
    attention_map = self.medcam_dict['current_attention_map']
    return attention_map
|
9cedd8b6170f2ff9587fa531205eaba61b35a40e
| 60,402
|
def ancestry(mention):
    """Compute the ancestry of a mention.

    Follows the definition of ancestry by Durrett and Klein (2013); see
    Greg Durrett and Dan Klein, "Easy Victories and Uphill Battles in
    Coreference Resolution", EMNLP 2013.
    http://anthology.aclweb.org/D/D13/D13-1203.pdf

    Args:
        mention (Mention): A mention.

    Returns:
        The tuple ('ancestry', ANCESTRY), where ANCESTRY is the ancestry of
        the mention.
    """
    label = "ancestry"
    return label, mention.attributes[label]
|
1661d27f7788d3b671041658ef157989560215b7
| 60,404
|
import math
def haversine(lat1: float, lat2: float, lon1: float, lon2: float) -> float:
    """Great-circle (haversine) distance in metres between two lat/lon points."""
    radius_m = 6371e3  # mean Earth radius in metres
    phi1 = math.radians(lat1)
    phi2 = math.radians(lat2)
    dphi = math.radians(lat2 - lat1)
    dlam = math.radians(lon2 - lon1)
    sin_dphi = math.sin(dphi / 2)
    sin_dlam = math.sin(dlam / 2)
    a = sin_dphi * sin_dphi + math.cos(phi1) * math.cos(phi2) * sin_dlam * sin_dlam
    angle = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return radius_m * angle
|
8d2579bd33b65b9b0db2e9079c5bfd309b9fc6f9
| 60,412
|
def note_value_to_string(list_of_dict_keys_time):
    """Convert per-timestep note arrays to comma-joined strings.

    Arguments:
    ==========
    - list_of_dict_keys_time : list
        One dict per song; each dict maps a timestep number to an iterable of
        note values (e.g. array([36, 48, 60])).

    Returns
    =======
    - list
        Same structure, but each note iterable replaced by a string such as
        '36,48,60'.
    """
    return [
        {step: ','.join(str(note) for note in notes) for step, notes in song.items()}
        for song in list_of_dict_keys_time
    ]
|
70698811c8a715001f86dfc188e8f41e363d6192
| 60,413
|
import torch
def deepim_boxes(
    ren_boxes,
    ren_centers_2d,
    obs_boxes=None,
    lamb=1.4,
    imHW=(480, 640),
    outHW=(480, 640),
    clamp=False,
):
    """Compute square-ish crop boxes centered on rendered object centers.

    Args:
        ren_boxes: N x 4 rendered boxes (x1, y1, x2, y2)
        ren_centers_2d: Nx2, rendered object center is the crop center
        obs_boxes: N x 4, if None, only use the rendered boxes/centers to
            determine the crop region
        lamb: enlarge the scale of cropped region
        imHW (tuple): (H, W) of the input image; only used when ``clamp`` is True
        outHW (tuple): (H, W) of the output crop; fixes the target aspect ratio
        clamp (bool): clip crop boxes to the image bounds (currently disabled,
            see the assert below)
    Returns:
        crop_boxes (Tensor): N x 4, either the common region from obs/ren or just obs
        resize_ratios (Tensor): Nx2, resize ratio of (w,h), actually the same in w,h because we keep the aspect ratio
    """
    ren_x1, ren_y1, ren_x2, ren_y2 = (ren_boxes[:, i] for i in range(4))  # (N,)
    ren_cx = ren_centers_2d[:, 0]  # (N,)
    ren_cy = ren_centers_2d[:, 1]  # (N,)
    outH, outW = outHW
    aspect_ratio = outW / outH  # 4/3 or 1
    if obs_boxes is not None:
        obs_x1, obs_y1, obs_x2, obs_y2 = (obs_boxes[:, i] for i in range(4))  # (N,)
        # Distances from the crop center to every observed and rendered box
        # edge; the max below makes the crop cover both boxes.
        xdists = torch.stack(
            [
                ren_cx - obs_x1,
                ren_cx - ren_x1,
                obs_x2 - ren_cx,
                ren_x2 - ren_cx,
            ],
            dim=1,
        ).abs()
        ydists = torch.stack(
            [
                ren_cy - obs_y1,
                ren_cy - ren_y1,
                obs_y2 - ren_cy,
                ren_y2 - ren_cy,
            ],
            dim=1,
        ).abs()
    else:
        xdists = torch.stack([ren_cx - ren_x1, ren_x2 - ren_cx], dim=1).abs()
        ydists = torch.stack([ren_cy - ren_y1, ren_y2 - ren_cy], dim=1).abs()
    xdist = xdists.max(dim=1)[0]  # (N,)
    ydist = ydists.max(dim=1)[0]
    # Crop extent: twice the largest half-distance, enlarged by lamb, forced
    # to the output aspect ratio, and at least 1 pixel tall.
    crop_h = torch.max(xdist / aspect_ratio, ydist).clamp(min=1) * 2 * lamb  # (N,)
    crop_w = crop_h * aspect_ratio  # (N,)
    x1, y1, x2, y2 = (
        ren_cx - crop_w / 2,
        ren_cy - crop_h / 2,
        ren_cx + crop_w / 2,
        ren_cy + crop_h / 2,
    )
    boxes = torch.stack([x1, y1, x2, y2], dim=1)
    # NOTE(review): this assert makes the clamp branch below unreachable;
    # presumably clamping was deliberately disabled -- confirm before enabling.
    assert not clamp
    if clamp:
        imH, imW = imHW
        boxes[:, [0, 2]] = torch.clamp(boxes[:, [0, 2]], 0, imW - 1)
        boxes[:, [1, 3]] = torch.clamp(boxes[:, [1, 3]], 0, imH - 1)
    resize_ratios = torch.stack([outW / crop_w, outH / crop_h], dim=1)
    return boxes, resize_ratios
|
ee02929b1a67ea763b262c840cbc670bda0fa4c9
| 60,414
|
def SkipLastNItems(iterable, n):
    """
    Generator yielding all but the final n items of a finite stream.

    SkipLastNItems(iterable, n) -> iterator

    iterable -- a sequence, iterator, or some object which supports
                iteration, yielding items of any type.
    n -- an integer or None; when None or <= 0 the iterable is returned
         unchanged.

    Unlike the previous version, this streams lazily with an n-item buffer
    instead of materializing the whole input in a list.
    """
    def _skip(items, count):
        from collections import deque
        buffer = deque()
        for item in items:
            buffer.append(item)
            if len(buffer) > count:
                # The oldest buffered item is now guaranteed not to be one
                # of the final `count` items, so it is safe to emit.
                yield buffer.popleft()
    if n and n > 0:
        iterable = _skip(iterable, n)
    return iterable
|
c8fce1748ee23c554c12bf89cba3c59c0991afb5
| 60,418
|
def cpec_equimolarity(list_seq):
    """
    Compute relative volumes of PCR products for a CPEC reaction.

    Parameters:
    - list_seq: (list) PCR products as namedtuples with fields:
        - name: (str) name given to a particular PCR product of interest
        - dna_conc: (float) in units g/µL
        - seq_length: (int) number of base **pairs**

      Example namedtuple:
          A1 = Sequence(name="A1", dna_conc=42.3*10**-9, seq_length=1600)

    Returns:
    - dict keyed by part name; volumes are relative to the most concentrated
      piece (which gets 1.0).
    """
    # Molar concentration of fragments per µL for each sequence.
    fragment_concs = {}
    for seq in list_seq:
        fragment_concs[str(seq.name)] = (
            (float(seq.dna_conc) / (1.62 * 10 ** -21)) / float(seq.seq_length)
        )
    peak = float(max(fragment_concs.values()))
    return {name: peak / float(conc) for name, conc in fragment_concs.items()}
|
a948837597ba511af5db1afd2dedc40c8ac266da
| 60,422
|
import base64
def image_decoder_b64(encoded_image: str) -> bytes:
    """
    Decode a Weaviate-format base64 image string into raw bytes.

    Parameters
    ----------
    encoded_image : str
        The encoded image.

    Returns
    -------
    bytes
        Decoded image as a binary string.
    """
    raw = encoded_image.encode('utf-8')
    return base64.b64decode(raw)
|
d9fb93ac16d47df3c95c709fd3a953185d5aaaf4
| 60,423
|
def exact_match(s1: str, s2: str) -> bool:
    """Return True when the two strings are exactly equal.

    Parameters
    ----------
    s1: string
        Left side string of the comparison.
    s2: string
        Right side string of the comparison.

    Returns
    -------
    bool
    """
    matches = (s1 == s2)
    return matches
|
d6e3420e6571f24bc32b83c1155af0f6ef0d2b46
| 60,426
|
def intattr(d, name):
    """Return attribute ``name`` of mapping ``d`` as an int, or None if absent."""
    if name not in d:
        return None
    return int(d[name].strip())
|
54e54e165faec3403bae9017fbd73e4ef6388b29
| 60,427
|
def adder(a, b):
    """
    Return a added to b.

    The doctest below previously claimed ``adder(1, 3)`` is 3 — fixed to 4.

    >>> adder(1, 3)
    4
    >>> adder('a', 'b')
    'ab'
    >>> adder(1, 'b')
    Traceback (most recent call last):
    TypeError: unsupported operand type(s) for +: 'int' and 'str'
    """
    return a + b
|
5c8751136048dbc07e32abb17c1f98149f3c490c
| 60,428
|
def polygon2points(p):
    """
    Serialize a polygon's exterior ring as "x1,y1,x2,y2,..." for DS documents.

    :param p: shapely.geometry.Polygon
    :returns: a string representing the set of points
    """
    pieces = []
    for x, y in p.exterior.coords:
        pieces.append("%s,%s" % (x, y))
    return ",".join(pieces)
|
e2fa507eed3879682293b81a9c021678c8aeeb5d
| 60,429
|
def strip_url_fields(blob):
    """
    Recursively remove keys containing "_url" from nested dicts/lists.

    The "_url" fields returned by GitHub aren't useful for Security Monkey
    and only bloat the record; mutation happens in place.

    :param blob: dict, list, or scalar
    :return: the (possibly mutated) blob
    """
    if isinstance(blob, list):
        for element in blob:
            strip_url_fields(element)
    if not isinstance(blob, dict):
        return blob
    doomed_keys = []
    for key in blob:
        # Recurse into nested containers first, then mark "_url" keys.
        if isinstance(blob[key], (dict, list)):
            strip_url_fields(blob[key])
        if "_url" in key:
            doomed_keys.append(key)
    for key in doomed_keys:
        del blob[key]
    return blob
|
bb4fb9fe5053ef1425298c72534049b4d31e6385
| 60,431
|
def restruct_nearby_place(places: list) -> list:
    """Reshape Google Maps nearby-place data for the place-list frontend.

    Args:
        places: place-nearby records from the Google Maps API.

    Returns:
        List of dicts shaped like::

            {
                'place_name': <name>,
                'place_id': <place_id>,
                'photo_ref': [<photo_ref>],
                'types': [...],
            }

        Places without a 'photos' entry are dropped.
    """
    context = []
    for place in places:
        if 'photos' not in place:
            # No image for this place: skip it entirely.
            continue
        first_photo = place['photos'][0]['photo_reference']
        context.append({
            'place_name': place['name'],
            'place_id': place['place_id'],
            'photo_ref': [first_photo],
            'types': place['types'],
        })
    return context
|
b1a5369112a5182fd7a9397f751840b8e14c3335
| 60,436
|
import inspect
def input_list(f):
    """Return the positional argument names of callable ``f`` as a list."""
    spec = inspect.getfullargspec(f)
    return spec.args
|
2c6ac4f86b0d9eeb05b503c7ad061b2d129c7b55
| 60,437
|
import hashlib
def md5(data):
    """
    Return the hexadecimal MD5 digest of ``data`` (a bytes-like object).
    """
    digest = hashlib.md5(data)
    return digest.hexdigest()
|
4453958100cdc4dc399ea7df38c3816872fb4574
| 60,440
|
import re
def process_input(string):
    """
    Normalize a JSON-sourced string value.

    The json files are written in the format ``{'key': '"value"'}``; this
    strips one leading and one trailing double quote, collapses runs of
    spaces, and replaces literal ``\\n`` sequences with spaces.  Non-string
    input is treated as the empty string.

    Example:
        Input: '"value1\\nvalue2"'
        Output: 'value1 value2'
    """
    if not isinstance(string, str):
        string = ""
    if string:
        # startswith/endswith guard against IndexError when stripping leaves
        # the string empty (the old index-based checks crashed on '"').
        if string.startswith('"'):
            string = string[1:]
        if string.endswith('"'):
            string = string[:-1]
        string = re.sub(' +', ' ', string)  # collapse repeated spaces
    return string.replace("\\n", " ")
|
919417c195aa4c2c91779d2941a7e1c1f3edf74e
| 60,441
|
def publish(
    results_bucket,
    results_prefix,
    results_id,
    public_bucket,
    public_key,
    s3_client,
):
    """Copy an Athena results CSV into the public bucket with public-read ACL.

    Returns whatever ``s3_client.copy_object`` returns.
    """
    copy_source = f"{results_bucket}/{results_prefix}/{results_id}.csv"
    return s3_client.copy_object(
        CopySource=copy_source,
        Bucket=public_bucket,
        Key=public_key,
        ACL="public-read",
    )
|
3ee6880196ad17c7c886b126ae46ba4d094d93ab
| 60,449
|
def solution(A):
"""
A function that given a non-empty array A containing an odd number (N) of elements - all integers, and each element of the array can be paired with another element that has the same value, except for one element that is left unpaired, returns the value of the unpaired element.
For example, given array A such that:
A[0] = 9 A[1] = 3 A[2] = 9
A[3] = 3 A[4] = 9 A[5] = 7
A[6] = 9
the function should return 7
"""
# Define a dictionary to keep value-count pairs
value_dict = {}
# Define a variable to store the value with an odd count
odd_value = None
# Loop through A taking note of the current element's count
for el in A:
if el in value_dict:
value_dict[el] += 1
else:
value_dict[el] = 1
odd_value = el if value_dict[el] % 2 == 1 else odd_value
return odd_value
|
54fbef0954b5f08fae7494eedbfec49539d40253
| 60,453
|
def _remove_lines_starting_with(string, start_char):
    """
    Remove every line of ``string`` that starts with ``start_char``.

    :param string: the str to filter.
    :param start_char: the character(s) to look for at the beginning of a line.
    :returns: the remaining lines, each terminated by a newline.
    """
    kept = [line for line in string.split('\n') if not line.startswith(start_char)]
    return ''.join(line + '\n' for line in kept)
|
59092b26601fe2b1e0038c21b1fc827dc8ce0fb8
| 60,458
|
import warnings
def title_case(sentence):
    """Convert a string to title case.

    Parameters
    -----------
    sentence : string
        string to be converted to title case

    Returns
    ----------
    string
        String converted to title case

    Example
    ----------
    >>> title_case('ThIS iS a StrInG to BE ConVerTed.')
    'This Is A String To Be Converted.'
    """
    if not isinstance(sentence, str):
        raise TypeError('Invalid input %s - Input type must be type string' % (sentence))
    if sentence == '':
        warnings.warn('Input %s is empty' % (sentence))
    words = sentence.split()
    # First letter upper-cased, remainder lower-cased, single-space joined.
    return ' '.join(word[0].upper() + word[1:].lower() for word in words)
|
92a9c98a7de8e8554357f4b285ebc9b828c33361
| 60,459
|
def stringifyStyle(style):
    """Serialize a style mapping back into a "key:value;key:value" string."""
    parts = []
    for key, value in style.items():
        parts.append('{}:{}'.format(key, value))
    return ';'.join(parts)
|
70e7e97809b321fa7b9f1edaf2a750e1cde6948d
| 60,461
|
from typing import Sequence
from typing import TypeGuard
def check_sequence_str(
container: Sequence[object],
) -> TypeGuard[Sequence[str]]:
"""Check all objects in the container is of type str."""
return all(isinstance(o, str) for o in container)
|
51e5024044384a3967c0d6e24d46e33ca48e02c4
| 60,468
|
def flatten_combine_lists(*args):
    """
    Flatten the given lists into one combined list, preserving order.
    """
    combined = []
    for sub in args:
        combined.extend(sub)
    return combined
|
6f5bcfd29d041192effae3c6c1f52571bfc2fe57
| 60,480
|
import logging
def __init_logging(api_config: dict) -> logging.Logger:
    """
    Create a logging.Logger, using config["logs"]["log_format"] when present
    and the default format otherwise.

    :param api_config: api config dict
    :return: logger initialized
    """
    fmt = None
    try:
        fmt = api_config["logs"]["log_format"]
    except KeyError:
        pass
    if fmt is None:
        logging.basicConfig(level=logging.INFO)
    else:
        logging.basicConfig(level=logging.INFO, format=fmt)
    return logging.getLogger(__name__)
|
3a115e13db68e0bbe0d855e791bba3c3408d7473
| 60,483
|
from typing import List
import random
def random_state(width: int, height: int) -> List[List[int]]:
    """Generate a board whose cells are randomly dead (0) or alive (1).

    Args:
        width: The width of the board to be created.
        height: The height of the board to be created.

    Returns:
        A list of lists representing the board in a 2D space.
    """
    board = []
    for _ in range(height):
        row = [int(random.random() * 2) for _ in range(width)]
        board.append(row)
    return board
|
6aea8cc9aa6df5b963a04ed4a1a2c0389ded7555
| 60,487
|
def arelle_parse_value(d):
    """Decode an arelle string as a python type (int, then float, else str)."""
    if not isinstance(d, str):
        return d  # already decoded
    cleaned = d.replace(',', '')  # drop thousands separators
    for cast in (int, float):
        try:
            return cast(cleaned)
        except ValueError:
            pass
    return d
|
c2d61973ccba351b3b7e801fe39a8eb53061e8db
| 60,491
|
def get_key_in_parse_from_config_key(config_key):
    """
    Return the final dash-separated segment of a config key.

    Example:
        Arguments:
            config_key: "a-b-c"
        Returns:
            "c"
    """
    return config_key.rsplit("-", 1)[-1]
|
d93eac6f0c110ad1535bf27d88433f2acfe15127
| 60,495
|
def getWordsUntilLength(t, maxLength):
    """take words until maxLength is reached
    >>> getWordsUntilLength('this is a test', 60)
    u'this is a test'
    >>> getWordsUntilLength('this is a test', 7)
    u'this is'
    >>> getWordsUntilLength('this is a test', 2)
    u'this'
    """
    t = t.replace(',', '').replace('.', '')
    words = t.split()
    candidate = t
    while words:
        candidate = ' '.join(words)
        if len(candidate) <= maxLength:
            return candidate
        words.pop()
    # Nothing fit: fall back to the last candidate, which is the first word
    # alone (or the cleaned input when there were no words at all).
    return candidate
|
39f1d2aec466db8abcea2cd7bd04ed4973dbda17
| 60,497
|
from typing import List
def to_dms(degrees: float) -> List[float]:
    """Split decimal degrees into [whole degrees, whole minutes, seconds]."""
    whole_deg = int(degrees)
    minutes = (degrees - whole_deg) * 60
    whole_min = int(minutes)
    seconds = (minutes - whole_min) * 60
    return [whole_deg, whole_min, seconds]
|
45e3eef678edfa408536b892bd5114e49e4a53e5
| 60,498
|
import re
def make_circle_name(sid, rev):
    """Extract a succinct label based on sid.

    If digits can be extracted from the last ``_``-separated part, the first
    run of digits (leading zeros dropped) is used; otherwise the first letter,
    upper-cased.  A trailing '-' is appended when sid is in rev.

    Args:
        sid (str): seqid
        rev (set[str]): Set of seqids that are reversed

    Returns:
        str: short label for the sid
    """
    suffix = "-" if sid in rev else ""
    tail = sid.rsplit("_", 1)[-1]
    digits = re.findall(r"\d+", tail)
    if digits:
        label = str(int(digits[0]))
    else:
        label = tail[0].upper()
    return label + suffix
|
58cd19d5f1637807580bae56d9e94c1c5b672fa0
| 60,499
|
def find_max(root):
    """Find the node with the maximum value in a BST.

    Args:
        root(BinaryTreeNode): Root node

    Returns:
        BinaryTreeNode: right-most node (holds the maximum), or None for an
        empty tree.
    """
    if root is None:
        return None
    # The maximum of a BST is the right-most node; walk instead of recursing.
    while root.rchild is not None:
        root = root.rchild
    return root
|
ede4c293fc89d07d44560a00cb1d70183e969c67
| 60,503
|
import re
def unpack_package_def(package_definition):
    """
    Split a package definition into its GitHub components.

    :param str package_definition: String of the form
        <username>/<repository>:<branchname> defining the source of a package
    :returns: tuple containing (username, repository, branch) where branch
        is set to `None` for strings of the form <username>/<repository>
    :rtype: tuple
    """
    parts = re.split(r"[\/\:]", package_definition)
    # Pad with None so two-part definitions yield branch=None, and return a
    # real tuple as documented (the previous version returned a list).
    return tuple((parts + [None])[:3])
|
bc36dba9dc163c7dbb0b29d8db22f765371248c5
| 60,504
|
def form_diff_table_comparison_value(val):
    """
    Convert a raw value into a display value for the admin diff table on
    update-request forms.

    None and empty lists become "", lists are rendered element-wise and
    comma-joined, booleans (and "true"/"false" strings) become "Yes"/"No",
    and anything else is returned unchanged.

    :param val: the raw value to be converted to a display value
    :return: the display value
    """
    if val is None:
        return ""
    if isinstance(val, list):
        if not val:
            return ""
        rendered = [form_diff_table_comparison_value(item) for item in val]
        return ", ".join(rendered)
    if val is True or (isinstance(val, str) and val.lower() == "true"):
        return "Yes"
    if val is False or (isinstance(val, str) and val.lower() == "false"):
        return "No"
    return val
|
444d8963f4a6d1e076c86d6735966726e380ce6b
| 60,506
|
def find_max_item_support(pattern, supports):
    """
    Return the support of the item with maximum support among items in pattern.

    pattern: List. list of items in pattern.
    supports: Dict. item -> count dict

    Returns -1 for an empty pattern (and never less than -1).
    """
    best = -1
    for entry in pattern:
        support = supports[entry]
        if support > best:
            best = support
    return best
|
c925ec553ab1214e9ce95167885b749c6238b67b
| 60,508
|
import functools
def record_rule(func):
    """Meta wrapper for table normalization rules.

    .. note:: Record rules are applied after all value rules have been applied
        to a record. They take in a full record as an implicit parameter and
        are expected to return the normalized record back.

    Calling the wrapped function registers ``func`` on the instance (in
    ``record_rules`` and ``rules``) and returns ``self`` for chaining.

    :param callable func: The normalization rule
    :returns: The wrapped normalization rule
    :rtype: callable
    """
    @functools.wraps(func)
    def registrar(self, *args, **kwargs):
        self.record_rules.add(func)
        self.rules.append((func, args, kwargs,))
        return self
    return registrar
|
4e7fa2337e939614713a703a88d9a94f12d2ad6e
| 60,511
|
def is_report(post_site_id):
    """
    Check whether a post is a report.

    :param post_site_id: Report to check (any non-None value counts)
    :return: Boolean stating if it is a report
    """
    return post_site_id is not None
|
7e932b5523a298755041b4bfc2dd675ae6b908b8
| 60,513
|
def generate_instructions(instruction_info):
    """Generates an instruction string from a dictionary of instruction info given.

    :params instruction_info: Dictionary with keys 'amount_per_dose', 'form'
        ('tab' selects "tablets", anything else "ml"), 'frequency_day',
        'frequency_hrs', and 'duration'.
    :returns: String of instructions
    """
    # NOTE(review): the triple-quoted f-string embeds newlines and leading
    # spaces in the returned text; presumably the consumer tolerates this —
    # confirm before reformatting.
    return f"""Give {instruction_info['amount_per_dose']}
    {'tablets' if instruction_info['form'] == 'tab' else 'ml'},
    {instruction_info['frequency_day']}.
    (every {instruction_info['frequency_hrs']} hrs)
    for {instruction_info['duration']} days."""
|
7f641c679033f923b52ebb06d7e897e2e9d4775d
| 60,516
|
import pathlib
def _stringify_path(path):
    """
    Convert *path* to a string path if possible.

    Accepts plain strings, objects implementing the filesystem protocol
    (``__fspath__``, new in Python 3.6), and ``pathlib.Path`` on older
    interpreters; raises TypeError otherwise.
    """
    if isinstance(path, str):
        return path
    try:
        return path.__fspath__()
    except AttributeError:
        pass
    # Fallback pathlib check for Python versions earlier than 3.6.
    if isinstance(path, pathlib.Path):
        return str(path)
    raise TypeError("not a path-like object")
|
d4d06454c355f0e28629a286f15191b1125e3229
| 60,519
|
import base64
import json
def lambda_handler(event, context):
    """Kinesis Firehose transform: append the arrival timestamp to each record.

    Decodes each record's base64 JSON payload, adds a "timestamp" field from
    approximateArrivalTimestamp, and re-encodes it with result 'Ok'.
    """
    transformed = []
    for record in event['records']:
        data = json.loads(base64.b64decode(record['data']).decode('utf-8'))
        data["timestamp"] = record['approximateArrivalTimestamp']
        encoded = base64.b64encode(json.dumps(data).encode('utf-8')).decode('utf-8')
        transformed.append({
            'recordId': record['recordId'],
            'result': 'Ok',
            'data': encoded,
        })
    return {'records': transformed}
|
f922a02e485f03fcc3fd19ddad96c7b2b6c9c3b3
| 60,522
|
def last_page(doc):
    """Return the number of the last <section>/<page> in the Blurb doc."""
    final_page = doc.xpath('.//section/page')[-1]
    return int(final_page.get('number'))
|
b7086011bf0f005b09640285c50424f80ec299c0
| 60,524
|
def flipBits ( binVal ):
    """
    Flip every bit of a binary string.

    Parameters
    ----------
    binVal:
        A binary string without the leading '0b'

    Returns
    -------
    flippedBinVal:
        A binary string of flipped bits without the leading '0b'
    """
    flipped = []
    for bit in binVal:
        flipped.append('1' if bit == '0' else '0')
    return ''.join(flipped)
|
49d390113d5162121dd9957588a407e7d964d916
| 60,526
|
def is_atom_in_str(spc_str):
    """Check whether a MESS-formatted species data string is, or contains,
    an Atom species definition.

    :param spc_str: MESS species string
    :type spc_str: str
    rtype: bool
    """
    return 'Atom' in spc_str
|
c9d21d326fa6f739e084f32678141f9b03efb44a
| 60,532
|
def check_duplicate_index(df, verbose=True):
    """Report duplicated index labels of a dataframe.

    Prints the duplicate count (and, when verbose, the first few duplicated
    rows) and returns every row whose index label appears more than once.
    """
    dupes = df[df.index.duplicated()]
    print('{} index duplicates'.format(dupes.shape[0]))
    if verbose == True:
        print('duplicates are:')
        print(dupes.head(3))
    # keep=False marks every occurrence, not just the later ones.
    return df[df.index.duplicated(keep=False)]
|
cc41b7b30c6699259e03c458f092f4ad0fa1892d
| 60,534
|
def pluralize(apitools_collection_guess):
    """Pluralize krm_kind and handle common atypical pluralization cases.

    Only the *suffix* is rewritten; the previous str.replace-based version
    could also rewrite an identical substring occurring earlier in the name
    (e.g. 'PolicyPolicy' became 'PoliciesPolicies').

    :param apitools_collection_guess: singular collection name
    :returns: pluralized collection name
    """
    ending_plurals = [('Policy', 'Policies'), ('Proxy', 'Proxies'),
                      ('Repository', 'Repositories'), ('Index', 'Indexes'),
                      ('Address', 'Addresses')]
    for singular, plural in ending_plurals:
        if apitools_collection_guess.endswith(singular):
            return apitools_collection_guess[:-len(singular)] + plural
    # Regular noun: just append 's'.
    return apitools_collection_guess + 's'
|
83acc2d1da75818dfb819f35725c72df70250220
| 60,538
|
def _tokenize_path(path):
    """
    Remove any trailing slash and split the path into its components.

    e.g.: "/api/foo/" -> ["", "api", "foo"]
    """
    trimmed = path.rstrip("/")
    return trimmed.split("/")
|
0e3c11a04d2824b75a977a6785f839ed74c7eb91
| 60,539
|
import logging
def has_valid_str_length(data, minimum_length=1, maximum_length=255):
    """ Validate that the string length is between minimum and maximum length
    (both bounds inclusive).

    Parameters
    ----------
    data: str
        the string data to be checked.
    minimum_length: int, optional
        the minimum length of the string.
        default to 1
    maximum_length: int, optional
        the maximum length of the string.
        default to 255

    Returns
    -------
    Boolean

    Raises
    ------
    TypeError:
        for invalid parameter data type.
    Exception:
        for any uncaught syntax error.
    """
    try:
        if not isinstance(data, str):
            raise TypeError("data parameter must be in str data type.")
        if not isinstance(minimum_length, int):
            raise TypeError("minimum_length parameter must be in int data type.")
        if not isinstance(maximum_length, int):
            raise TypeError("maximum_length parameter must be in int data type.")
        str_length = len(data)
        return minimum_length <= str_length <= maximum_length
    except Exception as e:
        # Log-and-reraise keeps the caller's exception semantics intact.
        logging.error(e)
        raise e
|
8758e813bd2056dab926c22df6bdcd8770a00319
| 60,540
|
def add_slash(path):
    """Return ``path`` guaranteed to end with a single trailing slash."""
    return path if path.endswith('/') else path + '/'
|
ce3bb0d9e3ddd171b7c20bb537c853ed58fe3cd4
| 60,542
|
def EucDist(p1, p2):
    """2D Euclidean distance between points p1 and p2 (indexables of x, y)."""
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    return (dx * dx + dy * dy) ** 0.5
|
1e1978f371d4413b5dc7b9db6f913e32704d2d43
| 60,543
|
from typing import List
from typing import Dict
def distinct_by_key(data: List[Dict], key: str) -> List:
    """Return distinct records from a list of dictionaries, keyed by ``key``.

    Later records win over earlier ones with the same key value; records with
    a missing or falsy key value are dropped.
    """
    seen = {}
    for record in data:
        value = record.get(key)
        if value:
            seen[value] = record
    return list(seen.values())
|
6abca04738a7b1cbcc6292820d9ca656de511e25
| 60,547
|
import io
def export_html(palette):
    """
    Build an HTML string for a palette preview.

    Arguments:
        palette (dict): Dictionnary of named colors (as dumped in JSON from
            ``colors`` command); entries are sorted by value.

    Returns:
        string: HTML preview.
    """
    chunks = ['<div class="palette" style="display: flex; flex-direction: row; flex-wrap: wrap;">\n']
    for original_code, values in sorted(palette.items(), key=lambda x:x[1]):
        name, from_color = values
        chunks.append('    <div class="item" style="flex: 1 0 20%; max-width: 20%; padding: 0.5rem;">\n')
        chunks.append('    <div class="color" style="background-color: {}; width: 100%; height: 3rem;"></div>\n'.format(original_code))
        chunks.append('    <p class="code" style="">{}</p>\n'.format(original_code))
        chunks.append('    <p class="name" style="">{}</p>\n'.format(name))
        chunks.append('    </div>\n')
    chunks.append('</div>\n')
    return ''.join(chunks)
|
a3d0048f41520b86005c5d5541ef9d85e6819012
| 60,548
|
def merge_intervals(data):
    """
    Merge overlapping intervals.

    data = [(10,20), (15,30), (100, 200)]
    out = [(10,30), (100,200)]

    Endpoints within each interval are normalised (sorted), the intervals are
    processed in sorted order, and touching/overlapping ones are merged.

    Fixes two defects of the previous version: the accumulator was seeded
    from the *unsorted* first element (which silently dropped intervals when
    the input was out of order), and the final entry was returned as a list
    instead of a tuple.
    """
    if len(data) == 0:
        return data
    result = []
    saved = None
    for st, en in sorted(sorted(t) for t in data):
        if saved is None:
            saved = [st, en]
        elif st <= saved[1]:
            # Overlaps (or touches) the current run: extend it.
            saved[1] = max(saved[1], en)
        else:
            result.append(tuple(saved))
            saved = [st, en]
    result.append(tuple(saved))
    return result
|
e83b458890433d9367b9810aee2d68f85773af73
| 60,559
|
def get_local_bin(home):
    """
    Return the user's local bin path under ``home``.
    """
    return "{}/.local/bin".format(home)
|
292018f97e120c7e9cdee1a4f4272375b56dda86
| 60,562
|
def _rates_seir(state, beta, sigma, gamma, n):
    """
    Calculate the intensities of the cumulated infection process and the
    recovery process for an SEIR model.

    Parameters
    ----------
    state : dict or pd.Series
        Mapping with integer values under the keys "s", "e", "i", "r".
    beta, sigma, gamma : float
        The SEIR model parameters.
    n : int
        Parameter N of the SEIR model (population size).

    Returns
    -------
    rate_e : float
        Rate at which a susceptible individual becomes exposed.
    rate_i : float
        Rate at which an exposed individual becomes infectious.
    rate_r : float
        Rate at which a recovery occurs.
    change_rate : float
        Sum of the other returned rates.
    """
    rate_e = beta * (state["s"] / n) * state["i"]
    rate_i = sigma * state["e"]
    rate_r = gamma * state["i"]
    return rate_e, rate_i, rate_r, rate_e + rate_i + rate_r
|
0291d27286b19eb1c7c2a0b8c178af10c5abf058
| 60,565
|
def set_prefixes(sto):
    """Register all custom ontology prefixes on ``sto`` and return it."""
    prefixes = {
        'dbont': 'http://dbpedia.org/ontology/',
        'dbprop': 'http://dbpedia.org/property/',
        'nsprov': 'http://www.w3.org/ns/prov#',
        'vocvr': 'http://purl.org/voc/vrank#',
        'lingg': 'http://purl.org/linguistics/gold/',
    }
    for prefix, uri in prefixes.items():
        sto.set_prefix(prefix, uri)
    return sto
|
7fa689dd17188700aa568af15eece5dcb8660559
| 60,567
|
def is_public(f):
    """Check if function ``f`` is public (name has no leading underscore)."""
    name = f.__name__
    return not name.startswith('_')
|
8474d8afaa19c234ae5628ac588238e37ebdaff5
| 60,571
|
def _find_run_id(traces, item_id):
    """Return the newest run_id for *item_id* in *traces*, or None if absent."""
    matches = (t["run_id"] for t in reversed(traces) if t["item_id"] == item_id)
    return next(matches, None)
|
acc60e73dc8c958de245b94d95e7aadc4cc5f12c
| 60,572
|
import re
def cut_pair(exec_code):
    """
    Repeatedly delete cancelling pairs (<>, ><, +-, -+) from the code.

    Parameters
    -------------
    exec_code: str

    Returns
    -------------
    cut_code: str
    """
    pair_pattern = re.compile(r"<>|><|\+\-|\-\+")
    cut_code = exec_code
    while True:
        cut_code, removed = pair_pattern.subn('', cut_code)
        if removed == 0:
            return cut_code
|
6b7c9c299a698eb4dd2b6d8dd27761f1dd768c70
| 60,579
|
import functools
import unittest
def test_requires(*operations):
    """
    Decorator naming the DOS operations a test case uses (each operation is
    named by its `operationId` in the schema, e.g. ListBundles, UpdateObject,
    GetServiceInfo, etc.); the test is skipped when any operation is not in
    ``self.supports`` on the implementation under test.

    For example, given this test setup::

        class Test(AbstractComplianceTest):
            supports = ['UpdateBundles']

            @test_requires('UpdateBundles')
            def test_update_data_bundles(self):
                self.drs_request('PUT', '/databundles/1234')

            @test_requires('ListBundles', 'UpdateBundles')
            def test_list_and_update_data_bundles(self):
                self.drs_request('GET', '/databundles')
                self.drs_request('PUT', '/databundles/1234')

    ``test_update_data_bundles`` would run and
    ``test_list_and_update_data_bundles`` would be skipped.

    :param str \\*operations: the operations supported by the decorated
        test case
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self):
            missing = [op for op in operations if op not in self.supports]
            if missing:
                raise unittest.SkipTest("not supported: " + ", ".join(missing))
            return func(self)
        return wrapper
    return decorator
|
e145bf43a3bbbc9999b07e8145bfa96659588c75
| 60,580
|
def rotate(arr, n):
    """Rotate a list to the right by n positions and return the result.

    The input list is not modified; a new list is returned.

    :param arr: list to rotate
    :param n: number of right-rotation steps (any non-negative int;
        values larger than len(arr) wrap around)
    :return: new list containing the rotated elements
    """
    arr_len = len(arr)
    # Guard: the original raised ZeroDivisionError for an empty list.
    if arr_len == 0:
        return arr[:]
    # Convert the right rotation into an equivalent left split point,
    # normalising n so n > len(arr) also works.
    n = arr_len - (n % arr_len)
    # Swap the two halves: second part + first part.
    return arr[n:] + arr[:n]
|
733530337ae0f0fd9d71b727ab102487c5fd75d1
| 60,582
|
def _get_min_indent(lines, tab_width=4):
"""Find the minimum count of indenting whitespaces in lines.
Arguments:
lines (tuple):
The content to search the minimum indention for.
tab_width (int):
The number of spaces expand tabs before searching for indention by.
"""
min_indent = 2**32
for line in lines:
i = 0
for c in line:
if c == ' ':
i += 1
elif c == '\t':
i += tab_width - (i % tab_width)
else:
break
if min_indent > i:
min_indent = i
if not min_indent:
break
return min_indent
|
8655e67499daad82d8c84b20635b75ceac08487f
| 60,584
|
def FourthOrderDamping(U, DampCoeff):
    """Return the fourth-order damping term.

    Calculated using Equation 6-77 in CFD Vol. 1 by Hoffmann.

    Parameters
    ----------
    U : 1D or 2D array
        The dependent variable from time level (n) within the domain.
    DampCoeff : float
        Damping coefficient; choose in the range 0 to 0.125 for a
        stable solution.

    Returns
    -------
    D : 1D or 2D array
        The fourth-order damping term. Interior points hold the damping
        value; the two cells on each boundary retain the values of U
        (as in the original implementation).
    """
    damping = U.copy()
    # Central fourth difference over the interior points.
    fourth_diff = (U[:-4] - 4.0 * U[1:-3] + 6.0 * U[2:-2]
                   - 4.0 * U[3:-1] + U[4:])
    damping[2:-2] = -DampCoeff * fourth_diff
    return damping
|
0c4795cc1aebcd298c005eb1af70c49dfd429b32
| 60,585
|
import pkg_resources
def write_header(par, well0):
    """Build the MESS input-file header block.

    Fills the packaged ``mess_header.tpl`` template with values taken
    from ``par.par`` and the name of the first well.

    :param par: parameter object exposing a ``par`` dict of MESS settings
    :param well0: name of the reactant well inserted as ``Reactant``
    :return: the formatted header text
    """
    template_path = pkg_resources.resource_filename('tpl', 'mess_header.tpl')
    with open(template_path) as template_file:
        template = template_file.read()

    settings = par.par

    def joined(key):
        # Template fields that are lists are written space-separated.
        return ' '.join(str(value) for value in settings[key])

    return template.format(
        TemperatureList=joined('TemperatureList'),
        PressureList=joined('PressureList'),
        EnergyStepOverTemperature=settings['EnergyStepOverTemperature'],
        ExcessEnergyOverTemperature=settings['ExcessEnergyOverTemperature'],
        ModelEnergyLimit=settings['ModelEnergyLimit'],
        CalculationMethod=settings['CalculationMethod'],
        ChemicalEigenvalueMax=settings['ChemicalEigenvalueMax'],
        Reactant=well0,
        EnergyRelaxationFactor=settings['EnergyRelaxationFactor'],
        EnergyRelaxationPower=settings['EnergyRelaxationPower'],
        EnergyRelaxationExponentCutoff=settings['EnergyRelaxationExponentCutoff'],
        Epsilons=joined('Epsilons'),
        Sigmas=joined('Sigmas'),
        Masses=joined('Masses'))
|
f8b61d511f6a816df0a52e4c2427de71254e72e0
| 60,587
|
def onize_formula(formula=""):
    """Insert explicit "1" counts into a chemical formula.

    Chemical formulas omit counts equal to one (H2O, not H2O1); for
    consistency this adds those implicit ones back.

    input: str
    output: str
    This function is idempotent.
    """
    # Guard: the original indexed formula_with_ones[-1] on an empty
    # list, raising IndexError for an empty formula.
    if not formula:
        return formula
    out = []
    for ch in formula:
        # Digits and lowercase letters extend the current token
        # (a count or the rest of an element symbol).
        if not (ch.isdigit() or ch.islower()):
            # A new token starts: close the previous element with an
            # implicit count of 1 unless it already had one or was an
            # opening bracket.
            if out and not out[-1].isdigit() and out[-1] not in ('(', '['):
                out.append('1')
        out.append(ch)
    # The final element may also need its implicit 1.
    if not out[-1].isdigit():
        out.append('1')
    return ''.join(out)
|
af57ee201dd033b77700980c8532d257c4e4e315
| 60,591
|
import time
def wait_for_input_states(medialive, input_id, states):
    """Poll a MediaLive input every five seconds until it reaches one
    of the given states, then return that state.

    :param medialive: boto3 MediaLive client
    :param input_id: id of the input to watch
    :param states: collection of acceptable state strings
    :return: the state that satisfied the wait
    """
    state = ''
    while state not in states:
        time.sleep(5)
        state = medialive.describe_input(InputId=input_id)['State']
    return state
|
380ab04fc22e52582716a85f21cef40605b5ab0e
| 60,592
|
def calculate_wer(df):
    """Compute the corpus-level word error rate.

    Adds a 'product' column (per-row wer times reference word count) to
    the frame, then returns the frame together with the overall WER: the
    reference-word-weighted average of the per-row rates.

    Args:
        df: pandas DataFrame with 'wer' and 'ref_words' columns.

    Returns:
        (df, wer): the frame with the added 'product' column, and the
        overall word error rate as a float.
    """
    weighted = df['wer'] * df['ref_words']
    df['product'] = weighted
    total_reference_words = df['ref_words'].sum()
    overall_wer = float(weighted.sum()) / float(total_reference_words)
    return df, overall_wer
|
27f0e9bfa1028008e7d3028442dcca8c02636d25
| 60,593
|
def crop(img, x1, y1, x2, y2):
    """Return the sub-image bounded by the given corner coordinates.

    :param img: input image array, [H, W, 3]
    :param x1: left column, int
    :param y1: top row, int
    :param x2: right column (exclusive), int
    :param y2: bottom row (exclusive), int
    :return: view of the cropped region
    """
    return img[y1:y2, x1:x2]
|
e0a625259d14bda2ca5afb227c08dcd4fd328b94
| 60,594
|
def positive_sum(arr):
    """Return the sum of the strictly positive numbers in *arr*.

    Returns 0 for an empty or None input.
    """
    if not arr:
        return 0
    return sum(value for value in arr if value > 0)
|
30467034bdf7548f24e0f519f70b8d2a6cdedcf5
| 60,596
|
def dump_section(bank_number, separator="\n\n"):
    """
    Return a SECTION header string for the asm file.

    :param bank_number: bank identifier; bank 0 (int or "0") maps to the
        HOME section, any other bank to a banked DATA section. Non-zero
        ints are now accepted (the original crashed concatenating them).
    :param separator: text appended after the header line
    :return: the section header string
    """
    if bank_number in [0, "0"]:
        return "SECTION \"bank0\",HOME" + separator
    # Coerce to str so int bank numbers work too (the original raised
    # TypeError on `str + int` for any non-zero int).
    bank = str(bank_number)
    return "SECTION \"bank" + bank + "\",DATA,BANK[$" + bank + "]" + separator
|
3a1655e513df2f02bdaed257ccd4105d9539fc26
| 60,599
|
def GetApkFileInfo(filename, compressed_extension, skipped_prefixes):
  """Returns the APK info based on the given filename.

  Checks if the given filename (with path) looks like an APK file, by taking the
  compressed extension into consideration. If it appears to be an APK file,
  further checks if the APK file should be skipped when signing, based on the
  given path prefixes.

  Args:
    filename: Path to the file.
    compressed_extension: The extension string of compressed APKs (e.g. ".gz"),
        or None if there's no compressed APKs.
    skipped_prefixes: A set/list/tuple of the path prefixes to be skipped.

  Returns:
    (is_apk, is_compressed, should_be_skipped): is_apk indicates whether the
    given filename is an APK file. is_compressed indicates whether the APK file
    is compressed (only meaningful when is_apk is True). should_be_skipped
    indicates whether the filename matches any of the given prefixes to be
    skipped. All three are genuine bools (the original could yield None for
    is_compressed when compressed_extension was None).

  Raises:
    AssertionError: On invalid compressed_extension or skipped_prefixes inputs.
  """
  assert compressed_extension is None or compressed_extension.startswith('.'), \
      "Invalid compressed_extension arg: '{}'".format(compressed_extension)

  # skipped_prefixes should be one of set/list/tuple types. Other types such as
  # str shouldn't be accepted.
  assert isinstance(skipped_prefixes, (set, list, tuple)), \
      "Invalid skipped_prefixes input type: {}".format(type(skipped_prefixes))

  compressed_apk_extension = (
      ".apk" + compressed_extension if compressed_extension else None)
  # bool() normalizes the short-circuit `and` chains, which otherwise
  # propagate None into the returned tuple.
  is_apk = (filename.endswith(".apk") or
            bool(compressed_apk_extension and
                 filename.endswith(compressed_apk_extension)))
  if not is_apk:
    return (False, False, False)

  is_compressed = bool(compressed_apk_extension and
                       filename.endswith(compressed_apk_extension))
  should_be_skipped = filename.startswith(tuple(skipped_prefixes))
  return (True, is_compressed, should_be_skipped)
|
9ee5bd452c574c2150f7bfa1276225d7ea61bd20
| 60,613
|
def xproto_tosca_field_type(type):
    """
    Map an xproto field type to its TOSCA spelling.

    TOSCA requires 'bool' to be 'boolean' and 'int32' to be 'integer';
    every other type passes through unchanged.
    """
    tosca_names = {"bool": "boolean", "int32": "integer"}
    return tosca_names.get(type, type)
|
191021f7ba9ae13ec29f51893b901f5d4d0ec6c3
| 60,625
|
import copy
def digits(sudoku):
    """
    Transpose a 9x9 sudoku grid so that columns become rows.

    Row r of the result holds column r of the input; a brand-new nested
    list is returned and the input is never modified.

    :param sudoku: grid to transpose, a list of 9 row lists
    :type sudoku: list
    :return: transposed grid, type -> [[], [] ...]
    :rtype: list
    """
    # The function only reads the input, so the original deepcopy was
    # unnecessary work; the comprehension builds entirely fresh lists.
    return [[sudoku[row][col] for row in range(9)] for col in range(9)]
|
27d325e7988c405c79d02c08cd5f77b086754b72
| 60,626
|
from functools import reduce
def partition(tosplit, predicate):
    """
    Splits the list :param:`tosplit` based on the :param:`predicate` applied to
    each list element and returns the two resulting lists.

    Parameters
    ----------
    tosplit: list
        The list to split
    predicate: callable
        A callable predicate that takes a list element and returns a truth
        value.

    Returns
    -------
    true_list: list
        Elements of :param:`tosplit` for which :param:`predicate` was True
    false_list: list
        Elements of :param:`tosplit` for which :param:`predicate` was False
    """
    # Rewritten from a side-effecting reduce() (which indexed the pair with
    # `not predicate(y)`) into an explicit loop for readability; the
    # returned (true_list, false_list) tuple is identical.
    true_list = []
    false_list = []
    for element in tosplit:
        if predicate(element):
            true_list.append(element)
        else:
            false_list.append(element)
    return (true_list, false_list)
|
0c0c054f0681117d2e5336485c66211532b4ad5b
| 60,631
|
def list_to_comma_delimited(list_param):
    """Convert a list of strings into a single comma-delimited string.

    :param list_param: list of strings, or None (treated as empty).
    :type list_param: list
    :return: comma-delimited string.
    :rtype: str
    """
    items = [] if list_param is None else list_param
    return ','.join(items)
|
54d402e9ad00b3f61f689c3b8ff1ef6edd5f964f
| 60,633
|
def is_string_in_file(string, filepath):
    """
    Check whether a string occurs in a file.

    Args:
        :string: string to search for
        :filepath: path of file

    Returns:
        :is_found: True if found, False otherwise
    """
    # Use a context manager so the handle is closed deterministically;
    # the original left the file open until garbage collection.
    with open(filepath) as handle:
        return string in handle.read()
|
3705573f37c604f61f5f5da14e3726f13413de96
| 60,636
|
def nested_lookup(dictionary, *entry):
    """Get a nested entry from a dictionary.

    :param dictionary: the (possibly nested) mapping to search
    :param entry: sequence of keys to follow, outermost first
    :return: the value at the nested location, or None when no keys
        are given (preserving the original behavior)
    :raises Exception: when any key along the path is missing
    """
    # With no keys the original's loop never ran and it returned None.
    if not entry:
        return None
    value = dictionary
    try:
        for key in entry:
            value = value[key]
    # Narrowed from a bare `except:` (which also swallowed
    # KeyboardInterrupt/SystemExit) to the lookup failures that can
    # actually occur; the dead `isinstance(entry, str)` branch is gone
    # because *entry is always a tuple.
    except (KeyError, IndexError, TypeError) as err:
        raise Exception("could not find entry '" +
                        "/".join(list(entry)) + "' in the dictionary") from err
    return value
|
f0f437cff132f9b8892872b5f01f36709bc7b59c
| 60,645
|
import pathlib
def is_path(x):
    """Return True when *x* is a pathlib.Path instance."""
    return isinstance(x, pathlib.Path)
|
0fd458beed09ec09db34e8a33746ed24b8e67a0e
| 60,646
|
def derive_new_filename(filename):
    """Derive a non-numbered filename from a possibly numbered one.

    Strips trailing digit characters; "shot12" becomes "shot". An
    all-digit or empty name now yields "" instead of raising IndexError
    (the original indexed filename[-1] without an emptiness guard).
    """
    while filename and filename[-1].isdigit():
        filename = filename[:-1]
    return filename
|
163b1f718c73ff8e51f1eaa196d6d233f0690e45
| 60,649
|
def filepath_exist_cmd(path):
    """Build the shell command string that tests whether *path* exists."""
    return "test -e " + path
|
b695946e286711cbfbe50c79eb489288d6e84243
| 60,654
|
import math
def quadratic_equation(num1, num2, num3):
    """Solve the quadratic equation num1*x**2 + num2*x + num3 = 0.

    :param num1: coefficient a (must be non-zero)
    :param num2: coefficient b
    :param num3: coefficient c
    :return: (root1, root2) when the discriminant is positive,
        (root, None) when it is zero, or None when it is negative.
    """
    delta = (num2 ** 2) - (4 * num1 * num3)
    if delta < 0:
        return None
    # BUG FIX: the original computed `... / 2 * num1`, which divides by 2
    # and then MULTIPLIES by a — wrong for any a != 1. The quadratic
    # formula divides by (2*a).
    denominator = 2 * num1
    if delta == 0:
        return -num2 / denominator, None
    sqrt_delta = math.sqrt(delta)
    return ((-num2 + sqrt_delta) / denominator,
            (-num2 - sqrt_delta) / denominator)
|
672c3b4158db58f1975058a8827b1871d52949d2
| 60,657
|
def printAtom( serial, molecule, atom, alchemicalTransformation):
    """Generate one formatted atom line for a topology file.

    Parameters
    ----------
    serial : int
        Atom serial number.
    molecule : object
        Provides ``residueName`` for the line.
    atom : object
        Provides the GROMACS type, original name, charge and mass
        (plus the B-state ``type_gmx_B``/``charge_B``/``mass_B`` fields
        when an alchemical transformation is written).
    alchemicalTransformation : bool
        True if the line must also carry the B-state columns.

    Returns
    -------
    atomLine : str
        The formatted atom line, newline-terminated.
    """
    # Columns shared by both layouts.
    common = (serial, atom.type_gmx, 1, molecule.residueName,
              atom.nameOriginal, 1, atom.charge, atom.mass)
    if alchemicalTransformation:
        fmt = ' %5d %10s %6d %6s %5s %6d %10.4f %10.4f%11s%11.4f%11.4f \n'
        return fmt % (common + (atom.type_gmx_B, atom.charge_B, atom.mass_B))
    fmt = ' %5d %10s %6d %6s %5s %6d %10.4f %10.4f \n'
    return fmt % common
|
52326352ccf2b592eabc517fc8d8528d32e97e37
| 60,661
|
import pydoc
def wrapped_help_text(wrapped_func):
    """Decorator factory that passes through the documentation from a
    wrapped function.

    The returned decorator overwrites the decorated function's __doc__
    with a note plus the rendered pydoc of *wrapped_func*.
    """
    header = "This method wraps the following method:\n\n"

    def decorator(wrapper_func):
        """The decorator.

        Parameters
        ----------
        wrapper_func : callable
            The function receiving the generated docstring.
        """
        wrapper_func.__doc__ = header + pydoc.text.document(wrapped_func)
        return wrapper_func

    return decorator
|
07eab890d6ebb380a85d1a497eb4ba4f857da489
| 60,664
|
import io
import zipfile
def do_compress(str_hex: str) -> str:
    """Convert a hex string to raw bytes, zip-compress them, and return
    the archive as a hex string.

    Args:
        str_hex (str): hexadecimal string (no leading prefix)

    Returns:
        str: hex string of the DEFLATE-compressed zip archive containing
        the bytes under the member name '_'
    """
    raw = bytes.fromhex(str_hex)
    buffer = io.BytesIO()
    with zipfile.ZipFile(buffer, 'w', compression=zipfile.ZIP_DEFLATED) as archive:
        archive.writestr('_', raw)
    return buffer.getvalue().hex()
|
889848643360af0d0e7460550e5721177769b33d
| 60,665
|
def decrease_when_higher_than_3(offset):
    """Return -1 when *offset* is at least 3, otherwise 1."""
    if offset >= 3:
        return -1
    return 1
|
760b80e8cb769edf529b85c30eaa97fb28140083
| 60,669
|
import statistics
def get_partition_agreement_score(partitionA, partitionB):
    """
    Compute the partition agreement score for two partitions.

    Each part of partitionB scores 1 when it is a subset of some part of
    partitionA, else 0; the result is the mean of those scores (0 when
    partitionB is empty).
    """
    per_part_scores = [
        1 if any(part_b.issubset(part_a) for part_a in partitionA) else 0
        for part_b in partitionB
    ]
    return statistics.mean(per_part_scores or [0])
|
0a80b858ea768c08c368d97406f181a0b9c75b1c
| 60,673
|
def get_version(client):
    """Return the Elasticsearch version number as a tuple of ints."""
    number = client.info()['version']['number']
    return tuple(int(part) for part in number.split('.'))
|
c4a5f3d3e4e6326b6a7f5eed20d3e3e5b3c408c8
| 60,677
|
def inner_node_3(key, left, right):
    """Combine two child values: the node's value is left + right + 1."""
    return 1 + left + right
|
2cb67f7a292ccf34edc923ca10d10526217c112a
| 60,683
|
def sorted_tree(ls):
    """
    Canonicalise a nested list by recursively sorting it in place.

    Returns the same list object (or None if given None).
    """
    if ls is None:
        return None
    for index, item in enumerate(ls):
        # Recurse only into exact list children, as the original did.
        if type(item) is list:
            ls[index] = sorted_tree(item)
    ls.sort()
    return ls
|
a0af9b17775fd0d90c471c16f5ac2d647ac6b182
| 60,684
|
def is_schema_request(request):
    """
    Return True when this request asks for an OpenAPI schema
    (i.e. the query string carries ?format=openapi).
    """
    requested_format = request.query_params.get('format')
    return requested_format == 'openapi'
|
804ea5e7c1ac68080b4a6af4bc96c40cbff6162b
| 60,687
|
def simple_two_params(one, two):
    """Expected simple_two_params __doc__"""
    # The docstring above is kept byte-identical: it reads like a fixture
    # whose __doc__ is asserted elsewhere.
    message = "simple_two_params - Expected result: %s, %s"
    return message % (one, two)
|
6cc9cff496c0b00280fe9e967578086de83eadcf
| 60,688
|
from datetime import datetime
def snapshot_identifier(prefix, db_identifier):
    """Return an identifier for a snapshot of a database or cluster:
    '<prefix>-<db_identifier>-YYYY-MM-DD-HH-MM' (current local time).
    """
    timestamp = datetime.now().strftime('%Y-%m-%d-%H-%M')
    return '-'.join([prefix, db_identifier, timestamp])
|
7de4a96a4a8933a562c5275308f358539b8a066c
| 60,690
|
def sa_con_string(dialect, server, db, py_driver=None, user=None, password='', driver=None):
    """
    Formats connection variables into SQL Alchemy string.

    Intended for brevity elsewhere in the App. For more detail,
    see the `SQLAlchemy Engine Configuration <https://docs.sqlalchemy.org/en/13/core/engines.html>`_ page.

    Parameters
    ----------
    dialect : str
        SQLAlchemy-recognised name for the DBMS, such as `mssql` or `sqlite`
    server : str
        Server/host name
    db : str
        Database name
    py_driver : str
        Name of additional driver required for dialect connection (e.g. pyodbc)
    user : str
        Username, if used. If omitted, connection uses windows credentials
        (via trusted connection)
    password : str
        Password for given username. Can be blank.
    driver : str
        Specific driver to use when connecting.

    Returns
    -------
    str
        SQL Alchemy engine connection string.
    """
    # Configure security
    user = '' if user is None else user
    if len(user) > 0:
        login = user + ':' + password
        trust = ''
    else:
        login = ''
        trust = '?trusted_connection=yes'

    # Configure dialect
    if py_driver is not None:
        dialect = '+'.join([dialect, py_driver])

    # Configure additional driver clause.
    # BUG FIX: the original left `driver` as None when no driver was
    # given, which interpolated the literal text "None" into the
    # connection string.
    if driver:
        driver = '&driver=' + driver.replace(" ", "+")
    else:
        driver = ''

    con = f"{dialect}://{login}@{server}/{db}{trust}{driver}" + \
        ";MARS_Connection=Yes"
    return con
|
7908da6a59e1f96505ee8e388bcc0c54292f697c
| 60,692
|
def euclid(p, q):
    """Return the Euclidean distance between two vectors.

    Args:
        p (list): p vector
        q (list): q vector

    Returns:
        Euclidean distance between p and q (extra trailing elements of
        the longer vector are ignored, as with zip).
    """
    squared_distance = sum((q_i - p_i) ** 2 for p_i, q_i in zip(p, q))
    return squared_distance ** 0.5
|
01dcdf2686f3d6d77f792a01b9ab47bcf4914704
| 60,693
|
import json
def get_credential(file_path):
    """
    Read a JSON credential file and return (username, password).

    :param file_path: path to a JSON file containing "username" and
        "password" keys
    :return: (username, password) tuple
    :raises KeyError: when either required key is missing
    """
    with open(file_path) as json_file:
        config = json.load(json_file)
    # Validate with a real exception: the original used `assert`, which
    # is silently stripped when Python runs with -O.
    missing = {"username", "password"} - config.keys()
    if missing:
        raise KeyError(
            "credential file missing keys: " + ", ".join(sorted(missing)))
    return config["username"], config["password"]
|
2ec258a5032b394121babdf327efe94fbd9d2db1
| 60,700
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.