| content (string, 39 to 14.9k chars) | sha1 (string, 40 chars) | id (int64, 0 to 710k) |
|---|---|---|
def filter_df_on_case_length(df, case_id_glue="case:concept:name", min_trace_length=3, max_trace_length=50):
"""
    Filter a dataframe, keeping only the cases whose number of events falls within the allowed range
Parameters
-----------
df
Dataframe
case_id_glue
Case ID column in the CSV
min_trace_length
Minimum allowed trace length
max_trace_length
Maximum allowed trace length
"""
df = df.groupby(case_id_glue).filter(lambda x: (len(x)>= min_trace_length and len(x)<=max_trace_length))
return df
|
a3eadc9534b41c62f895def2611a68157abfe091
| 44,229
|
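A minimal usage sketch for the case-length filter above, assuming pandas is installed and the function is in scope; the column names are the defaults from the signature:
import pandas as pd

# case "A" has 2 events and is dropped; case "B" has 3 events and is kept
log = pd.DataFrame({
    "case:concept:name": ["A", "A", "B", "B", "B"],
    "concept:name": ["a1", "a2", "b1", "b2", "b3"],
})
filtered = filter_df_on_case_length(log, min_trace_length=3, max_trace_length=50)
print(filtered["case:concept:name"].unique())  # ['B']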
def helper(n, largest):
"""
:param n: int,
:param largest: int, to find the biggest digit
:return: int, the biggest digit in n
    Because each digit is < 10, this function recursively checks every digit of n
"""
remainder = n % 10
if n < 10: # Base case!
if remainder > largest:
return remainder
else:
return largest
elif remainder > largest: # Recursive
largest = remainder
return helper((n-remainder)//10, largest)
else: # Recursive
return helper((n-remainder)//10, largest)
|
a441ee9f7712b426db8f2f0a677941c0be44cc0e
| 44,232
|
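A small sketch of the recursive digit helper above; `largest` is seeded with 0 as the assumed entry point:
# the biggest digit of 58291 is 9; a single-digit input hits the base case
print(helper(58291, 0))  # 9
print(helper(7, 0))      # 7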
def generate_header(sample_name):
"""Function for generating the header for output VCF file.
Args:
sample_name (str): Name of the sample.
Returns:
str: Header for the VCF file.
"""
    with open("header", "rt") as fin:
        data = fin.read()
    data = data.replace('SAMPLENAME', sample_name)
return data+"\n"
|
d8690521d32da1df43253d7cb2ea95590c3becf5
| 44,235
|
def upper(_, text):
""" Convert all letters in content to uppercase. """
return text.upper()
|
b2832b5f07d2f564e11c745669d31d86f014eaa1
| 44,238
|
def from_c_str(v):
"""
C str to Python str
"""
try:
return v.decode("utf-8")
except Exception:
pass
return ""
|
1a8026386a4575a3fcb7be6a15e47bf6a2ba4b97
| 44,241
|
def parse_relatedcontent_data(data):
"""
Given encoded related content data form a hidden input field, parse it into
a list of tuples (content_type, object_id).
"""
final_data = []
parts = [x.strip() for x in data.split(",") if x.strip()]
    for part in parts:
        fields = part[1:-1].split(" ")
        content_type = fields[0]
        object_id = fields[1]
        final_data.append((content_type, object_id))
return final_data
|
78adae2f892f01cf12b85b26054ec87adc370952
| 44,242
|
def read_build_vars(path):
"""Parses a build_vars.txt into a dict."""
with open(path) as f:
return dict(l.rstrip().split('=', 1) for l in f)
|
b36b1f16111b5c8afbe038bf82df2dd13517a1a7
| 44,243
|
import math
def compass(azimuth, radians=False):
"""
Get named direction from azimuth.
"""
if radians:
azimuth *= 180.0 / math.pi
names = (
'N', 'NNE', 'NE', 'ENE',
'E', 'ESE', 'SE', 'SSE',
'S', 'SSW', 'SW', 'WSW',
'W', 'WNW', 'NW', 'NNW',
)
return names[int((azimuth / 22.5 + 0.5) % 16.0)]
|
2f50430899ade2b5c95d1a5223772a6ca6e055b0
| 44,244
|
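A few sanity checks for the compass lookup above (assuming the function is in scope):
import math

print(compass(0))                          # 'N'
print(compass(45))                         # 'NE'
print(compass(math.pi / 2, radians=True))  # 'E' (90 degrees)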
def _strip_string(*string):
"""Strips each element of the given list
Arguments
------------------
*string : str
string to be stripped
Returns
------------------
stripped : list
Stripped strings
"""
return [x.strip() for x in string]
|
d95761e182671193c5491a98ec3ca5f4a7f925f7
| 44,245
|
import torch
def collate_fn(items):
"""
Creates mini-batch tensors from the list of tuples (image, caption).
Args:
data: list of tuple (image, caption).
- image: torch tensor of shape
- caption: torch tensor of shape (?); variable length.
Returns:
images: torch tensor of images.
targets: torch tensor of shape (batch_size, padded_length).
lengths: list; valid length for each padded caption.
        image_ids: list; the unique id of each image in the batch
"""
image_batch, label_batch, gts_batch, info_batch = zip(*items)
# image_batch, caption_batch, imageid_batch = zip(*items)
# Merge images (from tuple of 3D tensor to 4D tensor).
image_batch = torch.stack(image_batch, 0)
info_batch = list(info_batch)
lengths = [label.size()[0] for label in label_batch]
label_batch = torch.nn.utils.rnn.pad_sequence(label_batch, batch_first=True, padding_value=0)
gts_batch = torch.nn.utils.rnn.pad_sequence(gts_batch, batch_first=True, padding_value=0)
mask_batch = torch.zeros_like(label_batch)
    for i, seq_len in enumerate(lengths):
        for j in range(seq_len):
            mask_batch[i, j] = 1
return image_batch, label_batch, mask_batch, gts_batch, info_batch
|
0ce25f1d030b88636334dbc0d1eef1ec954d8b89
| 44,246
|
def _comment(string: str) -> str:
"""Return string as a comment."""
lines = [line.strip() for line in string.splitlines()]
sep = "\n"
return "# " + f"{sep}# ".join(lines)
|
c8919933f2737528ec6c8a0c3fbb4b1f9767a398
| 44,252
|
def get_config_options(otype):
""" Return list of valid configuration options for nodes and dispatcher."""
    if otype == 'node':
return ['node_name', 'node_type', 'node_id', 'node_description', 'primary_node', 'ip', 'port_frontend', 'port_backend', 'port_publisher', 'n_responders', 'lsl_stream_name', 'primary_n_channels', 'primary_channel_names', 'primary_channel_descriptions', 'primary_sampling_rate', 'primary_buffer_size_s', 'run_publisher', 'secondary_node', 'secondary_n_channels', 'secondary_buffer_size', 'secondary_channel_names', 'secondary_channel_descriptions', 'default_channel']
    elif otype == 'dispatcher':
return ['node_list', 'port', 'ip', 'n_threads', 'run_pubsub_proxy', 'proxy_port_in', 'proxy_port_out']
else:
return None
|
8909a07b54353343b0be2c73b370e3fa4c1f0daf
| 44,264
|
def del_webf_obj(server, session, obj_type, obj_name, *args):
"""
    Remove an object from the server. A simple wrapper for the "delete_XXX"
API methods.
"""
obj = getattr(server, "delete_%s" % obj_type)(session, obj_name, *args)
return obj
|
81c56d432d5c1cbd5826f999e27f227712cfbf21
| 44,266
|
def _get_upload_to_path(instance, filename):
"""
Returns an upload path using the instance slug.
This function keeps file uploads organized.
"""
return "img/portfolio/%s/%s" % (instance.slug, filename)
|
24c8ec6fa60c1c733d3db4fb81f4e31f6de9c3a8
| 44,267
|
def _sort_and_merge_sub_arrays(left_array, right_array):
"""This method assumes elements in `left_array` and `right_array` are already sorted.
Parameters
----------
left_array: list[int]
right_array: list[int]
Returns
-------
list: merged and sorted list
"""
left_array_length = len(left_array)
right_array_length = len(right_array)
# Creating a placeholder with zeros.
merged_array = (left_array_length + right_array_length) * [0]
left_index = 0
right_index = 0
current_index = 0
while left_index < left_array_length or right_index < right_array_length:
# merging by sorting.
if left_index < left_array_length and right_index < right_array_length:
if left_array[left_index] > right_array[right_index]:
merged_array[current_index] = right_array[right_index]
right_index += 1
elif left_array[left_index] <= right_array[right_index]:
merged_array[current_index] = left_array[left_index]
left_index += 1
else:
# Left over elements.
if left_index < left_array_length:
merged_array[current_index:] = left_array[left_index:]
current_index += len(left_array[left_index:])
left_index = left_array_length
elif right_index < right_array_length:
merged_array[current_index:] = right_array[right_index:]
current_index += len(right_array[right_index:])
right_index = right_array_length
current_index += 1
return merged_array
|
3663097132e530f19d2692cb3492e0cd9008fcfc
| 44,268
|
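A quick check of the merge helper above on two pre-sorted lists, including the leftover-elements branch:
print(_sort_and_merge_sub_arrays([1, 3, 5], [2, 4, 6]))  # [1, 2, 3, 4, 5, 6]
print(_sort_and_merge_sub_arrays([], [7, 8]))            # [7, 8]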
def to_rating(value):
"""
Converts the given value to a valid numerical skill rating.
Args:
value (str, int, or float): The value to convert.
Returns:
float: The converted value.
Raises:
ValueError: If ``value`` cannot be converted to a float, or if the converted value is less than zero.
"""
if type(value) not in [int, float, str]:
raise ValueError("Cannot convert %s value '%s' to a rating. Only str and numerical types are allowed."
% (type(value), value))
try:
rating = float(value)
except ValueError as e:
raise ValueError("Failed to convert '%s' to a numerical rating" % value, e)
if rating < 0:
raise ValueError("Invalid rating: '%s'. Ratings must be larger than or equal to zero." % value)
return rating
|
b4ceb5accd9def6331a84ed4427fe88add216679
| 44,273
|
def browser_labels(labels):
"""Return a list of browser labels only without the `browser-`."""
return [label[8:].encode('utf-8')
for label in labels
            if label.startswith('browser-') and label[8:] != '']
|
d61b26a599d240918798f74b387e09427984c6e8
| 44,287
|
from typing import Union
def _get_paths(symbol: Union[str, int]) -> str:
"""Get the javascript pen paths associated with a given symbol.
These are adapted from plotly.js -> src/components/drawing/symbol_defs.js
Args:
symbol: The symbol whose pen paths should be retrieved.
Returns:
A minified string representation of the paths to be declared in javascript.
"""
if isinstance(symbol, str):
return {'circle': '"M"+b1+",0A"+b1+","+b1+" 0 1,1 0,-"+b1+"A"+b1+","+b1+" 0 0,1 "+b1+",0Z"',
'square': '"M"+b1+","+b1+"H-"+b1+"V-"+b1+"H"+b1+"Z"',
'diamond': '"M"+b1+",0L0,"+b1+"L-"+b1+",0L0,-"+b1+"Z"',
'hexagram': '"M-"+b3+",0l-"+b2+",-"+b1+"h"+b3+"l"+b2+",-"+b1+"l"+b2+","+b1+"h"+b3+"l-"+b2+","+b1+"l"+'
'b2+","+b1+"h-"+b3+"l-"+b2+","+b1+"l-"+b2+",-"+b1+"h-"+b3+"Z"'}[symbol]
return {37: '"M-"+d1+","+d3+"L0,0M"+d1+","+d3+"L0,0M0,-"+d2+"L0,0"',
38: '"M-"+d1+",-"+d3+"L0,0M"+d1+",-"+d3+"L0,0M0,"+d2+"L0,0"',
39: '"M"+d3+","+d1+"L0,0M"+d3+",-"+d1+"L0,0M-"+d2+",0L0,0"',
40: '"M-"+d3+","+d1+"L0,0M-"+d3+",-"+d1+"L0,0M"+d2+",0L0,0"',
34: '"M"+d1+","+d1+"L-"+d1+",-"+d1+"M"+d1+",-"+d1+"L-"+d1+","+d1',
33: '"M0,"+d1+"V-"+d1+"M"+d1+",0H-"+d1',
35: '"M0,"+d1+"V-"+d1+"M"+d1+",0H-"+d1+"M"+d2+","+d2+"L-"+d2+",-"+d2+"M"+d2+",-"+d2+"L-"+d2+","+d2',
36: '"M"+d1+","+d2+"V-"+d2+"m-"+d2+",0V"+d2+"M"+d2+","+d1+"H-"+d2+"m0,-"+d2+"H"+d2'}[symbol]
|
2e9a40a5e55bf1a406655bb91fa298cb0657d9ef
| 44,291
|
import base64
import struct
def _decode_ints(message):
"""Helper for decode_qp, decodes an int array.
The int array is stored as little endian 32 bit integers.
The array has then been base64 encoded. Since we are decoding we do these
steps in reverse.
"""
binary = base64.b64decode(message)
return struct.unpack('<' + ('i' * (len(binary) // 4)), binary)
|
c30816e52ffd9336ac94026611bfe5ae869a4e8c
| 44,292
|
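A round-trip sketch for _decode_ints above: pack little-endian 32-bit integers, base64-encode them, then decode with the helper:
import base64
import struct

encoded = base64.b64encode(struct.pack('<3i', 1, -2, 300))
print(_decode_ints(encoded))  # (1, -2, 300)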
def strict_dict(pairs):
"""Creates a dict from a sequence of key-value pairs, verifying uniqueness of each key."""
d = {}
for k,v in pairs:
if k in d:
raise ValueError("duplicate tupkey '%s'" % k)
d[k] = v
return d
|
11690b8cde2fb2163f9c07001dc5864f21e0339f
| 44,293
|
def generate_dummy_batch(num_tokens, collate_fn, src_vocab, tgt_vocab, src_len=128, tgt_len=128):
"""Return a dummy batch with a given number of tokens."""
bsz = num_tokens // max(src_len, tgt_len)
return collate_fn([
{
'id': i,
'source': src_vocab.dummy_sentence(src_len),
'target': tgt_vocab.dummy_sentence(tgt_len),
'output': tgt_vocab.dummy_sentence(tgt_len),
}
for i in range(bsz)
])
|
7de3b36c57e73382a574bec599e2e62bcf8961a9
| 44,294
|
def create_env_index(replay_buffer):
"""Creates the mapping from env_name to their index in the replay buffer."""
env_index = {}
buffer_items = sorted(
        replay_buffer.traj_buffer.items(), key=lambda k: k[0])
for idx, (env_name, _) in enumerate(buffer_items):
env_index[env_name] = idx
return env_index
|
9391e139cb66031fcaf50c1871f8f14ab0adb7f8
| 44,296
|
import json
def convert_json_to_dict(json_data):
"""Converts JSON data containing location info on an IP address to a Python dictionary"""
loc_dict = {}
# Replace default key:'ip' with new key:'source_ip' to match the other data
new_key = "source_ip"
old_key = "ip"
try:
loc_dict = json.loads(json_data)
loc_dict[new_key] = loc_dict.pop(old_key)
        for current_key in list(loc_dict.keys()):
if current_key != "source_ip":
new_key = "ip_" + current_key
loc_dict[new_key] = loc_dict.pop(current_key)
except ValueError: # includes simplejson.decoder.JSONDecodeError
print("\n[!] ERROR -> Loading Location JSON data has failed")
return loc_dict
|
aee4c5d2d643ba25f6c66d222a1aeb3a86611ab8
| 44,307
|
def DetailedHelp(version):
"""Construct help text based on the command release track."""
detailed_help = {
'brief': 'SSH into a virtual machine instance',
'DESCRIPTION': """\
*{command}* is a thin wrapper around the *ssh(1)* command that
takes care of authentication and the translation of the
instance name into an IP address.
This command ensures that the user's public SSH key is present
in the project's metadata. If the user does not have a public
SSH key, one is generated using *ssh-keygen(1)* (if the `--quiet`
flag is given, the generated key will have an empty passphrase).
""",
'EXAMPLES': """\
To SSH into 'example-instance' in zone ``us-central1-a'', run:
$ {command} example-instance --zone us-central1-a
You can also run a command on the virtual machine. For
example, to get a snapshot of the guest's process tree, run:
$ {command} example-instance --zone us-central1-a --command "ps -ejH"
If you are using the Google container virtual machine image, you
can SSH into one of your containers with:
$ {command} example-instance --zone us-central1-a --container CONTAINER
""",
}
if version == 'BETA':
detailed_help['DESCRIPTION'] = """\
*{command}* is a thin wrapper around the *ssh(1)* command that
takes care of authentication and the translation of the
instance name into an IP address.
This command uses the Compute Accounts API to ensure that the user's
        public SSH key is available to the VM. This form of key management
will only work with VMs configured to work with the Compute Accounts
API. If the user does not have a public SSH key, one is generated using
        *ssh-keygen(1)* (if the `--quiet` flag is given, the generated key will
have an empty passphrase).
"""
return detailed_help
|
b0afca55c5538ce903fd3c2a2175e7df57c71c7c
| 44,311
|
from typing import Any
def is_byte_data(data: Any):
"""
Checks if the given data is of type byte
:param data: The data to check
:return: Whether the data is of type bytes or not
"""
return type(data) is bytes
|
3b04758f812220b97f21c15cacc4773c92b5bb30
| 44,312
|
def insertion_sort(arr):
"""Refresher implementation of inserstion sort - in-place & stable.
:param arr: List to be sorted.
:return: Sorted list.
"""
for i in range(1, len(arr)):
tmp = arr[i]
j = i
# find the position for insertion
for j in range(i, len(arr)):
# the position is found if the prev element is smaller than current
if arr[j - 1] < tmp:
break
# shift to the right
arr[j] = arr[j - 1]
arr[j] = tmp
return arr
|
2d969c0f1cfeb85a093cf28663bf2ca940dc9d7c
| 44,316
|
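A short usage sketch for insertion_sort above; the sort is in place and the same list object is returned:
data = [5, 2, 4, 6, 1, 3]
print(insertion_sort(data))  # [1, 2, 3, 4, 5, 6]
print(data)                  # [1, 2, 3, 4, 5, 6] - sorted in place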
import math
def damage_function(variables):
"""
The damage that the attacking pokemon inflicts to the defending pokemon.
The formula is as described by:
https://www.math.miami.edu/~jam/azure/compendium/battdam.htm
    The variable dictionary has the following keys
    ----------
    attacker_level : int
        Attacker's level, by default 50
    attacker_attack : int
        Attacker's attack stat
    move_power : int
        Power of the move
    defender_defense : int
        Defender's defense stat
    same_type_advantage : boolean
        True if the move type is the same type as the attacking pokemon
    modifier : int, optional
        Modifier based on type effectiveness, by default 10
    stochasticity : int, optional
        A random number, by default random.randint(217, 255)
    """
stab = 1.5 if variables["same_type_advantage"] else 1
damage = math.floor((2 * variables["attacker_level"] / 5) + 2)
damage *= variables["attacker_attack"] * variables["move_power"]
damage = math.floor(damage / variables["defender_defense"])
damage = math.floor(damage / 50)
damage = math.floor(damage * stab)
damage = math.floor(damage * variables["modifier"])
damage *= variables["stochasticity"]
damage /= 255
return math.floor(damage)
|
f23a3a89c8486abab7a0bd0ae6d0d50a8b17c3c8
| 44,317
|
def contain_all_elements(lst, other_lst):
""" checking whether the second contains a list of all the elements of the first
:param lst: first list
:param other_lst: second list
:return: check result
"""
diff = set(other_lst)
diff -= frozenset(lst)
return not len(diff)
|
d7e62d7ed2b163b6ed70d339f0e944c01b8f4ca7
| 44,319
|
import statistics
def stdeviation(data):
""" Returns standard deviation of the data """
return statistics.stdev(data)
|
77bf744f553713a02505934488bcfa1cd0242674
| 44,321
|
def header_transform(key: str) -> str:
"""
    Function returns the header key in human readable form
:param key: header key
:return: translated headers
"""
header_dict = {
'Cve': 'CVE ID',
'CVSS': 'CVSS Score',
'VRR': 'VRR Score',
'ThreatCount': 'Threat Count',
'VulnLastTrendingOn': 'Last Trending On Date',
'Trending': 'Trending',
}
return header_dict.get(key, '')
|
d5c654dc9c31b2fbbe412487692e2052810dde10
| 44,322
|
def _dist(p, q):
"""Returns the squared Euclidean distance between p and q."""
dx, dy = q[0] - p[0], q[1] - p[1]
return dx * dx + dy * dy
|
da387e1e8298e962add266d131528ffc435de10d
| 44,324
|
def dec_to_set(row):
"""Convert the dec columns into a set, and fix the sign.
"""
if '-' in row['sign']:
return (-1*row['dec_deg'],row['dec_minutes'],row['dec_seconds'])
else:
return (row['dec_deg'],row['dec_minutes'],row['dec_seconds'])
|
115b27c2ab96857bdc05dc9975b6e2d010522b6f
| 44,325
|
import textwrap
def wrap(s: str) -> str:
"""Dedent and wrap a string to 79 characters."""
return textwrap.fill(textwrap.dedent(s), width=79)
|
0622063a144fbeba677d8bb02b9af01d9576515f
| 44,330
|
def get_overlap(time_window0, time_window1):
"""
get the overlap of two time windows
    :param time_window0: a tuple of date/datetime objects representing the
        start and end of a time window
    :param time_window1: a tuple of date/datetime objects representing the
        start and end of a time window
    :return: a tuple of date/datetime objects representing the start and
        end of a time window, or None if no overlap is found
:raise: ValueError
"""
sdate0, edate0 = time_window0
sdate1, edate1 = time_window1
error = 'start date {} is greater than end date {}'
if edate0 < sdate0:
raise ValueError(error.format(sdate0, edate0))
if edate1 < sdate1:
raise ValueError(error.format(sdate1, edate1))
if sdate1 < sdate0:
if edate1 < sdate0:
overlap = None
elif edate1 <= edate0:
overlap = sdate0, edate1
else:
overlap = sdate0, edate0
elif sdate1 <= edate0:
if edate1 <= edate0:
overlap = sdate1, edate1
else:
overlap = sdate1, edate0
else:
overlap = None
return overlap
|
c267287b4aaa543f6ebeef5c34ca0e349153dc4b
| 44,332
|
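A usage sketch for get_overlap above with plain datetime.date windows:
from datetime import date

w0 = (date(2021, 1, 1), date(2021, 1, 31))
w1 = (date(2021, 1, 20), date(2021, 2, 10))
print(get_overlap(w0, w1))  # (datetime.date(2021, 1, 20), datetime.date(2021, 1, 31))
print(get_overlap(w0, (date(2021, 3, 1), date(2021, 3, 2))))  # None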
def _is_scan_complete(hdr):
"""Checks if the scan is complete ('stop' document exists)
Parameters
----------
hdr : databroker.core.Header
header of the run
hdr = db[scan_id]
The header must be reloaded each time before the function is called.
Returns
-------
True: scan is complete
False: scan is incomplete (still running)
"""
# hdr.stop is an empty dictionary if the scan is incomplete
return bool(hdr.stop)
|
2616d6c504e7648d18af2789f69608bd8da9eccc
| 44,333
|
def get_avg_fps(PIL_Image_object):
""" Returns the average framerate of a PIL Image object """
PIL_Image_object.seek(0)
frames = duration = 0
while True:
try:
frames += 1
duration += PIL_Image_object.info['duration']
PIL_Image_object.seek(PIL_Image_object.tell() + 1)
except EOFError:
return frames / duration * 1000
return None
|
be5c5cd976cd7e08e21b0f402e991954b0f42ecc
| 44,336
|
def parse_args(param_string):
"""Parse a string of comma separated arguments such as '42,43,key=abc' into
a list of positional args [42, 43] and a dict of keyword args {key: abc}"""
if not param_string:
return [], {}
posargs = []
kwargs = {}
param_strings = param_string.split(',')
for p_string in param_strings:
parts = p_string.split('=')
if len(parts) == 1:
posargs.append(p_string)
elif len(parts) == 2:
kwargs[parts[0]] = parts[1]
return posargs, kwargs
|
f6e9257cce7ec0eae8767e5daea6966c47416f1d
| 44,339
|
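A quick check of parse_args above, splitting positional and keyword arguments out of one string:
print(parse_args('42,43,key=abc'))  # (['42', '43'], {'key': 'abc'})
print(parse_args(''))               # ([], {})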
import re
def cityscape_structure(filename):
"""
Parse the structure of Cityscape file names.
:return: city, seq:0>6, frame:0>6, type, ext
"""
regex = r"([a-zA-Z]+)_(\d+)_(\d+)_([a-zA-Z0-9]+)_*([a-zA-Z]*.[a-zA-Z]+)"
elems = re.compile(regex).findall(filename)[0]
return elems
|
dd08832282bc1d840c5eb912bb09770da87376e8
| 44,342
|
def _regret_matching(cumulative_regrets, legal_actions):
"""Returns an info state policy by applying regret-matching.
Args:
cumulative_regrets: A {action: cumulative_regret} dictionary.
legal_actions: the list of legal actions at this state.
Returns:
A dict of action -> prob for all legal actions.
"""
regrets = cumulative_regrets.values()
sum_positive_regrets = sum((regret for regret in regrets if regret > 0))
info_state_policy = {}
if sum_positive_regrets > 0:
for action in legal_actions:
positive_action_regret = max(0.0, cumulative_regrets[action])
info_state_policy[action] = (
positive_action_regret / sum_positive_regrets)
else:
for action in legal_actions:
info_state_policy[action] = 1.0 / len(legal_actions)
return info_state_policy
|
5772907c78e18895561729eb39185cf4a1dee281
| 44,344
|
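A small sketch of regret matching above: positive regrets are normalised into probabilities, non-positive ones get zero, and all-zero regrets fall back to a uniform policy:
print(_regret_matching({0: 3.0, 1: 1.0, 2: -2.0}, [0, 1, 2]))  # {0: 0.75, 1: 0.25, 2: 0.0}
print(_regret_matching({0: 0.0, 1: 0.0}, [0, 1]))              # {0: 0.5, 1: 0.5}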
from typing import Any
from pathlib import Path
import json
def read_glue_cache(folder: str, docname: str) -> dict[str, Any]:
"""Read a glue cache from the build folder, for a particular document."""
docpath = docname.split("/")
path = Path(folder).joinpath(*docpath[:-1]).joinpath(f"{docpath[-1]}.glue.json")
if not path.exists():
return {}
with path.open("r") as f:
return json.load(f)
|
b723451401a3eea836407271ac514fddf08de524
| 44,350
|
import torch
def cxcy_to_xy(cxcy):
"""
Convert bounding boxes from center-size coords (c_x, c_y, w, h) to boundary coords
(x_min, y_min, x_max, y_max).
Args:
cxcy: bounding boxes in center-size coords - a tensor of size (n_boxes, 4)
Returns:
xy: bounding boxes in boundary coords - a tensor of size (n_boxes, 4)
"""
return torch.cat(
[cxcy[:, :2] - (cxcy[:, 2:] / 2), cxcy[:, :2] + (cxcy[:, 2:] / 2)],
1, # (x_min, y_min), (x_max, y_max)
)
|
94b610ea00bdf665df19680396a8880e9f766e3e
| 44,352
|
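A usage sketch for cxcy_to_xy above, assuming PyTorch is installed; a single box centred at (10, 10) with width 4 and height 6:
import torch

boxes = torch.tensor([[10.0, 10.0, 4.0, 6.0]])
print(cxcy_to_xy(boxes))  # tensor([[ 8.,  7., 12., 13.]])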
def _rate_limit_exceeded(forbidden):
"""Predicate: pass only exceptions with 'rateLimitExceeded' as reason."""
return any(error['reason'] == 'rateLimitExceeded'
for error in forbidden._errors)
|
364d2349fd9619bfb884804602eb737336b95b56
| 44,353
|
import queue
def get_item_from_queue(Q, timeout=0.01):
""" Attempts to retrieve an item from the queue Q. If Q is
empty, None is returned.
Blocks for 'timeout' seconds in case the queue is empty,
so don't use this method for speedy retrieval of multiple
items (use get_all_from_queue for that).
"""
try:
        item = Q.get(True, timeout)
except queue.Empty:
return None
return item
|
c2c8d213e72c5fea4715094ab2084677ff18c3be
| 44,354
|
def search_in(transitions, visited, path, pos, goal):
""" Recursive helper for depth-first search. Takes transition map, set of visited
positions, list of path thus far, the next step to consider, and a goal. Returns
a complete path to goal or None if no path found from this start state. """
# If we're about to go someplace we've been, it's a loop: no path to goal
if pos in visited:
return None
# Add pos to visited locations and path thus far
visited.add(pos)
path.append(pos)
# If pos is the goal, we're done
if pos == goal:
return path
# For each adjacent position, try moving there
for next in transitions[pos]:
r = search_in(transitions, visited, path, next, goal)
if r:
return r
# If we get here, none of these led to the goal, so pos is a bad path, retrace by
# removing pos from path & visited, then returning failure
path.pop()
visited.remove(pos)
return None
|
af84fa0748dfd9988d304197caf2f1fc7b34edfc
| 44,356
|
def create_request_url(title):
"""Replaces space characters with '+' to form a suitable query string for the API"""
q_string = title.replace(' ', '+')
return f"https://google-search3.p.rapidapi.com/api/v1/search/q={q_string}num=2"
|
b014829caa807137e7a03468e688c9aab1b4bb0f
| 44,360
|
def toChunk(data):
"""
Convert string to a chunk.
@returns: a tuple of strings representing the chunked encoding of data
"""
return ("%x\r\n" % len(data), data, "\r\n")
|
d11b043e631b30755ac5bc796edc9edf9edf24f5
| 44,363
|
import math
def map_unit_vector_to_angle(unit_vector):
"""Convert a unit vector to an angle."""
return math.atan2(unit_vector[1] * math.pi, unit_vector[0] * math.pi)
|
c80bb109ca9ee964279cda6d4ffc12016df0e535
| 44,365
|
def getNodeSummary(scModel, node_id):
"""
Return path analysis. This only applicable after running processProjectProperties()
:param scModel:
:param node_id:
:return: None if not found
@type scModel: ImageProjectModel
@type node_id: str
@rtype: dict
"""
node = scModel.getGraph().get_node(node_id)
return node['pathanalysis'] if node is not None and 'pathanalysis' in node else None
|
2469d49a32d84b69d7653e17a64f88fe75428f8a
| 44,368
|
def get_mount_force(density,drag_coef,chord,length,vel):
"""
Computes the drag forces acting on an air foil section with
given drag coeff, chord and length.
"""
return 0.5*density*drag_coef*chord*length*vel**2
|
de6d29cf0bbddd585995bcec4b3f13a69a405f6c
| 44,370
|
from typing import Any
def try_to_cpu(t: Any) -> Any:
"""
Try to move the input variable `t` to a cpu device.
Args:
t: Input.
Returns:
t_cpu: `t` moved to a cpu device, if supported.
"""
try:
t = t.cpu()
except AttributeError:
pass
return t
|
3cf3607e79d4057ff5d5a196bc259291bbe969dd
| 44,371
|
import base64
def decode_object_data(lines):
"""
Decodes the base64 encoded data found within directory document objects.
:param list(str) lines:
the lines as found in a directory document object, not including
newlines or the begin/end lines
:returns: the decoded data
:rtype: bytes
"""
return base64.b64decode("".join(lines))
|
279ed2913880862da6b3a6c245c7e4e2c25f0ecf
| 44,372
|
def get_create_constraint_query(label: str, property_name: str, constraint_name: str = ''):
"""
Build query to create a constraint
:param label: node label
:param property_name: node property for the constraint
:param constraint_name: the constrain name
:return: cypher query
"""
query = 'CREATE CONSTRAINT '
if constraint_name:
query += constraint_name
query += f' IF NOT EXISTS ON (n:{label}) ASSERT n.{property_name} IS UNIQUE'
return query
|
4e8baccf1729a5fc2e3a87f44e0b5de46d4cefb0
| 44,376
|
def delete_subdir(config):
""" Remove subdir from config """
if not config:
return config
if 'subdir' in config:
del config['subdir']
return config
|
3b032d681ac213032e42d78ac56db1d441c9dba6
| 44,378
|
def compute_node_degrees(ugraph):
"""
Returns a dictionary of degree number for
all nodes in the undirected graph
"""
node_deg = {}
# iterate over all dictionary keys to find size of
# adjacency list for each node
for node in ugraph:
node_deg[node] = len(ugraph[node])
return node_deg
|
a6d2f2df91b8536eca7814d54376f8b7855c2e7b
| 44,379
|
def _get_model_device(model):
"""Return the device of a random model property."""
return list(model.values())[0].device
|
6e73a1ed57926ddfde023f24c010d47459bf0492
| 44,380
|
def untgz(archive):
"""Remove .tar.gz"""
return archive.replace('.tar.gz', '')
|
6f2506b3559d19e46d3428fc59ec65fdf480b988
| 44,381
|
from datetime import datetime
import json
def generate_event(file_name, datetime=datetime):
"""Function to generate json with a timestamp and filname headers.
"""
return json.dumps({'timestamp': datetime.now().isoformat(),
'filename': file_name})
|
d546f28902ebbc98d81031740c8c2368e5aa7baa
| 44,382
|
import re
def is_valid_rank_dir_name(name):
"""Check if the name followed the rank directory format."""
return bool(re.search(r'^rank_\d+$', name))
|
d40d3cbd3b4e8d749fd85400eb6d68e3d3ae1394
| 44,383
|
def linecut(x, y, ycut):
"""
Given a sequence of line segments defined by arrays x, y, and a
specified y value ycut, return a list of x values where the line
segments cross ycut, return empty list if they don't cut.
    NB: Often useful when e.g. x is an iteration number and y a
    residual; xcut then gives the iterations at which a convergence
    criterion was reached.
"""
xcut = []
if x.size < 2:
return []
for i in range(x.size - 1):
xx, yy = [x[i], x[i + 1]], [y[i], y[i + 1]]
if yy[1] < yy[0]:
xx.reverse()
yy.reverse()
if ycut >= yy[0] and ycut < yy[1]:
yfrac = (ycut - yy[0]) / (yy[1] - yy[0])
xcut.append(xx[0] + yfrac * (xx[1] - xx[0]))
return xcut
|
b0bd5d9c03e24a28a67d0be4b5ca6877b334f603
| 44,401
|
def resolve_boolean_value(val: str):
""" Resolve a string which represents a boolean value
Args:
val: The value
Returns:
True, False or None
"""
val = val.upper()
if val == "TRUE":
return True
elif val == "FALSE":
return False
else:
return None
|
6ec89280a23c0ea9819fe7c0c4294d0d5d16b3ad
| 44,409
|
def compute_RayleighsQuotient(Y, A):
""" Rayleigh's quotient with the vector Y and input matrix A """
return (Y.T)@(A.dot(Y)) / (Y.T@Y)
|
29eb22bcbe86ee18ea60a0cf5b20ecb9c7e274c6
| 44,411
|
def customer_name(toml_data):
"""Return the customer name from the toml data file"""
return toml_data["customer"]["customer_name"]
|
91d75f04c832c75eca26bdc23ac16bc3d0d80dc8
| 44,412
|
def noop(data):
"""
No-op "encoder" returns the object exactly as it is.
"""
return data
|
2929d8ce17197946e1af8f705ea17fdd4dfc6e41
| 44,414
|
def split_instruction(ins):
"""
    Split an assembly instruction into separate parts.
:param ins: The assembly line.
:return: A list with the parts of the instruction.
"""
newins = ins.replace(',', ' ')
splitins = newins.split()
return splitins
|
f3799deb9dc41c3c5184effec7bd1b1c07c61ffc
| 44,415
|
def parse_lists_from_api(list_string, separator=';'):
"""
Parse list of items returned by the OpenCitations API
:param list_string: list of dois separated by sep
:type list_string: str
:param separator: (Default ';') separator to separate elements in string
:type separator: str
:return: items in the string as a list
:rtype: list
"""
if list_string is None:
return None
items = [item.strip() for item in list_string.split(separator)]
return items
|
fa56eaed2bdfb9d10be1917ba6c02cc08cd9974b
| 44,417
|
def a_source_password(plugin_ctx, fsm_ctx):
"""send source password."""
src_password = plugin_ctx.src_password
fsm_ctx.ctrl.send_command(src_password, password=True)
return True
|
19d9bcba5ad41395b767422ef585b45ceb4d2d42
| 44,419
|
from typing import List
def function(name: str,
type_sig: str,
arg_names: List[str] = [],
body: str = '') -> str:
"""Function definition."""
    args = ' '.join(arg_names) + ' ' if arg_names else ''
return f'{name} : {type_sig}\n{name} {args}=\n{body}'
|
804a6d01dd64183b738e5b8dbb7713194921eb1c
| 44,420
|
def compatible_versions(actual_version: str, required_version: str) -> bool:
"""Determine whether two versions are equal.
Only the dot separated elements in common are taken into account, so
actual "3.7.4" compared with "3.7" will return True.
Args:
actual_version: A dot separated version.
required_version: A dot separated version.
Returns:
True if the actual_version is compatible with the required_version,
otherwise False.
"""
return all(
actual == expected
for actual, expected in zip(actual_version.split("."), required_version.split("."))
)
|
1876c7ce1ca1b992640ba7bb4f96cc9420de7965
| 44,422
|
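A few checks for compatible_versions above; only the dot-separated components present in both strings are compared:
print(compatible_versions("3.7.4", "3.7"))  # True
print(compatible_versions("3.8", "3.7"))    # False
print(compatible_versions("3", "3.7.1"))    # True (only the major component is compared)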
from typing import Callable
import math
def logistic(A, K, B, v, Q, M) -> Callable[[float], float]:
"""
Creates a generalized logistic function. https://en.wikipedia.org/wiki/Generalised_logistic_function
:param A: the lower asymptote
:param K: the upper asymptote
:param B: the growth rate
:param v: near which asymptote the growth occurs
:param Q: Y(0)
:param M: starting point x_0
:return: a function
"""
def f(t):
return A + (K - A) / ((1 + Q * math.exp(-B * (t - M))) ** (1 / v))
return f
|
8102cd6cd324d0a48671156424302a34fe73c1fa
| 44,423
|
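A sketch of the generalised logistic factory above: with A=0, K=1, B=1, v=1, Q=1, M=0 it reduces to the standard sigmoid, so f(0) is 0.5:
f = logistic(A=0, K=1, B=1, v=1, Q=1, M=0)
print(f(0))   # 0.5
print(f(10))  # ~0.9999546 (approaches the upper asymptote K)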
import textwrap
def wrap_message(message: str, chars_in_line: int = 80) -> str:
"""Wrap a block of text to a certain amount of characters
:param message:
:param chars_in_line: The width to wrap against
:returns: the wrapped message
"""
return_text = []
for paragraph in message.split('\n'):
lines = textwrap.wrap(paragraph, chars_in_line)
if not lines:
return_text.append('')
else:
return_text.extend(lines)
return '\n'.join(return_text)
|
21662addb2d93e9a858d61b8ba12e6148587a9cb
| 44,424
|
def days_index(df):
"""
Find indices of daily trips.
Parameters
----------
df : pandas DataFrame
Dataframe containing bikeshare trip data with columns that have been
renamed to the common key.
Returns
-------
d_i : dict
Contains the indices of the first trip per day.
"""
days = df['start_dt'].dt.day
d_i = [(days == i).idxmax() for i in range(1, max(days)+1)]
return dict(zip(range(1, max(days)+1), d_i))
|
31be6f32a4ef5f248500a7a3162d8f07bbde6c09
| 44,428
|
def fib(x):
"""
Returns the xth term of the Fibonacci sequence
"""
    a, b = 1, 1
    for i in range(1, x):
        a, b = b, a + b
    return a
|
76cb08e89f11152c1aa2240b14c216042850b0ab
| 44,432
|
def norm_stress(p, a):
"""
norm_stress(p, a)
Returns the normal stress given the force, p, and the area, a.
"""
return p / a
|
a14ca7cd5e3add11aab8b8c02edb73d8ee0e905d
| 44,437
|
def _listify(obj):
""" makes sure `obj` is a `list` """
if isinstance(obj, str):
return [obj]
try:
# will fail if obj is not iterable
return list(obj)
except TypeError:
return [obj]
|
f42fb6c7f40c286bb96d5980f69f0f08fb040328
| 44,439
|
def swap(l, i, j):
"""
Swap the index i with the index j in list l.
Args:
l (list): list to perform the operation on.
i (int): left side index to swap.
j (int): Right side index to swap.
Returns:
list
"""
l[i], l[j] = l[j], l[i]
return l
|
8283e8c1dfe07edf25a07c9912b8d240e538e790
| 44,441
|
def consume_until(line_iter, end_re):
"""
Consume and return lines from the iterator until one matches 'end_re'.
The line matching 'end_re' will not be returned, but will be consumed.
"""
ret = []
for l in line_iter:
line = l.group(0)
if end_re.search(line):
break
ret.append(line)
return ret
|
2a729d53a9658f53617c5f5ab62b3e35431ab796
| 44,443
|
def _should_unpack_args(args):
"""Returns `True` if `args` should be `*args` when passed to a callable."""
return type(args) is tuple # pylint: disable=unidiomatic-typecheck
|
ba7b91a64911841ab0d21f81c5b5a4d045ae78e3
| 44,446
|
import requests
from datetime import datetime
import pytz
def bit_bucket_user_details(user_name):
"""
Function to find the bit bucket user details from bit bucket user name.
:param user_name: string - bit bucket user name
:return: dict - dictionary of bit bucket user details
"""
bit_bucket_url = 'https://api.bitbucket.org/2.0/users/%s' % user_name
bit_bucket_data = requests.get(bit_bucket_url).json()
    # Drop the timezone offset and any fractional seconds before parsing the creation date.
    date_conversion = bit_bucket_data['created_on'].split('+')[0]
    account_created_at = date_conversion.split('.')
    account_created_at = datetime.strptime(account_created_at[0], "%Y-%m-%dT%X").replace(tzinfo=pytz.utc)
repo_url = list(bit_bucket_data['links']['repositories'].values())[0]
total_no_of_repos = requests.get(repo_url).json()['size']
followers_url = list(bit_bucket_data['links']['followers'].values())[0]
total_no_of_followers = requests.get(followers_url).json()['size']
following_url = list(bit_bucket_data['links']['following'].values())[0]
total_no_of_following = requests.get(following_url).json()['size']
snippets_url = list(bit_bucket_data['links']['snippets'].values())[0]
user_details = {
'user_name': user_name,
'display_name': bit_bucket_data['display_name'],
'account_created_at': account_created_at,
'repo_url': repo_url,
'total_no_of_repos': total_no_of_repos,
'followers': total_no_of_followers,
'following': total_no_of_following,
'blog_url': bit_bucket_data['website'],
'profile_url': list(bit_bucket_data['links']['html'].values())[0],
'snippets_url': snippets_url,
'location': bit_bucket_data['location'],
}
return user_details
|
405c97e5cb6bad0b8a68536eb6afc047e2a92216
| 44,452
|
def poly(x, *args, export=False):
"""Polynom nth degree for fitting.
:param x: parameter
:type x: int, float
:param *args: list of coefficients [a_N,a_N-1, ..., a_1, a_0]
:type *args: list
:param export: enable text output of function, defaults to False
:type export: bool or string, optional
:return: returns the polynomial
:rtype: str, int, float
>>> poly(3.4543, 5,4,3,2,1, export='Mathematica')
    '5*3.4543^4 + 4*3.4543^3 + 3*3.4543^2 + 2*3.4543^1 + 1*3.4543^0'
>>> poly(3.4543, 5,4,3,2,1)
920.4602110784704
"""
a = list(args)
if export == 'Mathematica':
        return ' + '.join([f'{a[i]}*{x}^{len(a) - 1 - i}' for i in range(len(a))])
else:
return poly(x, *a[0:-1])*x + a[-1] if len(a) > 1 else a[0]
|
d780f123de597565adeedf2cfc717c703ed5051c
| 44,453
|
def to_bytes(string: str) -> str:
"""Convert string to space seperated octates (big endian); two octates a group."""
_bytes = string.encode()
return ' '.join(hex(int.from_bytes(_bytes[i:i+2], 'big')) for i in range(0, len(_bytes), 2))
|
ae226cebfbbe2f1e116c834086e6323f522818e0
| 44,454
|
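A quick check of to_bytes above; each pair of UTF-8 bytes becomes one big-endian hex group:
print(to_bytes("hi"))   # 0x6869
print(to_bytes("abc"))  # 0x6162 0x63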
def message(blockers):
"""Create a sequence of key messages based on what is blocking."""
if not blockers:
return ['You have 0 projects blocking you from using Python 3!']
flattened_blockers = set()
for blocker_reasons in blockers:
for blocker in blocker_reasons:
flattened_blockers.add(blocker)
need = 'You need {0} project{1} to transition to Python 3.'
formatted_need = need.format(len(flattened_blockers),
's' if len(flattened_blockers) != 1 else '')
can_port = ('Of {0} {1} project{2}, {3} {4} no direct dependencies '
'blocking {5} transition:')
formatted_can_port = can_port.format(
'those' if len(flattened_blockers) != 1 else 'that',
len(flattened_blockers),
's' if len(flattened_blockers) != 1 else '',
len(blockers),
'have' if len(blockers) != 1 else 'has',
'their' if len(blockers) != 1 else 'its')
return formatted_need, formatted_can_port
|
a14c31dd0222ebf1e841d78ebea253cb3ca22a88
| 44,459
|
def check_true(option):
"""Check if option is true.
Account for user inputting "yes", "Yes", "True", "Y" ...
"""
option = option.lower()
if 'y' in option or 't' in option:
return True
else:
return False
|
98310fae9a22fdc87c2faf98a942eebb85710d42
| 44,461
|
import re
def commas(line):
""" Return the location of all the commas in an input string or line """
return [m.start() for m in re.finditer(',', line)]
|
c5327e3c34336db3c64acfa391da179018276b3b
| 44,462
|
def is_subsequence(needle, haystack):
"""Are all the elements of needle contained in haystack, and in the same order?
There may be other elements interspersed throughout"""
it = iter(haystack)
for element in needle:
if element not in it:
return False
return True
|
1b3c1d66258bc3ae66e52c9eacfe1dbfed345c87
| 44,464
|
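A usage sketch for is_subsequence above; order matters because the haystack iterator is only consumed forwards:
print(is_subsequence([1, 3], [1, 2, 3, 4]))  # True
print(is_subsequence([3, 1], [1, 2, 3, 4]))  # False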
def filter_candidates(candidates: list, usr_input: str):
"""
Return entries in candidates that contain usr_input as substring, ordered by the index
where the substring is found. Duplicates are removed. For matches where the index of
the substring is the same, original order (time) is kept.
"""
with_dups = list(sorted(filter(lambda x: usr_input in x, candidates), key=lambda x: x.find(usr_input)))
seen = set()
return [x for x in with_dups if not (x in seen or seen.add(x))]
|
97663f1deffba024c052aad2015d9059953a20ca
| 44,465
|
import inspect
def get_args(method):
"""
Helper to get the arguments (positional and keywords)
from the parameters of any function or class
Args:
method [function]:
returns:
args, kwargs
"""
sig = inspect.signature(method)
parameters = sig.parameters
args = []
kwargs = []
for key in parameters:
param = parameters[key]
if param.default is not param.empty:
kwargs.append(param)
else:
args.append(param)
return args, kwargs
|
6b9b4c2fd326574924f9941cda67b60b47f13fae
| 44,466
|
from functools import reduce
def _bit_list_to_bytes(bits):
"""Convert a sequence of truthy values into a byte string, MSB first."""
return bytes(
reduce(lambda a, b: (a << 1) | b, (int(bool(x)) for x in byte_bits))
for byte_bits
in zip(*[iter(bits)]*8)
)
|
005f3b800204e4b2053d0ba7d534d8e62e1449c7
| 44,467
|
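A small sketch of _bit_list_to_bytes above: eight truthy values per byte, most significant bit first:
bits = [0, 1, 0, 0, 0, 0, 0, 1,   # 0b01000001 == ord('A')
        0, 1, 0, 0, 0, 0, 1, 0]   # 0b01000010 == ord('B')
print(_bit_list_to_bytes(bits))   # b'AB'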
import inspect
def check_params(
func,
return_invalids=False,
**kwargs,
):
"""Return only valid parameters for a function a class from named parameters.
Parameters
----------
    func : callable
        The function or class whose signature the keyword arguments are checked against.
**kwargs :
Arbitrary keyword arguments.
return_invalids : bool (default: False)
If True, return both the valid and invalid arguments in a list.
Returns
-------
    new_kwargs :
        Only the keyword arguments that are valid for func.
    kwargs : (optional)
        The invalid keyword arguments for func (only if return_invalids is True).
"""
params = list(inspect.signature(func).parameters.keys())
new_kwargs = dict()
keys = kwargs.keys()
for key in list(keys):
if key in params:
new_kwargs[key] = kwargs[key]
del kwargs[key]
if return_invalids:
return new_kwargs, kwargs
else:
return new_kwargs
|
91e3e63a338aa416b55e8894ac409f14f8e0299d
| 44,474
|
def season(x):
"""Returns season based on month"""
x = x.month
if (x > 3) and (x <= 6):
return 1
elif (x > 6) and (x <= 9):
return 2
elif (x > 9) and (x <= 11):
return 3
else:
return 4
|
f7304e65933c681731050fb67c85724c6ac492e9
| 44,476
|
import json
def read_file(database):
"""Reads the file then returns it"""
with open(database) as file:
info = json.load(file)
return info
|
42c2d2cf148bee58f4ec9cb54ac8d6644139478c
| 44,479
|
import torch
def gen_causal_mask(input_size, dim_k, full_attention=False):
"""
Generates a causal mask of size (input_size, dim_k) for linformer
Else, it generates (input_size, input_size) for full attention
"""
if full_attention:
return (torch.triu(torch.ones(input_size, input_size))==1).transpose(0,1)
return (torch.triu(torch.ones(dim_k, input_size))==1).transpose(0,1)
|
48e6d47ff0171f8f123c88f6c2b329394e2d6acb
| 44,488
|
def encDec0(i):
"""Round to the nearest decade, decade starts with a '0'-ending year."""
return (i // 10) * 10
|
e4019cfc5dbef38516e1eec0782026fd201ba68a
| 44,492
|
def response_to_lines(response):
"""Converts an HTTP response to a list containing each line of text."""
return response.read().decode("utf-8").replace("\xad", "").split("\n")
|
b4e2359e9be09f5bc1838033bf77b0f20c670de8
| 44,493
|
def getSplicingData(data):
"""
Keeps only those rows that correspond to splicing mutations.
Arguments:
data = dataframe
Returns:
sp_data = dataframe
"""
sp_data = data[(data["SpliceSite"] == 'yes')]
return sp_data
|
c4039f334ad6450b04b28743c7ee26454f33f4aa
| 44,494
|
def mi(self):
"""Return mutual information between events and observations using equation I(X;Y) = H(X) - H(X|Y)"""
return self.entropy() - self.entropy(conditional = True)
|
18624d4926acf7519dc1b2f9987e11480688be85
| 44,496
|
def _build_rule_table(bnf_grammar_ast, terminals, skip):
"""
Args:
bnf_grammar_ast: grammar on bnf ast form produced by _ebnf_grammar_to_bnf
terminals (list): list of terminals of the language
Returns:
A dict that maps every non-terminal to a list of
right hand sides of production rules from that non-terminal.
"""
rule_table = {}
for rule in bnf_grammar_ast:
lhs = rule[0]
rhs = rule[1]
if lhs in terminals or lhs in skip:
continue
if lhs not in rule_table:
rule_table[lhs] = []
rule_table[lhs] += [rhs]
return rule_table
|
d95e53a801aeef9a0fc3ba1679ccc7d7b98c2be9
| 44,497
|
def get_facts(cat_file="facts.txt"):
"""Returns a list of facts about cats from the cat file (one fact
per line)"""
with open(cat_file, "r") as cat_file:
return [line for line in cat_file]
|
b2e9666bc1a833d25e73e61529c256f43bfb5c3b
| 44,498
|
def _esc(code):
"""Get an ANSI color code based on a color number."""
return '\033[{}m'.format(code)
|
086f9c0062dd78bca6772771b65e60132f1a5d08
| 44,501
|
def round_float(number, decimals):
"""
Rounds decimal number to exact number of decimals.
:param number: (float) Float number.
:param decimals: (int) Number of numbers after decimal point
:return:
"""
number = float(number)
out = round(number, decimals)
return out
|
faa92a2fe2187f111cc028395085edd8b4dda8f1
| 44,503
|
def calc_point_squre_dist(point_a, point_b):
"""Calculate distance between two marking points."""
distx = point_a[0] - point_b[0]
disty = point_a[1] - point_b[1]
return distx ** 2 + disty ** 2
|
855d1678f8ff66c0047c45441942e6e9d978fe2e
| 44,505
|