content (string, length 35–416k) | sha1 (string, length 40) | id (int64, 0–710k) |
|---|---|---|
def get_exposure_id(exposure):
    """Return the exposure id stored in the exposure's visit info."""
    visit_info = exposure.getInfo().getVisitInfo()
    return visit_info.getExposureId()
import re
def escapeRegex(text):
    """
    Escape a string for literal use inside a regular expression:
    prefix each of the special characters ][^.+*?{}|()\\$ with a backslash.
    """
    special_chars = set("][^.+*?{}|()\\$")
    return "".join("\\" + ch if ch in special_chars else ch for ch in text)
def results_query_part(entity):
    """Build the results section of a query: the space-separated GraphQL
    names of the entity's fields (prefetched relationships included).

    Beware: conceptually recursive over the prefetched-relationship graph;
    a cycle there would recurse infinitely.

    Args:
        entity (type): The entity which needs fetching.
    """
    names = [field.graphql_name for field in entity.fields()]
    return " ".join(names)
def dataframe_cleaner(df):
    """Drop columns whose null count reaches 25% of their length.

    Returns a DataFrame whose surviving columns are ordered by null
    count, highest first (matching the original selection order).
    """
    null_counts = {name: df[name].isnull().sum() for name in df}
    by_most_nulls = sorted(null_counts.items(), key=lambda kv: kv[1], reverse=True)
    keep = [name for name, count in by_most_nulls if count < 0.25 * len(df[name])]
    return df[keep]
def none(active):
    """Identity tapering window: return ``active`` unchanged.

    Parameters
    ----------
    active : array_like, dtype=bool
        ``True`` entries mark active loudspeakers.

    Returns
    -------
    type(active)
        The input, unchanged.
    """
    return active
import re
def get_sandwich(seq, aa="FYW"):
"""
Add sandwich counts based on aromatics.
Parameters:
seq: str, peptide sequence
aa: str,
amino acids to check fo rsandwiches. Def:FYW
"""
# count sandwich patterns between all aromatic aminocds and do not
# distinguish between WxY and WxW.
pattern = re.compile(r"(?=([" + aa + "][^" + aa + "][" + aa + "]))")
return len(re.findall(pattern, seq)) | d8df68a1873d3912aa64fc91563aae9827da324c | 41,514 |
import torch
def pad_to_size(tensor, sizes, axes=-1, before_and_after=False,
mode='constant', constant_values=0):
""" Pads tensor to a given size along specified axis. """
if isinstance(sizes, int):
sizes = [sizes]
if isinstance(axes, int):
axes = [axes]
for i, ax in enumerate(axes):
if ax < 0:
axes[i] = len(tensor.shape) + ax
size_deltas = [sizes[i] - tensor.shape[ax] for i, ax in enumerate(axes)]
size_paddings = {ax: (delta // (1 + before_and_after), (delta + 1) // 2 * before_and_after)
for ax, delta in zip(axes, size_deltas)}
paddings = []
for i in [3, 2, 1, 0]:
tup = size_paddings.get(i, (0, 0))
paddings.extend(tup)
return torch.nn.functional.pad(tensor, paddings, mode=mode, value=constant_values) | 344b08976b289aa1a5f05e30240fac0efbd9adc0 | 41,516 |
def pure_literal(ast_set):
    """
    Apply the pure-literal rule to a CNF in list format.

    A literal is either a plain variable (positive) or an ``(Op.NOT, var)``
    tuple (negative).  A variable that occurs with only one polarity across
    the whole formula is "pure"; every clause containing a pure literal is
    trivially satisfiable and is removed.

    :param: Result of cnf_as_disjunction_lists
    :return: CNF with clauses eliminated
    """
    positives = set()
    negatives = set()
    for clause in ast_set:
        for literal in clause:
            if isinstance(literal, tuple):
                negatives.add(literal[1])   # negated variable
            else:
                positives.add(literal)      # lone variable
    pure_positive = positives - negatives
    pure_negative = negatives - positives

    def has_pure_literal(clause):
        # True when any literal of the clause is pure.
        for literal in clause:
            if isinstance(literal, tuple):
                if literal[1] in pure_negative:
                    return True
            elif literal in pure_positive:
                return True
        return False

    return [clause for clause in ast_set if not has_pure_literal(clause)]
import subprocess
def run_raw_cli(cmd, print_output=True):
    """Run ``dcos <cmd>`` in a shell and return its results.

    eg. `cmd` = "package install <package-name>" results in:
    $ dcos package install <package-name>

    Returns:
        (returncode, stdout, stderr) with both streams utf-8 decoded and
        stripped (empty string when a stream produced nothing).
    """
    dcos_cmd = "dcos {}".format(cmd)
    completed = subprocess.run([dcos_cmd], shell=True,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out = completed.stdout.decode('utf-8').strip() if completed.stdout else ""
    err = completed.stderr.decode('utf-8').strip() if completed.stderr else ""
    if print_output:
        print(out)
        print(err)
    return completed.returncode, out, err
def is_param_free(expr) -> bool:
    """Return True when the expression carries no parameters."""
    return not bool(expr.parameters())
import os
from pathlib import Path
def find_installed_cargo_packages(env):
    """Find out which prefix contains each of the dependencies.

    :param env: Environment dict for this package
    :returns: A mapping of package names to paths
    :rtype dict(str, Path)
    """
    prefix_for_package = {}
    for raw_prefix in env['AMENT_PREFIX_PATH'].split(os.pathsep):
        prefix = Path(raw_prefix)
        resource_dir = (prefix / 'share' / 'ament_index'
                        / 'resource_index' / 'rust_packages')
        if resource_dir.exists():
            # Later prefixes override earlier ones for the same package.
            for entry in resource_dir.iterdir():
                prefix_for_package[entry.name] = prefix
    return {pkg: str(prefix / 'share' / pkg / 'Rust')
            for pkg, prefix in prefix_for_package.items()}
def getSecret():
    """
    Read secret.txt and return the database password for user 'rcos'
    used to create tables/schemas.

    Returns:
        list of str: the file's contents split on newlines (the password
        is the first element).
    """
    # Context manager closes the handle; the original leaked it.
    with open("secret.txt", 'r') as f:
        return f.read().split('\n')
def null_count(df):
    """Return the total number of null values in the input DataFrame."""
    return df.isna().values.sum()
def get_armstrong_value(num):
    """Return the Armstrong value of a number: the sum of d**k over its
    digits d, where k is the number of digits.

    I.e. 54 -> 5**2 + 4**2 -> 41.
    Related to narcissistic numbers and pluperfect digital invariants.
    """
    digits = str(num)
    k = len(digits)
    return sum(int(d) ** k for d in digits)
import json
def serialize(c):
    """
    Serialize a message object into a JSON string.

    Image messages get their payload base64-encoded first.
    """
    if c.Class == 'Image':
        c.encodeB64()
    return json.dumps(c, default=lambda obj: vars(obj))
import re
def convert_bert_word(word):
    """
    Convert a BERT token to a regular word: strip WordPiece continuation
    markers ("##") and the special [SEP]/[CLS] tokens.
    """
    # Raw string: the original non-raw pattern relied on the deprecated
    # invalid escape sequence "\[" being passed through unchanged.
    return re.sub(r"##|\[SEP\]|\[CLS\]", "", word)
def cleanly_separate_key_values(line):
    """Split ``line`` into (key, value) at the FIRST ':' only.

    Values may themselves contain ':' (e.g. timestamps), so a plain
    .split(':') would fragment them; only the first delimiter separates
    key from value.

    Returns:
        (key, value) tuple.  When the line contains no ':', the whole
        line is the key and the value is '' (the original returned
        garbage here because str.find() yields -1).
    """
    key, _, value = line.partition(':')
    return key, value
def batch(tensor, batch_size=50):
    """Split `tensor` along its first axis into consecutive batches of
    `batch_size` samples each (the final batch may be smaller; an empty
    input yields a single empty slice)."""
    length = tensor.shape[0]
    num_batches = max(1, -(-length // batch_size))  # ceil, at least one slice
    return [tensor[k * batch_size: min((k + 1) * batch_size, length)]
            for k in range(num_batches)]
def binary_search1():
    """
    Input: nums = [-1, 0, 3, 5, 9, 12], target = 9
    Output: 4
    Explanation: 9 exists in nums and its index is 4
    """
    nums = [-1, 0, 3, 5, 9, 12]
    target = 9
    # (The original immediately re-assigned nums/target with target = 2,
    # contradicting the docstring and making the function return -1.)
    left, right = 0, len(nums) - 1
    while left <= right:
        mid = left + (right - left) // 2
        if nums[mid] == target:
            return mid
        elif nums[mid] < target:
            left = mid + 1
        else:
            right = mid - 1
    return -1
def pack():
    """Pack content; currently a stub that reports success (0)."""
    return 0
def mse(predictions, targets):
    """Return the mean squared error between predictions and targets."""
    errors = predictions - targets
    return (errors ** 2).mean()
def _command(register_offset, port_number, register_type):
"""Return the command register value corresponding to the register type for a given port number and register offset."""
return (register_offset * register_type) + port_number | d09815a2451e86448e0da280c8b037b59cfbd87b | 41,532 |
def fetch_synonyms(what):
    """
    Return name synonyms for a region type.

    Parameter(s): what - the region type key.  Note: some keys are
        intentionally misspelled ("prarie", "savanah") to match callers.
    Return: list of strings containing the type and several synonyms;
        ["Land"] when the key is unknown.
    """
    synonym_table = {
        "shallows": ["Bay", "Shallows"],
        "ocean": ["Sea", "Ocean"],
        "prarie": ["Grasslands", "Fields", "Prairie", "Plains", "Steppes"],
        "desert": ["Desert", "Badlands", "Wastes", "Barrens"],
        "mountain": ["Mountains", "Peaks", "Crags"],
        "ridge": ["Ridge"],
        "wetland": ["Swamp", "Bog", "Fen", "Marsh"],
        "gentle forest": ["Forest", "Woods", "Woodlands", "Backwoods"],
        "dark forest": ["Darkwoods", "Tangle", "Rainforest", "Wilds", "Jungle"],
        "scrub": ["Wastes", "Scrubland", "Flats", "Expanse", "Rot"],
        "tundra": ["Boreal", "Frost", "Arctic"],
        "river": ["Creek", "River", "Stream", "Rapids"],
        "savanah": ["Savanah"],
        "kingdom": ["Kingdom"],
        "county": ["Barony", "County", "Nation"],
    }
    return synonym_table.get(what, ["Land"])
def tile_to_screen(pos, dims, off, p, screen_height=None):
    """Convert tile coordinates to screen coordinates.

    By default the result is in bottom-left screen coordinates; supplying
    ``screen_height`` flips it to top-left.  Returns the bottom-left
    position of the tile on the screen as (x, y).
    """
    off_x, off_y = off
    off_x = max(off_x, 0)  # negative x offsets are clamped to 0
    x, y, z = pos                     # tile x, y and z position
    x_dim, y_dim, _ = dims            # total size in x, y (z unused)
    screen_x = (x_dim - 1 - x + y) * (p / 2) + off_x
    # Gives top-left position of subsection
    screen_y = ((x_dim - x) + (y_dim - y)) * (p / 4) + z * p + off_y + (p / 2)
    if screen_height is not None:
        screen_y = screen_height - screen_y
    return (screen_x, screen_y)
import os
import glob
import json
def extract_files_from_dir(results_dir_path):
    """
    Checks if existent and then extracts relevant files (params.pkl,
    variant.json) from results_dir_path.

    :param results_dir_path: directory which shall be evaluated
    :return: variant_dict with the additional entry 'params_pickle_file'
        pointing at the lexicographically last .pkl file
    :raises AssertionError: if the directory is missing, contains no .pkl
        file, or does not contain exactly one variant json file
    """
    assert os.path.isdir(results_dir_path)
    pkl_files = glob.glob(os.path.join(results_dir_path, '*.pkl'))
    # Fixed assertion message: this check requires at least one .pkl file
    # (the original message wrongly said "must not contain more than one").
    assert pkl_files, 'Directory must contain at least one parameter file'
    pkl_files.sort()
    params_pickle_file = pkl_files[-1]
    variant_files = glob.glob(os.path.join(results_dir_path, '*variant*.json'))
    assert len(variant_files) == 1, 'Directory must contain exactly one variant file'
    with open(variant_files[0], 'r') as f:
        variant_dict = json.load(f)
    variant_dict["params_pickle_file"] = params_pickle_file
    return variant_dict
import math
def myceil(x, base=10):
    """
    Round ``x`` up to the nearest multiple of ``base``.

    Parameters
    ----------
    x: float
        number to be approximated to closest number to 'base'
    base: float
        base used to calculate the closest 'largest' number

    Returns
    -------
    n_high: float
        Closest float number to 'x', i.e. upper-bound float.

    Example
    -------
    >>> myceil(12, 10)
    20.0
    """
    quotient = math.ceil(float(x) / base)
    return float(base * quotient)
import math
def _assign_shard_id_to_roidb(roidb, num_splits, tot_vids):
    """
    Assign each roidb entry (one per video) to a shard and to a frame
    range inside that shard.

    Args:
        roidb: list of dicts, each carrying an 'nframes' count.
        num_splits: number of shards to spread the videos over.
        tot_vids: total number of videos (may differ from len(roidb)).

    Returns:
        list with one element for each entry in roidb
        (shard_dir_name,
         (start_frame_id (0-indexed, included),
          end_frame_id (0-indexed, not included)))
    """
    shards = []
    # Videos per shard, rounded up so all shards are covered.
    vids_per_job = int(math.ceil(tot_vids / num_splits))
    last_proc = 0  # first video index not yet assigned to a shard
    for start_id in range(num_splits):
        # NOTE(review): the +1 lets the final shard's range label include
        # tot_vids; presumably intentional -- confirm against the consumer.
        this_end_pos = min(last_proc + vids_per_job, tot_vids + 1)
        this_outdir = '{0:05d}_range_{1}_{2}'.format(
            start_id, last_proc, this_end_pos)
        # run through the entries that get assigned to this shard, and set
        # what frames out of it belong to which video.
        last_frame_proc = 0  # frame offsets restart at 0 within each shard
        for i in range(last_proc, min(this_end_pos, len(roidb))):
            # start_id is included and last_proc is not, as happens in the
            # ROIDB_SUBSET code
            this_frame_proc = last_frame_proc + roidb[i]['nframes']
            shards.append((
                this_outdir, (last_frame_proc, this_frame_proc)))
            last_frame_proc = this_frame_proc
        last_proc = this_end_pos
    return shards
import sys
def progress_factory(message):
    """Create a progress callback that rewrites a percentage on stdout.

    :param message: label printed before the percentage
    :return: function(position, total) printing e.g. "message: 42%"
    """
    def report(position, total):
        """Print progress for ``position`` out of ``total`` in place."""
        percent = int((position / total) * 100)
        sys.stdout.write('\r{}: {:2d}%'.format(message, percent))
        sys.stdout.flush()
    return report
def verifyTraceConstraints(graph):
    """
    Check the graph's trace constraints: False when any negative term
    traces from the '$' layer, or any positive term fails to trace;
    True otherwise.
    """
    root = graph.layers["$"]
    if any(graph.traceConstraint(root, neg) for neg in graph.layers["nterms"]):
        return False
    return all(graph.traceConstraint(root, pos) for pos in graph.layers["pterms"])
def get_branch_name(ref):
    """
    Take a full git ref name and return a more simple branch name.
    e.g. `refs/heads/demo/dude` -> `demo/dude`

    :param ref: the git head ref sent by GitHub
    :return: str the simple branch name
    """
    refs_prefix = 'refs/heads/'
    return ref[len(refs_prefix):] if ref.startswith(refs_prefix) else ref
import torch
import sys
def load_esm_model(model, use_gpu=True):
    """
    Load an ESM model from torch hub.

    Returns (esm_model, alphabet, batch_converter); the model is put in
    eval mode and moved to the GPU when requested and available.
    """
    esm_model, alphabet = torch.hub.load("facebookresearch/esm:main", model)
    batch_converter = alphabet.get_batch_converter()
    esm_model = esm_model.eval()
    if use_gpu and torch.cuda.is_available():
        esm_model = esm_model.cuda()
        print("Transferred ESM model to GPU", file=sys.stderr)
    return esm_model, alphabet, batch_converter
def test_auto(auto):
"""Test automatic setting."""
assert auto.foo == 42
return True | b8b9cbcbdaf407539fdb0509645054867d248200 | 41,545 |
def stdin(sys_stdin):
    """
    Parse standard input lines into a list of integers.
    """
    return list(map(int, sys_stdin))
from typing import Tuple
def parse_interval(interval: str) -> Tuple[str, str, str]:
    """
    Split an interval like 'SA14:00-18:00' into its three parts: the day
    abbreviation, the start time and the end time of the working day.

    :param interval: A str that depicts the day, start time and end time
    in one string. Ex.: 'SA14:00-18:00'
    :return: A tuple with three strings. Ex.: ('SA', '14:00', '18:00')
    """
    pieces = interval.split('-')
    day_and_start = pieces[0]
    return day_and_start[:2], day_and_start[2:], pieces[1]
def surrogate_loss(policy, all_obs, all_actions, all_adv, old_dist):
    """
    Compute the surrogate policy loss at the current parameters
    given observations, actions and advantage values.

    Parameters
    ----------
    policy (nn.Module):
    all_obs (Variable):
    all_actions (Variable):
    all_adv (Variable):
    old_dist (dict): The dict of means and log_stds Variables of
        collected samples

    Returns
    -------
    surr_loss (Variable): negative mean of (likelihood ratio * advantage)
    """
    current_dist = policy.get_policy_distribution(all_obs)
    reference_dist = policy.distribution(old_dist)
    likelihood_ratio = current_dist.likelihood_ratio(reference_dist, all_actions)
    return -(likelihood_ratio * all_adv).mean()
def _get_zero_one_knapsack_matrix(total_weight: int, n: int) -> list:
"""Returns a matrix for a dynamic programming solution to the 0/1 knapsack
problem.
The first row of this matrix contains the numbers corresponding to the
weights of the (sub)problems. The first column contains an enumeration of
the items, starting from the fact that we could not include any item, and
this is represented with a 0.
m[0][0] is 0 just because of the alignment, it does make any logical sense
for this purpose, it could be None, or any other value."""
m = [[0 for _ in range(total_weight + 2)] for _ in range(n + 2)]
for x in range(1, total_weight + 2):
m[0][x] = x - 1
for j in range(1, n + 2):
m[j][0] = j - 1
m[j][1] = 0
return m | bc96eb43f487b6ad20b143c177474ba04afc2319 | 41,551 |
def get_bus_stop_index(bus_stop_osm_id, bus_stop_osm_ids, start):
    """
    Return the index of the first occurrence of ``bus_stop_osm_id`` in
    ``bus_stop_osm_ids`` at or after ``start``, or -1 if absent.

    :param bus_stop_osm_id: int
    :param bus_stop_osm_ids: [int]
    :param start: int
    :return: bus_stop_index: int
    """
    for index in range(start, len(bus_stop_osm_ids)):
        if bus_stop_osm_ids[index] == bus_stop_osm_id:
            return index
    return -1
def read_atom_file(file):
    """Read file with atoms label.

    Args:
        file (str): Name of file with atoms labels; each line is a residue
            name followed by its atom names, whitespace-separated.

    Returns:
        list: per input line, a list of "<Residue Name> <Atom Name>"
        strings.
    """
    # Context manager closes the handle; the original leaked it.
    with open(file, "r") as handle:
        rows = [line.rstrip('\n').split() for line in handle]
    return [[row[0] + " " + atom for atom in row[1:]] for row in rows]
def combine_bolds(graph_text):
    """
    Make ID marker bold and remove redundant bold markup between bold
    elements.

    Only acts on text that starts with "(": wraps the first parenthesized
    ID in "**...**" and collapses a "** **" seam left between adjacent
    bold runs.
    """
    if graph_text.startswith("("):
        graph_text = (
            # NOTE(review): this first replace swaps one space-like
            # character for a plain space; it looks like a non-breaking
            # space cleanup whose characters may have been garbled in
            # transit -- confirm the intended source character.
            graph_text.replace(" ", " ")
            .replace("(", "**(", 1)
            .replace(")", ")**", 1)
            .replace("** **", " ", 1)
        )
    return graph_text
def format_id(kepler_id):
    """Formats the id to be a zero padded integer with a length of 9 characters.

    No ID is greater than 9 digits and this function will throw a ValueError
    if such an integer is given.

    :kepler_id: The Kepler ID as an integer.
    :returns: A 0 padded formatted string of length 9.
    :raises ValueError: if the ID has more than 9 digits (the docstring
        always promised this, but the check was missing).
    """
    if kepler_id >= 10 ** 9:
        raise ValueError(f'Kepler ID {kepler_id} has more than 9 digits')
    return f'{kepler_id:09d}'
import argparse
def parse_args():
    """Build and parse the command-line arguments for the split checker."""
    parser = argparse.ArgumentParser(
        description='Check tensorflow split correctness.')
    parser.add_argument('--tfmodel_path', type=str, default='', required=True,
                        help="Tensorflow pb model path")
    parser.add_argument('--submodel_path', type=str, default='', required=True,
                        help="Tensorflow sub model path")
    parser.add_argument('--use_cmodel', type=int, default=0, required=False,
                        help="0 or 1, if running on cmodel mode.")
    parser.add_argument('--from_zip', type=int, default=0, required=False,
                        help="0 or 1, if load from zip")
    return parser.parse_args()
def rev_comp(s):
    """A simple reverse complement implementation working on strings.

    Args:
        s (string): a DNA sequence (IUPAC, can be ambiguous)

    Returns:
        str: reverse complement of the input sequence

    Raises:
        KeyError: if the sequence contains a non-IUPAC character.
    """
    complement_of = dict(zip("acgtyrwskmnbvdhACGTYRWSKMNBVDH",
                             "tgcarywsmknvbhdTGCARYWSMKNVBHD"))
    # Complementing per character and reversing commute, so walk the
    # sequence backwards and complement in one pass.
    return "".join(complement_of[base] for base in reversed(s))
def sumIO(io_list, pos, end_time):
    """
    Given io_list = [(timestamp, byte_count), ...], sorted by timestamp.
    pos is an index in io_list.
    end_time is either a timestamp or None (None means "no upper bound").

    Find end_index, where io_list[end_index] is the index of the first entry in
    io_list such that timestamp > end_time.
    Sum the byte_count values in io_list from [pos..end_index).
    Return (sum_byte_count, end_index).
    """
    sum_byte_count = 0
    # end_time=None was documented but crashed on "<= None" in Python 3;
    # treat None as "sum to the end of the list".
    while pos < len(io_list) and (end_time is None or io_list[pos][0] <= end_time):
        sum_byte_count += io_list[pos][1]
        pos += 1
    return (sum_byte_count, pos)
def gap(gap_size=5):
    """Returns a given number of whitespace characters.

    Parameters
    ----------
    gap_size : int, optional
        Number of whitespace characters to be printed.

    Returns
    -------
    str
        Whitespace.
    """
    return ' ' * gap_size
def get_main_folder_name(item_container):
    """Return the name of the main (top-level) page.

    Walks up the parent chain while the item's ``integer_2`` marker
    is >= 2.  (Original docstring, German: "liefert den Namen der
    Hauptseite".)
    """
    ancestor = item_container.get_parent()
    while ancestor.item.integer_2 >= 2:
        ancestor = ancestor.get_parent()
    return ancestor.item.name
import subprocess
def get_conda_envs():
    """Get a list of all conda environments on the system.

    Returns
    -------
    list
        Names (str) of the environments reported by "conda env list",
        excluding blank and comment lines.
    """
    completed = subprocess.run(["conda", "env", "list"],
                               capture_output=True, check=True)
    names = []
    for line in completed.stdout.decode("utf-8").split("\n"):
        if not line:
            continue
        first_token = line.split()[0]
        if first_token != "#":
            names.append(first_token)
    return names
def is_child_class(target, base):
    """Check whether target is a strict subclass of base (not base itself)."""
    is_subclass = issubclass(target, base)
    return is_subclass and not (target is base)
import os
def ignore_files(directory, files) -> list:
    """Callable that returns the list of files to be ignored while copying
    a tree: every name in ``files`` that is a plain file in ``directory``.
    """
    plain_files = []
    for name in files:
        if os.path.isfile(os.path.join(directory, name)):
            plain_files.append(name)
    return plain_files
def get_pyramid_single(xyz):
    """Determine to which out of six pyramids in the cube a (x, y, z)
    coordinate belongs.

    The cube is partitioned into six pyramids whose apexes meet at the
    origin and whose bases are the cube faces: top (+z), bottom (-z),
    front (+x), back (-x), right (+y) and left (-y).

    Parameters
    ----------
    xyz : numpy.ndarray
        1D array (x, y, z) of 64-bit floats.

    Returns
    -------
    pyramid : int
        Which pyramid `xyz` belongs to as a 64-bit integer.

    Notes
    -----
    This function is optimized with Numba, so care must be taken with
    array shapes and data types.  Boundary points (equalities) are
    resolved in the fixed test order below: top, bottom, front, back,
    right, left.
    """
    x, y, z = xyz
    x_abs, y_abs, z_abs = abs(x), abs(y), abs(z)
    if (x_abs <= z) and (y_abs <= z):  # Top
        return 1
    elif (x_abs <= -z) and (y_abs <= -z):  # Bottom
        return 2
    elif (z_abs <= x) and (y_abs <= x):  # Front
        return 3
    elif (z_abs <= -x) and (y_abs <= -x):  # Back
        return 4
    elif (x_abs <= y) and (z_abs <= y):  # Right
        return 5
    else:  # (x_abs <= -y) and (z_abs <= -y)  # Left
        return 6
import copy
def get_midpoint_radius(pos):
    """Return the midpoint and radius of the hex maze as a tuple (x,y), radius.

    Params
    ======
    pos: PositionArray
        nelpy PositionArray containing the trajectory data.

    Returns
    =======
    midpoint: (x0, y0)
    radius: float
    """
    # make a local copy of the trajectory data (copy.copy is shallow, so
    # the underlying data arrays are shared with `pos`)
    local_pos = copy.copy(pos)
    # merge the underlying support to make computations easier
    # NOTE(review): writes the private _support attribute directly;
    # presumably nelpy exposes no public setter -- confirm.
    local_pos._support = pos.support.merge(gap=10)
    # apply smoothing to tame some outliers:
    local_pos = local_pos.smooth(sigma=0.02)
    # bounding-box center, and mean half-extent across dimensions
    midpoint = local_pos.min() + (local_pos.max() - local_pos.min())/2
    radius = ((local_pos.max() - local_pos.min())/2).mean()
    return midpoint, radius
import threading
def Lockable(cls):
    """
    This class decorator will add lock/unlock methods to the thusly decorated
    classes, which will be enacted via an also added `threading.RLock` member
    (`self._rlock`)::
        @Lockable
        class A (object) :
            def call (self) :
                print 'locked: %s' % self._locked
    The class instance can then be used like this::
        a = A ()
        a.call ()
        a.lock ()
        a.call ()
        a.lock ()
        a.call ()
        a.unlock ()
        a.call ()
        with a :
            a.call ()
        a.call ()
        a.unlock ()
        a.call ()
    which will result in::
        locked: 0
        locked: 1
        locked: 2
        locked: 1
        locked: 2
        locked: 1
        locked: 0
    The class A can also internally use the lock, and can, for example, use:
        @Lockable
        class A (object) :
            ...
            def work (self) :
                with self :
                    # locked code section
                    ...
    """
    # Refuse to decorate a class that already defines any name we are
    # about to install -- silently overwriting it would hide a bug.
    if hasattr (cls, '__enter__') :
        raise RuntimeError ("Cannot make '%s' lockable -- has __enter__" % cls)
    if hasattr (cls, '__exit__') :
        raise RuntimeError ("Cannot make '%s' lockable -- has __exit__" % cls)
    if hasattr (cls, '_rlock') :
        raise RuntimeError ("Cannot make '%s' lockable -- has _rlock" % cls)
    if hasattr(cls, '_locked') :
        raise RuntimeError ("Cannot make '%s' lockable -- has _locked" % cls)
    if hasattr(cls, 'locked') :
        raise RuntimeError ("Cannot make '%s' lockable -- has locked" % cls)
    if hasattr (cls, 'lock') :
        raise RuntimeError ("Cannot make '%s' lockable -- has lock()" % cls)
    if hasattr (cls, 'unlock') :
        raise RuntimeError ("Cannot make '%s' lockable -- has unlock()" % cls)
    def locked(self):
        # Current lock depth (0 == unlocked); the RLock is reentrant.
        return self._locked
    def locker(self):
        self._rlock.acquire()
        self._locked += 1
    def unlocker(self, *args):
        # *args absorbs the (exc_type, exc_val, exc_tb) that __exit__ receives.
        self._rlock.release()
        self._locked -= 1
    # NOTE(review): these are class attributes, so ALL instances of the
    # decorated class share one RLock and one depth counter -- confirm that
    # per-class (rather than per-instance) locking is intended.
    cls._rlock = threading.RLock ()
    cls._locked = 0
    cls.locked = locked
    cls.is_locked = locked
    cls.lock = locker
    cls.unlock = unlocker
    cls.__enter__ = locker
    cls.__exit__ = unlocker
    return cls
def dictzip(keys, values):
    """Zip two lists into a dictionary.

    Extra elements of the longer list are dropped; when a key repeats,
    the later value wins.
    """
    return dict(zip(keys, values))
from pathlib import Path
from typing import Tuple
from typing import List
def listdir_grouped(root: Path, ignore_folders=None, include_hidden=False) -> Tuple[List, List]:
    """List a directory's entries, split into directories and files, sorted.

    Arguments:
        root {Path} -- Directory to list (a str is accepted too)

    Keyword Arguments:
        ignore_folders {list} -- Entry names to skip (default: {None})
        include_hidden {bool} -- Include dot-prefixed entries (default: {False})

    Returns:
        tuple -- (directories, files), each sorted

    Examples:
        >>> dirs, files = listdir_grouped(".")
    """
    # None sentinel replaces the original's mutable default argument.
    if ignore_folders is None:
        ignore_folders = []
    if isinstance(root, str):
        root = Path(root)
    dirs, files = [], []
    for path in root.iterdir():
        # The original's condition chain was inverted, so ignore_folders
        # never excluded anything and hidden entries in ignore_folders
        # were wrongly included; apply both filters directly instead.
        if not include_hidden and path.name.startswith('.'):
            continue
        if path.name in ignore_folders:
            continue
        (dirs if path.is_dir() else files).append(path)
    dirs.sort()
    files.sort()
    return dirs, files
from typing import Optional
from typing import List
def _with_config_file_cmd(config_file: Optional[str], cmd: List[str]):
""" Prefixes `cmd` with ["--config-file", config_file] if
config_file is not None """
return (["--config-file", config_file] if config_file else []) + cmd | a7a93f2b089e4265a22fb7fd0ccced1e78e1c55c | 41,575 |
def _clip_points(gdf, poly):
    """Clip point geometry to the polygon extent.

    Points of ``gdf`` that intersect ``poly`` are selected via the spatial
    index and returned with their attributes.

    Parameters
    ----------
    gdf : GeoDataFrame, GeoSeries
        Composed of point geometry that will be clipped to the poly.
    poly : (Multi)Polygon
        Reference geometry used to spatially clip the data.

    Returns
    -------
    GeoDataFrame
        The subset of gdf intersecting with poly.
    """
    intersecting_positions = gdf.sindex.query(poly, predicate="intersects")
    return gdf.iloc[intersecting_positions]
def hamiltonian_mse_blocks(blocks1, blocks2, scale=1.0):
    """Element-wise squared error between Hamiltonians in block form.

    Defined so that the sum over blocks equals the overall MSE in the
    matrix form: off-diagonal blocks with (na, la) != (nb, lb) are
    doubled to account for their symmetric counterpart, and every block
    is multiplied by ``scale``.

    Args:
        blocks1, blocks2: nested dicts species -> (na, la, nb, lb) ->
            angular channel -> array.
        scale: overall multiplier applied to each block's error.

    Returns:
        dict mirroring the block structure with one scalar per block.
    """
    mse = {}
    # (removed the original's unused `nel` counter)
    for species, species_blocks in blocks1.items():
        mse[species] = {}
        for block_key in species_blocks:
            na, la, nb, lb = block_key
            total = 0.0
            for channel in blocks1[species][block_key]:
                diff = (blocks1[species][block_key][channel]
                        - blocks2[species][block_key][channel])
                total += (diff.flatten() ** 2).sum()
            if nb != na or lb != la:
                total *= 2.0  # multiplicity of symmetric off-diagonal blocks
            mse[species][block_key] = total * scale
    return mse
import re
def get_relval_id(file):
    """Return the unique relval ID (dataset name) embedded in a file name.

    Expects names matching R<9 digits>__<dataset>__CMSSW_...; raises
    IndexError when the pattern is absent.
    """
    # Raw string: the original non-raw pattern depended on the deprecated
    # invalid escape sequences "\d" and "\w" passing through unchanged.
    dataset_name = re.findall(r'R\d{9}__([\w\d]*)__CMSSW_', file)
    return dataset_name[0]
import os
def librato_test_space(test_space=os.environ.get('LIBRATO_TEST_SPACE')):
    """Return the librato test space.

    Defaults to the LIBRATO_TEST_SPACE environment variable, which is
    read once at function-definition time.
    """
    return test_space
def splitdrive(path):
    """Split a pathname into drive and path specifiers.

    Returns a 2-tuple "(drive,path)"; either part may be empty.  Only a
    bare drive spec ("C:") or an absolute drive path ("C:/...") is split;
    a drive-relative path like "C:rel" is returned whole.
    """
    # Algorithm based on CPython's ntpath.splitdrive and ntpath.isabs.
    has_drive_letter = (path[1:2] == ':'
                        and path[:1].lower() in 'abcdefghijklmnopqrstuvwxyz')
    if has_drive_letter:
        rest = path[2:]
        if rest == '' or rest[0] in '/\\':
            return path[:2], rest
    return '', path
def triedctk():
    """
    The ctk methods: Kohonen topological maps (Cartes Topologiques de
    Kohonen).

    findbmus    : find the closest referents (Best Match Units)
    mbmus       : find multiple bmus, ordered from closest
    errtopo     : topological error (rectangular maps only)
    findhits    : count the elements captured by each referent
    showmap     : display the map variables
    showmapping : show the map's deployment in data space (pairwise)
    showcarte   : display the map with various parameters
    showbarcell : display the map referents as bars
    showprofils : show referents and/or their data as curves
    showrefactiv: show neuron activation (inverse distance) for the
                  presented patterns
    showrefpat  : show the (integrated) patterns captured by neurons
    cblabelmaj  : label referents by majority vote
    reflabfreq  : table of label frequencies per referent
    cblabvmaj   : referent labels assigned by majority vote
    cblabfreq   : label frequencies of the referents' 'Labels'
    label2ind   : convert string labels to integer indices
    mapclassif  : classify data with the map
    classifperf : classification performance
    """
    return None
import re
def safe_filename(url):
    """
    Sanitize input to be safely used as the basename of a local file:
    collapse runs of non-alphanumerics into '.', strip leading dots,
    then collapse repeated dots.
    """
    sanitized = re.sub(r'[^A-Za-z0-9]+', '.', url)
    sanitized = re.sub(r'^\.*', '', sanitized)
    sanitized = re.sub(r'\.\.*', '.', sanitized)
    return sanitized
async def combine_channel_ids(ctx):
    """Combines all channel IDs.

    Called by `channel_setter` and `channel_deleter`.

    Args:
        ctx (discord.ext.commands.Context): context of the message

    Returns:
        list: Discord channel IDs.  NOTE(review): the fallback branch
        appends the current channel's id as an int while the mention
        branch appends str(id); the two branches disagree on element
        type (this docstring originally promised ints) -- confirm which
        type callers expect before unifying.
    """
    channels = []
    if not ctx.message.channel_mentions:
        # No channels mentioned: default to the message's own channel.
        channels.append(ctx.channel.id)
    else:
        for channel_mention in ctx.message.channel_mentions:
            channels.append(str(channel_mention.id))
    return channels
def get_role_arn(iam, role_name):
    """Return the ARN of the named role via the given IAM client."""
    role_info = iam.get_role(RoleName=role_name)['Role']
    return role_info['Arn']
def wordCount(wordListRDD):
    """Creates a pair RDD with word counts from an RDD of words.

    Args:
        wordListRDD (RDD of str): An RDD consisting of words.

    Returns:
        RDD of (str, int): An RDD consisting of (word, count) tuples.
    """
    # Map every word to a (word, 1) pair, then sum the ones per word.
    pairs = wordListRDD.map(lambda word: (word, 1))
    return pairs.reduceByKey(lambda left, right: left + right)
def is_ascii(identifier) -> bool:
    """Return True when *identifier* is pure ASCII and contains none of the
    escape markers '0x', '\\x' or '\\u'.

    print(is_ascii('\\xAA'))     # False
    print(is_ascii('\\u02C6-'))  # False
    print(is_ascii('0x10000'))   # False
    print(is_ascii('你好'))       # False
    """
    # Hex / unicode escape markers disqualify the string outright.
    if any(marker in identifier for marker in ('0x', '\\x', '\\u')):
        return False
    return str.isascii(identifier)
import logging
def get_logger(name: str, level: int = logging.INFO) -> logging.Logger:
    """
    Return a logger configured with a standard stream handler and format.

    :param name: Name of the logger.
    :param level: Logging level used by the logger.
    :return: The newly or previously created Logger object.
    """
    logger = logging.getLogger(name)
    logger.setLevel(level)
    # Attach the handler only on first use, so repeated calls never
    # duplicate the formatter/handler (and thus the output).
    if not logger.handlers:
        handler = logging.StreamHandler()
        handler.setLevel(level)
        fmt = logging.Formatter("%(asctime)s - %(levelname).3s - %(name)s > %(message)s")
        fmt.datefmt = "%Y/%m/%d %I:%M:%S"
        handler.setFormatter(fmt)
        logger.addHandler(handler)
    return logger
def write_raw(code, input):
    """ write <args> - Send a raw command to the server. WARNING THIS IS DANGEROUS! Owner-only. """
    secure = '{red}That seems like an insecure message. Nope!'
    # NOTE(review): .encode() yields bytes on Python 3, so the later
    # `str in bytes` membership test would raise TypeError -- this looks
    # like Python 2 era code; confirm the target interpreter.
    r = input.group(2).encode('ascii', 'ignore')
    # Services / pseudo-clients that must never be reachable via raw write.
    bad = ['ns', 'nickserv', 'chanserv', 'cs',
           'q', 'authserv', 'botserv', 'operserv']
    for bot in bad:
        # Refuse if any protected service name appears as a whole word.
        if (' %s ' % bot) in r.lower():
            return code.reply(secure)
    try:
        # Expect "ARGS : TEXT": exactly one ':' separating command args
        # from trailing text; anything else falls into the except.
        args, text = r.split(':')
        args, text = args.strip().split(), text.strip()
    except:
        # No single ':' separator -- forward the message unparsed.
        return code.write(input.group(2), raw=True)
    return code.write(args, text, raw=True)
def gera_estados(quantidade_estados: int) -> list:
    """
    Given a count, return a list of state names 'q0' .. 'q<n-1>'.
    """
    return ['q' + str(indice) for indice in range(quantidade_estados)]
def valid_cards_syntax(cards_str):
    """Confirm that *cards_str* holds only numeric groups separated by
    periods.

    Returns None when valid, otherwise an error message string.
    """
    for chunk in cards_str.split('.'):
        if not chunk.isnumeric():
            return 'Cards must contain only digits 0-9 separated by periods'
    return None
def togti(boollst, tlst):
    """Extract the (start, stop) time pairs of the True runs in a boolean
    array, given the matching array of timestamps.

    Expects numpy-style arrays (uses element-wise XOR and boolean fancy
    indexing).  # assumes len(boollst) == len(tlst) -- TODO confirm
    """
    # Neighbour XOR: True wherever the value flips between T and F.
    boolcha = boollst[1:]^boollst[:-1]
    # Boundary corrections so runs touching either end of the array still
    # produce paired open/close edges (NOTE(review): derived from the XOR
    # algebra of the end samples -- verify against real data).
    boolcha[0] = boollst[0]^boolcha[0]
    boolcha[-1] = boollst[-1]^boolcha[-1]
    # Timestamps located at the flagged transition points.
    timeint = tlst[:-1][boolcha]
    # Edges alternate: even positions open an interval, odd ones close it.
    time0 = timeint[::2]; time1 = timeint[1::2]
    #timestart, timestop
    return time0, time1
def db_to_amplitude(amplitude_db: float, reference: float = 1e-6) -> float:
    """
    Convert an amplitude from decibel (dB) into volts.

    Args:
        amplitude_db: Amplitude in dB
        reference: Reference amplitude. Defaults to 1 µV for dB(AE)

    Returns:
        Amplitude in volts
    """
    ratio = 10 ** (amplitude_db / 20)
    return reference * ratio
def hsv2rgb(hsvColor):
    """
    Convert a '#RRGGBB' hex colour string into an (r, g, b) tuple.

    NOTE: despite the name, this parses a hex colour code, not HSV.

    :param hsvColor: hex colour string, e.g. '#1C8A88' or '#BDEFEF'
    :return: (r, g, b) integer triple
    """
    hex_digits = hsvColor.lstrip('#')
    channels = tuple(int(hex_digits[pos:pos + 2], 16) for pos in (0, 2, 4))
    return channels[0], channels[1], channels[2]
def get_title(soup):
    """
    Return the product title text, or "N/A" when the title element is
    missing. Unknown characters are left as-is.
    """
    heading = soup.find('h1', attrs={'class': 'it-ttl'})
    if not heading:
        return "N/A"
    # Strip embedded <span> tags so only the bare title text remains.
    for span in heading('span'):
        span.extract()
    return heading.get_text()  # .encode('ascii', 'replace')
def simple_expand_spark(x):
    """Split a semicolon-separated string into a list, dropping empty
    pieces; None/empty input yields an empty list."""
    if not x:
        return []
    return [piece for piece in x.split(";") if piece]
def STRING_examples(request):
    """
    Pytest fixture-style helper: forwards the current parametrized value.

    :param request: pytest fixture request object carrying ``param``.
    :return: Tuple(ENSPs_list, TaxID(str))
    """
    return request.param
def full_code(code, is_index=True, is_dot=False):
    """Prefix a 6-digit security code with its exchange marker.

    code: 6-digit security code
    is_index: whether the code identifies an index (vs. a stock)
    is_dot: use the dotted form ('sh.'/'sz.') instead of 'sh'/'sz'
    return: the exchange-prefixed code
    """
    sh, sz = ('sh.', 'sz.') if is_dot else ('sh', 'sz')
    leading = code[0]
    # Indexes starting with '0' and stocks starting with '6' belong to
    # Shanghai; everything else goes to Shenzhen.
    if is_index:
        prefix = sh if leading == '0' else sz
    else:
        prefix = sh if leading == '6' else sz
    return prefix + code
def get_output_parameters_of_execute(taskFile):
    """Get the set of output parameters of an execute method within a program.

    NOTE(review): taskFile appears to be a RedBaron/FST node (uses
    .find with node-type predicates) -- confirm against the caller.

    Returns a list of parameter names, or None when no assignment of the
    form ``x = execute(...)`` / ``a, b = execute(...)`` is found.
    """
    # get the invocation of the execute method to extract the output parameters:
    # an 'assign' whose value is exactly `execute(<call>)` (two atomtrailer parts).
    invokeExecuteNode = taskFile.find('assign', recursive=True,
                                      value=lambda value: value.type == 'atomtrailers'
                                      and len(value.value) == 2
                                      and value.value[0].value == 'execute')
    # generation has to be aborted if retrieval of output parameters fails
    if not invokeExecuteNode:
        return None
    # only one output parameter (plain name target)
    if invokeExecuteNode.target.type == 'name':
        return [invokeExecuteNode.target.value]
    else:
        # set of output parameters (tuple-unpacking target)
        return [parameter.value for parameter in invokeExecuteNode.target.value]
from typing import Union
from typing import Tuple
from typing import Dict
def generate_collector_dicts(collect_agents) -> Union[Tuple[Dict, Dict], Dict]:
    """
    This returns two dictionaries consisting of as many key/value pairs as the elements
    contained within the @model_reporters, @agent_reporters parameters.

    Network attributes (see ``net_names``) are reported through a callable
    that dumps the agent's network; plain attributes are reported by name.

    :param collect_agents: when truthy, also build the agent reporters.
    :return: (agent_reporters, model_reporters) or model_reporters alone.
    """
    model_reporters = ["seed", "family_intervention", 'social_support', 'welfare_support',
                       'this_is_a_big_crime', 'good_guy_threshold', 'number_deceased',
                       'facilitator_fails', 'facilitator_crimes', 'crime_size_fails',
                       'number_born', 'number_migrants', 'number_weddings',
                       'number_weddings_mean', 'number_law_interventions_this_tick',
                       'correction_for_non_facilitators', 'number_protected_recruited_this_tick',
                       'people_jailed', 'number_offspring_recruited_this_tick', 'number_crimes',
                       'crime_multiplier', 'kids_intervention_counter', 'big_crime_from_small_fish',
                       'arrest_rate', 'migration_on', 'initial_agents', 'intervention',
                       'max_accomplice_radius', 'number_arrests_per_year', 'ticks_per_year',
                       'num_ticks', 'tick', 'ticks_between_intervention', 'intervention_start',
                       'intervention_end', 'num_oc_persons', 'num_oc_families',
                       'education_modifier', 'retirement_age', 'unemployment_multiplier',
                       'nat_propensity_m', 'nat_propensity_sigma', 'nat_propensity_threshold',
                       'facilitator_repression', 'facilitator_repression_multiplier',
                       'percentage_of_facilitators', 'targets_addressed_percent',
                       'threshold_use_facilitators', 'oc_embeddedness_radius',
                       'oc_boss_repression', 'punishment_length',
                       'constant_population', "number_crimes_yearly_per10k",
                       "number_crimes_committed_of_persons", "current_oc_members",
                       "current_num_persons", 'criminal_tendency_mean', 'criminal_tencency_sd',
                       'age_mean', 'age_sd', 'education_level_mean', 'education_level_sd',
                       'num_crime_committed_mean', 'num_crime_committed_sd',
                       "crimes_committed_by_oc_this_tick", "current_prisoners", "employed",
                       "facilitators", "tot_friendship_link", "tot_household_link",
                       "tot_partner_link", "tot_offspring_link", "tot_criminal_link",
                       "tot_school_link", "tot_professional_link", "tot_sibling_link",
                       "tot_parent_link", "number_students", "number_jobs",
                       "likelihood_of_facilitators"]
    agent_reporters = ['unique_id', 'gender_is_male', 'prisoner', 'age', 'sentence_countdown',
                       'num_crimes_committed', 'num_crimes_committed_this_tick',
                       'education_level', 'max_education_level', 'wealth_level',
                       'job_level', 'propensity', 'oc_member', 'retired', 'number_of_children',
                       'facilitator', 'hobby', 'new_recruit', 'migrant', 'criminal_tendency',
                       'target_of_intervention', "cached_oc_embeddedness", 'sibling',
                       'offspring', 'parent', 'partner', 'household', 'friendship',
                       'criminal', 'professional', 'school']
    net_names = ['sibling', 'offspring', 'parent', 'partner', 'household', 'friendship',
                 'criminal', 'professional', 'school']
    model_reporters_dic = {key: key for key in model_reporters}
    # Bind `key` as a lambda default: a bare `lambda x: x.dump_net(key)`
    # closes over the comprehension variable late, so every network column
    # would have dumped the *last* network name ('school') instead of its own.
    agent_reporters_dic = {key: key if key not in net_names
                           else (lambda x, key=key: x.dump_net(key))
                           for key in agent_reporters}
    if collect_agents:
        return agent_reporters_dic, model_reporters_dic
    else:
        return model_reporters_dic
def drop_empty(facets):
    """Prevent any 'bucket' type facets from being displayed on the portal
    if the buckets contain no data. This also preserves backwards
    compatibility for v0.3.x"""
    kept = []
    for facet in facets:
        if facet.get('buckets') != []:
            kept.append(facet)
    return kept
def split_addr(ip_port: tuple[str, int]) -> tuple[int, ...]:
    """Flatten an (ip, port) pair into a tuple of ints for sorting later.

    Example
    --------
    >>> split_addr(('172.217.163.78', 80))
    >>> (172, 217, 163, 78, 80)
    """
    ip, port = ip_port
    octets = tuple(int(part) for part in ip.split("."))
    return octets + (port,)
def is_set(bb, bit):
    """ Return True when the bit at position `bit` of bitboard `bb` is 1. """
    return bool((bb >> bit) & 1)
def redirect(status, location, start_response):
    """
    Issue an HTTP redirect. This function does not set any cookie.

    Args:
        status (str): code and verbal representation (e.g. `302 Found`)
        location (str): the location the client should be redirected to (a URL)
        start_response: the WSGI start_response() callable

    Returns:
        list: an empty response body
    """
    start_response(status, [("location", location)])
    return []
import re
def _parse_path_string(variables, path):
"""
Parse the longest prefix of 'path'
made of any characters beyond '{' and '}'.
"""
pattern = re.compile(r"([^{}]*)(.*)")
match = pattern.match(path)
if match:
string = match.group(1)
rest = match.group(2)
return string, rest
else:
# Should never happen
raise Exception("_parse_path_string(): Unable to parse the path '{}'!".format(path)) | 48bacdde239cf4c074cf59801363b55f0dc5c135 | 41,613 |
def all_data(self):
    """
    Monkey-patch helper giving Query objects an `all_data` method that
    returns every row as a dict keyed by column name.
    """
    names = tuple(desc["name"] for desc in self.column_descriptions)
    return [dict(zip(names, row)) for row in self.all()]
def count_att(data, column, value):
    """
    :param data: Pandas DataFrame
    :param column: specific column in the dataset
    :param value: which value in the column should be counted
    :return: probability of (value) to show in (column), included Laplacian correction
    """
    n_rows = len(data)
    counts = data[column].value_counts()
    try:
        prob = counts[value] / n_rows
        # Laplacian correction keeps the probability strictly positive.
        if prob == 0:
            prob = 1 / (n_rows + len(counts))
        return prob
    except KeyError:
        # (value) never observed: fall back to the Laplacian estimate.
        return 1 / (n_rows + len(counts))
def _get_method_for_string(method_str, the_globals=None):
"""
This setting will provide a way to move easily from
'my_method' --> my_method the function
"""
if not the_globals:
the_globals = globals()
return the_globals[method_str] | de9fdad657cd3084b20a7ed5ea00c70b61e49803 | 41,616 |
from unittest.mock import patch
def patch_data_collector():
    """
    Build a decorator that replaces DataCollector with a dummy mock.
    """
    def decorator(old_function):
        # patch() resolves its target lazily, at call time.
        return patch("insights.client.client.DataCollector")(old_function)
    return decorator
def _afunc(arg1, arg2):
"""
Used to concatenate two lists inside a reduce call
"""
return arg1 + arg2 | 5895aeabebf93e714dfefbd7ea0a187eff57a88f | 41,618 |
def process(text):
    """Handler applied to every incoming text message.

    Processing: split the string on whitespace -> convert the pieces to
    integers -> sum them -> return the answer.

    Args:
        text (str): the string to process.

    Returns:
        int: the sum of the numbers contained in the message.
    """
    return sum(int(token) for token in text.split())
def get_height(root):
    """Return the height of a binary tree; an empty tree has height -1.

    >>> assert(get_height(None) == -1)
    >>> root = Node(1)
    >>> assert(get_height(root) == 0)
    >>> root = Node(1, Node(2))
    >>> assert(get_height(root) == 1)
    >>> root = Node(1, Node(2, Node(3)))
    >>> assert(get_height(root) == 2)
    """
    if root is None:
        return -1
    return 1 + max(get_height(root.left), get_height(root.right))
def null_getter(d, fld, default="-"):
    """
    Look up *fld* in mapping *d*; fall back to *default* when the key is
    missing or its value is falsy.
    """
    value = d.get(fld, default)
    return value if value else default
def convert_vars_to_readable(variables_list):
    """Substitutes out variable names for human-readable ones.

    The name dictionary (VARIABLE_NAMES_DICTONARY) is currently disabled,
    so every name is passed through unchanged.

    :param variables_list: a list of variable names
    :returns: a copy of the list with human-readable names
    """
    # The original loop guarded the dictionary lookup with `if False:`,
    # making that branch unreachable dead code; the effective behavior is
    # a plain element-by-element copy, kept here explicitly.
    return list(variables_list)
import os
import requests
import json
def _get_jama_item_types():
    """GETs item types defined in a Jama instance.

    Requires JAMA_URL, JAMA_USER and JAMA_PASS in the environment; caps
    the request at 50 results.

    Args:
        None
    Returns:
        Tuple of (list of {label, value} option dicts, dict mapping
        item-type id (str) to its display name).
        NOTE(review): the original docstring claimed a single array, but
        the function returns a 2-tuple.
    """
    url = os.environ['JAMA_URL'] + "/rest/latest/itemtypes?maxResults=50"
    resp = requests.get(url, auth=(os.environ["JAMA_USER"], os.environ["JAMA_PASS"]))
    # NOTE(review): assert is stripped under `python -O`; consider raising
    # an explicit exception on non-200 instead.
    assert(200 == resp.status_code)
    resp_json = json.loads(resp.text)
    item_types = {}
    for item in resp_json["data"]:
        print(item["display"])  # debug output
        item_name = str(item["id"])
        print(item_name)  # debug output
        item_types[item_name] = item["display"]
    # Returns an array of objects
    return [{
        "label": item["display"],
        "value": item["id"]
    }
        for item in resp_json["data"]
    ], item_types
from typing import Dict
import re
import pkgutil
import io
from typing import OrderedDict
import json
def _load_signatures(filepath: str) -> Dict[str, re.Pattern]:
    """Load signatures for blockpage matching from packaged JSON-lines data.

    Args:
        filepath: relative path to json file containing signatures

    Returns:
        Dictionary mapping fingerprints to compiled signature patterns
    """
    data = pkgutil.get_data(__name__, filepath)
    if not data:
        raise FileNotFoundError(f"Couldn't find file {filepath}")
    reader = io.TextIOWrapper(io.BytesIO(data), encoding='utf-8')
    signatures = OrderedDict()
    for raw_line in reader.readlines():
        # Blank lines separate records and carry no signature.
        if raw_line == '\n':
            continue
        record = json.loads(raw_line.strip())
        signatures[record['fingerprint']] = re.compile(record['pattern'], re.DOTALL)
    return signatures
def get_list_string(data: list, delim: str = '\t', fmt: str = '{}') -> str:
    """Render a 1D sequence as "[a0<delim> a1<delim> ... an]".

    Each element is formatted with *fmt*; elements are joined by *delim*
    followed by a single space.
    """
    body = (delim + " ").join(fmt.format(item) for item in data)
    return "[" + body + "]"
def mbar2kPa(mbar: float) -> float:
    """Convert a pressure reading from millibars to kilopascals."""
    kilopascals = mbar / 10
    return kilopascals
def user_requested_cassette_replacement(request):
    """Did the user ask to delete VCR cassettes?

    Args:
        request: pytest fixture request object.

    Returns:
        The value of the ``--replace-vcrs`` command line option.
    """
    return request.config.getoption("--replace-vcrs")
import re
def prep_for_search(string):
    """
    Expects a string. Encodes it in a search-friendly format: strip every
    character that is not alphanumeric or a space, lowercase the result,
    and replace spaces with "+".
    """
    cleaned = re.sub('[^A-Za-z0-9 ]+', '', string).lower()
    return cleaned.replace(" ", "+")
def twoSum(nums, target):
    """Return the indices of the two entries of *nums* that sum to *target*.

    Single pass with a value->index map: O(n) time, O(n) space. Replaces
    the original sort-and-scan, which printed debug output, mutated the
    caller's list (via nums.reverse()), could index past the end of the
    sorted list, and fell through to an implicit None via a bare `pass`.

    :type nums: List[int]
    :type target: int
    :rtype: List[int] or None when no pair exists
    """
    seen = {}  # value -> index where it was last seen
    for index, value in enumerate(nums):
        complement = target - value
        if complement in seen:
            return [seen[complement], index]
        seen[value] = index
    return None
import sys
def write_arpa(prob_list, out=sys.stdout):
    """Convert an lists of n-gram probabilities to arpa format

    The inverse operation of :func:`pydrobert.torch.util.parse_arpa_lm`

    Parameters
    ----------
    prob_list : list of dict
    out : file or str, optional
        Path or file object to output to
    """
    if isinstance(out, str):
        with open(out, "w") as fp:
            return write_arpa(prob_list, fp)
    # Normalise keys: unigram keys become 1-tuples so every order is
    # handled uniformly; entries within each order are sorted.
    entries_by_order = [
        sorted((key, val) if order else ((key,), val) for key, val in dct.items())
        for order, dct in enumerate(prob_list)
    ]
    out.write("\\data\\\n")
    for order, entries in enumerate(entries_by_order):
        out.write("ngram {}={}\n".format(order + 1, len(entries)))
    out.write("\n")
    highest = len(entries_by_order) - 1
    for order, entries in enumerate(entries_by_order):
        out.write("\\{}-grams:\n".format(order + 1))
        if order == highest:
            # Highest order: "<ngram> <logprob>", no backoff weight.
            for ngram, val in entries:
                out.write("{} {}\n".format(" ".join(ngram), val))
        else:
            # Lower orders: "<logprob> <ngram> <backoff>".
            for ngram, val in entries:
                out.write("{} {} {}\n".format(val[0], " ".join(ngram), val[1]))
        out.write("\n")
    out.write("\\end\\\n")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.