content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def get_pubkey(elem) -> str:
    """Fetch the primary-key attribute ('key') from a tag element."""
    key_value = elem.get('key')
    return key_value
def getcoroutinelocals(coroutine):
    """Return a mapping of the coroutine's local variable names to their
    current bound values.

    An empty dict is returned when the coroutine has no frame (e.g. it has
    already finished, or the object lacks a ``cr_frame`` attribute).
    """
    frame = getattr(coroutine, "cr_frame", None)
    return frame.f_locals if frame is not None else {}
from unittest.mock import call
def info_default(proc, var):
    """Wrapper for 'info default': build the corresponding mock ``call``
    to ``safe_info_default``."""
    expected = call("safe_info_default", proc, var, to=tuple)
    return expected
import re

# Matches http:// or https:// URLs (scheme plus allowed URL characters).
_URL_RE = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')

def get_urls(clean_text):
    """Return a list of potentially clickable URLs found in *clean_text*.

    Fixes two defects in the previous version: ``urls`` was overwritten on
    each iteration (so only the last string's matches survived) and an
    empty input raised ``NameError``.

    :param clean_text: iterable of strings to scan
    :return: list of all URL substrings found, in input order
    """
    urls = []
    for text in clean_text:
        urls.extend(_URL_RE.findall(text))
    return urls
def prepare_credential_content(filename, itype=False, use_file=True):
    """
    Format credential text for inclusion in RADL templates.

    :param filename: path of a credential file, or (when ``use_file`` is
        False) the raw credential bytes themselves
    :param itype: when truthy, indent every line by four spaces
    :param use_file: treat *filename* as a path and read it from disk
    :return: the formatted content as a single string
    """
    if use_file:
        with open(filename) as handle:
            lines = handle.readlines()
    else:
        lines = ['%s\n' % raw.decode('utf-8') for raw in filename.split(b'\n')]
    prefix = '    ' if itype else ''
    return ''.join(prefix + line for line in lines)
def round_tat(num):
    """Convert a float turnaround time (in hours) to natural language,
    e.g. 1.5 -> '1 hour and 30 minutes.'."""
    minutes_part = num % 1
    if num.is_integer():
        whole = int(num)
        unit = "hour." if whole == 1 else "hours."
        return "{} {}".format(whole, unit)
    minutes = int(minutes_part * 60)
    hours = int(num - minutes_part)
    if num < 1:
        return "{} minutes.".format(minutes)
    if hours == 1:
        return "{} hour and {} minutes.".format(hours, minutes)
    return "{} hours and {} minutes.".format(hours, minutes)
import idna
def domain_to_idna(passed_domain):
    """
    Encode a unicode domain to IDNA bytes.

    Bytes input is assumed to be encoded already and is returned unchanged.
    Falls back from IDNA 2008 to the legacy (non-compatible) IDNA 2003
    codec, and finally to the original string when encoding fails entirely.

    The previous version used bare ``except:`` clauses, which also swallowed
    ``KeyboardInterrupt``/``SystemExit``; these are narrowed here.

    :param passed_domain: bytes or str object
    :return: bytes domain in idna format (or the original str on failure)
    """
    if not isinstance(passed_domain, str):
        # Already bytes: return as-is.
        return passed_domain
    unicode_domain = passed_domain
    try:
        # Preferred path: IDNA 2008.
        return idna.encode(unicode_domain)
    except idna.IDNAError:
        pass
    try:
        # Fall back to the older IDNA 2003 codec.
        return unicode_domain.encode('idna')
    except UnicodeError:
        # Could not encode at all; return the string as is.
        return unicode_domain
def score_pop(
    population_value, min_population_acceptable=10, max_population_acceptable=30
):
    """Linearly score a population value into [0, 1].

    Values above the maximum saturate at 1, values below the minimum
    saturate at 0, and values in between are scaled linearly.

    :param population_value: population value to be scored
    :param min_population_acceptable: lower bound of the scoring range
    :param max_population_acceptable: upper bound of the scoring range
    :return: a score value between 0 and 1
    """
    lo = min_population_acceptable
    hi = max_population_acceptable
    if population_value > hi:
        return 1.0
    if population_value < lo:
        return 0.0
    return (population_value - lo) / (hi - lo)
from typing import Mapping
from typing import List
def get_docstring(sections: Mapping[str, List[str]]) -> List[str]:
    """Get (unindented) docstring from docker-compose <cmd> --help, using
    the general and usage sections.

    Unlike the previous version, the input mapping is NOT mutated: the old
    ``lines += ...`` appended to ``sections["general"]`` in place.

    :param sections: Output from `collect_help_lines`
    :return: the combined docstring lines
    """
    lines = list(sections["general"])
    if usages := sections.get("usage", []):
        lines += ["Usage:"] + usages
    return lines
import random
def cpu_choise():
    """Randomly pick five of the seven available colors and return them
    as a list (no repeats)."""
    palette = ("green", "red", "blue", "pink", "yellow", "orange", "gray")
    return random.sample(palette, 5)
def _first_upper(k):
"""Returns string k, with the first letter being upper-case"""
return k[0].upper() + k[1:] | f28bfa9f6457c4c0b3d75704e44486f31f247831 | 34,535 |
def last_third_first_third_mid_third(seq):
    """Reorder *seq*: last third first, then first third, then middle third."""
    third = len(seq) // 3
    last_part = seq[-third:]
    first_part = seq[:third]
    middle_part = seq[third:-third]
    return last_part + first_part + middle_part
def has_samples(args):
    """Return whether some kind of sample option is present on the parsed
    command-line args (truthy if any of sample/samples/sample_tag is set)."""
    result = args.sample
    if not result:
        result = args.samples
    if not result:
        result = args.sample_tag
    return result
def graph_order(graph):
    """Return the order (number of vertices) of *graph*."""
    vertices = graph.get_vertices()
    return len(vertices.keys())
def find_all_indexes(text, pattern):
    """Return a list of starting indexes of all (possibly overlapping)
    occurrences of pattern in text, or an empty list if not found.

    An empty pattern matches at every index of *text*.

    Fixes from the previous version: ``len(pattern) is not 0`` compared
    identity of ints (fragile, SyntaxWarning on modern Python) and the
    pattern assertion message printed *text* instead of *pattern*.

    Time Complexity: O(n*m) worst case for n candidate positions, pattern length m.
    Space Complexity: O(k) for the k matched indexes.
    """
    assert isinstance(text, str), 'text is not a string: {}'.format(text)
    assert isinstance(pattern, str), 'pattern is not a string: {}'.format(pattern)
    if pattern:
        # Last index at which the pattern could still fit.
        max_index = len(text) - (len(pattern) - 1)
    else:
        # Empty pattern matches everywhere.
        max_index = len(text)
    indexes = []
    for index in range(max_index):
        if text[index:index + len(pattern)] == pattern:
            indexes.append(index)
    return indexes
def remove_close(df, primary_distance_cutoff):
    """
    Drop rows whose |aa1_loc - aa2_loc| is below *primary_distance_cutoff*.

    Intended for a merged dataframe of residue pairs: comparisons between
    amino acids close in primary-sequence space are less interesting, so
    only pairs at or beyond the cutoff are kept.  Common cutoffs are ~6
    (medium/long range interactions) and ~12 (long range only).
    Adds an 'abs_diff_in_loc' column and resets the index.
    """
    df['abs_diff_in_loc'] = (df['aa1_loc'] - df['aa2_loc']).abs()
    keep_mask = df['abs_diff_in_loc'] >= primary_distance_cutoff
    df = df[keep_mask]
    df.reset_index(drop=True, inplace=True)
    return df
def reload_attribute(model_instance, attr_name):
    """Fetch the stored (database) value of a model instance attribute.

    :param model_instance: Model instance.
    :param attr_name: Attribute name to fetch.
    """
    model_cls = type(model_instance)
    values = model_cls.objects.filter(id=model_instance.id).values_list(
        attr_name, flat=True)
    return values[0]
import doctest
def doctest_test():
    """Run all doctests found in this module's docstrings and report the
    results."""
    results = doctest.testmod()
    print(results)
    return results
import os
def get_jpeg(path):
    """
    Return the paths of all '.jpg' files directly inside *path*.

    Note: the file name is string-concatenated onto *path*, so *path*
    should end with a separator.

    :param path: directory to scan
    """
    jpg_suffix = ".jpg"
    return [path + name for name in os.listdir(path) if name.endswith(jpg_suffix)]
import json
def make_json(data_dict, simple=None):
    """Make well-formatted JSON for insertion into cascade word docs.

    The JSON is wrapped in '$' characters, e.g. '${"key":"value"}$'.
    One-line ("simple") output is used when *simple* is True, or by default
    when the dict holds exactly one key whose value is not a dict;
    otherwise the JSON is pretty-printed over multiple lines.
    """
    if simple is None:
        # Default: a single non-dict value fits comfortably on one line.
        simple = len(data_dict) == 1 and not any(
            isinstance(value, dict) for value in data_dict.values())
    if simple:
        body = json.dumps(data_dict, separators=(', ', ':'))
        return '${}$'.format(body)
    body = json.dumps(data_dict, indent=4, separators=(',', ':'))
    return '${}$'.format(body).replace('${\n    ', '${')
import collections
def get_sorted_transitive_dependencies(top, deps_func):
    """Return all transitive dependencies of *top* in dependency order.

    The result is post-order: every node appears at a higher index than all
    of its dependencies.  Ties keep the original discovery order.  The
    dependency graph must be acyclic (cycles crash via infinite recursion).

    Args:
      top: A list of the top level nodes.
      deps_func: A function that takes a node and returns a list of its
        direct dependencies.
    Returns:
      A list of all transitive dependencies of nodes in top, in order.
    """
    visited = collections.OrderedDict()

    def visit(node):
        # Depth-first: record a node only after all its deps are recorded.
        if node in visited:
            return
        children = deps_func(node)
        for child in children:
            visit(child)
        visited[node] = children

    for root in top:
        visit(root)
    return list(visited)
def r1_p_r2(R1, R2):
    """
    Resistance of two resistors in parallel: R1*R2 / (R1 + R2).
    """
    product = R1 * R2
    total = R1 + R2
    return product / total
def enum2str(enumType, enum):
    """
    Translate a pokerth_pb2 enum value to its identifier string.

    :param enumType: enum type class (name -> value mapping)
    :param enum: the enum element of the type
    :return: identifier string of the first name matching *enum*
    """
    matches = [name for name, value in enumType.items() if value == enum]
    return matches[0]
def get_data_from_context(context):
    """Get the django paginator data object from the given *context*.

    The context is a dict-like object.  An error is raised when the
    ``endless`` key is missing.
    """
    try:
        data = context['endless']
    except KeyError:
        raise Exception('Cannot find endless data in context.')
    return data
def window_width(g_core: float, t_in: float, t_out: float, g: float, t_r: float, g_r: float) -> float:
    """
    Calculated winding-window width (rounded to 0.1 mm).

    x g_core - distance between the core and the inner winding [mm]
    x t_in   - thickness of the inner winding [mm]
    x t_out  - thickness of the outer winding [mm]
    x g      - the main gap [mm]; also counted a second time as the phase
               distance at the end of the windings
    x t_r    - width of the regulating winding [mm]
    x g_r    - distance between the outer winding and the regulating winding [mm]
    """
    total = g_core + t_in + t_out + g + t_r + g_r + g
    return round(total, 1)
import yaml
def _parse_top_cfg(content):
    """Parse a top_cfg document, allowing it to be YAML.

    Returns the parsed list when *content* is a YAML list; otherwise
    (including on parse errors) falls back to one entry per line.
    The previous version bound the exception to an unused variable.
    """
    try:
        parsed = yaml.safe_load(content)
        if isinstance(parsed, list):
            return parsed
    except Exception:
        # Not valid YAML; fall through to the line-based format.
        pass
    return content.splitlines()
def build_key_name(app_name, os_name, file_name):
    """
    Build an S3 bucket key from app name, OS and filename.

    App and OS names are lower-cased with spaces removed.

    :param app_name: app name
    :param os_name: OS the app is written for
    :param file_name: the name of the file
    :return: S3 bucket key for the given app/os/filename combination
    """
    app_part = app_name.replace(" ", "").lower()
    os_part = os_name.replace(" ", "").lower()
    return "/".join([app_part, os_part, file_name])
import re
def name_in_string(string: str, name: str) -> bool:
    """Check whether the letters of *name* occur in *string* in order
    (case-insensitively), possibly separated by other characters.

    Examples:
        >>> assert name_in_string("Across the rivers", "chris")
        >>> assert not name_in_string("Next to a lake", "chris")
        >>> assert not name_in_string("A crew that boards the ship", "chris")
        >>> assert name_in_string("thomas", "Thomas")
    """
    pattern = re.compile('(?i)' + '.*'.join(name))
    return pattern.search(string) is not None
def filter_boxes(boxes, skip_ts=int(5e5), min_box_diag=60, min_box_side=20):
    """Filter detection boxes according to the paper rule.

    Defaults correspond to GEN4 resolution (1280x720); the video is assumed
    to start at time 0.  A box is kept when its timestamp is after
    *skip_ts*, its diagonal is at least *min_box_diag* and both sides are
    at least *min_box_side*.

    Args:
        boxes (np.ndarray): array whose columns are
            ['t','x','y','w','h','class_id','track_id','class_confidence']
            (example BBOX_DTYPE is provided in src/box_loading.py)
    Returns:
        boxes: the filtered rows
    """
    # gen1 uses diag 30 / side 10; gen2 uses diag 60 / side 20
    timestamps = boxes[:, 0]
    widths = boxes[:, 3]
    heights = boxes[:, 4]
    keep = (
        (timestamps > skip_ts)
        * (widths ** 2 + heights ** 2 >= min_box_diag ** 2)
        * (widths >= min_box_side)
        * (heights >= min_box_side)
    )
    return boxes[keep]
import re
def parse_expression(expr, local_data=None):
    """Tokenize a math expression string into a REVERSED list of tokens.

    Each token is a dict with 'type' ('number', 'operator' or 'end') and a
    'value'.  Identifiers are resolved to float values via *local_data*
    (typically ``locals()`` or a defaultdict returning 0); functions are not
    allowed.  '(' and ')' are deliberately swapped when tokenized, since the
    token list is reversed before being returned — presumably so a consumer
    can process the reversed stream left to right (TODO confirm with caller).

    :param expr: expression string to tokenize
    :param local_data: mapping of variable names to numeric values
    :raises Exception: on undefined variables, non-numeric variable values,
        or unrecognized characters
    """
    i = 0
    data = []
    while True:
        if i >= len(expr):
            break
        if expr[i] == ' ':
            # Whitespace is skipped.
            i += 1
        elif re.search(r'^[\d,.]+', expr[i:]):
            # Numeric literal: consume digits, commas and dots.
            # NOTE(review): the inner pattern also allows a leading '-',
            # but only once scanning has started — confirm whether negative
            # literals are intended to be recognized here.
            number = []
            while re.search('^-?[\d,.]+', expr[i:]):
                number.append(expr[i])
                i += 1
            data.append({'type': 'number', 'value': ''.join(number)})
        elif re.search(r'^[_\w][_\w\d]*', expr[i:]):
            # If we find any identifier starting with _ or [a-zA-Z] then this is a variable
            # We will attempt to get it's value from the variables dictionary (local_data)
            # This local_data will usually be set to locals() or a defaultdict that returns 0
            number = []
            while re.search(r'^[_\w][_\w\d]*', expr[i:]):
                number.append(expr[i])
                i += 1
            # Grab value from local_data and append as a number
            # Functions are not allowed
            variable_name = ''.join(number)
            if not local_data or variable_name not in local_data:
                raise Exception('Variable not defined: %s' % variable_name)
            variable = local_data[variable_name]
            try:
                variable = float(variable)
            except ValueError:
                raise Exception('Invalid variable type found for: %s, with type: %s' % (variable_name, type(variable)))
            data.append({'type': 'number', 'value': variable})
        elif re.search(r'\+|\-|/', expr[i]):
            # Single-character operators (+, -, /).
            data.append({'type': 'operator', 'value': expr[i]})
            i += 1
        elif expr[i] == ')':
            # Parentheses swapped on purpose: the list is reversed below.
            data.append({'type': 'operator', 'value': '('})
            i += 1
        elif expr[i] == '(':
            data.append({'type': 'end', 'value': ')'})
            i += 1
        elif expr[i] == '*':
            # Distinguish exponentiation '**' from multiplication '*'.
            if i + 1 < len(expr) and expr[i + 1] == '*':
                data.append({'type': 'operator', 'value': '**'})
                i += 2
            else:
                data.append({'type': 'operator', 'value': '*'})
                i += 1
        else:
            raise Exception('Character is not an operator or a number: ', expr[i])
    return [x for x in reversed(data)]
from typing import Dict
from typing import List
from typing import Set
from typing import Tuple
def _validate_reply(
reply: Dict[str, List[str]], performatives_set: Set[str]
) -> Tuple[bool, str]:
"""
Evaluate whether the reply structure in a protocol specification is valid.
:param reply: Reply structure of a dialogue.
:param performatives_set: set of all performatives in the dialogue.
:return: Boolean result, and associated message.
"""
performatives_set_2 = performatives_set.copy()
for performative in reply.keys():
if performative not in performatives_set_2:
return (
False,
"Performative '{}' specified in \"reply\" is not defined in the protocol's speech-acts.".format(
performative,
),
)
performatives_set_2.remove(performative)
if len(performatives_set_2) != 0:
return (
False,
"No reply is provided for the following performatives: {}".format(
performatives_set_2,
),
)
return True, "Reply structure is valid." | 2243e60edd6497a7f699676be5fe765711db4134 | 34,563 |
def get_contrib_read_ids(indexes, reads):
    """
    Map a set of read indexes (from assign_reads) to the set of aligned
    read IDs (BAM query IDs) they cover.

    :param indexes: iterable of indexes into *reads*
    :param reads: sequence mapping each index to its aligned read IDs
    :return: set of corresponding aligned read IDs
    """
    return {read_id for idx in indexes for read_id in reads[idx]}
import os
def is_mp3(path):
    """
    Heuristically determine whether a file looks like an MP3.

    AppleDouble resource files ('._' prefix) are rejected outright; a
    '.mp3' extension (any case) must also point at an existing regular
    file.

    @param str path
        The path to the file to check.
    @return bool
        True if the file seems like an MP3, False if not.
    """
    if path.startswith('._'):
        return False
    looks_like_mp3 = path.lower().endswith('.mp3')
    return looks_like_mp3 and os.path.isfile(path)
from typing import Optional
def _builtin_schema(type_name: str, type_format: Optional[str],
event_name: str, datatype: type) -> dict:
"""
Build type schema for predefined datatypes
"""
schema = {
"type": "object",
"required": [
event_name
],
"properties": {
event_name: {
"type": type_name,
}
},
"description": f"{event_name} {type_name} payload"
}
if type_format is not None:
schema['properties'][event_name]['format'] = type_format # type: ignore
return schema | 0ab5cdf1a99dd724ac1894a09551769f400d83e4 | 34,571 |
def scs(qs):
    """Count occurrences of each state in qs / each unigram in bis."""
    counts = {}
    for state in qs:
        if state in counts:
            counts[state] += 1
        else:
            counts[state] = 1
    return counts
import re
def simple_sql_parse(data, periscope_type):
    """Extract the schema-qualified table names used in SQL code via regex.

    Args:
        data: iterable of rows with 'SQL_CODE_RAW' and 'NAME' keys
        periscope_type (str): 'view' or 'chart'
    Returns:
        tables_list (list of dict): one table/periscope entity per row
    """
    schema_pattern = r"\b((accounting|dim_models|raw|data_marts|base)\.\w*)\b"
    print("Extracting table names used in {}s...\n".format(periscope_type))
    tables_list = []
    for row in data:
        matches = re.findall(schema_pattern, row['SQL_CODE_RAW'], re.S | re.I)
        # Group 0 of each match tuple is the full schema.table name.
        unique_tables = set("".join(match[0]) for match in matches)
        for table_name in unique_tables:
            tables_list.append({
                'PERISCOPE_NAME': row['NAME'],
                'PERISCOPE_TYPE': periscope_type,
                'TABLE_NAME': table_name,
            })
    return tables_list
def use_autoparal(request):
    """
    Fixture helper exposing the parametrized value, so tests can run with
    both autoparal True and False.
    """
    param_value = request.param
    return param_value
def default_cost(alignment_matrix, i, j, tokens_1, tokens_2):
    """
    Constant edit cost: insertion, deletion and substitution all cost 1,
    regardless of the inputs.
    """
    unit_cost = 1
    return unit_cost
def summary_stats_from_file(input_file):
    """
    :param input_file: name of a file with the following layout:
        1) a header line of measure names separated by tabs
        2) any number of lines of the form
           classifier name: \t measure_value_0 \t measure_value_1 \t etc.
    :return: dict mapping each classifier name to a dict of
        measure name -> value

    Fixes over the previous version: ``summary[name]`` was never
    initialized (KeyError on the first data line), the measure names were
    appended as a nested list which broke the zip pairing, and the file
    handle was never closed.
    """
    summary = {}
    measures_names = []
    with open(input_file) as handle:
        for i, line in enumerate(handle):
            line = line.rstrip('\n')
            if i == 0:
                # Header: one measure name per tab-separated field.
                measures_names = [x for x in line.split('\t') if len(x)]
            else:
                name = line[:line.find(':')]
                values = line[line.find('\t') + 1:].split('\t')
                summary.setdefault(name, {})
                for measure_name, value in zip(measures_names, values):
                    summary[name][measure_name] = value
    return summary
import argparse
def get_arguments():
    """Parse command-line arguments for downsampling ModelNet40 by category.

    :return: the parsed argparse namespace
    """
    parser = argparse.ArgumentParser("Downsample ModelNet40 by category.")
    # Required and optional argument groups:
    required = parser.add_argument_group('Required')
    optional = parser.add_argument_group('Optional')
    required.add_argument(
        "-i", dest="input", required=True,
        help="Input path of ModelNet 40 dataset in off format.",
    )
    required.add_argument(
        "-o", dest="output", required=True,
        help="Output path of ModelNet 40 dataset in ply format.",
    )
    return parser.parse_args()
import traceback
def get_loan_budget_details(obj):
    """Petty cash: count of the expense-budget detail rows on *obj*.

    :param obj: object exposing a ``LoanBudgetDetails`` relation
    :return: ``obj.LoanBudgetDetails.count()``, or 0 on any failure
    """
    try:
        return obj.LoanBudgetDetails.count()
    except Exception:
        # Narrowed from a bare except (which also swallowed SystemExit /
        # KeyboardInterrupt); still best-effort, logging the traceback.
        traceback.print_exc()
        return 0
def saved_certificate_to_cnf(file_path):
    """Load a CNF certificate from a file.

    Lines of the form ``v <lit> <lit> ...`` (where the third character is
    not '0') are parsed; a literal ``k`` yields ``('xk', True)`` when
    positive and ``('x|k|', False)`` when negative.  The previous version
    leaked the file handle; it is now closed via ``with``.

    Parameters
    ----------
    file_path : string
        Path of the file that contains the certificate of a cnf.

    Returns
    -------
    set[(string, bool)]
        The object that represents the loaded certificate.
    """
    cert = set()
    with open(file_path, "r") as handle:
        for line in handle:
            if line[0] == 'v' and line[2] != '0':
                for literal in line.split(' ')[1:]:
                    value = int(literal)
                    cert.add(('x{}'.format(abs(value)), value > 0))
    return cert
def speaking_player(bot, state):
    """A player that picks a random legal move and announces it."""
    chosen = bot.random.choice(bot.legal_positions)
    bot.say(f"Going {chosen}.")
    return chosen
def permission_classes(permission_classes):
    """
    Decorator factory that attaches authorization requirements to a view
    function via its ``permission`` attribute.
    """
    def attach(func):
        func.permission = permission_classes
        return func
    return attach
def trim(d, prepended_msg):
    """Remove the prepended *prepended_msg* from every key of dictionary *d*."""
    stripped = {}
    for key, value in d.items():
        stripped[key.split(prepended_msg)[1]] = value
    return stripped
from datetime import datetime
def part_of_day() -> str:
    """Classify the current local time of day.

    Returns:
        str:
            'Morning', 'Afternoon', 'Evening' or 'Night' based on the
            current hour.
    """
    now = datetime.now()
    meridiem = now.strftime("%p")
    hour_12 = int(now.strftime("%I"))
    if meridiem == 'AM' and 4 <= hour_12 < 12:
        return 'Morning'
    if meridiem == 'PM' and (hour_12 == 12 or 1 <= hour_12 < 4):
        return 'Afternoon'
    if meridiem == 'PM' and 4 <= hour_12 < 8:
        return 'Evening'
    return 'Night'
def format_tarball_url(package):
    """
    Build the GitHub tarball URL for a package dict.

    >>> format_tarball_url({'owner': 'elm-lang', 'project': 'navigation', 'version': '2.0.0'})
    'https://github.com/elm-lang/navigation/archive/2.0.0.tar.gz'
    """
    return ("https://github.com/{owner}/{project}/archive/{version}.tar.gz"
            .format(**package))
def _GetChartFactory(chart_class, display_class):
"""Create a factory method for instantiating charts with displays.
Returns a method which, when called, will create & return a chart with
chart.display already populated.
"""
def Inner(*args, **kwargs):
chart = chart_class(*args, **kwargs)
chart.display = display_class(chart)
return chart
return Inner | 10741f18bf78bb2b7f301e50b7a98306678d6a31 | 34,592 |
import os
def listdir(directory, split_ext=False):
    """List *directory*, optionally stripping file extensions.

    Returns an empty list when the directory cannot be read."""
    try:
        entries = os.listdir(directory)
    except OSError:
        return []
    if split_ext:
        entries = [os.path.splitext(entry)[0] for entry in entries]
    return entries
def update_vcf_motifs_info(outline, names, varscores, refscores, varht, refht,
                           vargc, refgc, options, chips, col):
    """
    Append motif INFO fields to a VCF line being built by update_vcf().
    Keeping the update code in one place reduces maintenance.

    Args:
        outline     line built so far
        names       set of match strings; matching MOTIF
        *scores     string of 4 digit decimal, set of scores as string
        *ht         homotypic-hit strings
        *gc         string of 4 digit decimal; gc count (refgc == ""
                    disables the HT/GC fields)
        options     options list (``chip_present`` flag is read)
        chips       Y or N
        col         current column string ('.' means none)
    Returns:
        Modified copy of the passed outline string
    """
    fields = ["MOTIFN=" + names, "MOTIFV=" + varscores, "MOTIFR=" + refscores]
    if refgc != "":
        fields.append("MOTIFVH=" + varht + ";MOTIFRH=" + refht)
        fields.append("MOTIFVG=" + vargc + ";MOTIFRG=" + refgc)
    if options.chip_present:
        fields.append("MOTIFC=" + chips)
    result = outline + ";".join(fields)
    if col != '.':
        result += ";" + col
    return result
def generate_big_data():
    """
    Generate some sample data (the integers 0..999).

    The data=True in the job decorator tells jobflow to store all outputs
    in the "data" additional store.
    """
    return [value for value in range(1000)]
import glob
def find_file(path):
    """
    Find files matching a glob pattern.

    Parameters
    ----------
    path : str
        Path and/or pattern to find files.

    Returns
    -------
    str or list of str
        A single path when exactly one file matches, otherwise the list
        of matching paths.

    Raises
    ------
    ValueError
        When nothing matches *path*.
    """
    matches = glob.glob(path)
    if len(matches) == 0:
        # Report the pattern itself; the old message formatted the (empty)
        # result list instead of the search pattern.
        raise ValueError("No such file {}".format(path))
    if len(matches) == 1:
        return matches[0]
    return matches
from typing import List
def parse_tags(s: str) -> List[str]:
    """
    Parse a comma separated tags str into a list of tags.

    Quoted segments (single or double quotes) may contain commas; each tag
    is stripped of surrounding whitespace and empty tags are dropped.

    >>> parse_tags('one tag')
    ['one tag']
    >>> parse_tags(' strip left and right ends ')
    ['strip left and right ends']
    >>> parse_tags('two, tags')
    ['two', 'tags']
    >>> parse_tags('"quoted, string" is one tag')
    ['quoted, string is one tag']
    >>> parse_tags(', empty tags, , will be skipped, ')
    ['empty tags', 'will be skipped']

    Args:
        s: The comma separated tags str.
    Returns:
        The parsed tags.
    """
    tags: List[str] = []
    current: List[str] = []
    quote_char = None

    def flush() -> None:
        # Emit the buffered characters as a tag (if non-empty after strip).
        tag = ''.join(current).strip()
        if tag:
            tags.append(tag)
        current.clear()

    for ch in s:
        if quote_char is not None:
            if ch == quote_char:
                quote_char = None
            else:
                current.append(ch)
        elif ch == '"' or ch == '\'':
            quote_char = ch
        elif ch == ',':
            flush()
        else:
            current.append(ch)
    flush()
    return tags
def bfs(initial_state, step_fn, eval_fn, max_depth=None, not_found_value=None):
    """Breadth-first search over a state graph.

    :param initial_state: starting state (must be hashable).
    :param step_fn: callable ``(state, depth) -> iterable of next states``.
    :param eval_fn: callable ``(state, depth)``; the first non-None return
        stops the search and becomes the result.  NOTE: a falsy-but-not-None
        value (e.g. 0) also stops the search.
    :param max_depth: stop expanding beyond this depth (None = unbounded).
    :param not_found_value: returned when the search space is exhausted.
    :return: the first non-None value produced by *eval_fn*, else
        *not_found_value*.
    """
    queue2 = [initial_state]   # frontier of the current depth
    states = set(queue2)       # every state ever enqueued (visited set)
    depth = 0
    value = eval_fn(initial_state, depth)
    if value is not None:
        return value
    while queue2 and (max_depth is None or depth < max_depth):
        depth += 1
        queue1 = queue2        # frontier to expand at this depth
        queue2 = []            # next frontier being built
        for state in queue1:
            for s in step_fn(state, depth):
                if s not in states:
                    # Evaluate new states as soon as they are discovered.
                    value = eval_fn(s, depth)
                    if value is not None:
                        return value
                    states.add(s)
                    queue2.append(s)
    return not_found_value
def encipher_kid_rsa(msg, key):
    """
    Encipher *msg* with a Kid-RSA public key ``(n, e)``: ``(msg*e) % n``.

    Examples
    ========
    >>> from sympy.crypto.crypto import (
    ...     encipher_kid_rsa, kid_rsa_public_key)
    >>> msg = 200
    >>> a, b, A, B = 3, 4, 5, 6
    >>> key = kid_rsa_public_key(a, b, A, B)
    >>> encipher_kid_rsa(msg, key)
    161
    """
    modulus, multiplier = key
    return (msg * multiplier) % modulus
def _ansible_verbose(verbose_level=1):
"""
Return an ansible verbose flag for a given Cliff app verbose
level to pass along desired verbosity intent.
"""
flag = ''
if verbose_level > 1:
flag = '-{}'.format("v" * (verbose_level - 1))
return flag | 0313b5f7c41858c6d2ecaba2275cb56cd89b628a | 34,603 |
def format_proxies(proxy_host, proxy_port, proxy_user=None, proxy_password=None):
    """Build the proxies dict for requests, or None when host/port missing.

    A leading http:// or https:// on *proxy_host* is stripped; credentials
    are embedded as ``user:password@`` when either is given.
    """
    if not (proxy_host and proxy_port):
        return None
    for scheme_prefix in ('http://', 'https://'):
        if proxy_host.startswith(scheme_prefix):
            proxy_host = proxy_host[len(scheme_prefix):]
            break
    if proxy_user or proxy_password:
        user_part = proxy_user if proxy_user is not None else ''
        pass_part = proxy_password if proxy_password is not None else ''
        proxy_auth = '{}:{}@'.format(user_part, pass_part)
    else:
        proxy_auth = ''
    endpoint = 'http://{}{}:{}'.format(proxy_auth, proxy_host, str(proxy_port))
    return {'http': endpoint, 'https': endpoint}
import math
def neg_exp_distribute(mean_time, U):
    """
    Transform uniform samples into a negative-exponential series via
    X = -mean * ln(U).

    Parameters:
    -----------
    mean_time: mean time
    U: a list of uniform samples

    Return:
    -------
    X: Generated times (interarrival time or service time)
    """
    scale = -1.0 * mean_time
    return [scale * math.log(u) for u in U]
def figshare_metadata_readme(figshare_dict: dict) -> dict:
    """
    Build a shortened metadata dict for README generation.

    :param figshare_dict: Figshare API response; either an article record
        or a curation record (detected by the presence of an 'item' key,
        under which curation responses nest the article)
    :return: README metadata based on Figshare response
    """
    readme_dict = {}
    if 'item' in figshare_dict:
        # Curation response: article data lives under 'item'.
        print("figshare_metadata_readme: Using curation responses")
        readme_dict['article_id'] = figshare_dict['item']['id']
        readme_dict['curation_id'] = figshare_dict['id']
        figshare_dict = figshare_dict['item']
    else:
        readme_dict['article_id'] = figshare_dict['id']
    single_str_citation = figshare_dict['citation']
    # Handle period in author list. Assume no period in dataset title
    # NOTE(review): this split assumes the citation has the shape
    # "Authors (year): Title. ..." — confirm against the Figshare API;
    # a citation without '): ' would raise IndexError here.
    author_list = ([single_str_citation.split('):')[0] + ').'])
    author_list += [str_row + '.' for str_row in
                    single_str_citation.split('): ')[1].split('. ')]
    readme_dict.update({
        'title': figshare_dict['title'],
        'description': figshare_dict['description'],
        'doi': f"https://doi.org/{figshare_dict['doi']}",
        'preferred_citation': author_list,
        'license': figshare_dict['license'],
        # 'summary' intentionally duplicates 'description'.
        'summary': figshare_dict['description'],
        'references': figshare_dict['references'],
    })
    return readme_dict
def add_response_tokens_to_option(_option, response_tokens, response_tokens_from_meta):
    """
    Merge response tokens onto a response option.

    Tokens from the decisioning rule meta form the base; tokens from the
    decisioning context override them on key collision.

    :param _option: (delivery_api_client.Model.option.Option) response option
    :param response_tokens: response tokens from decisioning context
    :param response_tokens_from_meta: response tokens from decisioning rule meta
    :return: the option, updated in place with merged response tokens
    """
    merged = dict(response_tokens_from_meta)
    merged.update(response_tokens)
    _option.response_tokens = merged
    return _option
import os
def get_timestamp(trg_dir):
    """Read the time stamp written to ``timestamp.csv`` in *trg_dir* during
    the last backup.

    Returns None when the timestamp file cannot be read, in which case all
    reports will be copied.

    Arguments:
    - `trg_dir`: where should we look for the time stamp file?
    """
    fname = os.path.join(trg_dir, 'timestamp.csv')
    try:
        with open(fname, 'r') as f:
            ts = f.read()
    except OSError:
        # A missing/unreadable file just means "no previous timestamp";
        # narrowed from a bare except.
        ts = None
    return ts
def pos_embed(x, position_num):
    """
    Position embedding index for *x*: shift by half of *position_num* and
    clamp into [0, position_num].
    """
    half_window = int(position_num / 2)
    shifted = x + half_window
    if shifted < 0:
        return 0
    if shifted > position_num:
        return position_num
    return shifted
def render_string(s, f, colour, background, antialiasing=True):
    """
    Render a string to a pygame Surface and its bounding Rect.

    Parameters:
    s: the string to render.
    f: the font in which to render s.
    colour: the colour of text to use, expressed as an RGB list or tuple.
    background: the background colour.

    Keyword Parameters:
    antialiasing: render with antialiasing; defaults to True.

    Returns:
    (surface, rect): the rendered pygame.Surface and its pygame.Rect.
    """
    surface = f.render(s, antialiasing, colour, background)
    rect = surface.get_rect()
    return surface, rect
def replace_entities(df, col_name, entity_dict):
""" A function to replace values in a Pandas df column given an entity dict, as created in associate_entities()
Args:
df (DataFrame) : A Pandas DataFrame
col_name (string) : A column in the Pandas DataFrame
entity_dict (dict) : A dictionary as created in the associate_entities() function
"""
if col_name not in df.columns:
raise ValueError("Column does not exist!")
df[col_name].replace(entity_dict, inplace=True)
return df | 71aa5bbf5f8a42a6fa7a85d51c280307dec2ee96 | 34,614 |
from typing import List
def clean_strings(string_list: List[str]) -> List[str]:
    """
    Clean up a list of strings ready to be cast to numbers.

    Each entry has surrounding whitespace removed first, then any leading
    or trailing commas stripped.
    """
    return [entry.strip().strip(",") for entry in string_list]
def vertical_path(size):
    """
    Create a generator that walks an image column by column.

    :param size: a (width, height) tuple describing the image size
    :return: a generator yielding one column per image x-coordinate; each
        column is itself a generator of (x, y) pixel coordinates from top
        to bottom.
    """
    width, height = size
    return (
        ((col, row) for row in range(height))
        for col in range(width)
    )
def getProperties(object):
    """
    Extract an object's properties from the FBX tree.

    Looks for a ``Properties70`` child and reads every ``P`` element under
    it. Each ``P`` holds comma-separated text; the first token is used as
    the key (unstripped, matching historical behaviour) and the remaining
    tokens, whitespace-stripped, become the value list.

    Returns an empty dict when the object has no ``Properties70`` child.
    """
    props = {}  # avoid shadowing the builtin `dict`
    prop70 = object.find("Properties70")
    if prop70 is not None:
        for prop in prop70.findall("P"):
            parts = prop.text.split(",")
            # The old code re-assigned this same key once per token in a
            # redundant inner loop; a single assignment is equivalent.
            props[parts[0]] = [part.strip() for part in parts[1:]]
    return props
from unittest.mock import patch
def patch_init_modem():
    """Return a patcher that mocks out PhoneModem.initialize."""
    target = "homeassistant.components.modem_callerid.PhoneModem.initialize"
    return patch(target)
def hourly_info(x):
    """
    Return the hour component of the timestamp-like object *x*.
    """
    return x.hour
from typing import List
import shlex
def bash_and_fish_remove_path(value: List[str]) -> str:
    """Render shell code removing directories from the path (bash and fish).

    :param value: directories to remove from the path.
    :return: newline-separated ``remove_path`` invocations, one per entry,
        each argument safely shell-quoted.
    """
    commands = ["remove_path " + shlex.quote(entry) for entry in value]
    return "\n".join(commands)
def binary_encoder_decoder(obj):
    """
    Identity codec: the value is assumed to already be in binary format,
    so it is passed through unchanged.
    """
    return obj
def _find_max_power(grid, size):
"""Find the maximum power in the grid for a given size."""
size -= 1
max_power = grid[size][size]
max_coords = (1, 1)
for coord_x in range(300 - size):
for coord_y in range(300 - size):
power = grid[coord_x + size][coord_y + size]
if coord_x > 0:
power -= grid[coord_x - 1][coord_y + size]
if coord_y > 0:
power -= grid[coord_x + size][coord_y - 1]
if coord_x > 0 and coord_y > 0:
power += grid[coord_x - 1][coord_y - 1]
if power > max_power:
max_power = power
max_coords = (coord_x + 1, coord_y + 1)
return max_power, max_coords | fb451e3d8cad1b85cc014e43f917da74c207cb37 | 34,626 |
def retrieve_usernames(list_of_ids, dict_of_ids_to_names):
    """
    Look up usernames already fetched from Twitter, saving API requests.

    Raises KeyError if an id is missing from the mapping.
    """
    return [dict_of_ids_to_names[user_id] for user_id in list_of_ids]
def create_search_criterion_by_date(datetime, relative=None, sent=False):
    """Return an IMAP search criterion string for a date.

    .. versionadded:: 0.4

    :param relative: Can be one of 'BEFORE', 'SINCE', 'ON'; anything else
        falls back to 'SINCE'.
    :param sent: Search after "sent" date instead of "received" date.
    """
    if relative not in ('BEFORE', 'ON', 'SINCE'):
        relative = 'SINCE'
    # %b (abbreviated month name) is the documented, portable strftime
    # directive; %h is a glibc-only alias that fails on some platforms.
    formatted_date = datetime.strftime('%d-%b-%Y')
    prefix = 'SENT' if sent is True else ''
    return '{}{} {}'.format(prefix, relative, formatted_date)
def read_config(filename):
    """Read a ``key = value`` config file and return it as a dict.

    Keys and values are whitespace-stripped. Only the first ``=`` on a
    line splits it, so values may themselves contain ``=`` (the old
    ``split("=")`` crashed on those). Blank lines are skipped.

    Exits the process with status 1 (after printing a hint) when the
    file does not exist, matching the original behaviour.
    """
    try:
        with open(filename, 'r') as conf_file:
            lines = conf_file.readlines()
    except FileNotFoundError:
        print(
            "{0} not found. copy gcea.conf.example to {0}, edit, and retry"
            .format(filename)
        )
        raise SystemExit(1)
    config = {}
    for line in lines:
        if not line.strip():
            continue  # tolerate blank lines (old code crashed on them)
        key, value = line.split("=", 1)
        config[key.strip()] = value.strip()
    return config
def get_cleaned_string(string):
    """
    Return ``string`` with unnecessary special characters removed and
    whitespace stripped.

    Falsy input (``None`` or the empty string) is returned unchanged --
    the old code fell through to ``string.strip()`` and raised
    AttributeError on ``None`` despite its own ``if string:`` guard.
    """
    if not string:
        return string
    for token in ("'", '"', '{', '}', '[', ']', '%q', '<', '>', '.freeze'):
        string = string.replace(token, '')
    return string.strip()
def setitimer(which, seconds, interval=0):
    """Stub replacement for :func:`signal.setitimer`.

    Accepts the same arguments as the real function -- the timer selector
    (:const:`signal.ITIMER_REAL`, :const:`signal.ITIMER_VIRTUAL` or
    :const:`signal.ITIMER_PROF`), the delay in seconds (float accepted)
    and the repeat interval -- but performs no timer setup and always
    reports the previous timer as ``(0.0, 0.0)``.
    """
    return 0.0, 0.0
import os
def same_partition(f1, f2):
    """Return True when files/directories *f1* and *f2* live on the same
    device (partition), judged by ``st_dev``.
    """
    dev1 = os.stat(f1).st_dev
    dev2 = os.stat(f2).st_dev
    return dev1 == dev2
def evaluate(f, x, y):
    """Apply the binary function *f* to *x* and *y* and return the result."""
    result = f(x, y)
    return result
def withattr(**kwg):
    """
    Decorator factory that sets attributes on the decorated function.

    Example:
    >>> Class Model(DjangoModel):
    ...     @withattr(alters_data=True)
    ...     def delete(self):
    ...         delete(self)

    :param kwg: mapping of attribute name -> value to set on the function
    """
    def decorate(func):
        for attr_name, attr_value in kwg.items():
            setattr(func, attr_name, attr_value)
        return func
    return decorate
def pyimpl_universe_setitem(universe, handle, value):
    """Implement `universe_setitem` by delegating to ``universe.set``."""
    return universe.set(handle, value)
def test_fetch_doc_by_id(client, db, _id):
    """Fetch a document by id, report the outcome, and return the doc."""
    try:
        response = client.get_document(
            db=db,
            doc_id=_id
        ).get_result()
        if "error" not in response:
            print(f'SUCCESS GET: Document with id {_id}: {response}')
            return response
        raise ValueError(f'Document with id {_id} not found')
    except:
        # Flag the failure, then re-raise so the caller still sees it.
        print('FAILED FETCH:')
        raise
import copy
def filter_date_in_range(phase_dates, starttime, endtime):
    """
    Exclude phases date outside range, and handle its date boundary.

    Each entry of ``phase_dates`` is a mutable [start, end] pair. Entries
    with both boundaries outside the window are dropped; the first and
    last surviving entries are clipped to ``starttime`` / ``endtime``.
    Interior entries may keep a ``None`` boundary when only one side was
    out of range. NOTE(review): assumes at least one phase overlaps the
    window -- otherwise the clipping below raises IndexError. Confirm
    with callers.
    """
    # Deep copy so the caller's nested lists are never mutated.
    phases = copy.deepcopy(phase_dates)
    for item in phases:
        # Start dates are kept only inside [starttime, endtime).
        if not (item[0] >= starttime and item[0] < endtime):
            item[0] = None
        # End dates are kept only inside (starttime, endtime].
        if not (item[1] > starttime and item[1] <= endtime):
            item[1] = None
    # Drop phases whose boundaries were both marked out of range.
    new_phases = [
        item for item in phases if not (item[0] is None and item[1] is None)
    ]
    # Clip the surviving outer boundaries to the requested window.
    new_phases[0][0] = starttime
    new_phases[-1][1] = endtime
    return new_phases
import logging
def final_policy(policy_network, policy_params, alpha, x):
    """Roll out the final policy: a geometrically-weighted mixture of every
    stored policy evaluated at ``x``, with weights normalised by
    ``alpha / (1 - (1 - alpha)**n)``."""
    logging.info("jit-ing final_policy")
    n_policies = len(policy_params)
    prefactor = alpha / (1 - ((1 - alpha)**n_policies))
    total = None
    for idx, params in enumerate(policy_params):
        weight = prefactor * ((1 - alpha)**(n_policies - idx - 1))
        term = weight * policy_network.apply(params, x)
        total = term if total is None else total + term
    return total
def _common_gpipe_transformer_params(p):
"""Add GPipe params to layer."""
p.Define(
'is_transparent', False,
'If set, encoder outputs a list of layer outputs while decoder '
'expects a list of source input vectors.')
p.Define(
'num_transparent_outputs', 0,
'Number of transparent outputs. Only positive if this is the '
'last encoder')
p.Define('transparent_merger_tpl', None,
'Merger op for layer outputs. Not none if this is the last encoder')
return p | a848f0bc36a0a4c4fac2028d4c2b99026af1d180 | 34,646 |
def calculate_consonant_magnification(velocity):
    """Convert a consonant velocity into a magnification factor.

    A velocity of 100 maps to 1.0; each 100-point drop doubles the factor.
    """
    exponent = (100 - velocity) / 100
    return 2 ** exponent
def compare(initial, candidate):
    """
    Compare two shingle sequences and return a similarity percentage.

    :param initial: shingles of the reference sentence
    :param candidate: shingles of the compared sentence
    :return: similarity value in [0, 100]
    """
    shared = sum(1 for shingle in initial if shingle in candidate)
    return shared * 2 / float(len(initial) + len(candidate)) * 100
def field_to_int(field):
    """
    Return an integer representation of *field*, treating the
    placeholder "-" as zero.
    """
    return 0 if field == "-" else int(field)
def stress_model(strain, modulus):
    """
    Linear (Hookean) estimate of the stress-strain curve, for fitting
    data with scipy.

    Parameters
    ----------
    strain : array-like
        Experimental strain values, unitless (or with cancelled units,
        e.g. mm/mm).
    modulus : float
        Estimated elastic modulus in GPa (Pa * 10^9).

    Returns
    -------
    array-like
        Estimated stress under the linear model, in Pa.
    """
    # Evaluation order kept as strain * modulus first, then the GPa -> Pa
    # scale, so float results match the established fit exactly.
    return strain * modulus * 1e9
def vdiv_scalar(vector, scalar):
    """Divide both components of a 2-D vector by *scalar*."""
    x_part = vector[0] / scalar
    y_part = vector[1] / scalar
    return (x_part, y_part)
import time
def time_current():
    """Return the current time as whole seconds since the epoch."""
    now = time.time()
    return int(now)
from typing import List
def get_requirements() -> List[str]:
    """Read requirements.txt, relaxing exact pins (``==``) to minimums
    (``>=``); trailing newlines are preserved on each entry."""
    with open('requirements.txt', encoding='utf-8') as req_file:
        return [entry.replace('==', '>=') for entry in req_file]
from numpy import zeros, where
def calc_13_category_usda_soil_type(clay, sand, silt):
    """Calculate the 13 category usda soil type from the clay sand and silt
    fractions.

    NOTE(review): inputs are presumably percentages in [0, 100], and 255
    appears to be used as a no-data sentinel in ``clay`` (every rule
    requires ``clay != 255``) -- confirm against the upstream data source.

    Category codes written into the result:
    0 -- WATER
    1 -- SAND
    2 -- LOAMY SAND
    3 -- SANDY LOAM
    4 -- SILT LOAM
    5 -- SILT
    6 -- LOAM
    7 -- SANDY CLAY LOAM
    8 -- SILTY CLAY LOAM
    9 -- CLAY LOAM
    10 --SANDY CLAY
    11 --SILY CLAY
    12 --CLAY

    Parameters
    ----------
    clay : array-like
        Clay fraction per cell; cells with value 255 match no rule.
    sand : array-like
        Sand fraction per cell.
    silt : array-like
        Silt fraction per cell.

    Returns
    -------
    numpy.ndarray
        Array of ``clay.shape`` holding the category code per cell; cells
        matching no rule (including the 255 sentinel) stay 0 (WATER).
    """
    # Rules are applied in sequence on overlapping masks, so LATER
    # assignments overwrite earlier ones where conditions overlap
    # (e.g. rule 9 overrides rule 8 for sand in (40, 45]) -- the order
    # below is therefore significant.
    stype = zeros(clay.shape)
    stype[where((silt + clay * 1.5 < 15.) & (clay != 255))] = 1.  # SAND
    stype[where((silt + 1.5 * clay >= 15.) & (silt + 1.5 * clay < 30) &
                (clay != 255))] = 2.  # Loamy Sand
    stype[where((clay >= 7.) & (clay < 20) & (sand > 52) & (
        silt + 2 * clay >= 30) & (clay != 255))] = 3.  # Sandy Loam (cond 1)
    stype[where((clay < 7) & (silt < 50) & (silt + 2 * clay >= 30) &
                (clay != 255))] = 3  # sandy loam (cond 2)
    stype[where((silt >= 50) & (clay >= 12) & (clay < 27) &
                (clay != 255))] = 4  # silt loam (cond 1)
    stype[where((silt >= 50) & (silt < 80) & (clay < 12) &
                (clay != 255))] = 4  # silt loam (cond 2)
    stype[where((silt >= 80) & (clay < 12) & (clay != 255))] = 5  # silt
    stype[where((clay >= 7) & (clay < 27) & (silt >= 28) & (silt < 50) &
                (sand <= 52) & (clay != 255))] = 6  # loam
    stype[where((clay >= 20) & (clay < 35) & (silt < 28) & (sand > 45) &
                (clay != 255))] = 7  # sandy clay loam
    # NOTE(review): "silt clay loam" here keys on sand > 40, which looks
    # unusual for a silt-based class -- verify against the USDA triangle.
    stype[where((clay >= 27) & (clay < 40.) & (sand > 40) &
                (clay != 255))] = 8  # silt clay loam
    stype[where((clay >= 27) & (clay < 40.) & (sand > 20) & (sand <= 45) &
                (clay != 255))] = 9  # clay loam
    stype[where((clay >= 35) & (sand > 45) & (clay != 255))] = 10  # sandy clay
    stype[where((clay >= 40) & (silt >= 40) &
                (clay != 255))] = 11  # silty clay
    stype[where((clay >= 40) & (sand <= 45) & (silt < 40) &
                (clay != 255))] = 12  # clay
    return stype
def mapping_ids_dicts(unique_ids, filtered_align_df_positive):
    """ Build a dict mapping each id to the ids it aligns with, in either
    direction, from the filtered alignment DataFrame.

    Args:
        unique_ids (iterable) : ids to look up
        filtered_align_df_positive (DataFrame) : alignment rows; column 0
            holds source ids and column 2 holds target ids

    Returns:
        dict : id -> list of aligned ids (forward matches first, then
        reverse matches); ids with no matches at all are omitted
    """
    df = filtered_align_df_positive
    mapped = {}
    for uid in unique_ids:
        forward = df[df[0] == uid][2].tolist()
        backward = df[df[2] == uid][0].tolist()
        combined = forward + backward
        if combined:
            mapped[uid] = combined
    return mapped
import pickle
def load_dictionary(file_path):
    """
    Load a categorical-variable dictionary previously saved in pickle
    format and return the unpickled object.
    """
    with open(file_path, "rb") as dictionary_file:
        contents = pickle.load(dictionary_file)
    return contents
def evaluate(predictions):
    """ Calculates scores for the predictions.

    ``predictions`` is a pair of lists: the predicted labels for the
    actually-legit documents and for the actually-spam documents. Returns
    per-class precision/recall/F1 plus their macro averages.

    Raises ZeroDivisionError when any class has no predictions of the
    relevant kind (unchanged from the original behaviour).
    """
    legit_predictions, spam_predictions = predictions
    true_legit = legit_predictions.count("legit")   # true legit predictions
    false_legit = spam_predictions.count("legit")   # false legit predictions
    true_spam = spam_predictions.count("spam")      # true spam predictions
    false_spam = legit_predictions.count("spam")    # false spam predictions
    # Per-class precision / recall / F1, plus macro averages.
    legit_prec = true_legit / (true_legit + false_legit)
    spam_prec = true_spam / (true_spam + false_spam)
    macro_prec = (legit_prec + spam_prec) / 2
    legit_recall = true_legit / (true_legit + false_spam)
    spam_recall = true_spam / (true_spam + false_legit)
    macro_recall = (legit_recall + spam_recall) / 2
    legit_f = 2 * legit_prec * legit_recall / (legit_prec + legit_recall)
    spam_f = 2 * spam_prec * spam_recall / (spam_prec + spam_recall)
    macro_f = (legit_f + spam_f) / 2
    # Bug fix: 'spam_recall' previously reported spam_prec.
    return {'legit_prec': legit_prec, 'spam_prec': spam_prec, 'macro_prec': macro_prec,
            'legit_recall': legit_recall, 'spam_recall': spam_recall, 'macro_recall': macro_recall,
            'legit_f': legit_f, 'spam_f': spam_f, 'macro_f': macro_f}
import math
def p_largest_prime_factor(num : int, start : int, largest : int) -> int:
    """Find the largest prime factor of an odd number.

    Trial-divides by odd candidates from ``start`` upward, recursing on
    the reduced cofactor after each factor is stripped out. ``largest``
    carries the biggest factor found so far. NOTE(review): even inputs
    are not handled here -- presumably factors of 2 were removed by the
    caller, and ``start`` is seeded with an odd value; confirm call sites.
    """
    # If the best factor so far exceeds sqrt(num), the remaining cofactor
    # num can have no smaller factor left and must itself be prime.
    if largest * largest > num:
        return num
    lim = int(math.sqrt(num)) + 1
    for i in range(start, lim, 2):
        if num % i == 0:
            largest = i
            # Strip out every power of this factor.
            while num % i == 0:
                num //= i
            if num == 1:
                return largest
            # Continue searching above i on the reduced number.
            return p_largest_prime_factor(num, i + 2, largest)
    # No divisor up to sqrt(num): num itself is prime.
    return num
def getRawName(path):
    """
    Given a filename with no path before it, returns just the name with
    the extension (everything from the last '.') removed.

    :returns filename
    """
    dot = path.rfind(".")
    if dot == -1:
        # No extension: the name is already "raw". (The old code returned
        # the int -1 here, contradicting its own docstring.)
        return path
    return path[0:dot]
def filter_index(collection, predicate=None, index=None):
    """
    Filter a collection with a predicate function and optionally pick one
    element by index.

    :param collection: collection supporting iteration and slicing
    :param predicate: function to filter the collection with
    :type predicate: function
    :param index: position of a single element to retrieve; returns None
        when out of range
    :type index: int
    :return: filtered collection, or a single element of it when index is
        given
    :rtype: list or object
    """
    # Shorthand: filter_index(xs, 3) means "index 3, no predicate".
    if index is None and isinstance(predicate, int):
        predicate, index = None, predicate
    if predicate:
        collection = type(collection)(filter(predicate, collection))
    if index is None:
        return collection
    try:
        return collection[index]
    except IndexError:
        return None
from pathlib import Path
def load_html(filename):
    """
    Read the HTML file at *filename* and return its contents as text.
    """
    with open(filename) as html_file:
        return html_file.read()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.