content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import copy
def mutate(sequence,variants_df):
    """Apply point mutations from a variant table to a sequence record.

    :param sequence: a Biopython SeqRecord-like object with a ``.seq``
        attribute -- presumably Bio.SeqRecord; TODO confirm.
    :param variants_df: pandas DataFrame with ``chr``, ``pos``, ``ref``,
        ``var`` columns; ``pos`` is 1-based.
    :return: a deep copy of *sequence* with every variant applied.
    :raises ValueError: when a row's ``ref`` base does not match the base
        already at that position.
    """
    seq=copy.deepcopy(sequence)
    # NOTE(review): Seq.tomutable()/toseq() is the legacy Biopython
    # MutableSeq API, removed in Biopython 1.81+ -- confirm pinned version.
    seq.seq=seq.seq.tomutable()
    df=variants_df
    for index, row in df.iterrows():
        # pos is 1-based in the variant table, hence the -1.
        if row["ref"]!=seq.seq[int(row["pos"])-1]:
            raise ValueError("Reference base does not match the reference base in the sequence")
        seq.seq[int(row["pos"])-1]=row["var"]
    seq.seq=seq.seq.toseq()
    return seq
def question_result_type(response):
    """
    Generate the answer text for question result type.

    :param response: Kendra query response dict; the top FAQ excerpt is
        expected at response['resultItems'][0]['documentExcerpt']['text'].
    :return: Answer text quoting the excerpt, or an apology when no
        excerpt is available.
    """
    try:
        excerpt = response['resultItems'][0]['documentExcerpt']['text']
    except (KeyError, IndexError):
        # BUG FIX: an empty 'resultItems' list raises IndexError, which the
        # original only-KeyError handler let escape.
        return "Sorry, I could not find an answer in our FAQs."
    faq_answer_text = "On searching the Enterprise repository, I have found" \
                      " the following answer in the FAQs--"
    return faq_answer_text + '"' + excerpt + '"'
from typing import Counter
def add_numbers_to_repeated_items(items_list) -> list:
    """Label duplicate entries with consecutive numeric suffixes.

    Items that occur more than once become "x1", "x2", ...; unique items
    pass through unchanged.

    :param items_list: list of strings
    :return: list with duplicates renamed
    """
    occurrences = Counter(items_list)
    seen = {}
    labelled = []
    for name in items_list:
        if occurrences[name] > 1:
            seen[name] = seen.get(name, 0) + 1
            labelled.append(f"{name}{seen[name]}")
        else:
            labelled.append(name)
    return labelled
def readItemsFile(fileName):
    """
    Read an items file and return its raw text.

    itype: String, items file name
    rtype: String, stripped file contents with any leading BOM removed
    """
    with open(fileName, 'r', encoding='utf-8') as f:
        data = f.read().strip()
    # Strip a leading byte-order mark that some editors prepend.  The
    # original compared ord(data[0]) against the magic number 65279,
    # which is exactly U+FEFF (the BOM); make that intent explicit.
    if data.startswith('\ufeff'):
        data = data[1:]
    return data
def _get_ui_metadata_data(metadata, config, result_data):
    """Get ui metadata data and fill to result.

    Recursively walks *config* against *metadata*, appending at most one
    collected dict per call to *result_data* (mutated in place) and
    returning it.

    NOTE(review): leaf lookups assume metadata nodes carry a '_self' dict
    of values -- confirm against the metadata schema.
    """
    data_dict = {}
    for key, config_value in config.items():
        if isinstance(config_value, dict) and key != 'content_data':
            # Recurse: descend into metadata[key] when present, otherwise
            # keep matching the nested config against the current node.
            if key in metadata.keys():
                _get_ui_metadata_data(metadata[key], config_value, result_data)
            else:
                _get_ui_metadata_data(metadata, config_value, result_data)
        elif isinstance(config_value, list):
            option_list = []
            for item in config_value:
                if isinstance(item, dict):
                    option_list.append(item)
                    data_dict[key] = option_list
                else:
                    # Booleans are lowered to 'true'/'false' strings,
                    # presumably for the UI layer -- TODO confirm.
                    if isinstance(metadata['_self'][item], bool):
                        data_dict[item] = str(metadata['_self'][item]).lower()
                    else:
                        data_dict[item] = metadata['_self'][item]
        else:
            # Scalar config values are copied through verbatim.
            data_dict[key] = config_value
    if data_dict:
        result_data.append(data_dict)
    return result_data
import numpy
def check_lats(climatology_dict, experiment):
    """Sometimes the latitude axes are not exactly equal after regridding.

    Aligns *experiment*'s latitude coordinate with the 'piControl' one:
    when they differ, asserts the difference is below 1e-4 and copies the
    control points/bounds over.

    NOTE(review): values look like iris cubes (.coord(...), .points,
    .bounds) -- confirm.

    :param climatology_dict: mapping of experiment name -> cube
    :param experiment: key of the cube to fix up (mutated in place)
    :return: the (possibly adjusted) experiment cube
    """
    experiment_lats = climatology_dict[experiment].coord('latitude')
    control_lats = climatology_dict['piControl'].coord('latitude')
    if not control_lats == experiment_lats:
        diffs = experiment_lats.points - control_lats.points
        # Only tolerate sub-1e-4-degree regridding noise.
        assert numpy.abs(diffs).max() < 0.0001, "%s and control have very different latitude axes" %(experiment)
        climatology_dict[experiment].coord('latitude').points = control_lats.points
        climatology_dict[experiment].coord('latitude').bounds = control_lats.bounds
    assert climatology_dict[experiment].coord('latitude') == climatology_dict['piControl'].coord('latitude'), \
        "Problem with %s latitude axis" %(experiment)
    return climatology_dict[experiment]
def list_2_dict(kv_list, fld_del="."):
    """Function: list_2_dict

    Description: Turn a list of delimited "key<sep>value" strings into a
        dict of key -> list of values; repeated keys accumulate.

    Arguments:
        (input) kv_list -> iterable of "dbs<fld_del>tbl" strings.
        (input) fld_del -> field delimiter used for the split.
        (output) grouped -> dictionary of db -> [tables].
    """
    grouped = {}
    for entry in list(kv_list):
        dbs, tbl = entry.split(fld_del)
        grouped.setdefault(dbs, []).append(tbl)
    return grouped
def sftp_prefix(config):
    """
    Generate SFTP URL prefix.

    Builds 'sftp://[user[:pass]@]host[:port]/'; the port is omitted when
    it is falsy or the SFTP default (22).
    """
    if config['username'] and config['password']:
        login = '{}:{}@'.format(config['username'], config['password'])
    elif config['username']:
        login = '{}@'.format(config['username'])
    else:
        login = ''
    port = ':%d' % config['port'] if config['port'] and config['port'] != 22 else ''
    return 'sftp://' + login + config['host'] + port + '/'
import os
def RelativizePaths(base, paths, template):
    """Render each path relative to *base*'s directory through *template*.

    Backslashes are normalized to forward slashes; results are joined
    with newlines.
    """
    anchor = os.path.dirname(os.path.abspath(base))
    rendered = []
    for path in paths:
        rel = os.path.relpath(os.path.abspath(path), anchor).replace("\\", "/")
        rendered.append(template % rel)
    return "\n".join(rendered)
def minimum_swaps(arr: list) -> int:
    """Minimum number of swaps needed to sort *arr*, via cycle decomposition.

    Each cycle of length c in the permutation needs c - 1 swaps.

    Time Complexity: O(n log n)
    Auxiliary Space: O(n)
    """
    length = len(arr)
    # (original_index, value) pairs, ordered by value: position i of this
    # list tells which element belongs at sorted slot i.
    order = sorted(enumerate(arr), key=lambda pair: pair[1])
    seen = [False] * length
    swaps = 0
    for start in range(length):
        # Skip elements already placed or already counted in a cycle.
        if seen[start] or order[start][0] == start:
            continue
        cycle_len = 0
        node = start
        while not seen[node]:
            seen[node] = True
            node = order[node][0]
            cycle_len += 1
        swaps += cycle_len - 1
    return swaps
def get_prov(uid):
    """Return the provenance string stored on *uid*."""
    prov = uid.prov
    return prov
import os
def likelihood_files_are_close(filepaths: list, compare_path: str):
    """
    Select the paths in *filepaths* that live in the same directory as
    *compare_path*.

    Parameters
    ----------
    filepaths
        list of file paths
    compare_path
        reference path whose directory is matched against

    Returns
    -------
    list
        the filepaths sharing compare_path's directory
    """
    target_dir = os.path.dirname(compare_path)
    return [path for path in filepaths if os.path.dirname(path) == target_dir]
def wrap_row(r:list, by:int = 1) -> list:
    """Wraps the list r by number of positions.

    Positive by will shift right. Negative shifts left. Shifts larger
    than len(r) now wrap around (modulo) instead of silently becoming a
    no-op, and an empty list is returned unchanged instead of relying on
    slice arithmetic.

    Args:
        r (list): list to wrap.
        by (int, optional): number of positions to shift by. Defaults to 1.

    Returns:
        list: wrapped list (a new list; r is not mutated).
    """
    if not r:
        return []
    # BUG FIX: normalize the shift so by > len(r) (or very negative by)
    # rotates correctly; the old r[-by:] + r[:-by] degenerated to a copy.
    by %= len(r)
    return r[-by:] + r[:-by]
import re
def extract_pin(module, pstr, _regex=re.compile(r"([^/]+)/([^/]+)")):
    """
    Extract the pin from a line of the result of a Yosys select command, or
    None if the command result is irrelevant (e.g. does not correspond to the
    correct module).

    Inputs
    -------
    module: Name of module to extract pins from
    pstr: Line from Yosys select command (`module/pin` format)
    _regex: precompiled pattern; do not override.
    """
    # BUG FIX: use the precompiled default pattern instead of calling
    # re.match with a fresh literal, which ignored _regex entirely.
    m = _regex.match(pstr)
    if m and m.group(1) == module:
        return m.group(2)
    return None
import os
def find_locks(ldir):
    """Finds all files whose name begins with 'lock-' within the
    directory ldir, and returns their paths as a list. Returns an
    empty list if no locks are found. Recursively searches
    subdirectories of ldir."""
    lock_list = []
    lock_filename_start = "lock-"
    if os.path.isdir(ldir):
        for f in os.listdir(ldir):
            fname = os.path.join(ldir, f)
            if os.path.isdir(fname):
                # BUG FIX: recurse into the already-joined path (the old
                # code passed the malformed `ldir + fname`) and flatten the
                # results with extend (append nested whole lists).
                lock_list.extend(find_locks(fname))
            elif os.path.isfile(fname) and f.startswith(lock_filename_start):
                # BUG FIX: fname is already ldir/f; joining again produced
                # paths like ldir/ldir/f.
                lock_list.append(fname)
    else:
        print("Warning: lockfile directory {0:s} does not exist or is not a directory.\n".format(ldir))
    return lock_list
def compute_logits(theta, ob):
    """
    Compute action logits for an observation under a linear policy.

    theta: A matrix of size |A| * (|S|+1)
    ob: A vector of size |S| (bias term, if any, added by the caller)
    return: A vector of size |A|
    """
    return ob.dot(theta.T)
def AddOrdinalSuffix(value):
    """Adds an ordinal suffix to a non-negative integer (e.g. 1 -> '1st').

    Args:
        value: A non-negative integer.

    Returns:
        A string containing the integer with a two-letter ordinal suffix.

    Raises:
        ValueError: for negative or non-integral input.
    """
    if value < 0 or value != int(value):
        raise ValueError('argument must be a non-negative integer: %s' % value)
    # 11, 12 and 13 are irregular: 11th, not 11st.
    if value % 100 in (11, 12, 13):
        return str(value) + 'th'
    return str(value) + {1: 'st', 2: 'nd', 3: 'rd'}.get(value % 10, 'th')
def slim_to_keras_namescope():
    """
    Utility producing the mapping between old TF-slim MobilenetV1 variable
    name scopes and their conv-layer counterparts.

    NOTE(review): the Conv2d_0 entry is keyed by the *new* name while the
    depthwise/pointwise entries are keyed by the *old* name -- confirm the
    intended lookup direction with callers.
    """
    mapping = {'MobilenetV1/Conv2d_0/conv2d/kernel': 'MobilenetV1/Conv2d_0/weights'}
    for layer in range(1, 14):
        mapping['MobilenetV1/Conv2d_%d_depthwise/depthwise_weights' % layer] = \
            'MobilenetV1/Conv2d_%d_depthwise/depthwise_conv2d/depthwise_kernel' % layer
        mapping['MobilenetV1/Conv2d_%d_pointwise/weights' % layer] = \
            'MobilenetV1/Conv2d_%d_pointwise/conv2d/kernel' % layer
    return mapping
def first_player_wins(a, b):
    """
    Rock-paper-scissors outcome.

    If tie : Returns 0
    If first player wins : Returns 1
    If second player wins : Returns -1
    """
    beats = {"R": "S", "S": "P", "P": "R"}
    if a == b:
        return 0
    return 1 if beats.get(a) == b else -1
def _get_right_parentheses_index_(struct_str):
"""get the position of the first right parenthese in string"""
# assert s[0] == '('
left_paren_count = 0
for index, single_char in enumerate(struct_str):
if single_char == '(':
left_paren_count += 1
elif single_char == ')':
left_paren_count -= 1
if left_paren_count == 0:
return index
else:
pass
return None | 43c1d890fb4ba62ae6e1a7c7df603428f9b342cd | 41,265 |
def collect_village_number_for_binoculars():
    """Let the user pick the village to peek into.

    Prompts repeatedly until a number between 1 and 16 (inclusive) is
    entered, then returns it as an int.
    """
    while True:
        print("")
        village_for_binoculars = input("Which village would you like to peek into? Enter the village number. >")
        try:
            village_for_binoculars = int(village_for_binoculars)
        except ValueError:
            # BUG FIX: non-numeric input used to crash int() before the
            # "didn't understand" branch could ever run.
            print("")
            print("I'm sorry, I didn't understand that.")
            continue
        if 0 < village_for_binoculars < 17:
            return village_for_binoculars
        print("")
        print("Please enter a village number between 1 and 16.")
def same(obj1, obj2):
    """Helper to identify empty measurement instances: True when both
    objects share a class and their get() values compare equal."""
    return obj1.__class__ == obj2.__class__ and obj1.get() == obj2.get()
def obs_is_afternoon(obcode):
    """Given an observation code (eg 'ob_1a', 'ob12_b'), report whether it
    names an afternoon observation (final character 'b')."""
    last_char = obcode[-1]
    return last_char == 'b'
def center_on_atom(obj_in, idx=None, copy=True):
    """Shift all coords in `obj` such that the atom with index `idx` is at the
    center of the cell: [0.5,0.5,0.5] fractional coords.

    :param obj_in: structure-like object with coords / coords_frac arrays
        and a set_all() refresh method -- presumably a pwtools Structure;
        TODO confirm.
    :param idx: index of the atom to center on (required).
    :param copy: when True operate on a copy; when False mutate obj_in.
    :return: the shifted object (the copy, or obj_in itself).
    """
    assert idx is not None, ("provide atom index")
    obj = obj_in.copy() if copy else obj_in
    # Invalidate cartesian coords; set_all() below rebuilds them from
    # the updated fractional coords.
    obj.coords = None
    # [...,idx,:] works for (natoms,3) and (nstep,natoms,3) -- numpy rocks!
    obj.coords_frac = obj.coords_frac - obj.coords_frac[...,idx,:][...,None,:] + 0.5
    obj.set_all()
    return obj
import platform
def format_npm_command_for_logging(command):
    """Convert npm command list to string for display to user."""
    joined = " ".join(command)
    if platform.system().lower() == 'windows':
        if command[0] == 'npx.cmd' and command[1] == '-c':
            return 'npx.cmd -c "%s"' % " ".join(command[2:])
        return joined
    # Strip out redundant npx quotes not needed when executing the command
    # directly
    return joined.replace("''", "'")
def binary_search(num_list, num, not_found="none"):
    """Performs a binary search on a sorted list of numbers, returns index.
    Only works properly if the list is sorted, but does not check whether it is
    or not, this is up to the caller.
    Arguments:
        num_list: a sorted list of numbers.
        num: a number to search for.
        not_found: string. Controls what happens if the number is not in the
            list.
            - "none": None is returned.
            - "upper", "force_upper": upper index is returned
            - "lower", "force_lower": lower index is returned
            - "nearest": index to nearest item is returned
            If num is larger than all numbers in num_list,
                if "upper", "lower", "force_lower", or "nearest":
                    index to the last item of the list is returned.
                if "force_upper":
                    index to the next item past the end of the list is returned.
            If num is smaller than all numbers in num_list,
                if "upper", "force_upper", "lower", or "nearest":
                    0 is returned.
                if "force_lower":
                    -1 is returned.
            Default: None.
    returns:
        None if len(num_list) is 0
        None if num is not in num_list and not_found is "none"
        Integer index to item, or perhaps nearest item (depending on
        "not_found" keyword argument).
    """
    # Validate the mode up front so a typo fails loudly rather than
    # silently behaving like "none".
    if not_found not in (
        "none",
        "upper",
        "force_upper",
        "lower",
        "force_lower",
        "nearest",
    ):
        raise ValueError(
            f"{not_found} is not a recognized value for argument " "'not_found'"
        )
    lower_i, upper_i = 0, len(num_list)
    # Empty list: nothing to find regardless of mode.
    if upper_i == 0:
        return None
    # num below every element: handled before the loop.
    if num < num_list[0]:
        if not_found == "none":
            return None
        if not_found == "force_lower":
            return -1
        return 0
    # num above every element: also handled before the loop.
    if num > num_list[upper_i - 1]:
        if not_found == "none":
            return None
        if not_found == "force_upper":
            return upper_i
        return upper_i - 1
    # Classic bisection; after the pre-checks num is within the list's range.
    while True:
        mid_i = (lower_i + upper_i) // 2
        n = num_list[mid_i]
        if n == num:
            return mid_i
        # mid_i == lower_i means the interval has shrunk to adjacent
        # indices without an exact hit: resolve per not_found mode.
        if mid_i == lower_i:
            if not_found == "none":
                return None
            if not_found in ("upper", "force_upper"):
                return upper_i
            if not_found in ("lower", "force_lower"):
                return lower_i
            # "nearest": bump to upper_i only when it is strictly closer.
            return lower_i + (num_list[upper_i] - num < num - n)
        if n > num:
            upper_i = mid_i
        else:
            lower_i = mid_i
def _int_to_hex(x: int) -> str:
"""Converts an integer to a hex string representation.
"""
return hex(x) | 2a9bdeb96339747ec33e90393a448519daa59a84 | 41,272 |
def make_header(map_table_row, sort1, sort2, sort3):
    """
    Build the header dict for an item: its name and config from the map
    table row, plus the three sort keys.
    """
    return {
        "name": map_table_row["name"],
        "config": map_table_row["config"],
        "sort1": sort1,
        "sort2": sort2,
        "sort3": sort3,
    }
def encode_generator(obj):
    """Materialize a generator-like object (such as ndb.Query) into a list."""
    return [*obj]
def toggle_modal(n1, n2, is_open):
    """
    Controls the state of the modal collapse.

    :param n1: number of clicks on modal
    :param n2: number of clicks on close button
    :param is_open: current open state
    :return: flipped state when either button has clicks, else unchanged
    """
    return (not is_open) if (n1 or n2) else is_open
def printfunccloser():
    """ return the closer to a printfunc """
    # NOTE: the newlines and indentation inside this literal are part of
    # the emitted C source; do not reflow it.
    s = """
    print__closing();
    }
    """
    return s
import os
def get_git_dir(tree):
    """Get Git directory from tree.

    :param tree: path of a working tree
    :return: path of the '.git' entry inside *tree* (existence not checked)
    """
    return os.path.join(tree, ".git")
import copy
def get_root_source(source):
    """ Get the main file source from a doc's source list.

    Parameters:
        source (str/list/dict): contents of doc['source'] or the doc itself.

    Returns:
        str: "root" filename, e.g. if source = ['KP.cell', 'KP.param',
        'KP_specific_structure.res'] then root = 'KP_specific_structure'.

    Raises:
        RuntimeError: if zero or more than one distinct root is found.
    """
    try:
        sources = copy.deepcopy(source['source'])
    except (KeyError, TypeError):
        sources = copy.deepcopy(source)
    if isinstance(source, str):
        return source
    known_exts = ['.res', '.castep', '.history', '.history.gz', '.phonon',
                  '.phonon_dos', '.bands', '.cif', '.magres']
    roots = set()
    for entry in sources:
        if any(entry.endswith(ext) for ext in known_exts):
            # Basename without its final extension.
            roots.add('.'.join(entry.split('/')[-1].split('.')[0:-1]))
        elif 'OQMD' in entry.upper() or 'MP-' in entry.upper():
            # External database IDs are used verbatim.
            roots.add(entry)
        elif len(sources) == 1:
            roots.add(entry)
        elif entry == 'command_line':
            roots.add('command line')
    if len(roots) > 1:
        raise RuntimeError('Ambiguous root source {}'.format(sources))
    if len(roots) < 1:
        raise RuntimeError('Unable to find root source from {}'.format(sources))
    return list(roots)[0]
def bai_from_bam_file(bam_file):
    """
    Swap a '.bam' file extension for '.bai'.

    :raises ValueError: when *bam_file* does not end in '.bam'.
    """
    if not bam_file.endswith('.bam'):
        raise ValueError('{0} must have a .bam extension.'.format(bam_file))
    return bam_file[:-4] + '.bai'
def get_events_summaries(events, event_name_counter, resource_name_counter, resource_type_counter):
    """Fold a page of CloudTrail events into the running counters.

    Args:
        events (dict): CloudTrail response containing an 'Events' list.
        event_name_counter / resource_name_counter / resource_type_counter:
            collections.Counter instances, updated in place.

    Returns:
        (list, list, list): the ten most common (name, count) pairs from
        each counter: event names, resource names, resource types.
    """
    for record in events['Events']:
        event_name_counter.update([record.get('EventName')])
        resources = record.get("Resources")
        if resources is not None:
            resource_name_counter.update(res.get("ResourceName") for res in resources)
            resource_type_counter.update(res.get("ResourceType") for res in resources)
    return (event_name_counter.most_common(10),
            resource_name_counter.most_common(10),
            resource_type_counter.most_common(10))
def _convert_to_code(input, k):
""" Converts a binary string into words of k length """
current = 0
res = []
#Need it to round downwards
for i in range(int(len(input)/k)):
row = [int(e) for e in list(input[current:current+k])]
res.append(row)
current = current + k
return res | b1e1cc9b6aafcc7afdc99689639190d54dd177bc | 41,285 |
def note_css_class(note_type):
    """
    Map a Django lesson note type to a bootstrap alert css class.

    note_type comes from the blocks.ChoiceBlock choices
    ('info', 'warning', 'danger', 'note'); see
    https://getbootstrap.com/docs/4.3/components/alerts/ for the alert
    classes. Unknown types fall back to 'info'.
    """
    return {
        'info': 'success',
        'warning': 'warning',
        'danger': 'danger',
        'note': 'primary',
    }.get(note_type, 'info')
def ion_size():
    """
    Library of ion size parameters.

    Mapped to formulas used in ModelSEED database.
    From lookup table in excel spreadsheet released by Brian M. Tissue
    http://www.tissuegroup.chem.vt.edu/a-text/index.html
    This is only a partial list for now, can populate more as needed.

    :return: dict of formula string -> ion size parameter
        (presumably angstroms, per the Debye-Hueckel convention --
        TODO confirm against the source table).
    """
    sizes = {
        "Ag": 2.5,
        "Al": 9,
        "Ba": 5,
        "B": 8,
        "Br": 3,
        "BrO3": 3.5,
        "CHO3": 4.5,  # ModelSEED treats this as a synonym for CO3
        "Acetate": 4.5,
        "Ca": 6,
        "Cd": 5,
        "Cl": 3,
        "ClO2": 4,
        "ClO3": 3.5,
        "ClO4": 3.5,
        "CN": 3,
        "Co": 6,
        "Cr": 9,
        "CrO4": 4,
        "Cs": 2.5,
        "Cu": 6,
        "F": 3.5,
        "Fe2": 6,
        "Fe3": 9,
        "H": 9,
        "H3O+": 9,
        "I": 3,
        "IO3": 4,
        "IO4": 3.5,
        "K": 3,
        "Mg": 8,
        "Mn": 6,
        "MnO4": 3.5,
        "MoO4": 4.5,
        "NH4": 2.5,
        "NO2": 3,
        "NO3": 3,
        "Na": 4,
        "Nd": 9,
        "Ni": 6,
        "OH": 3.5,
        "PO4": 4,
        "HPO4": 4,
        "H2PO4": 4,
        "S": 5,
        "HS": 3.5,
        "SO3": 4.5,
        "HSO3": 4,
        "SO4": 4,
        "O4S2": 5,
        "Zn": 6,
        "H2O": 3
    }
    return sizes
def polynomial(coeffs):
    """
    Return a polynomial function with coefficients `coeffs`.

    Coefficients are listed lowest-order first, so ``coeffs[i]`` is the
    coefficient in front of ``x**i``. Works on anything supporting * and +
    (scalars, numpy arrays); an empty list yields the zero function.
    """
    if not coeffs:
        return lambda x: x * 0
    def poly(x):
        # Seed with x*0 so the accumulator takes x's type/shape.
        acc = (x * 0) + coeffs[0]
        for power, coeff in enumerate(coeffs[1:], start=1):
            acc = acc + coeff * x ** power
        return acc
    return poly
import collections
def group_train_data(training_data):
    """
    Group training pairs by first phrase.

    :param training_data: list of (seq1, seq2) pairs
    :return: list of (tuple(seq1), [seq2, ...]) pairs

    A defaultdict(list) supplies an empty list the first time each key is
    touched, so second phrases can be appended without an existence check.
    """
    grouped = collections.defaultdict(list)
    for first, second in training_data:
        grouped[tuple(first)].append(second)
    return list(grouped.items())
from urllib.request import Request, urlopen
import csv
def available_environments(repository="http://sintefneodroid.github.io/environments/ls"):
    """
    Fetch the CSV listing of downloadable environments.

    @param repository: URL of the listing endpoint; presumably CSV rows of
        "name, url" -- TODO confirm against the hosted file.
    @type repository: str
    @return: mapping of first column -> second column, both stripped
    @rtype: dict
    """
    # Some hosts reject the default urllib User-Agent, hence the spoof.
    req = Request(repository, headers={"User-Agent": "Mozilla/5.0"})
    environments_m_csv = urlopen(req).read()
    environments_m_csv = environments_m_csv.decode("utf-8")
    reader = csv.reader(environments_m_csv.split("\n"), delimiter=",")
    # Rows with fewer than two columns (e.g. blank lines) are skipped.
    environments_m = {row[0].strip(): row[1].strip() for row in reader if len(row) > 1}
    return environments_m
import math
def get_xy(lat, lng, zoom):
    """
    Generates an X,Y tile coordinate based on the latitude, longitude
    and zoom level (Web-Mercator style tiling).

    :param lat: latitude in degrees (must be strictly between the poles;
        sin_y == +/-1 would divide by zero below)
    :param lng: longitude in degrees
    :param zoom: integer zoom level
    Returns: An (x, y) integer tile coordinate
    """
    tile_size = 256
    # Use a left shift to get the power of 2
    # i.e. a zoom level of 2 will have 2^2 = 4 tiles
    num_tiles = 1 << zoom
    # Longitude maps linearly onto x.
    point_x = (tile_size / 2 + lng * tile_size / 360.0) * num_tiles // tile_size
    # Convert the latitude to radians and take the sine
    sin_y = math.sin(lat * (math.pi / 180.0))
    # Mercator: y grows with log((1+sin)/(1-sin)), scaled to tile units.
    point_y = ((tile_size / 2) + 0.5 * math.log((1 + sin_y) / (1 - sin_y)) *
               - (tile_size / (2 * math.pi))) * num_tiles // tile_size
    return int(point_x), int(point_y)
import functools
def traceproperty(fn):
    """Decorator stub for tracing basic torcharrow property calls.

    The tracing machinery is currently disabled: the wrapper simply
    forwards to *fn* unchanged. (The removed commented-out scaffolding
    marked fn as a property, tracked trace nesting depth, and -- at
    nesting level 0 with tracing on -- recorded a Call(fn, args, kwargs)
    entry with column arguments replaced by Var(ids). Recover it from
    version control if tracing is reinstated.)
    """
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        # Tracing disabled: pass straight through.
        return fn(*args, **kwargs)
    return wrapped
import ast
def eval_condition(condition, locals):
    """Evaluates the condition; any variable the expression references
    that is missing from *locals* defaults to None.

    SECURITY: this calls eval(); *condition* must come from a trusted
    source -- never pass user-controlled input.

    :param condition: Python expression string
    :param locals: dict of variable bindings (mutated: missing referenced
        names are inserted with value None)
    :return: result of evaluating the expression
    """
    referenced = set()
    for node in ast.walk(ast.parse(condition)):
        # isinstance instead of `type(node) is ast.Name` -- the idiomatic
        # (and subclass-safe) type check.
        if isinstance(node, ast.Name):
            referenced.add(node.id)
    for name in referenced:
        if name not in locals:
            locals[name] = None
    return eval(condition, {}, locals)
def echo(data):
    """Identity endpoint: return *data* back to the client unchanged."""
    return data
from typing import Sequence
from typing import List
from typing import Any
def generate_by_char_at(attr_ind: int, dtuple: Sequence, pos: List[Any]):
    """ Generate signatures by select subset of characters in original features.

    Positions may be ints or "start:end" slice strings; out-of-range
    indices are clamped to the feature's length rather than raising.
    Returns None for an empty (missing) feature value.

    >>> res = generate_by_char_at(2, ('harry potter', '4 Privet Drive', 'Little Whinging', 'Surrey'), [0, 3])
    >>> assert res == 'Lt'
    >>> res = generate_by_char_at(2, ('harry potter', '4 Privet Drive', 'Little Whinging', 'Surrey'), [":4"])
    >>> assert res == 'Litt'
    """
    sig = []
    feature = dtuple[attr_ind]
    # missing value
    if feature == '':
        return None
    max_ind = len(feature)
    for p in pos:
        if type(p) == int:
            # Clamp so an out-of-range index selects the last character.
            p = min(p, max_ind - 1)
            sig.append(feature[p])
        elif ':' not in p:
            # Numeric string: treat like an int index.
            p = int(p)
            p = min(p, max_ind - 1)
            sig.append(feature[p])
        else:
            # Slice string "start:end"; either side may be empty.
            start_ind, end_ind = p.split(":")
            if start_ind != '' and end_ind != '':
                start_ind = int(start_ind)
                end_ind = int(end_ind)
                assert start_ind < end_ind, "Start index should be less than End index in {}".format(p)
                start_ind = min(start_ind, max_ind - 1)
                end_ind = min(end_ind, max_ind)
                c = feature[start_ind: end_ind]
            elif start_ind == '' and end_ind != '':
                end_ind = int(end_ind)
                end_ind = min(end_ind, max_ind)
                c = feature[:end_ind]
            elif start_ind != '' and end_ind == '':
                start_ind = int(start_ind)
                start_ind = min(start_ind, max_ind)
                c = feature[start_ind:]
            else:
                raise ValueError('Invalid pos argument: {}'.format(p))
            sig.append(c)
    return ''.join(sig)
def style_function_color_map(item, style_dict):
    """Style callback for geojson polygons: per-feature fill colour and
    opacity looked up by feature id, with a pale default for unknown ids."""
    feature_key = item['id']
    if feature_key in style_dict:
        entry = style_dict[feature_key]
        fill_color, fill_opacity = entry['color'], entry['opacity']
    else:
        fill_color, fill_opacity = '#d7e3f4', 0.0
    return {
        'fillColor': fill_color,
        'fillOpacity': fill_opacity,
        'color': '#262626',
        'weight': 0.5,
    }
def _to_date(date_s):
"""
.. note::
Errata issue_date and update_date format: month/day/year, e.g. 12/16/10.
>>> _to_date("12/16/10")
(2010, 12, 16)
>>> _to_date("2014-10-14 00:00:00")
(2014, 10, 14)
"""
if '-' in date_s:
return tuple(int(x) for x in date_s.split()[0].split('-'))
else:
(month, day, year) = date_s.split('/')
return (int("20" + year), int(month), int(day)) | a331d54394a7d59a1ab7b9441e554ef8fe6c4c18 | 41,300 |
def uhex(num: int) -> str:
    """Uppercase hex with a '0x' prefix, zero-padded to two digits."""
    return f"0x{num:02X}"
import os
def is_git_repository(directory='.'):
    """Returns whether *directory* (default: current working directory)
    contains a .git entry, i.e. looks like a Git repository."""
    git_path = '{}/.git'.format(directory.rstrip('/'))
    return os.path.exists(git_path)
from operator import mul
from functools import reduce
def _make_member_list(n):
    """Takes the length of three cyclic group and constructs the member
    list so that the permutations can be determined. Each member has
    three components, corresponding to the entries for each of the
    three cyclic groups.

    Works like a three-digit odometer with bases n[0], n[1], n[2]: the
    last digit increments fastest and each rollover carries left.

    :args n: Integer array containing the diagonal elements of the SNF.
    :return: list of [d0, d1, d2] digit lists, one per group member.
    """
    # Total group order; round() guards against float diagonal entries.
    depth = int(round(reduce(mul,n,1)))
    p = []
    for i in range(depth):
        p.append([0,0,0])
    for im in range(1,depth): # Loop over the members of the translation group
        p[im] = list(p[im-1]) # Start with the same digits as in the previous increment
        p[im][2] = (p[im-1][2]+1)%n[2] # Increment the first cyclic group
        if (p[im][2]==0): # If it rolled over then
            p[im][1] = (p[im-1][1] +1) % n[1] # increment the next cyclic group
            if (p[im][1]==0): # If this one rolled over too
                p[im][0] = (p[im-1][0]+1)%n[0] # Then increment the third one
    return p
import torch
def rae(target, predictions: list, total=True):
    """
    Calculate the RAE (Relative Absolute Error) compared to a naive forecast
    that predicts the mean of the past observations everywhere.

    Parameters
    ----------
    target : torch.Tensor
        The true values of the target variable
    predictions : list
        - predictions[0] = y_hat_test, predicted expected values (torch.Tensor)
    total : bool, default = True
        Must be True; per-horizon loss is not supported.

    Returns
    -------
    torch.Tensor
        A scalar with the overall RAE (the lower the better)

    Raises
    ------
    NotImplementedError
        When 'total' is set to False
    """
    if not total:
        raise NotImplementedError("rae does not support loss over the horizon")
    predicted = predictions[0]
    naive = torch.mean(target)
    # Denominator: MAE of the constant-mean "naive forecast" on the same set.
    return torch.mean(torch.abs(target - predicted)) / torch.mean(torch.abs(target - naive))
import operator
def count_words(s, n):
    """Return the n most frequently occurring words in *s* as
    (word, count) tuples, ties broken alphabetically.

    Words are split on single spaces; raises IndexError when *s* has
    fewer than n distinct words.
    """
    freq = {}
    for word in s.split(" "):
        freq[word] = freq.get(word, 0) + 1
    # Alphabetical first, then a *stable* sort by count descending, so
    # equal counts stay in A-Z order.
    ranked = sorted(sorted(freq.items(), key=operator.itemgetter(0)),
                    key=operator.itemgetter(1), reverse=True)
    return [ranked[i] for i in range(n)]
import time
def timeFunction(f, k, V, S):
    """ (int x list[int] x int x bool -> tuple[int, _]) x int x list[int] x int ->
    tuple[float, int]

    Time f(k, V, S, display=False) using process_time and return
    (elapsed_seconds, number_of_jars_in_its_solution).
    """
    start = time.process_time()
    jars, _ = f(k, V, S, display=False)
    elapsed = time.process_time() - start
    return elapsed, jars
def server_address(http_server):
    """IP/host component of the http server's bound address."""
    host = http_server.server_address[0]
    return host
def gen_bad_labels(badcase_type, frame_objs):
    """
    Build the bad-case label dicts (bounding box plus metadata) for one frame.

    badcase_type[j] == -1 marks object j as "not a bad case" and is skipped.
    frame_objs[j] is (tlwh, track_id, label_name, score) where tlwh is
    (top-left x, top-left y, width, height).
    """
    bad_labels = []
    for j in range(len(badcase_type)):
        if badcase_type[j] == -1:
            continue
        tlwh = frame_objs[j][0]
        x1, y1 = tlwh[0], tlwh[1]
        # Convert width/height to the bottom-right corner.
        x2, y2 = x1 + tlwh[2], y1 + tlwh[3]
        bad_labels.append({
            'x1': int(x1),
            'y1': int(y1),
            'x2': int(x2),
            'y2': int(y2),
            'pred_label_name': frame_objs[j][2],
            'id': int(frame_objs[j][1]),
            'bad_case_type': int(badcase_type[j]),
            'score': float(frame_objs[j][3])
        })
    return bad_labels
def three_points_solve1(li, lj, lk, a, b, eps=1e-6):
    """
    (1) + (2)

    Solve for (w2, dx) from three sampled values li, lj, lk taken at
    spacings a (between points i and j) and b (between j and k).

    NOTE(review): the derivation labelled "(1) + (2)" is not reproduced
    here; the commented-out alternative for dx suggests a parabolic fit --
    confirm against the accompanying notes. eps only guards the w2
    denominator against division by zero.
    """
    lkj, lji = lk - lj, lj - li
    w2 = (a + b) / (lkj / b - lji / a + eps)
    dx = -(w2 * lji / a + a) / 2
    # dx = (lkj * a * a + lji * b * b) / (lji*b - lkj * a) / 2
    return w2, dx
def isNestedInstance(obj, cl):
    """ Test for sub-classes types

    Parameters
    ----------
    obj: object instance
        object to test

    cl: Class
        top level class to test

    returns
    -------
    r: bool
        True if obj is indeed an instance or subclass instance of cl
    """
    # The original hand-walked the subclass tree but misspelled the dunder as
    # "__subclasses" (no trailing underscores), so the walk never executed --
    # and even when spelled correctly it only collected grandchildren.
    # isinstance() already matches instances of cl and of any subclass,
    # nested to any depth, so it IS the universal test being sought.
    return isinstance(obj, cl)
def _transform_metric(metrics):
"""
Remove the _NUM at the end of metric is applicable
Args:
metrics: a list of str
Returns:
a set of transformed metric
"""
assert isinstance(metrics, list)
metrics = {"_".join(metric.split("_")[:-1]) if "_cut" in metric or "P_" in metric else metric for metric in metrics}
return metrics | 13747864d70f7aae6aaec5c9139724ff6c8cb7fb | 41,315 |
def ProgressBar(percent, prefix=None, notches=50, numericalpercent=True, unicode=False):
    """Render a textual progress bar for *percent* (a float in 0.0-1.0).

    :param percent: fraction complete, between 0.0 and 1.0
    :param prefix: optional custom string placed before the bar
    :param notches: total number of bar segments
    :param numericalpercent: append " [xx.x%]" with the percentage rounded to
        two places
    :param unicode: draw with Unicode full/empty blocks instead of '#'/'.'
        (the blocks are not defined in all fonts)
    :return: the assembled progress bar as a string
    """
    full_char, empty_char = ("\u25AE", "\u25AF") if unicode else ("#", ".")
    filled = int(round(percent * notches))
    parts = []
    if prefix:
        parts.append("{} ".format(prefix))
    # String repetition + join replaces the original char-by-char `+=` loops
    # (which also left unused loop variables behind).
    parts.append("[" + full_char * filled + empty_char * (notches - filled) + "]")
    if numericalpercent:
        parts.append(" [{}%]".format(str(round(percent * 100, 2))))
    return "".join(parts)
import re
def get_inputs(filename):
    """
    Each line in the input file contains directions to a tile
    starting from the origin.
    This function returns a list of lists with directions to each tile.
    """
    with open(filename, 'r') as input_file:
        lines = input_file.read().splitlines()
    # Alternation order matters: two-letter directions are tried before the
    # bare 'e'/'w' they contain would otherwise match.
    directions = 'e|se|sw|w|nw|ne'
    return [re.findall(directions, line) for line in lines]
import torch
def compute_overlaps(batch):
    """Compute groundtruth overlap for each point+level. Note that this is a
    approximation since
    1) it relies on the pooling indices from the preprocessing which caps the number of
    points considered
    2) we do a unweighted average at each level, without considering the
    number of points used to generate the estimate at the previous level

    :param batch: dict with 'src_overlap'/'tgt_overlap' (lists of per-cloud
        overlap tensors) and 'kpconv_meta' (KPConv preprocessing output with
        'points', 'pools' and 'stack_lengths' per pyramid level).
        # assumes the overlap entries are per-point scores in [0, 1] -- TODO confirm
    :return: dict mapping 'pyr_<level>' -> float tensor of per-point overlap
        scores (clamped to [0, 1]) for each pyramid level.
    """
    # List concatenation: all source clouds followed by all target clouds.
    overlaps = batch['src_overlap'] + batch['tgt_overlap']
    kpconv_meta = batch['kpconv_meta']
    n_pyr = len(kpconv_meta['points'])  # number of pyramid levels

    # Level 0 is just the raw per-point overlaps stacked into one tensor.
    overlap_pyr = {'pyr_0': torch.cat(overlaps, dim=0).type(torch.float)}
    # Any pooling index >= the total stacked length at a level is padding.
    invalid_indices = [s.sum() for s in kpconv_meta['stack_lengths']]
    for p in range(1, n_pyr):
        # clone(): the padding redirect below must not mutate the meta dict.
        pooling_indices = kpconv_meta['pools'][p - 1].clone()
        valid_mask = pooling_indices < invalid_indices[p - 1]
        # Redirect padding to index 0; its contribution is zeroed by the
        # valid_mask product below, so only the denominator-side mask matters.
        pooling_indices[~valid_mask] = 0

        # Average pool over indices
        overlap_gathered = overlap_pyr[f'pyr_{p-1}'][pooling_indices] * valid_mask
        overlap_gathered = torch.sum(overlap_gathered, dim=1) / torch.sum(valid_mask, dim=1)
        overlap_gathered = torch.clamp(overlap_gathered, min=0, max=1)
        overlap_pyr[f'pyr_{p}'] = overlap_gathered

    return overlap_pyr
def _is_ethaddr(input):
"""判断是否是合法 eth 地址
Args:
input (str): 用户输入
"""
if len(input) != 42 or not input.startswith("0x"):
return False
try:
int(input, 16)
except:
return False
return True | 93340d93f12b73e46d632d6e2fe0cf26aa413bc3 | 41,320 |
def xor(x, y):
    """Logical exclusive-or of the truthiness of *x* and *y*.

    N.B.: not lazy like `or` -- both arguments are always evaluated.
    """
    return bool(x) is not bool(y)
import sys
def get_sequence(fasta):
    """get the description and trimmed dna sequence

    Reads a single-sequence FASTA file: skips leading blank lines, takes the
    first remaining line as the header and joins the rest (stripped of
    trailing whitespace) into one sequence string.

    :param fasta: path to the FASTA file
    :return: the concatenated sequence
    :raises SystemExit: exits with status 1 if the file is not a valid
        single-sequence FASTA (header missing '>' or '>' found in the body)
    """
    # `with` closes the handle even on error (the original left it open
    # until close() and would leak on an exception between open and close).
    with open(fasta, 'r') as in_file:
        content = in_file.readlines()
    content = [line for line in content if line != ""]
    # Skip leading blank lines before the header.
    while content[0] == "" or content[0] == "\n":
        content = content[1:]
    header = content[0]
    seq = "".join(line.rstrip() for line in content[1:])
    if ">" not in header or ">" in seq:
        # Bug fix: print(sys.stderr, msg) wrote the stderr *object* to stdout;
        # file=sys.stderr actually routes the message to stderr.
        print("FASTA file not properly formatted; should be single sequence starting with '>' and sequence name.", file=sys.stderr)
        sys.exit(1)
    return seq
import socket
def hostname():
    """Get the name of the host system.

    Returns
    -------
    str
        The domain name of the current host.
    """
    # Fully-qualified domain name as resolved by the standard library.
    fqdn = socket.getfqdn()
    return fqdn
def is_boundary(loop):
    """Is a given loop on the boundary of a manifold (only connected to one face)

    NOTE(review): the check is for *zero* linked loops (an edge with no radial
    neighbours), despite the docstring's "one face" wording -- confirm intent.
    """
    return not loop.link_loops
import os
import random
import string
def get_secret_key(app, filename='secret_key'):
    """Get, or generate if not available, secret key for cookie encryption.

    Key will be saved in a file located in the application directory.

    :param app: application object exposing ``root_path``
    :param filename: name of the key file inside the application directory
    :return: the 64-character secret key
    """
    import secrets  # stdlib; preferred over `random` for security-sensitive tokens

    filename = os.path.join(app.root_path, filename)
    try:
        # `with` closes the read handle (the original leaked it).
        with open(filename, 'r', encoding='utf-8') as f:
            return f.read()
    except IOError:
        alphabet = string.punctuation + string.ascii_letters + string.digits
        # secrets.choice uses a CSPRNG; random.choice is predictable and
        # unsuitable for generating a cookie-signing key.
        k = ''.join(secrets.choice(alphabet) for _ in range(64))
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(k)
        return k
import itertools
def main():
    """Generates SQL for comparing rows in a table. Takes a list of fields to compare,
    generates all combinations of those fields, compares each combination and counts amount

    Works backwards from the full field set down to 4-field combinations so
    that stronger (more-field) matches are inserted first; later, weaker
    matches are excluded via the NOT EXISTS guard on @resultants.
    """
    # Temp-table declaration that collects every duplicate pair found.
    sql = '''DECLARE @resultants TABLE
    (
        [Duplicate Count] NVARCHAR(64)
        , [Matched on] NVARCHAR(128)
        , Link1 NVARCHAR(100)
        , Link2 NVARCHAR(100)
        , ID bigint
        , DupID bigint
        , DOB date
        , SiteID bigint
        , PostCode NVARCHAR(8)
        , FirstName NVARCHAR(128)
        , LastName NVARCHAR(64)
        , Female bit
        , PinNumber NVARCHAR(8)
    )'''
    fields = ['DOB', 'SiteID', 'PostCode', 'FirstName', 'LastName', 'Female', 'PinNumber']
    for length in range(len(fields), 3, -1): # working backwards from most to least combinations, stopping at 4 matching items
        for combinations in itertools.combinations(fields, length):
            sql += f"\n\nINSERT INTO @resultants ([Duplicate Count], [Matched on], Link1, Link2, ID, DupID, DOB, SiteID, PostCode, FirstName, LastName, Female, PinNumber)\n" \
                   + f"SELECT '{len(combinations)}' 'Duplicate Count', "
            # Matched on
            # Build a comma-separated list of the matched fields, preserving
            # the canonical order of `fields` rather than combination order.
            matched_on = ''
            for _, field in enumerate(fields):
                for combination in combinations:
                    if field == combination: # entering if it exists
                        matched_on += (f'{combination}' if len(matched_on) == 0 else f', {combination}')
                        break
            matched_on = matched_on.strip()
            sql += f"'{matched_on}' 'Matched on', 'http://management.consol.eu/Member/Edit/' + CONVERT(varchar, first.ID), 'http://management.consol.eu/Member/Edit/' + CONVERT(varchar, second.ID), first.ID, second.ID 'DupID', first.DOB, first.SiteID, first.PostCode, first.FirstName, first.LastName, first.Female, first.PinNumber"
            # Self-join on tblAccount; first.ID < second.ID avoids mirrored
            # pairs, and NOT EXISTS skips pairs already caught by a stronger match.
            sql += '\nFROM tblAccount first WITH(NOLOCK), tblAccount second WITH(NOLOCK)\nWHERE first.ID < second.ID AND first.Deleted IS NULL AND second.Deleted IS NULL AND \n\tNOT EXISTS (select * from @resultants r WHERE r.ID = first.ID and r.DupID = second.ID)'
            # WHERE first.field = second.field
            # Each matched field must be non-NULL, non-empty and equal on both rows.
            for field in fields:
                for combination in combinations:
                    if field == combination:
                        sql += f" \n\tAND first.{combination} IS NOT NULL AND first.{combination} != '' AND second.{combination} IS NOT NULL AND second.{combination} != ''"
                        sql += f" \n\tAND first.{combination} = second.{combination}"
                        break
    sql += '\n\nSELECT * from @resultants'
    return(sql)
def get_semantic_feature_path():
    """Add here the path to the semantic features file.
    This is a file that contains the stimuli and question representations."""
    # Hard-coded project-relative location of the features archive.
    return 'data/MTurk_semantic_features.npz'
def spdk_kill_instance(client, sig_name):
    """Send a signal to the SPDK process.

    Args:
        sig_name: signal to send ("SIGINT", "SIGTERM", "SIGQUIT", "SIGHUP", or "SIGKILL")
    """
    # Thin RPC wrapper: forward the signal name as the only parameter.
    return client.call('spdk_kill_instance', {'sig_name': sig_name})
def _resolve_none(inp):
""" Helper function to allow 'None' as input from argparse
"""
if inp == "None":
return
return inp | 452ee9b4ec24f08126b533922d21ed1cf32db3eb | 41,330 |
import sys
def is_notebook() -> bool:
    """Returns True if running in a notebook (Colab, Jupyter) environment."""
    # Look the module up in sys.modules rather than importing it: importing
    # IPython is slow and we only care whether it is already loaded.
    ipython_module = sys.modules.get('IPython')  # pylint: disable=invalid-name
    if ipython_module is None:
        return False
    shell = ipython_module.get_ipython()
    # A kernel app in the config means a Jupyter/Colab kernel, not a terminal.
    return bool(shell and 'IPKernelApp' in shell.config)
def collapse(array):
    """
    Collapse a homogeneous array into a scalar; do nothing if the array
    is not homogenous

    :param array: sequence of hashable items
    :return: the single repeated element if every item is equal, otherwise
        the original array unchanged (an empty array is returned as-is)
    """
    # set(array) replaces the redundant `set(a for a in array)` generator.
    if len(set(array)) == 1:  # homogenous array
        return array[0]
    return array
from typing import Union
import time
def time_ms(as_float: bool = False) -> Union[int, float]:
    """Convert current time to milliseconds.

    :param as_float: result should be float, default result is int
    :return: current time in milliseconds
    """
    milliseconds = time.time() * 1000
    return milliseconds if as_float else int(milliseconds)
def show_user_details2():
    """Prompt for four users' name, age and shoe size, then display the
    names and ages of all the people entered -- the shoe size is collected
    but deliberately not shown.

    :return: an empty string
    """
    users = {}
    # Count down so the prompt shows how many entries remain.
    for counter in range(4, 0, -1):
        name = input(f"{counter} to go; Enter user name: ").title()
        age = int(input("Enter user's age: "))
        shoe_size = float(input("Enter show size: "))
        users[name] = {
            "age": age,
            "show_size": shoe_size
        }
    for name in users:
        print(name, users[name]["age"])
    return ""
import re
def char_replace(output, content_data, modification_flag):
    """
    Attempts to convert PowerShell char data types using Hex and Int values into ASCII.

    Args:
        output: What is to be returned by the profiler
        content_data: [char]101
        modification_flag: Boolean
    Returns:
        content_data: "e"
        modification_flag: Boolean
    """
    # Hex pass runs first so the "0x" prefix is consumed before the plain-int
    # pass could partially match it (0x41 -> 65 -> "A").
    for token in re.findall(r"\[char]0x[0-9a-z]{1,2}", content_data):
        code_point = int(token.split("]")[1], 0)
        # Only printable-ish ASCII (and common control codes) are substituted.
        if 10 <= code_point <= 127:
            content_data = content_data.replace(token, '"%s"' % chr(code_point))
            modification_flag = True

    # Int values
    for token in re.findall(r"\[char][0-9]{1,3}", content_data, re.IGNORECASE):
        code_point = int(token.split("]")[1])
        if 10 <= code_point <= 127:
            content_data = content_data.replace(token, '"%s"' % chr(code_point))
            modification_flag = True

    if modification_flag:
        output["modifications"].append("CharReplace")
    return content_data, modification_flag
def is_worth_living(Life):
    """
    "The unexamined life is not worth living." -- Socrates
    """
    return Life.be_examined
def get_name(self):
    """Return this dataframe's name.

    :param self: object carrying a private ``_name`` attribute
    :return: the stored name
    """
    return self._name
def getcode(line):
    """ Extract out the Geonames reference code for searching. """
    fields = line.split('\t')
    code = fields[0][2:]  # drop the two-character prefix from the first column
    description = fields[1]
    return (code, description)
import os
def get_mp3_filenames(directory="../data/fma_small"):
    """
    Get the path of each mp3 file under the given root directory

    :param directory: root directory to search
    :return: List[String]
    """
    # Single comprehension: walk the tree and keep only .mp3 files.
    return [
        os.path.join(root, name)
        for root, _, names in os.walk(directory)
        for name in names
        if name.endswith(".mp3")
    ]
import re
def verify_pattern(pattern):
    """Verifies if pattern for matching and finding fulfill expected structure.

    :param pattern: string pattern to verify
    :return: True if pattern has proper syntax, False otherwise
    """
    token_re = re.compile("^!?[a-zA-Z]+$|[*]{1,2}$")

    def _check(segment):
        # Empty segments (including an empty overall pattern) are invalid.
        if not segment:
            return False
        # '!' negation: validate the remainder of the segment.
        if segment.startswith("!"):
            return _check(segment[1:])
        # '[a,b,...]' alternation: every alternative must be valid.
        if segment.startswith("[") and segment.endswith("]"):
            return all(_check(part) for part in segment[1:-1].split(","))
        return token_re.match(segment)

    return all(_check(segment) for segment in pattern.split("/"))
def turn(guti,guti2):
    """Set the turn in the player one.

    Returns 1 when both pieces sit at (left=147, bottom=601); otherwise
    falls through and implicitly returns None.
    """
    # NOTE(review): the inner condition re-tests guti.guti_rect.bottom instead
    # of guti2.guti_rect.bottom -- this looks like a copy-paste bug (guti2's
    # bottom is never checked). Confirm intent before changing behaviour.
    if guti.guti_rect.left == 147 and guti.guti_rect.bottom == 601:
        if guti2.guti_rect.left == 147 and guti.guti_rect.bottom == 601:
            return 1
def path_add_str(path_):
    """ Format path_ for console printing """
    # "+ <path>" marks an added path in console output.
    return f'+ {path_}'
def get_time_in_min(timestamp):
    """
    Takes a timestamp, for example 12:00 and splits it, then converts it into minutes.
    """
    hours, minutes = timestamp.split(":")
    return int(hours) * 60 + int(minutes)
def is_leaf(tree):
    """
    :param tree: a tree node
    :return: True if tree is a leaf (it has neither a left nor a right child)
    """
    return tree.left is None and tree.right is None
def removeForwardSlash(path):
    """
    removes forward slash from path

    :param path: filepath
    :returns: path without final forward slash
    """
    # Only a single trailing slash is removed (unlike rstrip('/')).
    return path[:-1] if path.endswith('/') else path
def keyvalue2str(k, v):
    """
    A function to convert key - value convination to string.

    Integers (and bools, which are ints) are emitted bare; every other
    value is wrapped in double quotes. Both forms keep a trailing space.
    """
    if isinstance(v, int):
        return "%s = %s " % (k, v)
    return """%s = "%s" """ % (k, v)
def getAgnData(hd_agn, agn_FX_soft, redshift_limit):
    """Select the low-redshift AGN sample from the catalogue.

    @hd_agn :: table with 'FX_soft', 'redshift_R', 'RA', 'DEC' and
               'HALO_scale_of_last_MM' columns
    @agn_FX_soft :: minimum soft X-ray flux to be classified as an AGN
    @redshift_limit :: keep only objects below this redshift
        --> typically, we are interested in the low-z universe for this project
    Returns:: ([ra, dec, z] of the selected AGNs,
               scale factor of their last major merger,
               the boolean selection mask itself)
    """
    # Selection mask: bright enough in soft X-rays AND in the low-z universe.
    keep = (hd_agn['FX_soft'] > agn_FX_soft) & (hd_agn['redshift_R'] < redshift_limit)
    pos_z = [hd_agn['RA'][keep], hd_agn['DEC'][keep], hd_agn['redshift_R'][keep]]
    # scale factor of last major merger
    scale_merger = hd_agn['HALO_scale_of_last_MM'][keep]
    return pos_z, scale_merger, keep
def extract_name_and_link(a_object):
    """Get the source name and url if it's present.

    Parameters:
    ----------
    - a_object (bs4.element.Tag - `a`) : an `a` tag parsed by beautiful soup 4
        (anything exposing ``get_text()`` and ``["href"]`` works)

    Returns:
    ----------
    - source_name (str) : plain-text source name with the
      " Bias and Reliability" suffix stripped, or None
    - link (str) : the Ad Fontes Media url, or None when the tag is missing
      or points elsewhere
    """
    if a_object is None:
        return None, None
    href = a_object["href"]
    if href is None or not href.startswith("https://adfontesmedia.com"):
        return None, None
    source_name = a_object.get_text().replace(" Bias and Reliability", "")
    return source_name, href
def my_potential_energy(rij, rc):
    """ Calculate total potential energy.

    Args:
        rij (np.array): distance table, shape (natom, natom)
        rc: cutoff radius; pairs farther apart than rc contribute nothing
    Return:
        float: total potential energy
    """
    # Shift so the potential is exactly zero at the cutoff distance.
    vshift = 4 * rc ** (-6) * (rc ** (-6) - 1)
    total = 0.0
    for row in range(len(rij)):
        # Upper triangle only: each pair is counted once.
        for col in range(row + 1, len(rij[0])):
            dist = rij[row][col]
            if dist <= rc:
                total += 4 * dist ** (-6) * (dist ** (-6) - 1) - vshift
    return total
import random
import copy
def make_steps(first_step, tasks):
    """Simulate one unit's walk through a process flow.

    Starting at *first_step*, repeatedly looks up the step's details in
    *tasks* and decides where to route next:
      - steps with a 'yield' key pass/fail at random with that probability;
      - steps with a 'fail_count' key fail until a counter (shared across the
        whole walk) reaches the threshold, then pass.
    The walk ends when a step's 'route_to' is not a string (e.g. None).

    NOTE(review): ``details`` is a reference into *tasks* (the deepcopy is
    commented out), so the caller's task dicts are mutated in place with
    'result'/'route_to' keys -- confirm this aliasing is intended.

    :param first_step: key of the starting task in *tasks*
    :param tasks: dict of step-name -> step-details dicts
    :return: list of the (mutated) step-detail dicts visited, in order
    """
    step = first_step
    fail_count = 0
    steps = []
    while True:
        # details = copy.deepcopy(tasks[step])
        details = tasks[step]
        if 'yield' in details.keys():
            # Random pass/fail weighted by the step's yield probability.
            if random.random() < details['yield']:
                details['result'] = 'pass'
                details['route_to'] = details['route_to_pass']
            else:
                details['result'] = 'fail'
                details['route_to'] = details['route_to_fail']
        elif 'fail_count' in details.keys():
            # Deterministic: fail until the shared counter hits the threshold.
            fail_count += 1
            if fail_count == details['fail_count']:
                details['route_to'] = details['route_to_pass']
            else:
                details['route_to'] = details['route_to_fail']
        steps.append(details)
        # A string route names the next step; anything else ends the walk.
        if isinstance(details['route_to'], str):
            step = copy.deepcopy(details['route_to'])
        else:
            break
    return steps
def async_get_pin_from_uid(uid):
    """Get the device's 4-digit PIN from its UID."""
    # The PIN is simply the last four characters of the UID.
    return uid[-4:]
import json
import re
def sd_tasktype_mapping(XNAT, project):
    """
    Method to get the Task type mapping at Project level

    :param XNAT: XNAT interface
    :param project: XNAT Project ID
    :return: Dictonary with scan_type/series_description and tasktype mapping
    """
    tk_dict = {}
    # Preferred source: a BIDS_tasktype JSON resource stored on the project.
    if XNAT.select('/data/projects/' + project + '/resources/BIDS_tasktype').exists():
        for res in XNAT.select('/data/projects/' + project + '/resources/BIDS_tasktype/files').get():
            if res.endswith('.json'):
                # .get() on the file selector downloads it and returns a local path.
                with open(XNAT.select('/data/projects/' + project + '/resources/BIDS_tasktype/files/'
                          + res).get(), "r+") as f:
                    datatype_mapping = json.load(f)
                    tk_dict = datatype_mapping[project]
    else:
        # Fallback: derive a default mapping by flagging resting-state series
        # descriptions as the "rest" task.
        # NOTE(review): the fallback scans the hard-coded 'LANDMAN' project
        # rather than `project` -- confirm this default source is intended.
        print('\t\t>WARNING: No BIDS task type mapping in project %s - using default mapping' % (project))
        scans_list_global = XNAT.get_project_scans('LANDMAN')
        for sd in scans_list_global:
            c = re.search('rest|Resting state|Rest', sd['scan_type'], flags=re.IGNORECASE)
            if not c == None:
                # Sanitize the scan type into a BIDS-safe key.
                sd_func = sd['scan_type'].strip().replace('/', '_').replace(" ", "").replace(":", '_')
                tk_dict[sd_func] = "rest"
        # Cache the derived mapping to a local file for reuse.
        with open("global_tk_mapping.json", "w+") as f:
            json.dump(tk_dict, f, indent=2)
    return tk_dict
def insert_ordered(value, array):
    """
    This will insert the value into the array, keeping it sorted, and returning the
    index where it was inserted

    Duplicates are placed after existing equal values, matching the original
    linear scan ("last element that value is larger than or equal to").

    :param value: item to insert (comparable with the list elements)
    :param array: sorted list, modified in place
    :return: the index at which *value* was inserted
    """
    import bisect  # local import keeps this snippet self-contained

    # bisect_right gives the same tie-breaking as the original O(n) scan,
    # but finds the position in O(log n).
    index = bisect.bisect_right(array, value)
    array.insert(index, value)
    return index
def is_normal_meter(feet):
    """
    Checks if the meter is normal (first syllables are at the beginning of a foot)

    :param feet: a list of feet ["vesi", "vanhin" "voite", "hista"]
    :return: True or False
    """
    for foot in feet:
        last_index = len(foot) - 1
        for index, syllable in enumerate(foot):
            # A '*'-marked first syllable anywhere but the final slot of the
            # foot breaks the normal meter.
            if "*" in syllable and index != last_index:
                return False
    return True
def compute_linenumber_stats(file_name):
    """ Collect data on number of total lines in the current file

    :param file_name: path of the file to scan
    :return: total number of lines in the file
    """
    # `with` guarantees the handle is closed (the original never closed it),
    # and the generator avoids shadowing the builtin `input`.
    with open(file_name) as source:
        return sum(1 for _ in source)
def find_following_duplicates(array):
    """
    Find the duplicates that are following themselves.

    Parameters
    ----------
    array : list or ndarray
        A list containing duplicates.

    Returns
    ----------
    uniques : list
        A list containing True for each unique and False for following duplicates.

    Example
    ----------
    find_following_duplicates(["a", "a", "b", "a"]) -> [True, False, True, True]
    """
    # The first element is always "unique"; every later element is unique
    # only if it differs from its immediate predecessor.
    return [bool(i == 0 or array[i] != array[i - 1]) for i in range(len(array))]
def model_train(model, X, y):
    """Fit *model* on (X, y) and return the fitted model.

    The model is echoed to stdout first so training runs are identifiable
    in the console log.
    """
    print(model)
    model.fit(X, y)
    return model
import argparse
def get_input_args():
    """
    Inputs are introduced through command line

    Args:
        None
    Inputs:
        --pedigreeFile       path to the PED file
        --resultsDirectory   save results to this directory
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--pedigreeFile', type=str, default="test_pedigree.ped",
                        help='path to the PED file')
    parser.add_argument('--resultsDirectory', type=str, default='results/',
                        help='save results to this directory')
    return parser.parse_args()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.