| content (string, 35 to 416k chars) | sha1 (string, 40 chars) | id (int64, 0 to 710k) |
|---|---|---|
def replace_negative(l, default_value=0):
"""
Replaces all negative values with default_value
:param l: Original list
    :param default_value: The value to replace negative values with. Default is 0.
:return: Number of values replaced
"""
n_replaced = 0
for i in range(len(l)):
if l[i] < 0:
l[i] = default_value
n_replaced += 1
return n_replaced
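# A minimal usage sketch (illustrative, not part of the original snippet):
# the list is mutated in place and the count of replacements is returned.
demo = [1, -2, 3, -4]
assert replace_negative(demo) == 2
assert demo == [1, 0, 3, 0]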
|
431781a48a36a00329537b92b589cf223b945ca4
| 13,536
|
from typing import Union
import pathlib
import shutil
import subprocess
def git_commit(path: Union[str, pathlib.Path], error_message: str = 'parent directory is not a repository') -> str:
"""
Finds the current commit of a git repository.
Parameters
----------
path: str or pathlib
The path to the git repository.
error_message: str
Message returned when the above directory is not a git repository.
Returns
-------
out: str
git commit
"""
path_to_git = shutil.which('git')
if not path_to_git:
        return 'git not present'
path = pathlib.Path(path)
try:
git_commit = subprocess.check_output([path_to_git, 'rev-parse', 'HEAD'], cwd=path, text=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
git_commit = error_message
else:
git_commit = git_commit.rstrip()
return git_commit
|
94839ba6e4ff6c2a23078468b1a920090470a7e8
| 13,537
|
def swap_64b(val):
""" Byte swap val (unsigned int64)
:param val: 64b value
:return: swapped 64b """
tmp = ((val << 8) & 0xFF00FF00FF00FF00) | ((val >> 8) & 0x00FF00FF00FF00FF)
tmp = ((tmp << 16) & 0xFFFF0000FFFF0000) | ((tmp >> 16) & 0x0000FFFF0000FFFF)
tmp = (tmp << 32) | (tmp >> 32)
return tmp & 0xFFFFFFFFFFFFFFFF
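# A worked example (illustrative): the byte order of a 64-bit value is fully reversed.
assert swap_64b(0x0123456789ABCDEF) == 0xEFCDAB8967452301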
|
5b7fa81d352e94db03f8089f50fedd0ab7a3903e
| 13,538
|
def getClassAttendance(moduleCode):
"""
Will take in the module code and use the code to open the file of the specified module and read it to created
the list of names, presents, absents and excuses. To be return for future use
:param moduleCode:
:return: (list)
"""
classData = open(f"{moduleCode}.txt", 'r')
studentNameList = []
presentList = []
absentList = []
excuseList = []
while True:
line = classData.readline().strip()
if line == "":
break
lineData = line.split(',')
studentNameList.append(lineData[0])
presentList.append(int(lineData[1]))
absentList.append(int(lineData[2]))
excuseList.append(int(lineData[3]))
classData.close()
# print(presentList)
# print(absentList)
# print(excuseList)
return studentNameList, presentList, absentList, excuseList
|
a618b90bd6be84fbe23c28ab8d71a57c108b76dc
| 13,540
|
def layer_function2(x):
""" lambda function """
return x[0] + x[1]
|
62a0ab30432fecad48afa18ecaf7d946ffb0d83b
| 13,541
|
import yaml
def get_config(config):
"""Loads a yaml configuration file."""
with open(config, 'r') as stream:
return yaml.load(stream, Loader=yaml.FullLoader)
|
58937acf5984b08193c78877fbb94c07c6d779df
| 13,542
|
def is_rgb(image):
"""
Return True if image is RGB (ie 3 channels) for pixels in WxH grid
"""
return len(image.shape) == 3 and image.shape[-1] == 3
|
f0452fc5f9b6eb69917f8b5de76329eb2e4f03b2
| 13,543
|
def get_method_args_as_dict(locals, should_remove_none=False):
"""
Use inside a class method to get all method arguments as a dictionary.\n
Must pass locals() in at top of method\n
"""
locals.pop("self")
if should_remove_none:
for k, v in locals.items():
if v is None:
locals.pop(k)
return locals
|
e9903eb6216b81cb95fbeecaada8f3e34db864ee
| 13,545
|
def _attr_list_to_dict(attr_list):
"""
_attr_list_to_dict -- parse a string like: host:ami, ..., host:ami into a
dictionary of the form:
{
host: ami
host: ami
}
if the string is in the form "ami" then parse to format
{
default: ami
}
raises ValueError if list can't be parsed
"""
attr_dict = {}
for host_attr in attr_list.split(","):
host_attr = host_attr.split(":")
if len(host_attr) == 1:
attr_dict["default"] = host_attr[0].strip()
elif len(host_attr) == 2:
attr_dict[host_attr[0].strip()] = host_attr[1].strip()
else:
raise ValueError("Can't split '%s' into suitable host"
" attribute pair" % host_attr)
    return attr_dict
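# Usage sketch (illustrative values): both accepted input forms.
assert _attr_list_to_dict("host1:ami-1, host2:ami-2") == {"host1": "ami-1", "host2": "ami-2"}
assert _attr_list_to_dict("ami-3") == {"default": "ami-3"}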
|
378b1a55d908750eea2b457e35abce0a17364a41
| 13,547
|
def get_job_locations_from_db(loc_list):
"""Get the number of jobs by country as a dictionary."""
countries = {}
for loc in loc_list:
country = loc.split()[-1].lower()
if country == 'usa' or country == 'states' or country == 'us':
countries.setdefault('usa', 0)
countries['usa'] += 1
elif country == 'uk' or country == 'kingdom' or country == 'england':
countries.setdefault('uk', 0)
countries['uk'] += 1
else:
countries.setdefault(country, 0)
countries[country] += 1
return countries
|
dd394bc6889be4e87f55bc523e4a9bc5d4ed617a
| 13,548
|
def humansize(nbytes):
"""
Translates a size in bytes to a human readable size format
:param int nbytes: integer of size of torrent file
:return: size in bytes in a human readable format
:rtype: str
"""
suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
i = 0
while nbytes >= 1024 and i < len(suffixes)-1:
nbytes /= 1024.
i += 1
f = ('%.2f' % nbytes).rstrip('0').rstrip('.')
return '{} {}'.format(f, suffixes[i])
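# Usage sketch (illustrative): trailing zeros and the decimal point are stripped.
assert humansize(1536) == '1.5 KB'
assert humansize(1048576) == '1 MB'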
|
98390c0f5c471b501caf2fa2bba1e593c8a17eb6
| 13,549
|
def pad_sents(sents, padding_token_index):
"""
Pad the sents(in word index form) into same length so they can form a matrix
>>> sents = [[1,2,3], [1,2], [1,2,3,4,5]]
>>> pad_sents(sents, padding_token_index = -1)
[[1, 2, 3, -1, -1], [1, 2, -1, -1, -1], [1, 2, 3, 4, 5]]
"""
max_len_sent = max(sents,
key = lambda sent: len(sent))
max_len = len(max_len_sent)
get_padding = lambda sent: [padding_token_index] * (max_len - len(sent))
padded_sents = [(sent + get_padding(sent))
for sent in sents]
return padded_sents
|
0063d8716f7081644e4353de662d58f0dc04e8fe
| 13,550
|
import os
def file_extension_detect(filename):
"""Determine if file is notebook or RDF,
based on the file extension.
"""
_, extension = os.path.splitext(filename)
rdf_exts = ['.ttl', '.nt', '.jsonld', '.json']
nb_exts = ['.ipynb']
if extension in rdf_exts:
return 'RDF'
elif extension in nb_exts:
return 'notebook'
else:
return None
|
430cf1b12ae4bde6cc336f58f0a1fdc727369209
| 13,551
|
def clamp(minimum, n, maximum):
"""Return the nearest value to n, that's within minimum to maximum (incl)
"""
return max(minimum, min(n, maximum))
|
a3db191a733041196b8a3e0cfc83731e839a14aa
| 13,552
|
import sys
def move(start, dir):
"""
"""
if dir == "U":
mv = (-1, 0)
elif dir == "D":
mv = (1, 0)
elif dir == "L":
mv = (0, -1)
elif dir == "R":
mv = (0, 1)
else:
print("Direction needs to be U D L or R. Exiting")
sys.exit(1)
end = (start[0] + mv[0], start[1] + mv[1])
if end[0] < 0 or end[0] > 4:
return start
elif end[1] < 0 or end[1] > 4:
return start
else:
return end
|
7e40bc61eb3d765985207ad62d69683b0546321f
| 13,553
|
import operator
def normalize_data(data):
"""Normalizes a sequence of input data according to the heuristic
laid out in the module documentation.
"""
data = data or []
if isinstance(data, dict):
data = [{"key": key, "value": value} for key, value in data.items()]
data.sort(key=operator.itemgetter("key"))
elif isinstance(data, str):
data = [data]
elif not isinstance(data, list):
try:
data = list(data)
except TypeError:
data = [data]
return data
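# Usage sketch (illustrative): dicts become sorted key/value records,
# strings are wrapped, and other iterables are coerced to lists.
assert normalize_data({"b": 2, "a": 1}) == [{"key": "a", "value": 1}, {"key": "b", "value": 2}]
assert normalize_data("abc") == ["abc"]
assert normalize_data((1, 2)) == [1, 2]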
|
d1cacf020cd8df2762f747a85a41a758e1e12ba8
| 13,554
|
def get_vehicle_mass(vehicle_info):
"""
Get the mass of a carla vehicle (defaults to 1500kg)
:param vehicle_info: the vehicle info
:type vehicle_info: carla_ros_bridge.CarlaEgoVehicleInfo
:return: mass of a carla vehicle [kg]
:rtype: float64
"""
mass = 1500.0
if vehicle_info.mass:
mass = vehicle_info.mass
return mass
|
216f109e9f963ba6de92cf168055b1a7e516d777
| 13,555
|
def add_modules_to_metadata(modules, metadata):
"""
    modules is a list of lists of OTUs; metadata is a dictionary of dictionaries where outer dict keys
    are features, inner dict keys are metadata names and values are metadata values
"""
for module_, otus in enumerate(modules):
for otu in otus:
try:
metadata[str(otu)]['module'] = module_
except KeyError:
metadata[str(otu)] = dict()
metadata[str(otu)]['module'] = module_
return metadata
|
b1dcd3e179a01c11d5a847094ceef4240d2b899e
| 13,556
|
def longest_word(list_name):
"""Stores the number of rows. """
long_word = [0] * len(list_name) #Stores number of items in each sublist
i = 0
for column in list_name: #counts words in each sublist
l_word = 0
for word in column:
l_word += 1
long_word[i] = l_word
i += 1
return max(long_word)
|
932f250b50fc3c8161545e7c4741b255483923e5
| 13,557
|
def uvm_object_value_str(v):
"""
Function- uvm_object_value_str
Args:
v (object): Object to convert to string.
Returns:
str: Inst ID for `UVMObject`, otherwise uses str()
"""
if v is None:
return "<null>"
res = ""
if hasattr(v, 'get_inst_id'):
res = "{}".format(v.get_inst_id())
res = "@" + res
else:
res = str(v)
return res
|
e56bd12094923bf052b10dcb47c65a93a03142a2
| 13,558
|
import tqdm
import json
def download_all_dicts_to_file(filename, search, mode='w'):
""" Download data from elastic search server
:param filename: str, name of file to save data
:param search: elasticsearch search object to query
    :param mode: str, file write mode (w, a)
:return filename, str:
"""
print('Download Data Write to File')
print('ElasticSearch Download Scan Query: ', str(search.to_dict())[0:1000], ' ...')
generator = search.scan()
with open(filename, mode) as f:
for obj in tqdm.tqdm(generator):
json.dump(obj.to_dict(), f)
f.write('\n')
print('Updates Downloaded - File {0} written'.format(filename))
return filename
|
4abea20e5cd58fa05bd498e4f7462530a2b677fe
| 13,561
|
import os
def runcmd(cmd):
"""run command"""
pipe = os.popen('"' + '" "'.join(cmd) + '"', 'r')
std_out = pipe.read()
pipe.close()
return std_out.rstrip('\n')
|
45fd11b886bcc37fa927527c67eb4eaaeee5c60d
| 13,563
|
def _example_word(lang):
"""
Returns an example word for a given language
:param lang: str Language abbreviation
:return: A word. Should be one early in the vocab for that language
"""
return {
"de": "mann",
"es": "hombre"
}.get(lang)
|
e9d60f31cb0c5f2b1dd397e19b60a52a43fc41d2
| 13,564
|
def get_object_name(object):
""" Retrieves the name of object.
Parameters:
object (obj): Object to get name.
Returns:
str: Object name.
"""
if object.name[-5:-3] == '}.':
return object.name[:-4]
return object.name
|
fc695c8bf19817c3217bf6000358695c88de07aa
| 13,565
|
def create_nine_digit_product(num):
""" Create a nine digit string resulting from the concatenation of
the product from num and multipliers (1, 2, 3,).... Return 0 if string
cannot be length 9.
"""
result = ''
counter = 1
while len(result) < 9:
result += str(num * counter)
counter += 1
if len(result) > 9:
result = 0
return result
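# Usage sketch (illustrative): 9 -> '9' + '18' + '27' + '36' + '45' = '918273645';
# 123 -> '123' + '246' + '369' also has length 9.
assert create_nine_digit_product(9) == '918273645'
assert create_nine_digit_product(123) == '123246369'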
|
9c6765349edfa7e03dc8d2ffe7bf6a45155f3ad0
| 13,566
|
def peak_indices_to_times(time, picked_peaks):
"""
Converts peak indices to times.
Parameters
----------
time: ndarray
array of time, should match the indices.
picked_peaks: dict
dictionary containing list of indices of peak start, center, and end.
Returns
-----------
peak_features: list
list of lists containing times of peak start, center, and end.
"""
peak_features = []
for x in range(0, len(picked_peaks["Peak_indices"])):
rt_ind = picked_peaks["Peak_indices"][x]
start_ind = picked_peaks["Peak_start_indices"][x]
end_ind = picked_peaks["Peak_end_indices"][x]
retention_time = time[rt_ind]
start = time[start_ind]
end = time[end_ind]
peak_params = [start, retention_time, end]
peak_features.append(peak_params)
return peak_features
|
99aa76fbc399b2b99d4dc9c8d3e4b67e4ee2d3af
| 13,567
|
def es_annotation_doc():
"""
Minimal JSON document for an annotation as returned from Elasticsearch.
This contains only fields which can be assumed to exist on all annotations.
"""
return {
"_id": "annotation_id",
"_source": {
"authority": "hypothes.is",
"target": [{"source": "http://example.com/example.html"}],
"group": "__world__",
"shared": True,
},
}
|
085f8077d5f5cbf278a3b1be1734de081a55b769
| 13,568
|
def incmean(prevmean, n, x):
"""Calculate incremental mean"""
newmean = prevmean + int(round((x - prevmean) / n))
return newmean
|
4176a07d0ad8eb96d3dd5d8b2cb46b8aecd1da20
| 13,569
|
def evenFibSum(limit):
"""Sum even Fib numbers below 'limit'"""
sum = 0
a,b = 1,2
while b < limit:
if b % 2 == 0:
sum += b
a,b = b,a+b
return sum
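# Usage sketch (illustrative): the even Fibonacci numbers below 100 are 2, 8, 34.
assert evenFibSum(100) == 44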
|
cdf9cdb1cfb419713e794afff4d806945994692e
| 13,570
|
def get_n_first(file_names, checksums, N_first):
"""
Given a list of file_names and a list of checksums, returns the 'N_first'
    items from both lists as a list of (file_name, checksum) tuples.
:param file_names: list of file names
:param checksums: list of sha256 checksums
:param N_first: int or None. If None, all items are returned
:return: zipped N_first first items of file_names and checksums
"""
zipped = list(zip(file_names, checksums))
N_first = int(N_first) if N_first is not None else len(file_names)
zipped = zipped[:N_first]
return zipped
|
13a8157dcd55fa43b0cd71eb877abddc832ff143
| 13,571
|
def get_library(gi, lib_name, lib_desc="", lib_synopsis="", create=True):
"""
Get the id corresponding to given library, and create it if it doesn't exist yet
"""
print("Looking for lib '" + lib_name + "'")
libs = gi.libraries.get_libraries()
found_lib = None
for lib in libs:
if not found_lib and lib['name'] == lib_name and lib['deleted'] is False:
print("Found library '" + lib_name + "'")
found_lib = lib['id']
if not found_lib:
if create:
print("Did not find library '" + lib_name + "', creating it")
create = gi.libraries.create_library(lib_name, lib_desc, lib_synopsis)
found_lib = create['id']
else:
raise Exception("Did not find library '" + lib_name + "'")
return found_lib
|
b8f9c6fff4c0162185fc71c08de9ff7b03272d4b
| 13,572
|
def state_to_index(valuation, base):
"""
    Maps a state (a little-endian digit valuation in the given base) to a decimal integer.
"""
factor = 1
integer = 0
for i in range(len(valuation)):
integer += factor * valuation[i]
factor *= base
return integer
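# Usage sketch (illustrative): the valuation is read little-endian,
# so [1, 0, 2] in base 3 maps to 1*1 + 0*3 + 2*9 = 19.
assert state_to_index([1, 0, 2], 3) == 19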
|
c05deefd07d0faf2749489fbc6f37576e1433c8d
| 13,573
|
def get_lock_path_from_repo(git_repo):
"""
>>> app_module = {'git_repo': 'git@github.com:claranet/ghost.git'}
>>> get_lock_path_from_repo(app_module['git_repo'])
'/ghost/.mirrors/.locks/git@github.com:claranet/ghost.git'
>>> app_module = {'git_repo': ' git@github.com:claranet/spaces.git '}
>>> get_lock_path_from_repo(app_module['git_repo'])
'/ghost/.mirrors/.locks/git@github.com:claranet/spaces.git'
"""
return "/ghost/.mirrors/.locks/{remote}".format(remote=git_repo.strip())
|
b722e69dcaa5fd6a6299ee42ddaa7f14097a25a4
| 13,574
|
def get_unique_tasks(*task_groups):
"""
Compare input JSON objects containing tasks and return a unique set
    :param task_groups: lists of task objects
    :type task_groups: list
:rtype: list
"""
# Combine all sub-json objects into a single group
unique_tasks = []
# Get a list of tasks from the list of groups
for task_set in task_groups:
# Get a single task from the group of tasks we're currently working with
for task in task_set:
            # Append task to output if it's not already in there
            if task not in unique_tasks:
                unique_tasks.append(task)
            # If task is already in the supposedly unique set, it's not unique - remove it from the output
            else:
                unique_tasks.remove(task)
# Trash potentially giant input object to save memory
task_groups = None
return unique_tasks
|
c9eb32ea0a953e797ea28021c3a434941f7b05ff
| 13,575
|
def directorio_imagen(instance, filename):
"""Obtiene el directorio para guardar las imagenes de los platillos"""
# Obtenemos la extension del archivo para guardar la imagen solo con el id y nombre del platillo
extension = filename.rsplit('.', 1)[1]
return f"platillo/imagen/{instance.id}_{instance.nombre}.{extension}"
|
90db760562327a16858e1bf6419fb1ed6e9eeefa
| 13,576
|
def cicle_move(A):
"""
    Rotate the list left by one: the first element moves to the end and the
    elements between first and last shift one position toward the front.
    :param A: list to rotate in place
    :return: the rotated list
"""
tmp = A[0]
for i in range(len(A) - 1):
A[i] = A[i + 1]
A[len(A) - 1] = tmp
return A
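# Usage sketch (illustrative): the list is rotated left by one, in place.
assert cicle_move([1, 2, 3, 4]) == [2, 3, 4, 1]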
|
4948b863829d77a879ad0ec9745bb3a35e580026
| 13,577
|
def Words_Before_and_after(words, item, nbr):
"""
:param words: list of words
    :param item: index of the word of interest
    :param nbr: number of words wanted before and after the word of interest
    :return: list of [word, index] pairs before and after the word of interest
    """
    liste = [[words[item], item]]
    for n in range(nbr):
        try:
            liste.append([words[item + n + 1], item + n + 1])
        except IndexError:
            pass
        # guard against negative indices, which would silently wrap to the end of the list
        if item - (n + 1) >= 0:
            liste.insert(0, [words[item - (n + 1)], item - (n + 1)])
return liste
|
6dc874565a308cc642bf0510a3be56264c6d89e9
| 13,578
|
import yaml
def df_to_yaml(df):
""" conert DataFrame to yaml string """
d = df.to_dict(orient='records')
return yaml.dump(d)
|
285b4cad92e73fbaec19e631664bb474ecaf309f
| 13,579
|
def get_polar(d, Re=6371.):
""" Convert to polar coordinates """
th = d.grange / Re
r = d.height + Re
dop, sth, dth = d.dop, d.sth, d.dth
return th, r, dop, sth, dth
|
1d4710152b5dbbbfd09cdc5cbf934f3adb94deeb
| 13,580
|
def _is_new_prototype(caller):
"""Check if prototype is marked as new or was loaded from a saved one."""
return hasattr(caller.ndb._menutree, "olc_new")
|
1282db1ce6ae2e5f0bb570b036d7166a53287229
| 13,581
|
def grid(gdf):
"""
sampling the GNSS data by grids
Parameters
----------
gdf: geodataframe
geodataframe of all gnss data(population)
Returns
-------
list:
list of each grid which contains all related gnss data
"""
    # create a new column identifying the grid cell
    gdf['grid'] = gdf.apply(lambda x: (x['lon_id'], x['lat_id']), axis=1)
    # sort by longitude number first and then latitude number
    gdf = gdf.sort_values(by=['lon_id', 'lat_id'])
    # collect, per grid cell, all gnss data belonging to it
    # (starting from an empty list removes the need to filter out placeholder zeros)
    grid = []
    for grids in gdf['grid'].unique():
        grid_data = gdf[gdf['grid'].isin([grids])]
        grid.append(grid_data)
return grid
|
f613359a0061fd2975607044ffcaa8b25b53dcfc
| 13,584
|
def get_tasks_for_weekly_report(model_obj, company, date_from, date_to):
"""Отдает отфильтрованный кверисет с уникальными датой и описанием"""
return model_obj.objects.select_related(
'tag', 'employees').filter(
date__gte=date_from).filter(
date__lte=date_to).filter(
company=company).order_by('date').values(
'date', 'description').distinct()
|
3b46205d3c1bb5b5d472b350371561167aa2fb5b
| 13,585
|
def detectFallingSeq(points_list):
"""Find falling temperatures sequences in given list and stores beginnings and ends of them in list.
Always with format [seq1 start index, seq1 end index, seq2 start index, seq2 end index, ...]
:param points_list: list of pipe temperatures
:return: list of indexes
"""
falling = False
index_list = []
for i in range(2, (len(points_list) - 1)):
actual = points_list[i]['value']
prev = points_list[i - 1]['value']
prev_prev = points_list[i - 2]['value']
if float(actual) < float(prev) and float(actual) < float(prev_prev) and (not falling):
if (float(prev_prev) - float(actual)) >= 0.2: # Calibration to not detect falling air temperature
index_list.append(i - 2)
falling = True
elif float(actual) > float(prev) and float(actual) > float(prev_prev) and falling:
index_list.append(i)
falling = False
return index_list
|
e940497b72745af1669062373a6aca025ac0626c
| 13,586
|
def count_symbols(atoms, exclude=()):
"""Count symbols in atoms object, excluding a set of indices
Parameters:
atoms: Atoms object to be grouped
exclude: List of indices to be excluded from the counting
Returns:
Tuple of (symbols, symbolcount)
symbols: The unique symbols in the included list
symbolscount: Count of symbols in the included list
Example:
>>> from ase.build import bulk
>>> atoms = bulk('NaCl', crystalstructure='rocksalt', a=4.1, cubic=True)
>>> count_symbols(atoms)
(['Na', 'Cl'], {'Na': 4, 'Cl': 4})
>>> count_symbols(atoms, exclude=(1, 2, 3))
(['Na', 'Cl'], {'Na': 3, 'Cl': 2})
"""
symbols = []
symbolcount = {}
for m, symbol in enumerate(atoms.symbols):
if m in exclude:
continue
if symbol not in symbols:
symbols.append(symbol)
symbolcount[symbol] = 1
else:
symbolcount[symbol] += 1
return symbols, symbolcount
|
31f7c116f07788171828f0e116ef28a43eb0e313
| 13,587
|
import ntpath
def lookup_folder(event, filesystem):
"""Lookup the parent folder in the filesystem content."""
for dirent in filesystem[event.parent_inode]:
if dirent.type == 'd' and dirent.allocated:
return ntpath.join(dirent.path, event.name)
|
e18df4610bba9cf71e85fe0038a5daf798822bd3
| 13,588
|
def extract_user(ops, json_data):
""" Extract user from JSON data.
"""
if json_data is None:
return None
if 'required_posting_auths' not in ops or 'required_auths' not in ops:
return None
if ops['required_posting_auths'] is None or ops['required_auths'] is None:
return None
if len(ops['required_posting_auths']) > 0:
return ops['required_posting_auths'][0]
elif len(ops['required_auths']) > 0:
return ops['required_auths'][0]
else:
print("Cannot parse transaction, as user could not be determined!")
return None
|
0722afa2f174628670658fbbc0f1a0ab80cd36fe
| 13,591
|
def ParseDeviceBlocked(blocked, enable_device):
"""Returns the correct enabled state enum based on args."""
if enable_device is None and blocked is None:
return None
elif enable_device is None or blocked is None:
# In this case there are no default arguments so we should use or.
return blocked is True or enable_device is False
else:
# By default blocked is false, so any blocked=True value should override
# the default.
return not enable_device or blocked
|
4554ca5e33194fea77516e910516f3937cd61f3a
| 13,592
|
import re
def getNormform(synonym):
"""
"""
a = re.sub("[^a-z0-9]", "", synonym[0:255].lower())
return " ".join(a.split())
|
36d60ad20ff8adee5e4d6f009119026db8a1b874
| 13,593
|
def hidden_digits(time_string):
""" converts time with unknown digits to max time with respect to hours and minutes given starting digits
Args:
time_string (str): a string representing an unknown time
Returns:
max_time_string (str): a string with filled in missing digits for max time
"""
mapper = {'first': lambda x,y: '2' if x == '?' and y == '?' else '2' if y < '4' and x == '?' else '1' if x == '?' else x,
'second': lambda x,y: '9' if x < '2' and y == '?' else '3' if y == '?' else y,
'third': lambda x: '5' if x == '?' else x,
'fourth': lambda x: '9' if x == '?' else x}
first = mapper['first'](time_string[0], time_string[1])
second = mapper['second'](time_string[0], time_string[1])
third = mapper['third'](time_string[3])
fourth = mapper['fourth'](time_string[4])
return ''.join([first, second, ':', third, fourth])
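# Usage sketch (illustrative): every unknown digit yields the latest possible time.
assert hidden_digits("??:??") == "23:59"
assert hidden_digits("1?:?4") == "19:54"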
|
15fdefb340ab5fe66b562f0fa642210db138c590
| 13,596
|
def action(method):
""" Marks an agent instance method to be registered as an action
:param method:
:return:
"""
method._register_action = True
return method
|
15f32f6acd7e31e50ef3b61d0de04e029e65249a
| 13,597
|
import re
def filter_illegal_chars(dirty, repl=''):
"""Remove character which allow to inject code which would be run by
displaying on the page
"""
illegal_chars_re = re.compile('[<>="]')
    return illegal_chars_re.sub(repl, dirty)
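# Usage sketch (illustrative): angle brackets, '=' and double quotes are stripped.
assert filter_illegal_chars('<script>alert("x")</script>') == 'scriptalert(x)/script'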
|
d389df644959850ccd4de6dd7ab22f0276b47435
| 13,598
|
def exp_np_array_list(lst, power):
"""
Raise elements of a list (of np.arrays - elementwise) to power
@param lst: list of np.arrays
@param power: scalar, to which power the elements should be raised
@rtype list of np.arrays
"""
result = []
for nparr in lst:
result.append(nparr ** power)
return result
|
8c8b80c217f4a381e8514c7fff06dc98114087d9
| 13,599
|
import os
import re
def get_ch_names(path, dir):
"""获取文件中文名称,如无则返回文件名"""
file_ch_names = []
reg = r"new Env\(\"[\S]+?\"\)"
ch_name = False
for file in dir:
try:
if os.path.isdir(f"{path}/{file}"):
file_ch_names.append(file)
elif file.endswith(".js") \
and file != "jdCookie.js" \
and file != "getJDCookie.js" \
and file != "JD_extra_cookie.js" \
and "ShareCode" not in file:
with open(f"{path}/{file}", "r", encoding="utf-8") as f:
lines = f.readlines()
for line in lines:
if "new Env" in line:
line = line.replace('"', "'")
res = re.findall(reg, line)
if len(res) != 0:
res = res[0].split("'")[-2]
file_ch_names.append(f"{res}--->{file}")
ch_name = True
break
if not ch_name:
file_ch_names.append(f"{file}--->{file}")
ch_name = False
else:
continue
        except Exception:
continue
return file_ch_names
|
bae20e2a03b188aeb1f6dae33f6c3a66d26ca48e
| 13,600
|
def check_if_significant(data, thresh=1e-5):
"""
    Trim data based on a variance threshold.
    Args:
        data (pandas DataFrame): data to be trimmed.
        thresh (float): variance threshold. Default = 1e-5.
    Returns:
        pandas DataFrame: data after trimming.
        ndarray: corresponding index values of the retained columns.
"""
data_out = data.drop(data.var()[data.var()<thresh].index.values, axis=1)
indices = data.var()[data.var() > thresh].index.values
return data_out, indices
|
62494c97559632f2d8d3e542320cb72f3da90eab
| 13,601
|
import datetime
import calendar
def getWeekday(year, month, day):
"""
input: integers year, month, day
output: name of the weekday on that date as a string
"""
date = datetime.date(year, month, day)
return calendar.day_name[date.weekday()]
|
ca5164b6d7243033f57c3a803301ff3c3ec13d29
| 13,603
|
def rivers_with_station(stations):
"""Creates a list of rivers with at least 1 station"""
rivers = []
for i in stations:
        if i.name is not None:
if i.river not in rivers:
rivers.append(i.river)
return rivers
|
f935e97dca96a94d7d3e839b9b5178b701799e08
| 13,605
|
def config_section_data():
"""Produce the default configuration section for app.config,
when called by `resilient-circuits config [-c|-u]`
"""
config_data = u"""[fn_create_webex_meeting]
# this may also be the conference id for the developer sandbox
webex_email=
webex_password=
webex_site=
webex_site_url=
webex_timezone=
# use these as an alternative to webex_site (i.e. developer sandbox)
#webex_site_id=
#webex_partner_id=
# Optional proxy parameters
#http_proxy=
#https_proxy=
"""
return config_data
|
5e5a1ca7e3402ab177c5805404beb8baed318aaf
| 13,607
|
from typing import List
import inspect
def get_function_parameters_list(func) -> List[str]:
"""
Get parameter list of function `func`.
Parameters
----------
func : Callable
A function to get parameter list.
Returns
-------
List[str]
Parameter list
Examples
--------
>>> def test_func(a, b) -> int:
... return a+1
>>> get_function_parameters_list(test_func)
['a', 'b']
"""
return inspect.getfullargspec(func).args
|
bff2ec37a5564b87e48abf964d0d42a55f809a16
| 13,608
|
def getSectionText(cd, sectionLabel=None ):
"""
"""
markups = cd.getSectionMarkups(sectionLabel, returnSentenceNumbers=False)
txt = " ".join([m.getText() for m in markups])
return txt
|
d4bcaa109f5774bd3f5a9abb5a11829061b0ca8f
| 13,609
|
def dp_make_weight(egg_weights, target_weight, memo={}):
"""
Find number of eggs to bring back, using the smallest number of eggs. Assumes there is
an infinite supply of eggs of each weight, and there is always a egg of value 1.
Parameters:
egg_weights - tuple of integers, available egg weights sorted from smallest to largest
value (1 = d1 < d2 < ... < dk)
target_weight - int, amount of weight we want to find eggs to fit
memo - dictionary, OPTIONAL parameter for memoization (you may not need to use this
parameter depending on your implementation)
Returns: int, smallest number of eggs needed to make target weight
"""
# This will be the key used to find answers in the memo
subproblem = (egg_weights, target_weight)
# If we've already stored this answer in the memo, return it
if subproblem in memo:
return memo[subproblem]
# If no eggs are left or no space is left on ship, there's nothing left to do
if egg_weights == () or target_weight == 0:
return 0
# If the next heaviest egg is too heavy to fit, consider subset of lighter eggs
elif egg_weights[-1] > target_weight:
result = dp_make_weight(egg_weights[:-1], target_weight, memo)
else:
# Find the minimum number of eggs by testing both taking heaviest egg and not
# taking heaviest egg.
this_egg = egg_weights[-1]
num_eggs_with_this_egg = 1 + dp_make_weight(
egg_weights,
target_weight - this_egg,
memo)
num_eggs_without_this_egg = dp_make_weight(egg_weights[:-1], target_weight, memo)
if num_eggs_without_this_egg != 0:
result = min(num_eggs_with_this_egg, num_eggs_without_this_egg)
else:
result = num_eggs_with_this_egg
# Store this answer in the memo for future use.
memo[subproblem] = result
return result
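# Usage sketch (illustrative): with weights (1, 5, 10, 25), the minimum number
# of eggs totalling 99 is 9 (3*25 + 2*10 + 4*1).
assert dp_make_weight((1, 5, 10, 25), 99) == 9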
|
8546ab2dd0394d2864c23a47ea14614df83ec2f7
| 13,612
|
def get_impact_dates(previous_model, updated_model, impact_date=None,
start=None, end=None, periods=None):
"""
Compute start/end periods and an index, often for impacts of data updates
Parameters
----------
previous_model : MLEModel
Model used to compute default start/end periods if None are given.
In the case of computing impacts of data updates, this would be the
model estimated with the previous dataset. Otherwise, can be the same
as `updated_model`.
updated_model : MLEModel
Model used to compute the index. In the case of computing impacts of
data updates, this would be the model estimated with the updated
dataset. Otherwise, can be the same as `previous_model`.
impact_date : {int, str, datetime}, optional
Specific individual impact date. Cannot be used in combination with
`start`, `end`, or `periods`.
start : {int, str, datetime}, optional
Starting point of the impact dates. If given, one of `end` or `periods`
must also be given. If a negative integer, will be computed relative to
the dates in the `updated_model` index. Cannot be used in combination
with `impact_date`.
end : {int, str, datetime}, optional
Ending point of the impact dates. If given, one of `start` or `periods`
must also be given. If a negative integer, will be computed relative to
the dates in the `updated_model` index. Cannot be used in combination
with `impact_date`.
periods : int, optional
Number of impact date periods. If given, one of `start` or `end`
must also be given. Cannot be used in combination with `impact_date`.
Returns
-------
start : int
Integer location of the first included impact dates.
end : int
Integer location of the last included impact dates (i.e. this integer
location is included in the returned `index`).
index : pd.Index
Index associated with `start` and `end`, as computed from the
`updated_model`'s index.
Notes
-----
This function is typically used as a helper for standardizing start and
end periods for a date range where the most sensible default values are
based on some initial dataset (here contained in the `previous_model`),
while index-related operations (especially relative start/end dates given
via negative integers) are most sensibly computed from an updated dataset
(here contained in the `updated_model`).
"""
# There doesn't seem to be any universal default that both (a) make
# sense for all data update combinations, and (b) work with both
# time-invariant and time-varying models. So we require that the user
# specify exactly two of start, end, periods.
if impact_date is not None:
if not (start is None and end is None and periods is None):
raise ValueError('Cannot use the `impact_date` argument in'
' combination with `start`, `end`, or'
' `periods`.')
start = impact_date
periods = 1
if start is None and end is None and periods is None:
start = previous_model.nobs - 1
end = previous_model.nobs - 1
if int(start is None) + int(end is None) + int(periods is None) != 1:
raise ValueError('Of the three parameters: start, end, and'
' periods, exactly two must be specified')
# If we have the `periods` object, we need to convert `start`/`end` to
# integers so that we can compute the other one. That's because
# _get_prediction_index doesn't support a `periods` argument
elif start is not None and periods is not None:
start, _, _, _ = updated_model._get_prediction_index(start, start)
end = start + (periods - 1)
elif end is not None and periods is not None:
_, end, _, _ = updated_model._get_prediction_index(end, end)
start = end - (periods - 1)
elif start is not None and end is not None:
pass
# Get the integer-based start, end and the prediction index
start, end, out_of_sample, prediction_index = (
updated_model._get_prediction_index(start, end))
end = end + out_of_sample
return start, end, prediction_index
|
b2e6966e0fe2213e504e913d8cd64dbe84fa815b
| 13,613
|
def unwrap_value(metadata, attr, default=None):
"""Gets a value like dict.get() with unwrapping it."""
data = metadata.get(attr)
if data is None:
return default
return data[0]
|
fee5f12f0fba86e221fe722b5829a50706ccd5dc
| 13,614
|
import os
def parse_row(row, csv_path):
"""
    Parse a row of the CSV master file.
    :param row: a row of the CSV master file
    :param csv_path: base path of the CSV inputs
    :return: the parsed fields of the row
"""
table = row["table"]
inputs_dir = os.path.join(csv_path, row["path"])
    project_flag = bool(int(row["project_input"]))
    project_is_tx = bool(int(row["project_is_tx"]))
cols_to_exclude_str = str(row["cols_to_exclude_str"])
custom_method = str(row["custom_method"])
subscenario_type = row["subscenario_type"]
filename = row["filename"]
return table, inputs_dir, project_flag, project_is_tx, \
cols_to_exclude_str, custom_method, subscenario_type, filename
|
afdabc7dd28f15d7a50e4e81af3434dbed588c80
| 13,615
|
def getTimestamp(self):
"""Get timestamp (sec)"""
return self.seconds + self.picoseconds*1e-12
|
0c913fdcd9a3ce07e31a416208a8488bd41cea81
| 13,616
|
def point_in_polygon(points, x, y):
""" Ray casting algorithm.
Determines how many times a horizontal ray starting from the point
intersects with the sides of the polygon.
If it is an even number of times, the point is outside, if odd, inside.
The algorithm does not always report correctly when the point is very close to the boundary.
The polygon is passed as a list of (x,y)-tuples.
"""
odd = False
n = len(points)
for i in range(n):
        j = (i + 1) % n  # index of the next vertex, wrapping around
x0, y0 = points[i][0], points[i][1]
x1, y1 = points[j][0], points[j][1]
if (y0 < y and y1 >= y) or (y1 < y and y0 >= y):
if x0 + (y-y0) / (y1-y0) * (x1-x0) < x:
odd = not odd
return odd
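# Usage sketch (illustrative): a simple check with an axis-aligned square.
square = [(0, 0), (10, 0), (10, 10), (0, 10)]
assert point_in_polygon(square, 5, 5) is True
assert point_in_polygon(square, 15, 5) is False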
|
26ab72c6b545f94d41b0d1228dd312d590ad208b
| 13,619
|
def task3(ob):
"""
    Function that takes an object as input and returns its docs (__doc__).
Input: name of object (e.g. int)
Output: string ready to write.
"""
return ob.__doc__
|
859996692629a61664acf39309741020192f3da5
| 13,620
|
def get_finger_joint_limit_state(x,m):
"""Gets the joint limit state of the finger from a hybrid state"""
return m[3:6]
|
90fa8e3dce6c9f9dc08e3e7c2fd9e447690202c1
| 13,622
|
def test_id(id):
"""Convert a test id in JSON into an immutable object that
can be used as a dictionary key"""
if isinstance(id, list):
return tuple(id)
else:
return id
|
11e18a57648cb09f680751d1596193020523e5e1
| 13,623
|
import re
def MakeHeaderToken(filename):
"""Generates a header guard token.
Args:
filename: the name of the header file
Returns:
the generated header guard token.
"""
return re.sub('[^A-Z0-9_]', '_', filename.upper()) + '__'
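# Usage sketch (illustrative): path separators and dots collapse to underscores.
assert MakeHeaderToken('foo/bar.h') == 'FOO_BAR_H__'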
|
d29f756e30c3214aac174302175c52ca28cad6cb
| 13,624
|
def check_byte(b):
"""
Clamp the supplied value to an integer between 0 and 255 inclusive
:param b:
A number
:return:
Integer representation of the number, limited to be between 0 and 255
"""
i = int(b)
if i < 0:
i = 0
elif i > 255:
i = 255
return i
|
374e0ffbe1d0baa80c56cafd3650fba8441c5ea0
| 13,625
|
import numpy
def get_signal_to_fluctuation_noise_ratio(signal_array, tfn_array,
roi_size=10):
""" The SFNR image is is obtained by dividing, voxel by voxel,
the mean fMRI signal image by the temporal fluctuation image.
A 21 x 21 voxel ROI, placed in the center of the image, is created.
The average SFNR across these voxels is the SFNR summary value.
Parameters
----------
signal_array: array [X,Y,Z]
the fmri signal.
tfn_array: array [X,Y,Z]
the temporal fluctuation noise array.
roi_size: int (default 10)
the size of the central roi used to get the summary indice.
Returns
-------
sfnr_array: array [X,Y,Z]
the signal to fluctuation noise ratio array.
sfnr_summary: float
the signal to fluctuation noise ratio average on a central roi.
"""
# Compute the signal to fluctuation noise ratio
sfnr_array = signal_array / (tfn_array + numpy.finfo(float).eps)
    # Create a central roi (the shape is a tuple, so compute an integer center per axis)
    center = [dim // 2 for dim in sfnr_array.shape]
    roi = sfnr_array[center[0] - roi_size: center[0] + roi_size,
                     center[1] - roi_size: center[1] + roi_size,
                     center[2]]
# Compute the signal to fluctuation noise ratio summary
sfnr_summary = numpy.average(roi)
return sfnr_array, sfnr_summary
|
4240adee5062b7a1302f010c92b91ed272287df7
| 13,627
|
def insert(rcd, insert_at_junctions, genotype):
"""
Given the genotype (ie the junction that was chosen), returns the corresponding insert
"""
junctions = ["x", "y", "z"]
if genotype[1] != "-":
j = junctions.index(genotype[1])
return insert_at_junctions[j]
else:
return "-"
|
1bf3d7e8bb84659c3992e55fc51490c19701cff0
| 13,628
|
def clone_model(model, **kwargs):
"""Clone an arbitrary sqlalchemy model object without its primary key values."""
table = model.__table__
non_pk_columns = [k for k in table.columns.keys() if k not in table.primary_key]
data = {c: getattr(model, c) for c in non_pk_columns}
data.update(kwargs)
clone = model.__class__(**data)
return clone
|
5aa3efe0d508390a7f4ec6f22c0d4ffbfe3f04b5
| 13,629
|
def _convert_labels_for_svm(y):
"""
Convert labels from {0, 1} to {-1, 1}
"""
return 2.*y - 1.0
|
eca685bea6fd991245a299999fcbe31cd3b1a9ad
| 13,632
|
import numpy
def rgb2hslv(r:numpy.ndarray, g:numpy.ndarray, b:numpy.ndarray):
"""
Convert RGB to HSLV values
Parameters
----------
r, g, b : ndarray
Arrays with red, green, blue channel values (any dims, must match!)
Returns
-------
(h, sl, l, sv, v) : tuple
Hue, saturation, lightness, and value arrays
"""
if isinstance(r, list):
r = numpy.asarray(r)
if isinstance(g, list):
g = numpy.asarray(g)
if isinstance(b, list):
b = numpy.asarray(b)
if r.shape != g.shape or r.shape != b.shape:
raise ValueError('Invalid shape/dims.')
if r.dtype != g.dtype or r.dtype != b.dtype:
raise ValueError('Invalid datatype combination.')
rm = numpy.logical_and(r >= g, r >= b)
gm = numpy.logical_and(g > r, g >= b)
bm = numpy.logical_and(b > r, b > g)
if r.dtype != numpy.float32 and r.dtype != numpy.float64:
f = (1.0 / 255.0)
r = f * r.astype(numpy.float64)
g = f * g.astype(numpy.float64)
b = f * b.astype(numpy.float64)
rr = r[rm]
rg = r[gm]
rb = r[bm]
gr = g[rm]
gg = g[gm]
gb = g[bm]
br = b[rm]
bg = b[gm]
bb = b[bm]
h = numpy.zeros(r.size).reshape(r.shape)
mx = h.copy()
mn = h.copy()
mx[rm] = rr
mx[gm] = gg
mx[bm] = bb
mn[rm] = numpy.minimum(gr, br)
mn[gm] = numpy.minimum(rg, bg)
mn[bm] = numpy.minimum(rb, gb)
mxmn = (mx == mn)
h[rm] = numpy.divide(gr - br, numpy.maximum(0.0001, rr - mn[rm]))
h[gm] = 2.0 + numpy.divide(bg - rg, numpy.maximum(0.0001, gg - mn[gm]))
h[bm] = 4.0 + numpy.divide(rb - gb, numpy.maximum(0.0001, bb - mn[bm]))
h[mxmn] = 0.0
h[h<0.0] = h[h<0.0] + 6.0
h /= 6.0
l = 0.5 * (mx + mn)
sl = numpy.divide(mx - mn, numpy.maximum(0.0001, 1.0 - numpy.abs(2.0 * l - 1.0)))
sl[mx==0] = 0.0
sl[mn==1] = 0.0
sv = numpy.divide(mx - mn, numpy.maximum(0.0001, mx))
sv[mx==0] = 0.0
return (h, sl, l, sv, mx)
|
f25148f97139315cc7e6d49fa4eacdef286c05b2
| 13,633
|
import math
def chunks(l, n):
"""Divide l into n approximately-even chunks."""
chunksize = int(math.ceil(len(l) / n))
return [l[i:i + chunksize] for i in range(0, len(l), chunksize)]
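# Usage sketch (illustrative): 10 items in 3 chunks of ceil(10/3) = 4 gives sizes 4, 4, 2.
assert chunks(list(range(10)), 3) == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]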
|
c7b395dec7939b863097b3da9cdd49fbe2a47740
| 13,634
|
def calcPptm(freq, T):
""" payment time to maturity """
return freq * T
|
82c9935494ba32e9f990bdcd87ff86dc34ce7232
| 13,636
|
import re
def isXML(file):
"""Return true if the file has the .xml extension."""
    return re.search(r"\.xml$", file) is not None
|
7fcfbb105a59f7ed6b14aa8aa183aae3fdbe082d
| 13,637
|
def abs(n):
"""
    Return the absolute value of n.
example:
>>> abs(1)
1
>>> abs(-1)
1
>>> abs(0)
0
"""
return n if n>=0 else (-n)
|
0e2c42526deca4658e65cbe6c9954e88cb5d9ff6
| 13,638
|
def add1(arr1, arr2):
"""
This version is hard-coded to accept 2 arrays only.
"""
output = []
for inner_list1, inner_list2 in zip(arr1, arr2):
inner_output = []
for num1, num2 in zip(inner_list1, inner_list2):
inner_output.append(num1 + num2)
output.append(inner_output)
return output
|
7d90bb432f77c34e743282e391c4ffe38623d659
| 13,639
|
def aggPosition(x):
"""Aggregate position data inside a segment
Args:
x: Position values in a segment
Returns:
Aggregated position (single value)
"""
return x.mean()
|
e9305d26f05710cc467a7fa9fd7b87737b8aa915
| 13,641
|
import functools
def wraps(wrapped, wrapper):
"""Contionally copies all the attributes a Wapi function can define"""
assigned = ('__module__', '__name__', '__doc__')
conditionally_assigned = ('_required_parameters_',
'_optional_parameters_',
'_read_only_',
'_write_only_',
'_private_',
)
for attr in conditionally_assigned:
if hasattr(wrapped, attr):
assigned += (attr, )
return functools.wraps(wrapped, assigned=assigned)(wrapper)
|
0ffeebc62b9c5569ff991fcd09cfbdb425d9f267
| 13,642
|
from typing import Tuple
def empty_handler() -> Tuple[()]:
""" A stub function that represents a handler that does nothing
"""
return ()
|
1450ea04fefc4ad432e5d66a765bda0f5239b002
| 13,643
|
import torch
from typing import Tuple
from typing import Union
import warnings
def data_parallel(raw_model: torch.nn.Module, *args, **kwargs) -> Tuple[Union[torch.nn.Module, torch.nn.parallel.DataParallel], bool]:
"""
Make a `torch.nn.Module` data parallel
- Parameters:
- raw_model: A target `torch.nn.Module`
    - Returns: A `tuple` of either a data paralleled `torch.nn.parallel.DataParallel` model if CUDA is available or the raw model if not, and a `bool` flag of whether the model was data paralleled successfully.
"""
if torch.cuda.is_available():
model = torch.nn.parallel.DataParallel(raw_model, *args, **kwargs)
return model, True
else:
warnings.warn(f"[Device Warning]: CUDA is not available, unable to use multi-GPUs.", ResourceWarning)
return raw_model, False
|
5b98f0e7c67ac067aba9a9c5202cceded91827ac
| 13,644
|
def collide_circle(left, right):
"""detect collision between two sprites using circles
pygame.sprite.collide_circle(left, right): return bool
Tests for collision between two sprites by testing whether two circles
centered on the sprites overlap. If the sprites have a "radius" attribute,
then that radius is used to create the circle; otherwise, a circle is
created that is big enough to completely enclose the sprite's rect as
given by the "rect" attribute. This function is intended to be passed as
a collided callback function to the *collide functions. Sprites must have a
"rect" and an optional "radius" attribute.
New in pygame 1.8.0
"""
xdistance = left.rect.centerx - right.rect.centerx
ydistance = left.rect.centery - right.rect.centery
distancesquared = xdistance ** 2 + ydistance ** 2
if hasattr(left, 'radius'):
leftradius = left.radius
else:
leftrect = left.rect
# approximating the radius of a square by using half of the diagonal,
# might give false positives (especially if its a long small rect)
leftradius = 0.5 * ((leftrect.width ** 2 + leftrect.height ** 2) ** 0.5)
# store the radius on the sprite for next time
setattr(left, 'radius', leftradius)
if hasattr(right, 'radius'):
rightradius = right.radius
else:
rightrect = right.rect
# approximating the radius of a square by using half of the diagonal
# might give false positives (especially if its a long small rect)
rightradius = 0.5 * ((rightrect.width ** 2 + rightrect.height ** 2) ** 0.5)
# store the radius on the sprite for next time
setattr(right, 'radius', rightradius)
return distancesquared <= (leftradius + rightradius) ** 2
|
3e55b7b854bcc3032e93cad79f00b28b3f14715d
| 13,645
|
from typing import Optional
from typing import Dict
import os
import sys
def _prepare_build_environment(cross_lib: Optional[str]) -> Dict[str, str]:
"""Prepares environment variables to use when executing cargo build."""
# Make sure that if pythonXX-sys is used, it builds against the current
# executing python interpreter.
bindir = os.path.dirname(sys.executable)
env = os.environ.copy()
env.update(
{
# disables rust's pkg-config seeking for specified packages,
# which causes pythonXX-sys to fall back to detecting the
# interpreter from the path.
"PATH": os.path.join(bindir, os.environ.get("PATH", "")),
"PYTHON_SYS_EXECUTABLE": os.environ.get(
"PYTHON_SYS_EXECUTABLE", sys.executable
),
"PYO3_PYTHON": os.environ.get("PYO3_PYTHON", sys.executable),
}
)
if cross_lib:
env.setdefault("PYO3_CROSS_LIB_DIR", cross_lib)
return env
|
ef861b68dbdbef382afa03358f010ae281746943
| 13,648
|
def get_visibilty(item_container):
""" """
if item_container.visible_start == item_container.visible_end:
return ' <i>[' + u'%s' % item_container.visible_start.strftime('%d.%m.%Y') + ']</i>'
else:
return ' <i>[' + u'%s-%s' % \
( item_container.visible_start.strftime('%d.%m.%Y'),
item_container.visible_end.strftime('%d.%m.%Y')
) + ']</i>'
|
ac3b433651acb5743ed6bcef88594575144312da
| 13,649
|
def get_cube_time_info(cube):
"""Return year, month and day from the cube."""
coord_time = cube.coord('time')
time = coord_time.cell(0).point
time_step = time.strftime("%Y%m%d")
return time_step
|
35ea97aaecd41a4494753c11abd2ba6c9737faad
| 13,650
|
def get_elmo(model, words, batch_size=16):
"""
Get elmo embeddings
:param words: list
:param model: elmo model
:param batch_size: batch size (should be 16)
    :return: list of embedding vectors, one per word
"""
vector_list = []
# Create batches
batch = []
for i in range(0, len(words)):
word = words[i]
batch.append([word])
if len(batch) == batch_size or i == len(words) - 1:
# Send sentences to elmo and save the embeddings in a list
embed = model.sents2elmo(batch)
            # use a separate index to avoid shadowing the outer loop variable
            for j in range(0, len(embed)):
                vector_list += [embed[j][0].tolist()]
batch = []
return vector_list
|
c258bc53b381bee952cbdda883e882f98ce2597b
| 13,651
|
def comparador_3numeros(a,b,c):
"""int,int,int-->int,int,int
OBJ:comparar los números introducidos"""
if a>b and b>c: sol=str(a > b > c)
elif a>b and c>b: sol=str(a > c > b)
elif a<b and a>c: sol=str(b > a > c)
elif c<b and a<c: sol=str(b > c > a)
elif c>a and a>b: sol=str(c > a > b)
else: sol=str(c > b > a)
return sol
|
6d0fee4cffb72b41641702821491d98cd69bccb0
| 13,652
|
def get_discussions(book_object):
"""
    Get all discussions of this book
    :param book_object: Book model instance
    :return: list of all discussions of the book, newest first
"""
return book_object.discussions.order_by('-pub_date').all()
|
842a158a18e1e7a31579f409997d56a452f1cf61
| 13,655
|
import os
import re
import time
def get_version(package):
"""
Return package version as listed in `__version__` in `__init__.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
    mth = re.search(r"__version__\s?=\s?['\"]([^'\"]+)['\"]", init_py)
if mth:
return mth.group(1) + "." + time.strftime("%Y%m%d.%H%M", time.localtime())
else:
raise RuntimeError("Cannot find version!")
|
776afb3f41e18f2034bbd914b44018795ca13db3
| 13,656
|
def _object_format(o):
""" Object arrays containing lists should be printed unambiguously """
if type(o) is list:
fmt = 'list({!r})'
else:
fmt = '{!r}'
return fmt.format(o)
|
c71c0713d5b44c4733bc934d6026431e919db252
| 13,658
|
import traceback
def handle_error(error):
"""Error handler when a routed function raises unhandled error"""
print(traceback.format_exc())
return 'Internal Server Error', 500
|
d93d3c4bbabb578b28ade961bb46793187a5ee06
| 13,659
|
import os
def pretty_path(input_path):
""" return path string replacing '~' for home directory """
home_path = os.path.expanduser('~')
cwd_path = os.getcwd()
output_path = input_path.replace(home_path, '~').replace(cwd_path, './')
return output_path
|
b10bc85e4fba7ac7c73b53bc2bf5c8a61573e39f
| 13,660
|
import colorsys
def scale_lightness(rgb, scale_l):
"""
Scales the lightness of a color. Takes in a color defined in RGB, converts to HLS, lightens
by a factor, and then converts back to RGB.
"""
# converts rgb to hls
h, l, s = colorsys.rgb_to_hls(*rgb)
# manipulates h, l, s values and returns as rgb
return colorsys.hls_to_rgb(h, min(1, l * scale_l), s = s)
|
2fee635f26419cfe8abc21edb0092a8c916df6ef
| 13,661
|
import torch
def dummy_compute() -> torch.Tensor:
"""
returns a predefined size random Tensor
"""
return torch.rand(100, 100)
|
9b1c05359c9b57573bd2771124e878ccee7eae87
| 13,662
|
def raster2array(rasters,band_no=1):
"""
Arguments:
        rasters     A gdal raster (Dataset) object
        band_no     band number (1-based)
Example :
raster = gdal.Open(rasterfn)
raster2array(raster,1)
"""
bands = rasters.RasterCount
if band_no>0 and band_no <=bands:
band = rasters.GetRasterBand(band_no)
array = band.ReadAsArray()
else:
array = rasters.ReadAsArray()
return array
|
76bdcffbf22a0936fe24c29eba975f21edd7cd41
| 13,663
|
def early_stopping(cost, opt_cost, threshold, patience, count):
"""
Determines if you should stop gradient descent. Early stopping should
occur when the validation cost of the network has not decreased relative
to the optimal validation cost by more than the threshold over a specific
patience count
cost is the current validation cost of the neural network
opt_cost is the lowest recorded validation cost of the neural network
threshold is the threshold used for early stopping
patience is the patience count used for early stopping
count is the count of how long the threshold has not been met
Returns: a boolean of whether the network should be stopped early,
followed by the updated count
"""
if opt_cost - cost > threshold:
count = 0
else:
count += 1
if count == patience:
return True, count
else:
return False, count
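# Usage sketch (illustrative): a cost improvement beyond the threshold resets the
# counter; otherwise the counter increments until it reaches patience.
assert early_stopping(0.50, 0.60, 0.05, 3, 2) == (False, 0)
assert early_stopping(0.58, 0.60, 0.05, 3, 2) == (True, 3)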
|
5baea9f867e8ca8326270f250327494a5c47af46
| 13,664
|
def is_if_then(tokens):
""":note: we assume single-line if have been
transformed in preprocessing step."""
return tokens[0:1+1] == ["if","("]
|
aa1a946ed94299e689534ca98dcf1d0fa9bdca32
| 13,667
|