content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def get_expected_warning_messages(client):
    """
    Return the warning messages that may be shown for operations with a client.

    :param client: dict with 'name', 'class', 'code' and 'subsystem_code' keys
    :return: list[str] - warning messages associated with the client
    """
    member_id = ' '.join([client['name'], client['class'] + ':', client['code']])
    detailed = (
        'Do you want to send a client registration request for the added client?\n'
        'New subsystem \'' + client['subsystem_code']
        + '\' will be submitted for registration for member \'' + member_id + '\'.'
    )
    generic = 'Do you want to send a client registration request for the added client?'
    return [detailed, generic]
|
2f166f695027cd1a6b59412fcd352425e3903010
| 17,447
|
def get_instrument_survey_automated_invite(proj, id):
    """Given an instrument id, return the automated invite (survey scheduler)
    definition if one is defined; otherwise the lookup returns None."""
    scheduler_xpath = (
        "./Study/GlobalVariables/redcap:SurveysSchedulerGroup"
        f"/redcap:SurveysScheduler[@survey_id='{id}']"
    )
    return proj.find(scheduler_xpath, proj.nsmap)
|
cf458584f6fa6ab46c42bcdb11c5743639607030
| 17,449
|
def search_bt(t, d, is_find_only=True):
    """
    Search a binary search tree for target data.

    Input
    t: a node of a binary tree (exposes data, left, right)
    d: target data to be found in the tree
    is_find_only: True/False, specifying type of output
    Output
    the node that contains d or None if is_find_only is True, otherwise
    the node that should be the parent node of d (None if d is already
    present or the tree is empty)
    """
    if t is None:
        return None
    # Choose the subtree to descend into.  Renamed from `next`, which
    # shadowed the builtin of the same name.
    child = t.left if d < t.data else t.right
    if t.data == d:
        # Exact hit: the node itself for a lookup; no parent for an insert.
        return t if is_find_only else None
    if not is_find_only and child is None:
        # Insertion point found: this node would become d's parent.
        return t
    return search_bt(child, d, is_find_only)
|
4ac48982d17dc2d79ce86004a22167f314b8f99b
| 17,450
|
import os
def list_files(path, verbose=False):
    """
    List the JPEG image files inside a specific path, recursively.
    @param path path to the files
    @param verbose if True the files path will be printed
    @return list of matching file paths
    """
    files = []
    # r=root, d=directories, f = files
    for r, d, f in os.walk(path):
        for file in f:
            # Bug fix: `".jpg" or ".jpeg" in file` was always truthy, so every
            # file was collected.  Test the extension explicitly instead.
            if file.endswith((".jpg", ".jpeg")):
                files.append(os.path.join(r, file))
    if verbose:
        for f in files:
            print(f)
    return files
|
aafb376d48e7eef43131ce40195b98de68d057e2
| 17,452
|
def get_timedelta_types():
    """Return {time_delta_unit: timedelta_type} dictionary.

    Uses NumPy datetime unit codes, where 'us' is microsecond and 'ms' is
    millisecond.  Bug fix: 'microsecond' previously mapped to 'ms'
    (millisecond) instead of 'us'.
    """
    timedelta_types = {'nanosecond': 'timedelta64[ns]',
                       'microsecond': 'timedelta64[us]',
                       'second': 'timedelta64[s]',
                       'minute': 'timedelta64[m]',
                       'hour': 'timedelta64[h]',
                       'day': 'timedelta64[D]',
                       'month': 'timedelta64[M]',
                       'year': 'timedelta64[Y]'}
    return timedelta_types
|
8c102b56a574a60786ec666042ebcb035a971344
| 17,453
|
def extract_parse_options(handler, config_section):
    """
    Extracts the specific parsing options from the parse section
    as given by the [parse] config option in the [process] section.

    :param handler: config parser exposing get(section, option)
    :param config_section: name of the section to read from
    :return: dict of parsing options; when both 'years' and 'downloaddir'
        are set, 'datadir' is overridden by 'downloaddir'
    """
    option_names = ('datadir', 'grantregex', 'applicationregex', 'years', 'downloaddir')
    options = {name: handler.get(config_section, name) for name in option_names}
    # A year-limited run reads its data from the download directory instead.
    if options['years'] and options['downloaddir']:
        options['datadir'] = options['downloaddir']
    return options
|
abbba6f494073beb53ac0a00324ed9d7cf22434f
| 17,454
|
async def extract_userid(message, text: str):
    """
    NOT TO BE USED OUTSIDE THIS FILE

    Resolve *text* (a numeric id, @username, or mention taken from a
    command argument) to a numeric user id.

    :param message: incoming message object exposing ``entities`` and a
        private ``_client`` -- presumably a pyrogram Message; TODO confirm
    :param text: raw argument text to resolve
    :return: int user id, or None when the second entity type is unrecognised
    """
    def is_int(text: str):
        # True when the whole string parses as an integer.
        try:
            int(text)
        except ValueError:
            return False
        return True
    text = text.strip()
    if is_int(text):
        # Caller passed a plain numeric id; no lookup needed.
        return int(text)
    entities = message.entities
    app = message._client
    if len(entities) < 2:
        # No entity beyond the command itself: resolve the text as a username.
        return (await app.get_users(text)).id
    entity = entities[1]
    if entity.type == "mention":
        # @username mention: resolve through the client.
        return (await app.get_users(text)).id
    if entity.type == "text_mention":
        # A text mention already carries the full user object.
        return entity.user.id
    return None
|
033a7ee94c5cd2c93784944bbdf0d5b10c0402fe
| 17,455
|
def rgb_to_hex(r, g=0, b=0, a=0, alpha=False):
    """
    Returns the hexadecimal string of a color.

    :param r: red channel, or a list of all channels (then g/b/a are ignored)
    :param g: green channel
    :param b: blue channel
    :param a: alpha channel
    :param alpha: if True, alpha will be included
    :return: color in a string format such as #abcdef

    Integer channels are used verbatim (expected 0-255); float channels are
    treated as normalized 0.0-1.0 values and scaled to 0-255.
    """
    # Accept either separate channel arguments or a single channel list.
    color = r if isinstance(r, list) else [r, g, b, a]
    # Renamed from `hex`, which shadowed the builtin.
    digits = "#"
    for channel in color[:(4 if alpha else 3)]:
        value = channel if isinstance(channel, int) else int(round(channel * 255))
        digits += f'{value:02x}'
    return digits
|
90e5854f06948bba35a4ae40a6bb77eaef5a1120
| 17,457
|
def get_supervisors(employee):
    """
    Return the management chain starting at *employee*.

    The first element is the initial employee itself, followed by its
    supervisor, that supervisor's supervisor, and so on up the chain.
    """
    chain = [employee]
    while chain[-1].supervisor:
        chain.append(chain[-1].supervisor)
    return chain
|
8b4fe897290834930096654dacdad2f480d11276
| 17,459
|
def mjd2jd(mjd):
    """
    Convert a Modified Julian Date to an ordinary Julian Date.

    MJD is defined by MJD = JD - 2400000.5, so JD = MJD + 2400000.5.

    Parameters
    ----------
    mjd : float
        The Modified Julian Date

    Returns
    -------
    jd : float
        The corresponding ordinary Julian Date
    """
    MJD_OFFSET = 2400000.5
    return mjd + MJD_OFFSET
|
e9250a61e1c3374f4989105ff24fd9efc825d7a1
| 17,462
|
import math
def calc_goal_cost(path, goal, config):
    """
    Cost of a trajectory with respect to the goal point.

    :param path: trajectory array; path[-1] holds the final (x, y) point
    :param goal: goal position as (x, y)
    :param config: configuration object providing goal_weight
    :return: goal_weight times the Euclidean distance from trajectory end to goal
    """
    # Endpoint of the trajectory.
    end_x = path[-1, 0]
    end_y = path[-1, 1]
    # Euclidean distance from the endpoint to the goal.
    distance = math.sqrt((goal[0] - end_x) ** 2 + (goal[1] - end_y) ** 2)
    # Weighted cost.
    return config.goal_weight * distance
|
b71268b55fba266d5c2ef4237ad5552e555f12fc
| 17,463
|
import os
def get_file_extension(filename):
    """
    Get the extension of the file.

    :param filename: path or bare file name
    :return: the extension including the leading dot ('' when there is none)
    """
    return os.path.splitext(filename)[1]
|
d631c0cb0c3068ffaf1c4ed4f51d2f7fba886c7e
| 17,464
|
import os
def pid_exists(pid):
    """
    Return True if *pid* is still running.

    Relies on the Linux /proc filesystem: a live process has a /proc/<pid>
    directory.
    """
    return os.path.exists('/proc/{}'.format(pid))
|
05ef0b7c7287b421c949c36171926d2045746e6a
| 17,466
|
def is_superincreasing(seq):
    """Return whether a given sequence is superincreasing.

    A sequence is superincreasing when every element is strictly greater
    than the sum of all elements before it.
    """
    running_total = 0
    for value in seq:
        if value <= running_total:
            return False
        running_total += value
    return True
|
836be03cb7dbb215baaa9a1ba5fd83c39de843d7
| 17,467
|
def get_component_byName(wall_list, componentname):
    """Look up a component's parameter dict by its name.

    Args:
        wall_list (list[dict]): envelope components excluding windows
            (Wall_direct / Wall_accurate / Wall_simple / Wall_rc / Wall_steel)
        componentname (str): name of the component
    Returns:
        dict: the parameter dict of the first matching component, or None
        when no component has that name
    """
    matches = (wall for wall in wall_list if wall['Name'] == componentname)
    return next(matches, None)
|
22c754c13e0134a1fe5dd9a42e6d84a137c2ef8a
| 17,468
|
def update_next_wps_spd_profile(next_wps, next_wps_idx_start, next_wps_idx_end, next_decel_init_wp_idx, max_spd,
                                max_spd_chg, dt_btw_wps):
    """
    Generate speed profile for ego vehicle for next sequence of waypoints to be published to /final_waypoints.

    :param next_wps: waypoint messages; each exposes twist.twist.linear.x
        (ROS-style waypoints -- assumed, TODO confirm)
    :param next_wps_idx_start: global index of the first waypoint in next_wps
    :param next_wps_idx_end: global index one past the last waypoint considered
    :param next_decel_init_wp_idx: global index where deceleration should begin
    :param max_spd: cruise speed ceiling
    :param max_spd_chg: maximum speed change rate
    :param dt_btw_wps: time between consecutive waypoints
    :return: next_wps, with speeds updated in place past the decel point
    """
    # Condition 1:
    # When next decel init wp is way ahead, just maintain max speed.
    if next_decel_init_wp_idx > next_wps_idx_end:
        return next_wps
    # Condition 2:
    # When next stop is close by, prepare to decelerate.
    else:
        for wp_rel_idx, wp_global_idx in enumerate(range(next_wps_idx_start, next_wps_idx_end)):
            if wp_global_idx < next_decel_init_wp_idx:
                # Maintain current waypoint speed, no need to change anything
                pass
            else:
                # Decelerate until zero speed at constant change rate
                current_wp_spd = next_wps[wp_rel_idx].twist.twist.linear.x
                # The 1.2 factor steepens the ramp -- presumably a safety margin; verify
                expected_wp_spd = max_spd - max_spd_chg * (wp_global_idx - next_decel_init_wp_idx) * dt_btw_wps * 1.2
                # Never speed up: keep the lower of current and ramped-down speed.
                next_wps[wp_rel_idx].twist.twist.linear.x = min(current_wp_spd, expected_wp_spd)
        return next_wps
|
ec882fe3e7b8af672dbbcd0d6967420b9ab291c2
| 17,471
|
def bawl2(text):
    """
    Render *text* spaced out along the top edge and mirrored down both sides:

    t e x t .
    e       e
    x       x
    t       t
    .       .
    """
    rows = [' '.join(text)]
    for offset, char in enumerate(text[1:]):
        # Each subsequent character appears at both ends of its row,
        # separated by a widening gap.
        rows.append(char + ' ' * (2 * offset + 1) + char)
    return '\n'.join(rows)
|
887006a1cb18970ef9889b1b8f1d61ca4aaa69ed
| 17,472
|
def total_messages(s):
    """
    Total number of messages in a per-date activity Series.

    Takes a pandas.Series indexed by dates (plus a few derived entries) with
    per-date message counts as values.  The derived 'Earliest Date',
    'Latest Date' and 'Tenure' entries are dropped (when present); an
    existing 'Total Messages' entry is NOT dropped and is included in the sum.
    """
    derived_labels = ['Earliest Date', 'Latest Date', 'Tenure']
    return s.drop(derived_labels, errors='ignore').sum()
|
f05244236269fddd24bb80b32534b92ddbd0fcea
| 17,473
|
import json
def read_control_file(control_file):
    """Read Viewer axis labels from a JSON control file.

    :param control_file: path to a JSON file with a 'model_info' list whose
        entries carry 'x_axis_label' and 'y_axis_label'
    :return: (x_axis_label, y_axis_label); (None, None) when the file is
        missing, unreadable, malformed, or lacks the expected keys
    """
    x_axis_label = None
    y_axis_label = None
    try:
        with open(control_file) as data_file:
            data = json.load(data_file)
        for c in data['model_info']:
            x_axis_label = c["x_axis_label"]
            y_axis_label = c["y_axis_label"]
    except (OSError, ValueError, KeyError, TypeError):
        # Missing/unreadable file (OSError), invalid JSON (ValueError covers
        # json.JSONDecodeError), or unexpected structure (KeyError/TypeError):
        # fall back to no labels instead of crashing the Viewer.
        x_axis_label = None
        y_axis_label = None
    return x_axis_label, y_axis_label
|
a4090ab7cfe89431c38f2697c86812e11ee59904
| 17,474
|
def toDataRange(size, r = all):
    """Converts range r to numeric range (min,max) given the full array size

    Arguments:
        size (tuple): source data size
        r (tuple or all): range specification, ``all`` is full range; a
            scalar selects the unit range (r, r+1); negative entries count
            from the end as in Python slicing

    Returns:
        tuple: absolute range as pair of integers, clamped to [0, size]

    See Also:
        :func:`toDataSize`, :func:`dataSizeFromDataRange`
    """
    # The builtin ``all`` is used as a sentinel for "the full range".
    if r is all:
        return (0,size)
    # A scalar selects the unit range starting at that position.
    if isinstance(r, int) or isinstance(r, float):
        r = (r, r +1)
    # ``all`` as lower bound means "from the start".
    if r[0] is all:
        r = (0, r[1])
    # Negative lower bound counts from the end; clamp to 0 when out of range.
    if r[0] < 0:
        if -r[0] > size:
            r = (0, r[1])
        else:
            r = (size + r[0], r[1])
    # Clamp lower bound to the data size.
    if r[0] > size:
        r = (size, r[1])
    # ``all`` as upper bound means "to the end".
    if r[1] is all:
        r = (r[0], size)
    # Negative upper bound counts from the end; clamp to 0 when out of range.
    if r[1] < 0:
        if -r[1] > size:
            r = (r[0], 0)
        else:
            r = (r[0], size + r[1])
    # Clamp upper bound to the data size.
    if r[1] > size:
        r = (r[0], size)
    # Crossed bounds collapse to an empty range at the lower bound.
    if r[0] > r[1]:
        r = (r[0], r[0])
    return r
|
fa4a872ac1ca04e94bedfbca1466727e42fd1f39
| 17,475
|
from typing import List
def check_interval_coverage(intvl_upper_limit: int, sub_intvl_positions: List[int], range_length: int = 1) -> bool:
    """
    Check whether the given sub-intervals fully cover the segment up to the
    upper limit.

    :param intvl_upper_limit: upper bound of the interval
    :param sub_intvl_positions: middle point positions of the sub-intervals
    :param range_length: half-width of each sub-interval around its middle point
    :return: True if the whole segment is covered, False otherwise
    :raises ValueError: when the upper limit is 0
    """
    if intvl_upper_limit == 0:
        raise ValueError('The upper limit of the interval has to be bigger than 0')
    covered = 0
    for middle in sub_intvl_positions:
        low = middle - range_length
        high = middle + range_length
        # Extend coverage only when this sub-interval starts inside the
        # already-covered prefix and reaches past it.
        if low <= covered < high:
            covered = high
    return covered >= intvl_upper_limit
|
cfaa5d60d6caf59edade474946e2b5b0a7b825f1
| 17,476
|
from functools import reduce
import sys
def getCommandUsed():
    """Return the command-line contents used, joined with single spaces."""
    # str.join is the idiomatic, linear-time way to concatenate; the old
    # reduce(+) built the same string quadratically.
    return " ".join(sys.argv)
|
edf8f4b9fba71101a301a80d4faa05407a6d4fc8
| 17,477
|
import os
def mk_path(path, check=True):
    """
    Makes directory at path if none exists.

    Parameters
    ------
    path: str
        Directory path.
    check: bool
        When True, the directory is created without prompting (the boolean
        short-circuits the interactive confirmation); when False, the user
        is asked before creation and the program exits on refusal.

    Returns
    ------
    path: str
        Created directory path.
    """
    if os.path.exists(path):
        # Nothing to do: the directory (or file) is already there.
        return path
    elif check or input("\nRequested directory does not exist:\n\t{}"
                        "\n\nCreate? (Enter \'Y\' or \'y\' for yes): ".format(path)) in ['Y', 'y']:
        os.makedirs(path)
        print('\nDirectory created:\n\t{}.'.format(path))
        return path
    else:
        # User declined: stop the whole application.
        print('Application stopped.')
        exit(0)
|
14507410a8b80c228b191a3e7a94b2bfdf2f468f
| 17,478
|
def puntoDeRocio(hr, t):
    """
    Compute the dew point.

    :param hr: relative humidity (percent)
    :param t: ambient temperature (degrees Celsius -- assumed, TODO confirm)
    :return: dew point estimate
    """
    humidity_factor = (hr / 100.0) ** (1 / 8.0)
    return humidity_factor * (112 + 0.9 * t) + (0.1 * t) - 112
|
a1b5830e86f9b8d6835c6a4fac6ba18b48716c0c
| 17,481
|
def python_3000_backticks(logical_line):
    """
    Backticks are removed in Python 3000.
    Use repr() instead.

    Returns (offset, message) for the first backtick found, or None.
    """
    offset = logical_line.find('`')
    if offset == -1:
        return None
    return offset, "W604 backticks are deprecated, use 'repr()'"
|
2c25077b71bc6b10dca0fb168c711ecdcffeab14
| 17,482
|
def ticklabel_format(value):
    """
    Pick a printf-style formatter for ytick labels.

    If the value prints in fewer than 7 characters, keep its exact decimal
    precision; otherwise fall back to scientific notation.
    """
    text = str(value)
    if len(text) >= 7:
        return "%.1e"
    # Count digits after the decimal point (0 for integers).
    decimals = len(text) - (text.index(".") + 1) if "." in text else 0
    return f"%.{decimals}f"
|
7de96c52c527a5295d7ca6e832313386fd80564c
| 17,483
|
def transfer_function_Rec1886_to_linear(v):
    """
    The Rec.1886 transfer function (EOTF): signal value to linear light.

    Parameters
    ----------
    v : float
        The normalized value to pass through the function.

    Returns
    -------
    float
        A converted value.
    """
    gamma = 2.4
    white = 1   # Lw: luminance for white
    black = 0   # Lb: luminance for black
    # Ignoring legal to full scaling for now.
    # v = (1023.0*v - 64.0)/876.0
    span = pow(white, 1.0 / gamma) - pow(black, 1.0 / gamma)
    gain = pow(span, gamma)
    lift = pow(black, 1.0 / gamma) / span
    return gain * pow(max(v + lift, 0.0), gamma)
|
7a2a2a2348d701e7fd7dd90c5d016dbf8a2e0c56
| 17,484
|
def gen_apsubset(AP, intrep):
    """Generate the set of atomic propositions encoded by an integer bitmask.

    Bit i of *intrep* selects AP[i].

    >>> sorted(gen_apsubset(AP=("p", "q"), intrep=2))
    ['q']
    """
    return {prop for bit, prop in enumerate(AP) if (intrep >> bit) & 1}
|
ab219b40c0eda5a0eef4f657f8b06da0f5d782d8
| 17,485
|
def does_classes_contain_private_method(classes, method):
    """
    Check if at least one of the provided classes defines *method* with
    private access level.

    :param classes: iterable of classes to inspect
    :param method: function whose name is looked up on each class
    :return: (True, class) for the first class whose attribute of that name
        is wrapped by ``private_wrapper``; (False, None) otherwise
    """
    for class_ in classes:
        if hasattr(class_, method.__name__):
            # Bug fix: `x in 'private_wrapper'` was a substring test, so any
            # wrapper whose name happens to be a substring (e.g. 'wrapper')
            # matched.  Compare for equality instead.
            if getattr(class_, method.__name__).__name__ == 'private_wrapper':
                return True, class_
    return False, None
|
544bd9c5c3f03352ab8674b2665eb583328ac437
| 17,486
|
import json
def read_config(path):
    """Read a JSON-format config file and return its contents as a dict."""
    with open(path, 'r') as config_file:
        return json.load(config_file)
|
c21c075c551c8ad059eb397d453a994083a80237
| 17,487
|
def res_permission_denied():
    """
    Default response body for a permission-denied exception.
    """
    response = {}
    response["type"] = "authentication_error"
    response["code"] = "permission_denied"
    response["detail"] = "You do not have permission to perform this action."
    response["attr"] = None
    return response
|
ba77608f8e16c86ca55da74c0e732c160ea1ae86
| 17,488
|
import re
def parse_fatal_stacktrace(text):
    """Get useful information from a fatal faulthandler stacktrace.

    Args:
        text: The text to parse.

    Return:
        A tuple with the first element being the error type/message, and the
        second element being the function name of the first stacktrace frame;
        ('', '') when the text does not match the expected dump format.
    """
    # The four consecutive lines of a faulthandler dump, joined into one
    # multi-line pattern: error header, blank line, thread header, first frame.
    lines = [
        r'(?P<type>Fatal Python error|Windows fatal exception): (?P<msg>.*)',
        r' *',
        r'(Current )?[Tt]hread [^ ]* \(most recent call first\): *',
        r' File ".*", line \d+ in (?P<func>.*)',
    ]
    m = re.search('\n'.join(lines), text)
    if m is None:
        # We got some invalid text.
        return ('', '')
    else:
        msg = m.group('msg')
        typ = m.group('type')
        func = m.group('func')
        if typ == 'Windows fatal exception':
            # Prefix so the returned message itself marks the Windows case.
            msg = 'Windows ' + msg
        return msg, func
|
20d26d3e0d69b5fd3bba1fbf1dbe6877b58b125a
| 17,489
|
from typing import Dict
def dict_sort(d: dict, key=lambda item: item[1]) -> Dict:
    """Return a new dict with *d*'s items sorted (by value, by default)."""
    return dict(sorted(d.items(), key=key))
|
3686867a4b302fc9d9a5014b3cdadccbc8175d39
| 17,490
|
import json
def json_loads():
    """Return the json.loads callable (a deserializer hook)."""
    return json.loads
|
8e54f1263e65330e780326dbf57af983013fa016
| 17,492
|
import re
def extract_variables(sFormula):
    """ Extract variables in expression, e.g. {a}*x + {b} -> ['a','b']
        The variables are replaced with p[0],..,p[n] in order of appearance
    """
    formula_eval = sFormula
    variables = []
    for match in re.finditer(r"\{(.*?)\}", sFormula, re.DOTALL):
        name = match.group(1)
        if name not in variables:
            # First appearance: assign the next parameter slot and replace
            # every occurrence of this variable at once.
            formula_eval = formula_eval.replace('{' + name + '}', 'p[{:d}]'.format(len(variables)))
            variables.append(name)
    return variables, formula_eval
|
7ae5b836504876c815b15c87bad774334fd4dd80
| 17,493
|
def select_files(files, search):
    """Select files based on a search term of interest.

    Parameters
    ----------
    files : list of str
        File list.
    search : str
        Substring used to keep files.

    Returns
    -------
    list of str
        File list with only the matching entries kept.
    """
    return list(filter(lambda name: search in name, files))
|
91fc2e08645c349b14425b5f6e86c38906156601
| 17,496
|
def _index_list_of_dict_by_key(seq, key):
""" If you need to fetch repeatedly from name, you should index them by name (using a dictionary), this way get
operations would be O(1) time. An idea:
Source: https://stackoverflow.com/a/4391722/4562156
:param seq: list of dictionaries
:param key: key to index each dictionary within seq on
:return: indexed list of dictionaries by `key`
"""
return {d[key]: {**d, "index": idx} for (idx, d) in enumerate(seq)}
# return dict((d[key], dict(d, index=index)) for (index, d) in enumerate(seq))
|
a9eeea491997fa7d714db48c3e6e38c1fb5b6e78
| 17,497
|
def get_child_2_direct_parent_dict_RCTM(fn_in):
    """
    Parse a tab-separated (parent, child) file into child -> direct parents.

    Input lines look like:
        R-BTA-109581   R-BTA-109606
    The "R-" prefix is stripped from both identifiers.

    :param fn_in: path of the input file
    :return: dict mapping child id -> set of direct parent ids
    """
    child_2_parent_dict = {}
    with open(fn_in, "r") as fh_in:
        for line in fh_in:
            parent, child = line.split("\t")
            child = child.strip()
            # Drop the "R-" prefix when present.
            child = child[2:] if child.startswith("R-") else child
            parent = parent[2:] if parent.startswith("R-") else parent
            child_2_parent_dict.setdefault(child, set()).add(parent)
    return child_2_parent_dict
|
73b7e148fa3ad6faf604560a61dc93a4e696ea4b
| 17,498
|
import uuid
def _zset_common(conn, method, scores, ttl=30, **kw):
"""
_zset_common
@param conn:
@param method:
@param scores:
@param ttl:
@param kw:
@return:
"""
id = str(uuid.uuid4())
execute = kw.pop('_execute', True)
pipeline = conn.pipeline(True) if execute else conn
for key in list(scores.keys()):
scores['idx:' + key] = scores.pop(key)
getattr(pipeline, method)('idx:' + id, scores, **kw)
pipeline.expire('idx:' + id, ttl)
if execute:
pipeline.execute()
return id
|
90633fc7bc370129616512ace23b9f918ba0738e
| 17,500
|
def file_writer(filename):
    """
    Open a file for writing.

    Args:
        filename: (string) the name of the path/file to open for writing.

    Returns:
        file object.

    Raises:
        IOError: If filename is not writeable.
    """
    try:
        return open(filename, 'w')
    except IOError as e:
        # Re-raise with the offending path included in the message.
        raise IOError("{0}: {1}".format(e.strerror, filename))
|
c04e684b5cb35c8442d4a75762d63c0974745b9c
| 17,501
|
def read_keyfile(keyfile):
    """ Read a file pairing labels and values.

    Each useful line holds a label and a numeric value separated by
    whitespace; blank or too-short lines are skipped.
    """
    keys = {}
    with open(keyfile, "r") as handle:
        for line in handle:
            if not line or len(line) <= 2:
                continue
            label, value = line.strip().split()
            keys[label] = float(value)
    return keys
|
23b8d1df9fd4a9019ab62f477376e6505f176382
| 17,502
|
import os
def infer_result_name(input_name):
    """Calculate the output name for the annotated paper: the input path
    with its extension replaced by '_annotated.xml'."""
    root = os.path.splitext(input_name)[0]
    return "{}_annotated.xml".format(root)
|
2147f07c0d01b1c535aead688e0d2e95636d2048
| 17,503
|
def change_ttw(x):
    """Compute the next time to wait (ttw).

    Args:
        x (int): number of checks

    Returns:
        time to wait: 0 for the first 14 checks, then exponentially growing
        as 1.2 ** (x - 15), capped at 120 once x exceeds 42.
    """
    if x > 42:
        return 120
    if x < 15:
        return 0
    return 1.2 ** (x - 15)
|
0a51012cac84ffdf25b762a71c5fa1dd2aa4a291
| 17,505
|
import re
def slugify(
    string,
    *,
    lowercase=True,
    whitespace=False,
    whitespace_replacement='-',
):
    """
    Convert *string* into a "slug" which can be safely used in a directory name.

    This function is naive, and doesn't handle unicode or anything special.
    If we need that in the future, consider something like python-slugify.
    """
    slug = string.lower() if lowercase else string
    # Strip everything that is neither a word character nor whitespace.
    slug = re.sub(r"[^\w\s]", '', slug)
    if whitespace:
        return slug
    return re.sub(r"\s+", whitespace_replacement, slug)
|
d3ba395b4bdc29b8c0e98cfecfa4bb2ce8e0eca5
| 17,506
|
def doAddition(a, b):
    """
    Accept two numbers and return their sum.
    """
    total = a + b
    return total
|
c4afcdab6c5e4570eff848b2f6a0aa07713f0dda
| 17,507
|
def get_binsize(bins):
    """
    Infer bin size from a bin DataFrame. Assumes that the last bin of each
    contig is allowed to differ in size from the rest.

    Returns
    -------
    int or None if bins are non-uniform
    """
    sizes = set()
    for _, contig_bins in bins.groupby("chrom"):
        widths = contig_bins["end"] - contig_bins["start"]
        # Ignore the final (possibly truncated) bin of each contig.
        sizes.update(widths.iloc[:-1].unique())
    # Exactly one distinct width means the bins are uniform.
    return next(iter(sizes)) if len(sizes) == 1 else None
|
cd8a7127083a24bc24f79be4aa8225884efa643a
| 17,508
|
def extend_displace(bin_str):
    """
    DES expansion permutation (E-box): expand a 32-bit half-block to 48 bits.

    :param bin_str: 32-character binary string
    :return: 48-character binary string after expansion
    :raises ValueError: when the input is not exactly 32 characters
    """
    if len(bin_str) != 32:
        raise ValueError("二进制字符串长度必须是 32")
    # Standard DES E bit-selection table (1-based bit positions).
    # Bug fix: the first entry was 2; the DES expansion table starts with 32.
    displace_table = [32, 1, 2, 3, 4, 5,
                      4, 5, 6, 7, 8, 9,
                      8, 9, 10, 11, 12, 13,
                      12, 13, 14, 15, 16, 17,
                      16, 17, 18, 19, 20, 21,
                      20, 21, 22, 23, 24, 25,
                      24, 25, 26, 27, 28, 29,
                      28, 29, 30, 31, 32, 1]
    return "".join(bin_str[i - 1] for i in displace_table)
|
b087adfd0703763ce9301e66250cb8193e1bc752
| 17,509
|
def find_max(link_dict):
    """Return the largest value in *link_dict*, or 0 when it is empty.

    Finding the maximum of a hash table's values is O(N) in the number of
    keys; since this value is computed at most once between program runs,
    no fancier structure (e.g. a max-heap) is warranted.
    """
    return max(link_dict.values()) if link_dict else 0
|
21dbddad9096e7f82e89ca72afdcb9c47fdfc03c
| 17,510
|
def _replace_revision(raw_header: bytes, revision: bytes) -> bytes:
"""Replace the 'revision' field in a raw header."""
return raw_header[:8] + revision + raw_header[8 + 4 :]
|
a9c251528ae7e374c815db5b83ce4fbd164a7afd
| 17,511
|
def keyword_list(value):
    """Ensure keywords are treated as lists."""
    if not isinstance(value, list):
        # csv string -> list
        value = value.split(',')
    return value
|
9ab8f75fed9d85164d2b450da2c2fcdfc6e070c1
| 17,512
|
from bs4 import BeautifulSoup
def standardize_html(html):
    """Clean and format html for consistency.

    Normalizes whitespace, then re-parses and pretty-prints the markup so
    that indentation and structure are canonical across inputs.
    """
    # NOTE(review): the first two replace() targets look like they may have
    # originally contained non-breaking spaces (U+00A0) that were mangled to
    # plain spaces in a copy -- confirm against the upstream source.
    cleaned = html.replace(". ", ". ").replace(" ", "").replace("\n", "")
    # Re-parse with the lxml backend and pretty-print for consistent output.
    parsed = BeautifulSoup(cleaned, "lxml").prettify().strip()
    return parsed
|
fd1d60f97ae7de313acb43fb327dee6864324109
| 17,514
|
import hashlib
def get_model_hash(rvt_model_path):
    """
    Create a sha256 hash of the provided rvt model file.

    :param rvt_model_path: path of the model file
    :return: hex digest string
    """
    chunk_size = 65536
    hasher = hashlib.sha256()
    with open(rvt_model_path, "rb") as rvt:
        # Stream the file in chunks so large models never load fully into memory.
        for chunk in iter(lambda: rvt.read(chunk_size), b""):
            hasher.update(chunk)
    return hasher.hexdigest()
|
1ee0f2935112eda8729df84e41b1d0fee2d224a1
| 17,515
|
from typing import Tuple
from typing import List
def _check_dissipator_lists(gammas, lindblad_operators) -> Tuple[List, List]:
"""Check gammas and lindblad operators are lists of equal length."""
if gammas is None:
gammas = []
if lindblad_operators is None:
lindblad_operators = []
assert isinstance(gammas, list), \
"Argument `gammas` must be a list)]."
assert isinstance(lindblad_operators, list), \
"Argument `lindblad_operators` must be a list."
assert len(gammas) == len(lindblad_operators), \
"Lists `gammas` and `lindblad_operators` must have the same length."
return gammas, lindblad_operators
|
4071be282667b6c688cffdf28cd3a5e4ce8f6dbf
| 17,517
|
def steps(current, target, max_steps):
    """ Number of discrete steps between two normalized values.

    :param current: Current value (0.0-1.0).
    :param target: Target value (0.0-1.0).
    :param max_steps: Maximum number of steps.
    :raises ValueError: when current or target fall outside [0.0, 1.0].
    """
    # Bug fix: ValueError was given logging-style ("%s", value) arguments,
    # so the offending value never appeared in the message.
    if current < 0 or current > 1.0:
        raise ValueError("current value %s is out of bounds (0.0-1.0)" % current)
    if target < 0 or target > 1.0:
        raise ValueError("target value %s is out of bounds (0.0-1.0)" % target)
    return int(abs((current * max_steps) - (target * max_steps)))
|
0287efec583bfb8c37907a34ca5adf7c0aa61886
| 17,519
|
def findUniqueNumber(n: int):
    """
    Read n numbers from standard input (each value present twice except one)
    and return the unique number via XOR folding.

    :param n : Size of the input
    :return : The unique integer
    """
    values = [int(token) for token in input("Enter set of numbers separated by space\n").split(' ')]
    unique = 0
    for index in range(n):
        unique ^= values[index]
    return unique
|
bf9a64bf0474a354dec8f0b891a6f0134aa53c73
| 17,520
|
def parse_quantities(root):
    """Returns a dictionary of quantity classes parsed from the XML tree.

    Each entry maps name -> {dimension, baseForConversion,
    [alternativeBase], [members]}.
    """
    quantities = {}
    for node in root.find("{*}quantityClassSet"):
        name = node.find('{*}name').text
        entry = {
            'dimension': node.find('{*}dimension').text,
            'baseForConversion': node.find('{*}baseForConversion').text,
        }
        alt = node.find('{*}alternativeBase')
        if alt is not None:
            entry['alternativeBase'] = alt.text
        members = [member.text for member in node.findall('{*}memberUnit')]
        if members:
            entry["members"] = members
        quantities[name] = entry
    return quantities
|
7d00c869da739889bf5372102ad3c6448a8f7689
| 17,522
|
def get_coords_animal(position, animal):
    """Return all coordinates occupied by the animal, computed from the
    position of its bottom-left corner.

    :param position: (x, y) of the bottom-left corner
    :param animal: tuple whose third element is the list of offset vectors
    :return: list of absolute (x, y) coordinates
    """
    offsets = animal[2]
    return [(position[0] + vect[0], position[1] + vect[1]) for vect in offsets]
|
472358bc2fc1e73928ad6121bc9cc5cc18e3426c
| 17,523
|
from typing import Any
def expand_to_tuple(item: Any) -> tuple[Any, ...]:
    """Wrap anything that is not already a tuple into a 1-tuple.

    Args:
        item: any sequence or a single item.

    Returns:
        a tuple.

    >>> expand_to_tuple((1, 2))
    (1, 2)
    >>> expand_to_tuple(3)
    (3,)
    """
    if isinstance(item, tuple):
        return item
    return (item,)
|
7f7f072759ec4b5e493d5f55678f853eb99fe765
| 17,524
|
def get_loc_exit_data():
    """
    Get location-exit data.

    Generate cases for testing the conversion of agent locations and exit
    gate numbers into a flat state vector (all x's, then all y's, then the
    exits).  Each case is a tuple (agent_locations, exits, expected_vector).
    """
    cases = []
    raw = (
        ([(1, 2), (3, 4), (5, 6)], [0, 3, 5]),
        ([(20, 10), (45, 80), (99, 32)], [2, 8, 4]),
    )
    for locations, exits in raw:
        xs = [loc[0] for loc in locations]
        ys = [loc[1] for loc in locations]
        cases.append((locations, exits, xs + ys + exits))
    return cases
|
77727a4f824f9272db10a3408e4c3d47ef6fe6db
| 17,525
|
def c_string_literal(env, string):
    """
    Escape *string* and wrap it in double quotes, producing a C literal.

    Form feeds and carriage returns are stripped entirely.
    """
    # Warning: Order Matters! Replace '\\' first!
    replacements = (
        ("\\", "\\\\"),
        ("\'", "\\\'"),
        ("\"", "\\\""),
        ("\t", "\\t"),
        ("\n", "\\n"),
        ("\f", ""),
        ("\r", ""),
    )
    for old, new in replacements:
        string = string.replace(old, new)
    return "\"" + string + "\""
|
3906144e5a125d6d6620d7645378bab3265f8217
| 17,526
|
def is_third_friday(dt):
    """Return True when *dt* falls on the third Friday of its month
    (index-futures settlement day; holidays are not accounted for).

    :param dt: a datetime.datetime value, e.g. datetime.datetime(2015, 4, 5)
    """
    is_friday = dt.weekday() == 4
    in_third_week = 14 < dt.day < 22
    return is_friday and in_third_week
|
8f57218b14156e40606357bdb4b0ba39bf8cc8cc
| 17,527
|
def add_zeros(x, length=4):
    """Left-pad the string form of *x* with zeros to *length* characters.

    Values already *length* characters or longer are returned unchanged.
    """
    # rjust pads on the left exactly like the old manual loop (and, unlike
    # zfill, does not move a leading minus sign past the padding).
    return str(x).rjust(length, '0')
|
ed46c2005dec2324229629341309148fecd7c14f
| 17,529
|
def __filter_dict(state_dict, props, filters):
    """ Filters a state dictionary to only a dict that contains props for all
    objects that adhere to the filters. A filter is a combination of a
    property and value.

    :param state_dict: dict mapping object id -> dict of object properties
    :param props: property names every returned object must contain; the
        returned dicts are trimmed down to exactly these keys
    :param filters: dict of {property: value} constraints, or None; an object
        passes when every filter property it carries equals (or contains)
        the filter value
    :return: dict of {object id: trimmed property dict}
    """
    def find(obj_dict_pair):
        # Get the object properties
        obj_dict = obj_dict_pair[1]
        # Check if all the desirable properties are in the object dict
        if not all([p in obj_dict.keys() for p in props]):
            return None
        # Check if any filter property is present, if so check its value
        def filter_applies(filter_):
            filter_prop = filter_[0]
            filter_val = filter_[1]
            if filter_prop in obj_dict.keys():
                # Matches on equality OR containment -- note the `in` test
                # raises TypeError when the property value is not a
                # container; presumably values are always str/list -- verify.
                return filter_val == obj_dict[filter_prop] \
                    or filter_val in obj_dict[filter_prop]
            else:
                return False  # if filter is not present, we return False
        # If filters are given, go over each filter to see if it applies
        if filters is not None:
            filter_results = map(filter_applies, filters.items())
            # Check if all filters are either not applicable or return true
            applies = all(filter_results)
            if applies is False:  # if it does not adhere to the filters
                return None
        # Filter the dict to only the required properties
        new_dict = {p: obj_dict[p] for p in props}
        # Return the new tuple
        return obj_dict_pair[0], new_dict
    # Map our find method to all state objects
    filtered_objects = map(find, state_dict.items())
    # Extract all state objects that have the required properties and adhere to
    # the filters
    objects = [obj_dict_pair for obj_dict_pair in filtered_objects
               if obj_dict_pair is not None]
    # Transform back to dict
    objects = {obj_id: obj_dict for obj_id, obj_dict in objects}
    # Return
    return objects
|
942f250251a6d7ad8db9b5c4a81e69c050279f50
| 17,530
|
import re
def get_component_version():
    """Get the component version from the first line of the license file.

    Returns 'NULL' when the expected version string is not found.
    """
    component_version = 'NULL'
    with open('/THIRD-PARTY-LICENSES.txt', 'r') as license_file:
        first_line = license_file.readline()
    version_match = re.search(
        'Amazon SageMaker Components for Kubeflow Pipelines; version (([0-9]+[.])+[0-9]+)',
        first_line)
    if version_match is not None:
        component_version = version_match.group(1)
    return component_version
|
cbce752d59d40cca6022bce454295c2bb7a38a01
| 17,533
|
from typing import List
import csv
def read_data(file_path: str = "CBETHUSD.csv") -> List[float]:
    """Read column 1 of the CSV at *file_path* as floats, skipping the header row."""
    with open(file_path) as data:
        reader = csv.reader(data)
        next(reader)  # discard the header
        return [float(row[1]) for row in reader]
|
9cb2f30de55e91626b505c9c49596b1972b0551f
| 17,534
|
def get_plural(val_list):
    """ Get Plural: Helper that pluralizes a noun based on list length.

    Returns:
        str: 's' if val_list has more than one element, otherwise ''.
    """
    if len(val_list) > 1:
        return 's'
    return ''
|
b86387cb2abd3c5176f5dcefbf7bc3e1a49a346c
| 17,535
|
def remove_binding(name: str) -> dict:
    """Build the Runtime.removeBinding CDP command.

    This method does not remove the binding function from the global object;
    it only unsubscribes the current runtime agent from
    Runtime.bindingCalled notifications.

    Parameters
    ----------
    name: str
        **Experimental**
    """
    params = {"name": name}
    return {"method": "Runtime.removeBinding", "params": params}
|
783765114437c831e9625380acb83de5458137c8
| 17,536
|
def _is_variable_argument(argument_name):
"""Return True if the argument is a runtime variable, and False otherwise."""
return argument_name.startswith('$')
|
cf74d6dfd1c0ea1b560bf3766e2fac8af1dbafda
| 17,537
|
def vote_smart_candidate_object_filter(one_candidate):
    """
    Filter down the complete dict from Vote Smart to just the fields we use locally.

    :param one_candidate: Vote Smart candidate object exposing the attributes
        listed below (name parts, election and office descriptors,
        running-mate fields)
    :return: plain dict carrying only the locally-used candidate fields
    """
    # Copy each attribute of interest into a plain dict so the rest of the
    # code never touches the Vote Smart object directly.
    one_candidate_filtered = {
        'candidateId': one_candidate.candidateId,
        'firstName': one_candidate.firstName,
        'nickName': one_candidate.nickName,
        'middleName': one_candidate.middleName,
        'preferredName': one_candidate.preferredName,
        'lastName': one_candidate.lastName,
        'suffix': one_candidate.suffix,
        'title': one_candidate.title,
        'ballotName': one_candidate.ballotName,
        'electionParties': one_candidate.electionParties,
        'electionStatus': one_candidate.electionStatus,
        'electionStage': one_candidate.electionStage,
        'electionDistrictId': one_candidate.electionDistrictId,
        'electionDistrictName': one_candidate.electionDistrictName,
        'electionOffice': one_candidate.electionOffice,
        'electionOfficeId': one_candidate.electionOfficeId,
        'electionStateId': one_candidate.electionStateId,
        'electionOfficeTypeId': one_candidate.electionOfficeTypeId,
        'electionYear': one_candidate.electionYear,
        'electionSpecial': one_candidate.electionSpecial,
        'electionDate': one_candidate.electionDate,
        'officeParties': one_candidate.officeParties,
        'officeStatus': one_candidate.officeStatus,
        'officeDistrictId': one_candidate.officeDistrictId,
        'officeDistrictName': one_candidate.officeDistrictName,
        'officeStateId': one_candidate.officeStateId,
        'officeId': one_candidate.officeId,
        'officeName': one_candidate.officeName,
        'officeTypeId': one_candidate.officeTypeId,
        'runningMateId': one_candidate.runningMateId,
        'runningMateName': one_candidate.runningMateName,
    }
    return one_candidate_filtered
|
56fbbe3c2f128364d800d05fe6690eedb9f5f748
| 17,539
|
def BLA_index(i, stg):
    """
    Return the index in the BVA table for this iteration and stage.

    This is the jump from i to j = i + (1 << stg).
    """
    stage_offset = (1 << stg) - 1
    return 2 * i + stage_offset
|
1016c416d3bb3b58c4aa5145917f53ba825a1fbd
| 17,540
|
def make_range(clusters, extend=0):
    """
    Convert to interval ends from a list of anchors.

    Each cluster is a list of ((xchr, xpos), (ychr, ypos), score) anchors;
    the result holds one ((xchr, xmin, xmax), (ychr, ymin, ymax), score)
    bounding box per cluster with the per-anchor scores summed.

    extend modifies the xmax, ymax boundary of the box,
    which can be positive or negative --
    very useful when we want to make the range as fuzzy as we specify.
    """
    eclusters = []
    for cluster in clusters:
        xlist, ylist, scores = zip(*cluster)
        score = sum(scores)
        # min/max over (chr, pos) tuples compare chr first -- presumably all
        # anchors of a cluster share one chromosome; verify upstream.
        xchr, xmin = min(xlist)
        xchr, xmax = max(xlist)
        ychr, ymin = min(ylist)
        ychr, ymax = max(ylist)
        # allow fuzziness to the boundary
        xmax += extend
        ymax += extend
        # because extend can be negative values, we don't want it to be less than min
        if xmax < xmin: xmin, xmax = xmax, xmin
        if ymax < ymin: ymin, ymax = ymax, ymin
        #if xmax < xmin: xmax = xmin
        #if ymax < ymin: ymax = ymin
        eclusters.append(((xchr, xmin, xmax),\
                          (ychr, ymin, ymax), score))
    return eclusters
|
3baad1df3038f09e8998e820669c47f166d0c21f
| 17,541
|
from pathlib import Path
def testr_001_path():
    """Return the resolved path to the TESTR-001 report repository,
    located next to this module."""
    return (Path(__file__).parent / 'TESTR-001').resolve()
|
71ee4fe1358681a716b3490483a18746622b9229
| 17,542
|
def get_capability(capabilities, capability_name):
    """Search a set of capabilities for a specific one.

    Returns the first capability dict whose "interface" equals
    ``capability_name``, or None when there is no match.
    """
    matches = (cap for cap in capabilities if cap["interface"] == capability_name)
    return next(matches, None)
|
4d83ec53a06b75313a47ea3ba618161ecd5f3782
| 17,543
|
import torch
def mpjae(predicted, target):
    """
    Mean per-joint angle error (3d bone vector angle error between gt and
    predicted one).

    :param predicted: tensor of shape [B, T, K] with predicted joint angles
    :param target: tensor of the same shape with ground-truth angles
    :returns: scalar tensor -- the error averaged over all joints
    """
    assert predicted.shape == target.shape  # [B,T, K]
    # Average the absolute error over the batch dimension, per joint.
    # FIX: the original forced the difference onto the GPU with .cuda(),
    # which crashes on CPU-only machines and is unnecessary -- the result of
    # the subtraction already lives on the inputs' device.
    joint_error = torch.mean(torch.abs(predicted - target), dim=0)
    print('each bone angle error:', joint_error)
    return torch.mean(joint_error)
|
07e2cadb9b38c39514558791b3d5c605a19a0dc4
| 17,545
|
def get_ciphers(fw_conn, service):
    """Get Ciphers

    Queries the firewall's SSH cipher configuration for the given service.

    Args:
        fw_conn (PanDevice): A panos object for device
        service (str): A string containing either mgmt or ha for ciphers
    Returns:
        results (Element): XML results from firewall
    """
    xpath = (
        "/config/devices/entry[@name='localhost.localdomain']"
        "/deviceconfig/system/ssh/ciphers/{}".format(service)
    )
    return fw_conn.xapi.get(xpath=xpath)
|
2fe9837c884d1257afb48982721c7e273ddf7fc9
| 17,548
|
def mergeSort(myList: list):
    """
    Sort ``myList`` in place using merge sort and return it.

    Divide and conquer: split the list in half, recursively sort each half,
    then merge the two sorted halves back into ``myList``.

    FIX: the original printed every intermediate sublist on every level of
    the recursion, spamming stdout and slowing the sort dramatically; the
    debug output has been removed.
    """
    if len(myList) > 1:
        middle = len(myList) // 2
        leftList = myList[:middle]
        rightList = myList[middle:]
        # Recursively sort both halves.
        mergeSort(leftList)
        mergeSort(rightList)
        # Merge the sorted halves back into myList.
        # i/j walk the sublists, k writes into the main list.
        i = j = k = 0
        while i < len(leftList) and j < len(rightList):
            if leftList[i] < rightList[j]:
                myList[k] = leftList[i]
                i += 1
            else:
                myList[k] = rightList[j]
                j += 1
            k += 1
        # Copy leftovers (at most one of these loops does any work).
        while i < len(leftList):
            myList[k] = leftList[i]
            i += 1
            k += 1
        while j < len(rightList):
            myList[k] = rightList[j]
            j += 1
            k += 1
    return myList
|
79c5e41df11c1f692891e02cb72f148abecb776f
| 17,549
|
def stdout_to_list(stdout):
    """Convert stdout (str) to a list of its non-empty, stripped lines."""
    stripped = (line.strip() for line in stdout.split('\n'))
    return [line for line in stripped if line]
|
ec641d29201bbfc60a083952daf0b9e696b786dc
| 17,550
|
def straight(start, dice):
    """
    Score the dice based on rules for LITTLE_STRAIGHT or BIG_STRAIGHT.

    Sorts ``dice`` in place, then checks that the values run consecutively
    upward from ``start``; returns 30 on success, 0 otherwise.
    """
    dice.sort()
    expected = start
    for value in dice:
        if value != expected:
            return 0
        expected += 1
    return 30
|
37062c8ac40f7a1fbea6f7afdfc219acc66c0df7
| 17,551
|
def Cumfreq(symbol, dictionary):
    """
    Return the cumulative frequency of ``symbol``.

    Sums the frequencies of every symbol in ``dictionary`` from the start of
    the (insertion-ordered) dictionary up to and including ``symbol``.

    Arguments:
        symbol -- the symbol whose cumulative frequency is wanted
        dictionary -- maps each symbol in the stream to its frequency
    Returns:
        int -- upper bound: the sum of all frequencies from the beginning of
        the dictionary through ``symbol``
    """
    total = 0
    for current in dictionary:
        total += dictionary[current]
        if current == symbol:
            break
    return total
|
b5dffb0512b4704bd76c49eac4bb3e9714ebbcb1
| 17,552
|
def parseCoords(coordList):
    """
    Pass in a list of values from <coordinate> elements and return the list
    of coordinate pairs forming the road geometry.
    """
    def parseCoordGroup(coordGroupStr):
        """
        Parse one <coordinates> string.  A string with more than three
        whitespace-separated entries is road geometry: each entry is a
        comma-separated "lon,lat[,alt]" token, converted to a (lat, lon)
        float pair.  Shorter strings (a single place marker) yield [].
        """
        tokens = coordGroupStr.strip().split(" ")
        if len(tokens) <= 3:
            return []
        pairs = []
        for token in tokens:
            values = [float(part) for part in token.split(",")]
            # input order is lon,lat[,alt]; swap to (lat, lon)
            pairs.append((values[1], values[0]))
        return pairs
    parsed = []
    for group in coordList:
        parsed += parseCoordGroup(group)
    return parsed
|
7c358973fc4279e03cf8d535ff41de7571f0a7d2
| 17,554
|
import re
def _make_script_portable(source: str) -> str:
"""Parse a shell script and get rid of the machine-specific parts that
import.py introduces. The resulting script must be run in an environment
that has the right binaries in its $PATH, and with a current working
directory similar to where import.py found its target's make root."""
lines = []
for line in source.split("\n"):
if re.match("cd '?/", line):
# Skip cd's to absolute directory paths. Note that shlex quotes
# its argument with ' if it contains spaces/single quotes.
continue
if re.match("'?/", line):
quote = "'" if line[0] == "'" else ""
ind = line.find(quote + " ")
if ind == -1:
ind = len(line)
else:
ind += len(quote)
lastind = line.rfind("/", 0, ind)
assert lastind != -1
# Emit a call to "which" as the first part, to ensure the called
# binary still sees an absolute path. qemu-irix requires this,
# for some reason.
line = "$(which " + quote + line[lastind + 1 : ind] + ")" + line[ind:]
lines.append(line)
return "\n".join(lines)
|
ec55f23f169798afbae6f5c891be9fd2272850dd
| 17,555
|
def _get_block_device_mappings(ec2_client, ami_id):
"""Returns the list of block device mappings to ensure cleanup.
This list sets connected EBS volumes to be deleted when the EC2
instance is terminated.
"""
# Not all devices use EBS, but the default value for DeleteOnTermination
# when the device does use EBS is true. See:
# * https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-blockdev-mapping.html
# * https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-blockdev-template.html
return [{'DeviceName': mapping['DeviceName'],
'Ebs': {'DeleteOnTermination': True}}
for mapping in ec2_client.Image(ami_id).block_device_mappings
if not mapping.get('Ebs', {}).get('DeleteOnTermination', True)]
|
e2bcae07479b47074c1d78d5a1b0a4e4919ef09b
| 17,556
|
def get_built_ins():
    """
    Get the list of built-in keywords by parsing the R command output in
    'builtin.log'.

    Each line is expected to look like ``[n] "kw1" "kw2"``: token 0 is R's
    index prefix, tokens 1 and 2 are keyword names whose surrounding quotes
    are stripped.

    :return: List of built-in keywords
    :rtype: List
    """
    keywords = []
    with open('builtin.log') as log:
        for line in log:
            tokens = line.split()
            keywords.append(tokens[1].replace('"', ''))
            keywords.append(tokens[2].replace('"', ''))
    return keywords
|
5041578a4eeede150d1ab081fe3500731e26272f
| 17,557
|
def get_foreignkeys():
    """
    Query string for returning foreign keys.

    Joins Oracle's ALL_CONSTRAINTS / ALL_CONS_COLUMNS data-dictionary views,
    pairing each referential ('R') constraint with the primary-key ('P')
    constraint it references, to list the referenced and referencing
    schema/table/column for every foreign key visible to the session.

    :return: string -- the SQL query text
    """
    return """
    SELECT
        p.owner AS referenced_table_schema,
        p.table_name AS referenced_table_name,
        ccp.column_name AS referenced_column_name,
        c.owner AS fk_table_schema,
        c.table_name AS fk_table_name,
        ccc.COLUMN_NAME AS fk_column_name
    FROM all_constraints c,
         all_constraints p,
         all_cons_columns ccp,
         all_cons_columns ccc
    WHERE c.constraint_type ='R'
      AND c.r_constraint_name = p.constraint_name
      AND p.constraint_type IN ('P')
      and c.constraint_name = ccc.constraint_name
      and p.constraint_name = ccp.constraint_name"""
|
06cafdd881897a9cd2d879cef0a9f1ff44235688
| 17,559
|
from datetime import datetime
def datetime_from_salesforce(d):
    """Create a Python datetime from a Salesforce-style ISO8601 string
    (e.g. ``2021-01-02T03:04:05.123000+0000``)."""
    salesforce_format = "%Y-%m-%dT%H:%M:%S.%f%z"
    return datetime.strptime(d, salesforce_format)
|
ca9cbeb5dff44860166a27c771da1cdb8f3d395a
| 17,560
|
def listGetShiftedGeometricMean(listofnumbers, shiftby=10.0):
    """ Return the shifted geometric mean of a list of numbers, where the
    additional shift defaults to 10.0 and can be set via shiftby.

    Each number is shifted up by ``shiftby`` before entering the running
    geometric mean, and the shift is subtracted from the result.
    """
    running = 1.0
    count = 0
    for value in listofnumbers:
        count += 1
        shifted = value + shiftby
        # incremental update: old mean weighted (n-1)/n, new value weighted 1/n
        running = pow(running, (count - 1) / float(count)) * pow(shifted, 1 / float(count))
    return running - shiftby
|
904bb38a199052b086b7a2c695ce675011f65019
| 17,561
|
import ctypes
def make_multiplier(block, multiplier):
    """JIT-compiles a function that multiplies its RDX argument with an
    unsigned 64-bit constant.

    ``block`` is a writable code buffer (e.g. an executable mmap page) that
    receives the machine code in place; ``multiplier`` must fit in an
    unsigned 64-bit integer or ValueError is raised.

    NOTE(review): the summary says RDX, but the encoded ``imul`` reads
    %rdi (the first integer argument in the SysV x86-64 ABI) -- confirm
    which register callers actually rely on.
    """
    if multiplier > (2**64-1) or multiplier < 0:
        raise ValueError("Multiplier does not fit in unsigned 64-bit integer")
    # This function encodes the disassembly of multiply.c, which you can see
    # with the command `make dis`. It may be different on your CPU, so adjust
    # to match.
    #
    # 48 b8 ed ef be ad de movabs $0xdeadbeefed,%rax
    # 00 00 00
    # 48 0f af c7 imul %rdi,%rax
    # c3 retq
    # Encoding of: movabs <multiplier>, rax
    block[0] = 0x48
    block[1] = 0xb8
    # Little-endian encoding of multiplier
    block[2] = (multiplier & 0x00000000000000ff) >> 0
    block[3] = (multiplier & 0x000000000000ff00) >> 8
    block[4] = (multiplier & 0x0000000000ff0000) >> 16
    block[5] = (multiplier & 0x00000000ff000000) >> 24
    block[6] = (multiplier & 0x000000ff00000000) >> 32
    block[7] = (multiplier & 0x0000ff0000000000) >> 40
    block[8] = (multiplier & 0x00ff000000000000) >> 48
    block[9] = (multiplier & 0xff00000000000000) >> 56
    # Encoding of: imul rdi, rax
    block[10] = 0x48
    block[11] = 0x0f
    block[12] = 0xaf
    block[13] = 0xc7
    # Encoding of: retq
    block[14] = 0xc3
    # Return a ctypes function with the right prototype
    # NOTE(review): this returns the CFUNCTYPE *class* (with no argument
    # types declared), not an instance bound to ``block``; presumably the
    # caller instantiates it with the buffer's address -- verify against
    # call sites.
    function = ctypes.CFUNCTYPE(ctypes.c_uint64)
    function.restype = ctypes.c_uint64
    return function
|
b48c0db5635d6d12c215fcfee0f3e9f043132641
| 17,563
|
def similarity_hash(hash_digests):
    # type: (list[bytes]) -> bytes
    """
    Creates a similarity preserving hash from a sequence of equal sized hash digests.

    For every bit position, counts how many digests have that bit set; the
    output bit is 1 when at least half of them do (a bitwise majority vote).

    :param list hash_digests: A sequence of equaly sized byte-hashes.
    :returns: Similarity byte-hash
    :rtype: bytes
    """
    width_bytes = len(hash_digests[0])
    width_bits = width_bytes * 8
    counts = [0] * width_bits
    for digest in hash_digests:
        value = int.from_bytes(digest, "big", signed=False)
        for bit in range(width_bits):
            counts[bit] += (value >> bit) & 1
    threshold = len(hash_digests) / 2
    result = 0
    for bit in range(width_bits):
        if counts[bit] >= threshold:
            result |= 1 << bit
    return result.to_bytes(width_bytes, "big", signed=False)
|
804619477bdea3f7a79c6473d47e55c5106a586a
| 17,564
|
import binascii
def mkauth(username: str, password: str, scheme: str = "basic") -> str:
    """
    Craft a basic auth string.

    Base64-encodes ``username:password`` (keeping b2a_base64's trailing
    newline) and prefixes it with the scheme.
    """
    credentials = (username + ":" + password).encode("utf8")
    encoded = binascii.b2a_base64(credentials).decode("ascii")
    return scheme + " " + encoded
|
b9dd22c830e8c493ac4239ff6f4800e554d1e439
| 17,565
|
def eta_Mc_M(Mc, M):
    """
    Computes the symmetric-mass-ratio from the Chirp Mass and total mass.

    input: Mc, M
    output: eta = (Mc/M)**(5/3)
    """
    ratio = Mc / M
    return ratio ** (5.0 / 3.0)
|
535e2ac7cd08d4b0c7df49bbd1c69287012b65ca
| 17,566
|
def aperture_circle(X_local, m, aperture):
    """
    A circular Aperture.
    name: 'circle'
    size: [radius]
    Contains the radius of the aperture.

    Clears entries of the boolean mask ``m`` (in place) whose (x, y)
    coordinates in ``X_local`` fall on or outside the circle, and returns
    ``m``.
    """
    cx = aperture['origin'][0]
    cy = aperture['origin'][1]
    radius = aperture['size'][0]
    dx = X_local[m, 0] - cx
    dy = X_local[m, 1] - cy
    # keep only points strictly inside the circle
    m[m] &= (dx ** 2 + dy ** 2) < radius ** 2
    return m
|
291216781bd70589e479a3620f78e48c7203e1b3
| 17,567
|
def resetTrendBuffer(b, newsize, newdata=None):
    """ Cleans up the buffer, resizes and inserts new data.

    Shrinks ``b`` to a single slot, discards that slot, applies the new
    maximum size, and appends ``newdata`` when it is a non-empty sequence.
    Returns the resulting buffer length.
    """
    b.resizeBuffer(1)
    b.moveLeft(1)
    b.setMaxSize(newsize)
    if newdata is not None and len(newdata):
        b.extend(newdata)
    return len(b)
|
a6ad580a6c2bc44bc9d1ac172bd7433d7e0f8164
| 17,571
|
def str2pair(x):
    """
    Parse a string like '4,5' into the pair (4, 5.0): the first
    comma-separated field as int, the second as float.
    """
    fields = x.split(',')
    return int(fields[0]), float(fields[1])
|
d191832fe1eb9e03787e7bed3292c86247d09e24
| 17,572
|
def normalize_cols(table):
    """
    Pad short rows to the length of the longest row to help render "jagged"
    CSV files.

    Rows are padded in place with '' and the same table object is returned.
    FIX: an empty table is now returned unchanged (the original raised
    ValueError on ``max()`` of an empty sequence).
    """
    if not table:
        return table
    longest_row_len = max(len(row) for row in table)
    for row in table:
        # pad in place so existing references to the rows stay valid
        row.extend([''] * (longest_row_len - len(row)))
    return table
|
bfd20be54690eaa30f479740809111c8096f592d
| 17,574
|
def get_lvp_params(app):
    """
    Gather parameters for partial LV loads (loads that model an LV customer;
    the load elements are stored in a line element without having own buses
    in PowerFactory, so the lines in pandapower must be split at those
    points and new buses must be created).

    Reads the study case's load-flow command (ComLdf) and returns its
    LV-load-relevant attributes keyed by attribute name.
    """
    com_ldf = app.GetFromStudyCase('ComLdf')
    param_names = ('iopt_sim', 'scPnight', 'Sfix', 'cosfix',
                   'Svar', 'cosvar', 'ginf', 'i_volt')
    return {name: getattr(com_ldf, name) for name in param_names}
|
65cc4270440346c71e9c688797aa0073aba3399a
| 17,575
|
def city_info(request):
    """Return an OpenWeatherMap API city location (fixed fixture data;
    ``request`` is accepted but unused)."""
    coordinates = {'lon': 1.21667, 'lat': 44.333328}
    return {
        '_id': 6434841,
        'name': 'Montcuq',
        'country': 'FR',
        'zip_code': 46800,
        'coord': coordinates,
    }
|
b9292046a8d06309b98ea7964f7ea298b5e7f549
| 17,576
|
import argparse
def get_arguments():
    """Parse all the arguments provided from the CLI.

    FIX: ``--weight-decay`` declared ``type=int`` with a float default, so
    any value passed on the command line (e.g. ``0.0001``) raised a
    ValueError; it is now ``type=float``.

    Returns:
        A list of parsed arguments.
    """
    parser = argparse.ArgumentParser(
        description="FC-DRN: Fully Convolutional DenseResNet")
    parser.add_argument('--results-dir', type=str, nargs='?',
                        default="weights_pretrained/",
                        help="Temp directory to save checkpoints during "
                             "training. To disable intermediate directory,"
                             " set this argument to the same value that"
                             " 'results-dir-final'")
    parser.add_argument('--results-dir-final', type=str, nargs='?',
                        default="weights_pretrained/",
                        help="Final directory where experiment related files "
                             "are stored: log file, json file with script"
                             " configuration, weights (last epoch and best"
                             " jaccard) and evaluation images.")
    parser.add_argument("--exp-name", type=str, default='fc-drn-p-d',
                        help="Experiment name")
    parser.add_argument("--load-weights", action='store_true',
                        help="Load experiment in '--exp-name-toload' "
                             "specified folder.")
    parser.add_argument("--exp-name-toload", type=str, default='',
                        help="Introduce an experiment name to load. "
                             "Script will begin training from this "
                             "experiments' best weights.")
    parser.add_argument("--model", type=str, default='pools_ft_dils',
                        choices=['pools', 'dils', 'sconv', 'pools_ft_dils',
                                 'sconv_ft_dils'],
                        help="Change transformation types.")
    parser.add_argument("--dataset", type=str, default='camvid',
                        help="Dataset to use. Options: 'camvid'.")
    parser.add_argument("--train-batch-size", type=int, default=3)
    parser.add_argument("--val-batch-size", type=int, default=1)
    parser.add_argument("--epoch-num", type=int, default=1000,
                        help="Number of epochs to train.")
    parser.add_argument("--patience", type=int, default=200)
    parser.add_argument("--learning-rate", type=float, default=0.001)
    parser.add_argument("--optimizer", type=str, default='RMSprop',
                        help="Optimizer. Options: 'RMSprop', 'sgd', 'Adam'. ")
    # type was int, which cannot parse fractional CLI values like 0.0001
    parser.add_argument("--weight-decay", type=float, default=0.00001)
    parser.add_argument("--init-dils", type=str, default='identity',
                        help="Initialization for dilated convolutions."
                             " Options: 'identity', 'random'.")
    parser.add_argument("--crop-size", nargs='+', type=int, default=(324, 324),
                        help="Crop size. Enter '--crop-size w h' ")
    parser.add_argument("--loss-type", type=str, default='cce',
                        help="Loss to use. Options: 'cce', 'cce_soft'. ")
    parser.add_argument("--show-model", action='store_true',
                        help="Show number of parameters in the model and the"
                             " model itself.")
    parser.add_argument("--save-test-images", action='store_true',
                        help="Save predictions while evaluating with "
                             "'--test True'.")
    parser.add_argument("--train", action='store_true',
                        help="Train the model.")
    parser.add_argument("--test", action='store_false',
                        help="Evaulate either in the "
                             "test or the validation set.")
    parser.add_argument("--test-set", type=str, default='test',
                        help="Select the test in which to evaluate the model:"
                             " 'test' or 'val'")
    parser.add_argument("--checkpointer", action='store_false',
                        help="If True, training resumes from the 'last epoch'"
                             " weights found in the experiment folder. Useful "
                             "to save experiment at each epoch. If False,"
                             " training starts from scratch.")
    return parser.parse_args()
|
085a27ddec2b6f457da1e3f3f735d89d297d952d
| 17,577
|
from typing import List
def resolve_vlan_ids(vlans: List, root) -> List[int]:
    """Resolve named vlans from /configuration/vlans/vlan path.

    Numeric entries are taken as-is; named entries are looked up by name in
    the device configuration tree and silently skipped when not found.
    """
    resolved = []
    for member in vlans:
        text = member.text
        if text.isnumeric():
            resolved.append(int(text))
        else:
            matches = root["dev_conf"].xpath(
                "//vlan[name/text()='{member}']/vlan-id".format(member=text)
            )
            if matches is not None and len(matches):
                resolved.append(int(matches[0].text))
    return resolved
|
e000b25b4061ac7fa1b61d583eb9a63750ed381b
| 17,578
|
def int_to_hex(number):
    """
    Convert a non-negative integer to an uppercase hexadecimal string
    (without the '0x' prefix).

    Check 0
    >>> int_to_hex(0)
    '0'
    Value less than 10
    >>> int_to_hex(9)
    '9'
    Value requiring letter digits
    >>> int_to_hex(15)
    'F'
    Boundary (Uses 10s place)
    >>> int_to_hex(16)
    '10'
    Boundary (Uses 10s, and 1s places)
    >>> int_to_hex(17)
    '11'
    Multiple 16ths
    >>> int_to_hex(129)
    '81'
    Boundary (Uses 100s, 10s, and 1s places)
    >>> int_to_hex(301)
    '12D'
    """
    if number == 0:
        return '0'
    hex_digits = '0123456789ABCDEF'
    digits = []
    while number > 0:
        # FIX: the original used int(number / 16); true division goes
        # through a float, which loses precision for numbers above 2**53.
        # divmod keeps exact integer arithmetic.
        number, remainder = divmod(number, 16)
        digits.append(hex_digits[remainder])
    return ''.join(reversed(digits))
|
d3b1dd39aa6f63daa7491ad20c6a4576c3a7df53
| 17,579
|
def print_list_body(list_in, tabs, output="", indent=4):
    """Append each item of ``list_in`` to ``output``, one per line, indented
    one tab level deeper than ``tabs`` (``indent`` spaces per level)."""
    prefix = " " * (indent * (tabs + 1))
    return output + "".join(prefix + str(item) + "\n" for item in list_in)
|
be9fdeabd8a3f7c153c63a64305f2dd904d9e6ff
| 17,580
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.