| content (string, 35–416k chars) | sha1 (string, 40 chars) | id (int64, 0–710k) |
|---|---|---|
def read_file(filename):
"""
This function reads the names of wallpapers out of the configuration
file, filename. Assumed data:
"""
with open(filename, 'r') as f:
file_lines = f.read().split('\n')
if file_lines.count('') != 0:
file_lines.remove('')
return file_lines
|
db1a1b0673f1889bd65bc921ca72397aeb03d0f8
| 14,606
|
import os
def recReadDir(baseDir, contain = ''):
""" read filenames recursively """
# no dir to expand, return
if len([x for x in baseDir if os.path.isdir(x)]) == 0:
# filter before return results
baseDir = [x for x in baseDir if contain in x]
return baseDir
# expand dirs
out = []
for it in baseDir:
if os.path.isdir(it):
# expand dirs
out += [os.path.abspath(os.path.join(it,x)) for x in os.listdir(it)]
elif contain in it:
# filter one more time to make efficient
out += [it]
return recReadDir(out, contain)
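A minimal usage sketch, assuming a throwaway directory tree built with tempfile just for the demo:
import os
import tempfile

with tempfile.TemporaryDirectory() as root:
    os.makedirs(os.path.join(root, 'sub'))
    for rel in ('a.txt', 'b.log', os.path.join('sub', 'c.txt')):
        open(os.path.join(root, rel), 'w').close()
    # Expect the absolute paths of a.txt and sub/c.txt (order follows os.listdir).
    print(recReadDir([root], contain='.txt'))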
|
7d18e5cd3243d386eb44481622e5c60ac8f1fde3
| 14,607
|
def dailystats_date_json():
"""Return a /dailystats/<DATE> response."""
return {
"id": 0,
"day": "2018-06-04",
"mint": 12.779999999999999,
"maxt": 33.329999999999998,
"icon": 2,
"percentage": 100,
"wateringFlag": 0,
"vibration": [100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
"simulatedPercentage": 100,
"simulatedVibration": [100, 100, 100, 100, 100, 100, 100, 100, 100],
}
|
7bffa4d1f7d2a00aa51f1cc8929a2cd3383375fe
| 14,608
|
def char_fun(A, b):
"""
Returns True if dictionary b is a subset
of dictionary A and False otherwise.
"""
result = b.items() <= A.items()
return result
|
e8d2f9bc69f1d3b164e5d80a35fca870f66a632a
| 14,609
|
def safe_check_lens_eq(arr1, arr2, msg=None):
"""
    Check that two sequences are safe to compare element-wise: assert their outer
    lengths match, or return True immediately if either argument is None.
safe_check_lens_eq(None, 1)
safe_check_lens_eq([3], [2, 4])
"""
if msg is None:
msg = 'outer lengths do not correspond'
if arr1 is None or arr2 is None:
return True
else:
assert len(arr1) == len(arr2), msg + '(%r != %r)' % (len(arr1), len(arr2))
|
09163acf03148710f670cda180e7c026f507b0df
| 14,610
|
import os
def getArchSpec(architecture):
"""
Helper function to return the key-value string to specify the architecture
used for the make system.
"""
arch = architecture if architecture else None
if not arch and "ARCH" in os.environ:
arch = os.environ["ARCH"]
return ("ARCH=" + arch) if arch else ""
|
ec27283e79698fb0525d2464836f5ddaaca2d399
| 14,611
|
import random
def generate_exact_cover(num_of_subsets):
"""Generates a new exact cover problem with the given number of random filled subsets"""
subset = set()
while len(subset) < num_of_subsets:
        subset.add(frozenset(random.sample(range(1, num_of_subsets + 1), random.randint(0, num_of_subsets))))
return list(subset)
|
2e63bdc7181caa312ed42d48578989c8ddc29050
| 14,612
|
def read_long_description(path: str) -> str:
"""Utility function to read the README file."""
with open(path) as file:
data: str = file.read()
return data
|
c206f2cec2613fde5217845c1e36cd507924716e
| 14,613
|
def permute(nums: list[int]) -> list[list[int]]:
"""ALGORITHM"""
# DS's/res
uniq_perms = [[]]
for curr_num in nums:
uniq_perms = [
perm[:insertion_idx] + [curr_num] + perm[insertion_idx:]
for perm in uniq_perms
for insertion_idx in range(len(perm) + 1)
]
return uniq_perms
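A minimal usage sketch; the ordering below follows from inserting each new number left-to-right into every slot:
print(permute([1, 2, 3]))
# [[3, 2, 1], [2, 3, 1], [2, 1, 3], [3, 1, 2], [1, 3, 2], [1, 2, 3]]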
|
3da21c4bdaf77a1e55c3ece4b90a90d53a3565f7
| 14,614
|
def update_par_helper(row, blocks, add_num):
    """Accepts a row, its block constraints, and an integer (add_num), and
    updates the parameters the main function receives according to the
    pointer value. Each of the possible add_num values updates the
    parameters differently."""
update_pointer_value = 0
update_row = row[1:]
update_blocks = blocks[:]
if add_num == 0:
update_pointer_value = 0
elif add_num == 1:
if len(update_blocks) > 0:
b = update_blocks[0] - 1
if b == 0:
update_pointer_value = -1
del update_blocks[0]
elif b > 0:
update_pointer_value = 1
update_blocks[0] = b
return update_row, update_blocks, update_pointer_value
|
732bba078066418312c454dd4fbb7132632f25d2
| 14,616
|
def _get_seeds_to_create_not_interpolate_indicator(dense_keys, options):
"""Get seeds for each dense index to mask not interpolated states."""
seeds = {
dense_key: next(options["solution_seed_iteration"]) for dense_key in dense_keys
}
return seeds
|
e3ee4260309be644f11becba19f1ef3386d182d9
| 14,617
|
def make_bigrams(bigram_mod, texts):
"""Gera bigrama dos textos."""
return [bigram_mod[doc] for doc in texts]
|
9ac129f72a5dd4fdf4fb64694073df2482a76a9e
| 14,618
|
def _case_convert_snake_to_camel(token: str) -> str:
"""Implements logic to convert a snake case token to a camel case one.
"""
while True:
try:
# find next underscore
underscore_loc = token.index('_')
except ValueError:
# converted all underscores
break
# is the underscore at the end of the string?
if underscore_loc == len(token) - 1:
break
orig = token
token = f'{orig[:underscore_loc]}{orig[underscore_loc+1].upper()}'
# is there more after the capital?
if len(orig) > underscore_loc+2:
token += f'{orig[underscore_loc+2:]}'
return token
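A couple of illustrative calls; note that a trailing underscore is deliberately left untouched by the loop above:
print(_case_convert_snake_to_camel('http_response_code'))  # httpResponseCode
print(_case_convert_snake_to_camel('value_'))              # value_ (unchanged)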
|
3796f627cc836f884aeab765769cf652f8fd1ff9
| 14,619
|
def make_reduce_compose(obj, hook_cls, hooks: list):
"""only keeps the overridden methods not the empty ones"""
def _get_loop_fn(fns):
def loop(*args, **kwargs):
for fn in fns:
fn(*args, **kwargs)
return loop
method_list = [func for func in dir(hook_cls)
if callable(getattr(hook_cls, func)) and not func.startswith("__")]
for method in method_list:
hook_fns = []
for hook in hooks:
base_fn = getattr(hook_cls, method)
hook_fn = getattr(hook, method)
if hook_fn.__func__ != base_fn:
hook_fns.append(hook_fn)
setattr(obj, method, _get_loop_fn(hook_fns))
|
03cc81714ec1309e2db896d0e59f98ac528f5adb
| 14,620
|
def _truncate_float(matchobj, format_str='0.2g'):
"""Truncate long floats
Args:
matchobj (re.Match): contains original float
format_str (str): format specifier
Returns:
str: returns truncated float
"""
if matchobj.group(0):
return format(float(matchobj.group(0)), format_str)
return ''
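The helper is intended as an re.sub replacement callback; a small sketch, with the float-matching pattern chosen here only for the demo:
import re

text = "loss=0.123456 lr=0.000500"
print(re.sub(r"\d+\.\d+", _truncate_float, text))
# loss=0.12 lr=0.0005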
|
7fc4e90e496ea6f04838a1ccefb810c2467fafaf
| 14,621
|
import base64
def to_base64(data):
"""
Utility function to base64 encode.
:param data: The data to encode.
:return: The base64 encoded data.
"""
return base64.b64encode(data)
|
014ca1e1534e9350034e148abcceac525b0f0349
| 14,622
|
def json_set_check(obj):
"""
json.dump(X,default=json_set_check)
https://stackoverflow.com/questions/22281059/set-object-is-not-json-serializable
"""
if isinstance(obj, set):
return list(obj)
raise TypeError
|
d8b632c54938aac4797794d5d40fcd93c3415e04
| 14,623
|
import zipfile
def get_pfile_contents(pfile):
"""
Get a list of files within the pfile Archive
"""
if zipfile.is_zipfile(pfile):
zip = zipfile.ZipFile(pfile)
return zip.namelist()
else:
return None
|
8f67b87a0e35fe42d0c20cb9ed0c3cdc93a89163
| 14,624
|
def rate(report, rate_type):
"""generate gamma or epsilon rate
report (list of str): diagnostic report
rate_type (str): gamma or epsilon
return: binary result in form of str"""
pre_rate = []
if rate_type == "gamma":
rt = 1
elif rate_type == "epsilon":
rt = -1
else:
raise ValueError
for n in range(len(report[0])):
pre_rate.append(0)
for r in report:
for bul in range(len(r)):
if r[bul] == "1":
pre_rate[bul] += rt
elif r[bul] == "0":
pre_rate[bul] -= rt
for bul in range(len(pre_rate)):
if pre_rate[bul] > 0:
pre_rate[bul] = "1"
elif pre_rate[bul] < 0:
pre_rate[bul] = "0"
else:
if rate_type == "gamma":
pre_rate[bul] = "1"
elif rate_type == "epsilon":
pre_rate[bul] = "0"
else:
raise ValueError
rate = "".join(pre_rate)
return rate
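A usage sketch with a small diagnostic report (the kind of bit-column puzzle this function appears to target):
report = ["00100", "11110", "10110", "10111", "10101"]
print(rate(report, "gamma"))    # '10110' (most common bit per column)
print(rate(report, "epsilon"))  # '01001' (least common bit per column)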
|
3e08ea470bcbdc7805f2f056086f2965aa4bc91f
| 14,625
|
import re
def twitter_preprocess(
text: str,
no_rt: bool = True,
no_mention: bool = False,
no_hashtag: bool = False
) -> str:
"""
Preprocessing function to remove retweets, mentions and/or hashtags from
raw tweets.
Examples
--------
>>> twitter_preprocess("RT @the_new_sky: Follow me !")
'Follow me !'
>>> twitter_preprocess("@the_new_sky please stop making ads for your #twitter account.", no_mention=True, no_hashtag=True)
' please stop making ads for your account.'
"""
if no_rt:
text = re.sub(r"^RT @(?:[^ ]*) ", '', text)
if no_mention:
text = re.sub(r"@\w+", '', text)
if no_hashtag:
text = re.sub(r"#\w+", '', text)
return text
|
1644b7da6b43ebd77a33bcb186f2634b6d3bc8db
| 14,628
|
def calculate_diff_metrics(util1, util2):
"""Calculate relative difference between two utilities"""
util_diff = {}
for metric_k1, metric_v1 in util1.items():
if metric_k1 not in util2:
continue
util_diff[metric_k1] = {}
for avg_k1, avg_v1 in metric_v1.items():
if avg_k1 not in util2[metric_k1]:
continue
diff = abs(avg_v1 - util2[metric_k1][avg_k1]) / max(1e-9, avg_v1)
util_diff[metric_k1][avg_k1] = diff
return util_diff
|
45b8dc7a2441333870c8a0faa5b6e8da56df8a40
| 14,629
|
def rpht_output(file_name):
""" Read the projected frequencies
"""
# Read the file and read in the non-zero frequencies
freqs = []
with open(file_name, 'r') as freq_file:
for line in freq_file:
if line.strip() != '':
freqs.append(float(line.strip()))
# Build lists for the real and imaginary frequencies
real_freqs = []
imag_freqs = []
for i, val in enumerate(freqs):
# Ignore zeros
if val != 0.0:
# Always include the first element (so logic later works)
if i == 0:
real_freqs.append(val)
else:
# If current val higher than previous, it is an imag. number
if freqs[i] <= freqs[i-1]:
real_freqs.append(val)
else:
imag_freqs.append(val)
return real_freqs, imag_freqs
|
e2f7e181f68c23be276588e662dd7db9f7355270
| 14,630
|
from typing import List
from typing import Tuple
import re
def extract_format_items(s: str) -> List[Tuple[int, str]]:
"""Return a list (index, format item) pairs from a c-style format string.
Based on:
https://stackoverflow.com/questions/30011379/how-can-i-parse-a-c-format-string-in-python
"""
    cfmt = r'''
( # start of capture group 1
% # literal "%"
(?: # first option
(?:[-+0 #]{0,5}) # optional flags
(?:\d+|\*)? # width
(?:\.(?:\d+|\*))? # precision
(?:h|l|ll|w|I|I32|I64)? # size
[cCdiouxXeEfgGaAnpsSZ] # type
) | # OR
%%) # literal "%%"
'''
return [(m.start(1), m.group(1)) for m in re.finditer(cfmt, s, flags=re.X)]
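A minimal usage sketch showing the (offset, format item) pairs the regex yields:
print(extract_format_items("id=%d name=%-10s ratio=%.2f done%%"))
# [(3, '%d'), (11, '%-10s'), (23, '%.2f'), (32, '%%')]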
|
93ce59d40dd60772a9ea6235c945a17d71ca07d0
| 14,633
|
def text_progress(current, total):
"""Opens the photo_import_settings.json file in a text editor
Parameters:
current (String): The currently completed progress
total (String): The total amount
Returns:
progress (String): A formatted string with the current progress [current/total]
"""
return '[' + format(current, '0' + str(len(str(total)))) + \
'/' + \
format(total, '0' + str(len(str(total)))) + ']'
|
b04b5bf018ff96c2e0f4ed5f1ce60c20c21e9574
| 14,634
|
def _householder_factor(newton, halley, hh3):
"""
:param newton:
:param halley:
:param hh3:
:return:
"""
return (1 + 0.5 * halley * newton) / (1 + newton * (halley + hh3 * newton / 6))
|
373e3155557a2403ec79e78a5239d54f2ca651f5
| 14,635
|
def toy_reward(config):
""" The reward function to maximize (ie. returns performance from a fake training trial).
Args:
config: dict() object defined in unit-test, not ConfigSpace object.
"""
reward = 10*config['b'] + config['c']
reward *= 30**config['a']
if config['d'] == 'good':
reward *= 5
elif config['d'] == 'neutral':
reward *= 2
return reward
|
cc8ae7dfc31c8867fd1fb14720521d0b52e11074
| 14,636
|
def compute_length(of_this):
"""
    of_this is a dict of list-likes. This function computes the length of that object,
    defined as the common length of all the list-likes, which are assumed to be equal.
    It also implicitly checks that the lengths are equal, which is necessary for Message/TensorMessage.
"""
lengths = [len(value) for value in of_this.values()]
if lengths == []:
# An empty dict has 0 length.
return 0
if len(set(lengths)) != 1: # Lengths are not all the same
raise ValueError("Every element of dict must have the same length.")
return lengths[0]
|
4f8e2b3b17ba6bed29e5636de504d05a028304d8
| 14,637
|
import socket
def client(
host: str,
port: str,
use_sctp: bool = False,
ipv6: bool = False,
buffer_size: int = 4096,
timeout: float = 5.0
) -> int:
"""Main program to run client for remote shell execution
:param host: Server name or ip address
:type host: str
:param port: Port on which server listens, can be also name of the service
:type port: str
:param use_sctp: Use SCTP transport protocol or not, defaults to False
:type use_sctp: bool, optional
:param ipv6: Use IPv6 protocol, defaults to False
:type ipv6: bool, optional
:param buffer_size: Buffer size for recv and send
:type buffer_size: int, optional
:param timeout: Timeout after which command is not more awaited, defaults to 5.0
:type timeout: float, optional
:return: Exit code
:rtype: int
"""
family = socket.AF_INET
transport_protocol = socket.IPPROTO_TCP
# Whether to use ipv6 or not
if ipv6:
family = socket.AF_INET6
# Whether to use SCTP or not
if use_sctp:
transport_protocol = socket.IPPROTO_SCTP
# Use DNS to get address and port of a server
try:
sockaddr = socket.getaddrinfo(
host=host,
port=port,
family=family,
proto=transport_protocol
)[0][-1]
except socket.gaierror as e:
print(f"getaddrinfo() error ({e.errno}): {e.strerror}")
return -1
# Define socket
try:
sock = socket.socket(family, socket.SOCK_STREAM, transport_protocol)
sock.setblocking(True)
sock.settimeout(timeout)
except OSError as e:
print(f"socket() error ({e.errno}): {e.strerror}")
return -2
# Connect to a server
try:
sock.connect(sockaddr)
print(f"Connected to {sockaddr[0]}:{sockaddr[1]}")
except OSError as e:
print(f"connect() error ({e.errno}): {e.strerror}")
return -3
print("Press: Ctrl+D to safely exit, Ctrl+C to interrupt")
# Remote shell endless loop
try:
while True:
cmd = input('> ')
try:
sock.sendall(bytes(cmd, 'utf-8'))
except OSError as e:
print(f"sendall() error ({e.errno}): {e.strerror}")
return -3
try:
data = sock.recv(buffer_size)
print(data.decode('utf-8').strip(' \n'))
except socket.timeout as e:
print("Request timeout..")
except OSError as e:
print(f"recv() error({e.errno}): {e.strerror}")
return -3
except KeyboardInterrupt:
print(" Keyboard Interrupt")
sock.close()
return 255
except EOFError:
print("Safely Closing")
sock.close()
return 0
|
196de595f6d6573c6bc1a2b2209688060b246cdc
| 14,638
|
def gamma_approx(mean, variance):
"""
Returns alpha and beta of a gamma distribution for a given mean and variance
"""
return (mean ** 2) / variance, mean / variance
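A short method-of-moments check, assuming the shape/rate parameterization where mean = alpha/beta and variance = alpha/beta**2:
alpha, beta = gamma_approx(mean=4.0, variance=2.0)
print(alpha, beta)                      # 8.0 2.0
print(alpha / beta, alpha / beta ** 2)  # recovers the original 4.0 and 2.0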
|
8103a69d807c39c95c1604d48f16ec74761234c4
| 14,639
|
def public_coverage_filter(data):
"""
Filters for the public health insurance prediction task; focus on low income Americans, and those not eligible for Medicare
"""
df = data
df = df[df['AGEP'] < 65]
df = df[df['PINCP'] <= 30000]
return df
|
e8a579437fe484a738deefe6b32defaf405b5a58
| 14,641
|
def get_snippet(soup):
"""obtain snippet from soup
:param soup: parsed html by BeautifulSoup
:return: snippet_list
"""
tags = soup.find_all("div", {"class": "gs_rs"})
snippet_list = [tags[i].text for i in range(len(tags))]
return snippet_list
|
8955aa6eee837f6b1473b8de2d02083d35c3b46e
| 14,642
|
import yaml
def read_config(file_name="config.yaml"):
"""Read configurations.
Args:
file_name (str): file name
Returns:
configs (dict): dictionary of configurations
"""
with open(file_name, 'r') as f:
config = yaml.safe_load(f)
return config
|
51faf47b4c28d1cbf80631d743d9087430efb148
| 14,643
|
def filter_dual_selection_aromatic(sele1_atoms, sele2_atoms, aromatic1_index, aromatic2_index):
"""
Filter out aromatic interactions that are not between selection 1 and selection 2
Parameters
----------
sele1_atoms: list
List of atom label strings for all atoms in selection 1
sele2_atoms: list
List of atom label strings for all atoms in selection 2
aromatic1_index: int
Aromatic atom index 1
aromatic2_index: int
Aromatic atom index 2
Returns
-------
filter_bool: bool
True to filter out interaction
"""
if (aromatic1_index in sele1_atoms) and (aromatic2_index in sele2_atoms):
return False
if (aromatic1_index in sele2_atoms) and (aromatic2_index in sele1_atoms):
return False
return True
|
b1cb590510e45b0bfb5386e1514f24911156d01b
| 14,645
|
def no_numbers(data: str):
"""
Checks whether data contain numbers.
:param data: text line to be checked
:return: True if the line does not contain any numbers
"""
numbers = "0123456789"
for c in data:
if c in numbers:
return False
return True
|
d6f0f306c980da2985d906fa0eaee96d2f598e7b
| 14,648
|
def splinter_webdriver():
"""Use Chrome, which is simpler to launch in headless mode.
Firefox needs the Gecko driver to be manually installed first.
Moreover, as of 22/07/2018, Firefox in headless mode is not yet
available in python:3.6-alpine3.7 (version 52).
"""
return 'chrome'
|
6586567f423c12475b2a084d9945667bc34636bc
| 14,649
|
def lung_capacity(mass):
"""Caclulate lung capacity
Args
----
mass: float
Mass of animal
Return
------
volume: float
Lung volume of animal
References
----------
Kooyman, G.L., Sinnett, E.E., 1979. Mechanical properties of the harbor
porpoise lung, Phocoena phocoena. Respir Physiol 36, 287–300.
"""
return 0.135 * (mass ** 0.92)
|
75bddd298ad7df69cda6c78e2353ee4cef22865a
| 14,651
|
def correlation_calculator(x_points: list[float], y_points: list[float]) -> float:
"""Return a correlation of the association between the x and y variables
We can use this to judge the association between our x and y variables to determine if
we can even use linear regression. Linear regression assumes that there is some sort of
association between the x and y variables.
    We will use the Pearson Correlation Coefficient Formula, the most commonly used correlation
    formula. The formula is as follows:
        - r = (n(Σxy) - (Σx)(Σy)) / ((nΣx^2 - (Σx)^2)(nΣy^2 - (Σy)^2)) ** 0.5
        r = Pearson coefficient
        n = number of the pairs of the stock
        ∑xy = sum of products of the paired scores
        ∑x = sum of the x scores
        ∑y = sum of the y scores
        ∑x^2 = sum of the squared x scores
        ∑y^2 = sum of the squared y scores
Preconditions:
- len(x_points) == len(y_points)
>>> x_p = [6,8,10]
>>> y_p = [12,10,20]
>>> round(correlation_calculator(x_p, y_p), 4) == 0.7559
True
"""
# calculate n, the number of pairs
n = len(x_points)
# Calculate the sum for x points and x squared points
sum_x = 0
sum_x_squared = 0
for val in x_points:
sum_x += val
sum_x_squared += val ** 2
# Calculate the sum for y points and y squared poionts
sum_y = 0
sum_y_squared = 0
for val in y_points:
sum_y += val
sum_y_squared += val ** 2
# Calculate the sum for x points times y points
sum_xy = 0
for i in range(n):
sum_xy += x_points[i] * y_points[i]
# print(sum_x, sum_y, sum_x_squared, sum_y_squared, sum_xy)
numer = (n * sum_xy - sum_x * sum_y)
denom = ((n * sum_x_squared - sum_x ** 2) * (n * sum_y_squared - sum_y ** 2)) ** 0.5
r = numer / denom
return r
|
3fa697a15ff86511cff2a1b37b532f3d14caf7d8
| 14,652
|
async def database_feeds(db):
"""Get database metrics pertaining to feeds"""
metrics = {
"database_feeds_count_total": await db.feeds.count(),
"database_feeds_status_error_total": 0,
"database_feeds_status_unreachable_total": 0,
"database_feeds_status_inaccessible_total": 0,
"database_feeds_status_redirected_total": 0,
"database_feeds_status_fetched_total": 0,
"database_feeds_status_pending_total": 0,
}
cursor = db.feeds.aggregate([{"$group": {"_id": "$last_status", "count": {"$sum": 1}}},
{"$sort":{"count":-1}}])
counts = {i['_id']: i['count'] async for i in cursor}
    for k, v in counts.items():
        if k is None:
metrics['database_feeds_status_pending_total'] += v
elif k==0:
metrics['database_feeds_status_unreachable_total'] += v
elif 0 < k < 300:
metrics['database_feeds_status_fetched_total'] += v
elif 300 <= k < 400:
metrics['database_feeds_status_redirected_total'] += v
elif 400 <= k < 500:
metrics['database_feeds_status_inaccessible_total'] += v
else:
metrics['database_feeds_status_error_total'] += v
return metrics
|
c7ebbda9acc37edafb22103395904c2863379cf6
| 14,653
|
import math
def count_ctdd(aa_set, sequence):
"""
helper function for ctdd encoding
:param aa_set:
:param sequence:
:return:
"""
number = 0
for aa in sequence:
if aa in aa_set:
number = number + 1
cutoffNums = [1, math.floor(0.25 * number), math.floor(0.50 * number), math.floor(0.75 * number), number]
cutoffNums = [i if i >= 1 else 1 for i in cutoffNums]
code = []
for cutoff in cutoffNums:
myCount = 0
for i in range(len(sequence)):
if sequence[i] in aa_set:
myCount += 1
if myCount == cutoff:
code.append((i + 1) / len(sequence) * 100)
break
if myCount == 0:
code.append(0)
return code
|
bc4f55855794d073397b471652e8dbab18520d1b
| 14,654
|
def generate_where_in_clause(field_name, feature_list):
"""
Args:
field_name (str): Name of column to query
feature_list (list): List of values to generate IN clause
Returns:
        string e.g. `name in ('a', 'b', 'c')` (the WHERE keyword itself is not included)
"""
# Build up 'IN' clause for searching
where_str = f"{field_name} in ("
for p in feature_list:
if not isinstance(p, str):
where_str += f"{str(p)},"
else:
where_str += f"'{str(p)}',"
where_str = f"{where_str[:-1]})"
return where_str
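A usage sketch; strings are quoted while other values are inlined as-is, so inputs should be trusted or sanitized upstream:
print(generate_where_in_clause('name', ['a', 'b', 3]))
# name in ('a','b',3)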
|
d00179cfa6a9841b00195b99863d7b37f60c45e4
| 14,655
|
def set_username(strategy, details, user, social, *args, **kwargs):
"""This pipeline function can be used to set UserProfile.has_username_set = True
Normally not used if the auto-generated username is ugly
"""
if not user:
return None
response = None
if hasattr(user, 'profile'):
user_profile = user.profile
if hasattr(user_profile, 'has_username_set'):
user_profile.has_username_set = True
user_profile.save()
return response
|
1c217a8c0b75e2acb38c06b8f9cefdf70e272e91
| 14,656
|
import requests
def get_wait_time(token):
"""
The item in entries schema:
{
u'waitTime': {
u'status': u'Down', # or u'Operating'
u'postedWaitMinutes': 5,
u'fastPass': {
u'available': True,
u'endTime': u'13: 45: 00',
u'startTime': u'12: 45: 00'
},
u'singleRider': True
},
u'id': u'attTronLightcyclePowerRun;entityType=Attraction;destination=shdr'
},
"""
headers = {
'Host': 'apim.shanghaidisneyresort.com',
'X-Conversation-Id': 'shdrA5320488-E03F-4795-A818-286C658EEBB6',
'Accept': '*/*',
'User-Agent': 'SHDR/4.1.1 (iPhone; iOS 9.3.5; Scale/2.00)',
'Accept-Language': 'zh-cn',
'Authorization': 'BEARER %s' % token,
}
resp = requests.get('https://apim.shanghaidisneyresort.com/facility-service/theme-parks/desShanghaiDisneyland;'
                        'entityType=theme-park;destination=shdr/wait-times?mobile=true&region=&region=CN',
headers=headers)
resp.raise_for_status()
return resp.json()
|
d3915ee70c83b6eae0f2b491c99010bf2ee79754
| 14,657
|
def _zero_out_datetime(dt, unit):
"""To fix a super obnoxious issue where datetrunc (or SQLAlchemy) would
break up resulting values if provided a datetime with nonzero values more
granular than datetrunc expects. Ex. calling datetrunc('hour', ...) with
a datetime such as 2016-09-20 08:12:12.
Note that if any unit greater than an hour is provided, this method will
zero hours and below, nothing more.
:param dt: (datetime) to zero out
:param unit: (str) from what unit of granularity do we zero
:returns: (datetime) a well-behaved, non query-breaking datetime
"""
units = ['year', 'month', 'day', 'hour', 'minute', 'second', 'microsecond']
i = units.index(unit) + 1
for zeroing_unit in units[i:]:
try:
dt = dt.replace(**{zeroing_unit: 0})
except ValueError:
pass
return dt
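A small usage sketch with the example from the docstring:
from datetime import datetime

dt = datetime(2016, 9, 20, 8, 12, 12, 345678)
print(_zero_out_datetime(dt, 'hour'))  # 2016-09-20 08:00:00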
|
adac3a5c84b0b7def93c044488afb57074cf2d09
| 14,660
|
def fitness_score_distance(id1, id2, to_consider, distance):
"""A value to choose between two resources considering the distance"""
return max(distance(id1,to_consider), distance(to_consider, id2))/distance(id1,id2)
|
ab6dd14f0e3ac3ef5b26abe3136ff5a61c3b74a5
| 14,661
|
import pandas
def update_context(df_context) -> pandas.DataFrame:
"""replaces unspecified air and water flows for impact categories that
don't rely on sub-compartments for characterization factor selection."""
single_context = ['Freshwater acidification',
'Terrestrial acidification',
'Climate change, long term',
'Climate change, short term',
'Climate change, ecosystem quality, short term',
'Climate change, ecosystem quality, long term',
'Climate change, human health, short term',
'Climate change, human health, long term',
'Photochemical oxidant formation',
'Ozone Layer Depletion',
'Ozone layer depletion',
'Marine acidification, short term',
'Marine acidification, long term',
'Ionizing radiations',
]
context = { 'Air/(unspecified)' : 'Air',
# 'Water/(unspecified)' : 'Water',
}
df_context.loc[df_context['Indicator'].isin(single_context),
'Context'] = df_context['Context'].map(context).fillna(df_context['Context'])
return df_context
|
5fb3420e5af4bfd10653b8246c370ab5ed5ec254
| 14,664
|
import math
def ln(x):
"""
    x is a strictly positive number.
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Returns the image of x under the natural logarithm function.
"""
return math.log(x)
|
3612dce0120c6b8b3d4176f1883ee8fb1694d7f4
| 14,665
|
import os
def find_file_named(root, name):
"""
Find a file named something
:param root: Root directory to search recursively through
:param name: The name of the file to search for
:return: Full path to the (first!) file with the specified name found,
or None if no file was found of that name.
"""
assert isinstance(root, str)
assert isinstance(name, str)
# Search the directory recursively
for path, dirs, files in os.walk(root):
for file in files:
if file == name:
return os.path.join(path, file)
return None
|
f914dab8a0e775f3033047e368e75ff408294be0
| 14,666
|
def remove(S, rack):
"""removes only one instance of a letter in a rack"""
if rack == []:
return []
if S == rack[0]:
return rack[1:]
return [rack[0]] + remove(S, rack[1:])
|
28041197da6a7978dacbfe65d413d4e6fd9c3a03
| 14,668
|
from typing import Iterable
from typing import Any
from typing import Union
from typing import List
def remove_from_iterable(
the_iterable: Iterable[Any], items_to_remove: Iterable[Any]
) -> Union[Iterable[Any], None]:
"""
remove_from_iterable removes any items in items_to_remove from the_iterable.
:param the_iterable: Source iterable to remove items from.
:type the_iterable: Iterable[Any]
:param items_to_remove: Iterable containing items to be removed.
:type items_to_remove: Iterable[Any]
:return: Iterable containing all remaining items or None if no items left.
:rtype: Union[Iterable[Any], None]
"""
    new_iterable: List[Any] = []
    for item in the_iterable:
        if item not in items_to_remove:
            new_iterable.append(item)
    # Match the documented contract: return None when no items remain.
    return new_iterable if new_iterable else None
|
9ac800fb69f46c8b65ed2ac73158678b4c0ae7d6
| 14,669
|
def import_data(filename):
""" Take data from txt file"""
dataset = list()
with open(filename) as f:
lines = f.readlines()
for line in lines:
dataset.append(line.split())
return dataset
|
d360506f2015f31b09dbac632427390eca10cf1b
| 14,670
|
def to_bytes(data: str):
"""
    Convert a unicode string to GBK-encoded bytes.
"""
try:
bytes_data = data.encode("GBK")
return bytes_data
except AttributeError:
return data
|
9a3777d5ec52daa0a063a089c42943abf4e5c8a9
| 14,671
|
import re
def find_config_file_line_number(path: str, section: str, setting_name: str) -> int:
"""Return the approximate location of setting_name within mypy config file.
Return -1 if can't determine the line unambiguously.
"""
in_desired_section = False
try:
results = []
with open(path) as f:
for i, line in enumerate(f):
line = line.strip()
if line.startswith('[') and line.endswith(']'):
current_section = line[1:-1].strip()
in_desired_section = (current_section == section)
elif in_desired_section and re.match(r'{}\s*='.format(setting_name), line):
results.append(i + 1)
if len(results) == 1:
return results[0]
except OSError:
pass
return -1
|
03b1f77b051f260471653df6e7ddd7a599c8e100
| 14,672
|
def flatten_nested_list(l):
"""Flattens a list and provides a mapping from elements in the list back
into the nested list.
Args:
l: A list of lists.
Returns:
A tuple (flattened, index_to_position):
flattened: The flattened list.
        index_to_position: A dict mapping each index i into flattened to the
          pair (r, c) such that flattened[i] == l[r][c]
"""
flattened = []
index_to_position = {}
r = 0
c = 0
for nested_list in l:
for element in nested_list:
index_to_position[len(flattened)] = (r, c)
flattened.append(element)
c += 1
r += 1
c = 0
return (flattened, index_to_position)
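A usage sketch showing the flattened list alongside the index-to-(row, column) mapping:
flat, pos = flatten_nested_list([['a', 'b'], ['c']])
print(flat)    # ['a', 'b', 'c']
print(pos[2])  # (1, 0): 'c' came from row 1, column 0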
|
14e20ef2a29dc338a4d706e0b2110a56447ae84e
| 14,676
|
def get_owner_email_from_domain(domain):
"""Look up the owner email address for a given domain.
Returning dummy addresses here - implement your own domain->email lookup here.
Returns:
str: email address
"""
email = {
'example-domain-a.example.com': 'owner-a@example.com',
'example-domain-b.example.com': 'owner-b@example.com',
}.get(domain, 'example-resource-owner@example.com')
return email
|
2d3851c87c5ba3c19af4c989887bf750b66ffbe1
| 14,677
|
def stringify_dict(d):
"""create a pretty version of arguments for printing"""
if 'self' in d:
del d['self']
s = 'Arguments:\n'
for k in d:
if not isinstance(d[k], str):
d[k] = str(d[k])
s = s + '%s: %s\n' % (k, d[k])
    return s
|
e79220bf7d1d769f5408cd0532ea97390ad35541
| 14,679
|
def rules(host, _args, _old_graph, graph, _started_time):
"""list all the rules"""
for rule_name in sorted(graph.rules):
host.print_out("%s %s" % (rule_name,
graph.rules[rule_name]['command']))
return 0
|
c4d094a90634101b754debde637685b95fcde556
| 14,680
|
import torch
def linear_regression(X, Y=None, order=None, X_test=None):
"""Closed form linear regression
"""
if X.dim() == 1:
X = X.unsqueeze(1)
if Y is None:
if order is None:
beta_left = torch.matmul(torch.matmul(X.t(), X).inverse(), X.t())
return beta_left
else:
assert isinstance(order, int) and order > 0
X = torch.cat([torch.pow(X, i) for i in range(1, order+1)] + [X.new_ones(X.size())], dim=1)
beta_left = torch.matmul(torch.matmul(X.t(), X).inverse(), X.t())
return beta_left
if Y.dim() == 1:
Y = Y.unsqueeze(-1)
if order is None:
beta = torch.matmul(torch.matmul(torch.matmul(X.t(), X).inverse(), X.t()), Y)
if X_test is None:
Y_test = torch.matmul(X, beta)
else:
Y_test = torch.matmul(X_test, beta)
return beta, Y_test
else:
assert isinstance(order, int) and order > 0
X = torch.cat([torch.pow(X, i) for i in range(1, order+1)] + [X.new_ones(X.size())], dim=1)
if X_test is None:
X_test = X
else:
if X_test.dim() == 1:
X_test = X_test.unsqueeze(1)
X_test = torch.cat([torch.pow(X_test, i) for i in range(1, order+1)] + [X_test.new_ones(X_test.size())], dim=1)
return linear_regression(X, Y, order=None, X_test=X_test)
|
07420512f176955b394aeaa67a0eb0add168b38d
| 14,682
|
def extract_secondary_keys_from_form(form):
"""Extracts text (autocomplete capable) secondary keys list from Indexes form.
@param form: is a MUI secondary indexes form object (Django Form() object)"""
keys_list = []
    for field_id, field in form.fields.items():
        if 'field_name' in field.__dict__:
            if field.field_name:
                f_name = field.__class__.__name__
                # Only keep free-text fields; date and choice fields are not autocomplete capable.
                if f_name not in ("DateField", "ChoiceField"):
                    keys_list.append(field.field_name)
return keys_list
|
bf73217293f379257f7e8f0ab33b45d735b04edb
| 14,683
|
def update_name(name, mapping):
""" Update the street name using the mapping dictionary. """
for target_name in mapping:
if name.find(target_name) != -1:
a = name[:name.find(target_name)]
b = mapping[target_name]
c = name[name.find(target_name)+len(target_name):]
name = a + b + c
return name
|
8046fc9ef83f262251fac1a3e540a607d7ed39b9
| 14,684
|
import re
def annotation_version(repo, tag_avail):
"""Generates the string used in Git tag annotations."""
match = re.match("^(.*)-build([0-9]+)$", tag_avail[repo.git()]["build_tag"])
if match is None:
return "%s version %s." % (repo.git(), tag_avail[repo.git()]["build_tag"])
else:
return "%s version %s Build %s." % (repo.git(), match.group(1), match.group(2))
|
484eada27b698c2d7a9236d7b86ea6c53050e1e8
| 14,685
|
import math
def _scale_absolute_gap(gap, scale):
"""Convert an absolute gap to a relative gap with the
given scaling factor."""
assert scale > 0
if not math.isinf(gap):
return gap / scale
else:
return gap
|
29c1b03e53c66bf7728066ff3503118a266acd3d
| 14,686
|
def yolo_label_format(size, box):
"""
Rule to convert anchors to YOLO label format
Expects the box to have (xmin, ymin, xmax, ymax)
:param size: Height and width of the image as a list
:param box: the four corners of the bounding box as a list
:return: YOLO style labels
"""
dw = 1. / size[0]
dh = 1. / size[1]
x = (box[0] + box[2]) / 2.0
y = (box[1] + box[3]) / 2.0
w = box[2] - box[0]
h = box[3] - box[1]
x = x * dw
w = w * dw
y = y * dh
h = h * dh
return x, y, w, h
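A worked example; since the code divides x by size[0], this sketch assumes size is passed as (width, height):
print(yolo_label_format(size=[200, 100], box=[50, 20, 150, 80]))
# (0.5, 0.5, 0.5, 0.6): normalized center x/y, width, height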
|
c958f75ffe3de8462ed25c5eb27bcaa21d52c46c
| 14,688
|
def upload_dir(app, filename):
""" Returns the upload path for applications.
The end result will be something like:
"uploads/applications/Firefox/icons/firefox.png"
"""
path = ('applications/' + app.name + '/icons/' + filename)
# Note: if the file already exist in the same path, django will
# automatically append a random hash code to the filename!
return path
|
e188c6c11733e0b88313c65b6be67c8db2fd69a9
| 14,689
|
import itertools
def get_elements_by_attr(xml_node, attribute, value):
"""helper function for xml trees when using minidom"""
def recur_get_element_by_attr(xml_node, attribute, value):
if xml_node.attributes:
test_list = xml_node.attributes.keys()
if attribute in test_list:
if xml_node.getAttribute(attribute) == value:
yield xml_node
nls = [recur_get_element_by_attr(nd, attribute, value) for nd in xml_node.childNodes]
yield list(itertools.chain.from_iterable(nls))
def flatten(container):
#from hexparrot @ http://stackoverflow.com/questions/10823877/what-is-the-fastest-way-to-flatten-arbitrarily-nested-lists-in-python pylint: disable=redefined-builtin
for i in container:
if isinstance(i, (list, tuple)):
for j in flatten(i):
yield j
else:
yield i
return list(flatten([el for el in recur_get_element_by_attr(xml_node, attribute, value)]))
|
8ea494665393cc6c885ad4f611dbf0c3302617b2
| 14,690
|
def is_instance_id(instance):
""" Return True if the user input is an instance ID instead of a name """
if instance[:2] == 'i-':
return True
return False
|
f4389c340b21c6b5e0f29babe49c556bd4b4c120
| 14,692
|
def readiness() -> tuple[str, int]:
"""A basic healthcheck. Returns 200 to indicate flask is running"""
return 'ready', 200
|
de1072809a32583a62229e390ea11f6b21f33b1e
| 14,694
|
def name_sep_val(mm, name, sep='=', dtype=float, ipos=1):
"""Read key-value pair such as "name = value"
Args:
mm (mmap): memory map
name (str): name of variable; used to find value line
sep (str, optional): separator, default '='
dtype (type, optional): variable data type, default float
ipos (int, optiona): position of value in line, default 1 (after sep)
Return:
dtype: value of requested variable
Examples:
>>> name_sep_val(mm, 'a') # 'a = 2.4'
>>> name_sep_val(mm, 'volume', ipos=-2) # 'volume = 100.0 bohr^3'
>>> name_sep_val(mm, 'key', sep=':') # 'key:val'
>>> name_sep_val(mm, 'new', sep=':') # 'new:name'
>>> name_sep_val(mm, 'natom', dtype=int) # 'new:name'
"""
idx = mm.find(name.encode())
if idx == -1:
raise RuntimeError('"%s" not found' % name)
mm.seek(idx)
line = mm.readline().decode()
tokens = line.split(sep)
val_text = tokens[ipos].split()[0]
val = dtype(val_text)
return val
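A minimal usage sketch; the file name and contents below are made up for the demo, and the map is rewound between lookups so each search scans the whole file:
import mmap

with open('scf.out', 'w') as f:
    f.write('volume = 100.0 bohr^3\nnatom = 8\n')
with open('scf.out', 'r+b') as f:
    mm = mmap.mmap(f.fileno(), 0)
    print(name_sep_val(mm, 'volume'))            # 100.0
    mm.seek(0)
    print(name_sep_val(mm, 'natom', dtype=int))  # 8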
|
4eee14e88ca2a6f43cb1b046de239c6847d57931
| 14,695
|
def remove_inactive(batch, live_bus, sim=False):
##### CHANGE ARGS NAMING CONVENTION #####
"""
remove buses from live_bus absent in the new stream batch
sim: stream_batch -> live_bus [FALSE]
live_bus -> active_bus [TRUE]
FUTURE: also remove ones that reaches the last stop
"""
if sim:
new_refs = [bus for bus in batch.keys()]
else:
new_refs = [bus['VehicleRef'] for bus in batch]
for ref in list(live_bus.keys()):
if ref not in new_refs:
del live_bus[ref]
return live_bus
|
404373b5c3fe607af667bb388ba41737704ae164
| 14,696
|
def binary_search_two_pointers_recur(sorted_nums, target, left, right):
"""Util for binary_search_feast_recur()."""
# Edge case.
if left > right:
return False
# Compare middle number and recursively search left or right part.
mid = left + (right - left) // 2
if sorted_nums[mid] == target:
return True
elif sorted_nums[mid] < target:
return binary_search_two_pointers_recur(
sorted_nums, target, mid + 1, right)
else:
return binary_search_two_pointers_recur(
sorted_nums, target, left, mid - 1)
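A usage sketch; the caller supplies the initial left/right bounds of the sorted list:
nums = [1, 3, 5, 7, 9, 11]
print(binary_search_two_pointers_recur(nums, 7, 0, len(nums) - 1))  # True
print(binary_search_two_pointers_recur(nums, 4, 0, len(nums) - 1))  # False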
|
553c6217b29f3507290a9e49cd0ebd6acaccec63
| 14,697
|
def _decode_list(vals):
""" List decoder
"""
return [l.decode() if hasattr(l, 'decode') else l for l in vals]
|
b0018f40628e64132c62095a5d87f84d5b0d7323
| 14,698
|
def extract_class(query):
"""Extract original class object from a SQLAlchemy query.
Args:
query (query): SQLAlchemy query
Returns:
class: base class use when setting up the SQL query
"""
first_expression = query.column_descriptions[0]['expr']
try:
# query returns subset of columns as tuples
return first_expression.class_
except AttributeError:
# query returns a full class ORM object
return first_expression
|
48bfec4301c752c68bda613334dcb7dfc17f3f15
| 14,699
|
def get_restore_user(domain, couch_user, as_user_obj):
"""
This will retrieve the restore_user from the couch_user or the as_user_obj
if specified
:param domain: Domain of restore
:param couch_user: The couch user attempting authentication
:param as_user_obj: The user that the couch_user is attempting to get
a restore user for. If None will get restore of the couch_user.
:returns: An instance of OTARestoreUser
"""
couch_restore_user = as_user_obj or couch_user
if couch_restore_user.is_commcare_user():
return couch_restore_user.to_ota_restore_user()
elif couch_restore_user.is_web_user():
return couch_restore_user.to_ota_restore_user(domain)
else:
return None
|
09ad1783236860779064f2929bb91c05915cdd6e
| 14,700
|
import os
import fnmatch
def has_html(folder_path):
"""Simple function that counts .html files and returns a binary:
'True' if a specified folder has any .html files in it, 'False' otherwise."""
html_list = []
for dirpath,dirnames,filenames in os.walk(folder_path):
for file in fnmatch.filter(filenames, "*.html"): # Check if any HTML files in folder_path
html_list.append(file)
if len(html_list)==0:
return False
else:
return True
|
44b262c7ea3f51078d234e9f65e007cf30cea8e8
| 14,701
|
def capitalize_column_names(dataframe):
"""Capitalize column names of the model's input dataframe.
Keep column names consistent with the dataframe used for
training the model."""
dataframe.columns = [x.upper() for x in dataframe.columns]
cols = []
for col in dataframe.columns:
if col.endswith('_ACTIVE'):
col = col.replace('_ACTIVE_ACTIVE', '_ACTIVE_Active')
elif col.endswith('_CLOSED'):
col = col.replace('_CLOSED', '_Closed')
cols.append(col)
dataframe.columns = cols
return dataframe
|
52486a58b72099ebd73c38876604743a386905fe
| 14,702
|
from tempfile import mkdtemp
def create_temporary_directory(prefix_dir=None):
"""Creates a temporary directory and returns its location"""
return mkdtemp(prefix='bloom_', dir=prefix_dir)
|
b2a1ddeb8bcaa84532475e3f365ab6ce649cd50c
| 14,705
|
def get_num_clones(pop_size, clone_factor):
"""
:param pop_size: 100
:param clone_factor: 0.1
:return:
"""
return int(pop_size * clone_factor)
|
2cc9639b4aa598793d305980ad76aecb59157f8b
| 14,706
|
import random
import string
import uuid
def create_customer():
"""
Create a customer entity.
:return: A dict with the entity properties.
"""
name = "".join(random.choice(string.ascii_lowercase) for _ in range(10))
return {
'entity_id': str(uuid.uuid4()),
'name': name.title(),
'email': "{}@server.com".format(name)
}
|
48434187249164992d81694070f027f5ca8b862c
| 14,707
|
from typing import Sequence
def in_bbox(point: Sequence, bbox: Sequence):
"""
Checks if point is inside bbox
:param point: point coordinates [lng, lat]
:param bbox: bbox [west, south, east, north]
:return: True if point is inside, False otherwise
"""
return bbox[0] <= point[0] <= bbox[2] and bbox[1] <= point[1] <= bbox[3]
|
fb95e2506adee4ff425997a0211ee53095894f35
| 14,708
|
def group(extracted_iter, sample_names, gene_ids):
"""Given the raw dictionary results, group into per-sample,
per-gene dictionary."""
samples = {s: {} for s in sample_names}
dns = ("vep", "varscan")
for lined in extracted_iter:
for sampled in lined:
sample = sampled["sample"]
entry = {dn: sampled[dn] for dn in dns}
# select only variants with VEP annotation
if entry["vep"]:
for gene_id in gene_ids:
if gene_id not in samples[sample]:
samples[sample][gene_id] = []
gene_varscan = entry["varscan"]
gene_vep = [x for x in entry["vep"]
if x["Gene"] == gene_id]
if gene_vep:
gene_entry = {
"vep": gene_vep,
"varscan": gene_varscan,
"sample": sample,
"gene": gene_id
}
samples[sample][gene_id].append(gene_entry)
return samples
|
1e5360a5d67636c46f60bcb440ef6c6d185b94de
| 14,709
|
import subprocess
def has_latex():
"""Return whether the 'latex' command is available on the system."""
try:
subprocess.check_output(['latex', '--version'])
return True
except OSError:
return False
|
d8808cf17da9c24c0ff588449cbf01cf036cfbfd
| 14,713
|
def _FinalElement(key):
"""Return final element of a key's path."""
return key.path().element_list()[-1]
|
80ecac3fe3eb3d2587d64dd759af10dcddb399fe
| 14,714
|
def cs_metadata(cursor, cs_id):
"""Get metadata for a given ``cs_id`` in a NEPC database.
Parameters
----------
cursor : cursor.MySQLCursor
A MySQLCursor object. See return value ``cursor`` of :func:`.connect`.
cs_id : int
The ``cs_id`` for a cross section dataset in the NEPC database at ``cursor``.
Returns
-------
list
See :attr:`CS.metadata`. List items are in same order as :attr:`.CS.metadata`.
"""
cursor.execute("SELECT A.`cs_id` , "
"C.`name` , "
"A.`units_e`, A.`units_sigma`, A.`ref`, "
"D.`name`, E.`name`, "
"F.`name`, G.`name`, "
"A.`threshold`, A.`wavelength`, A.`lhs_v`, A.`rhs_v`, "
"A.`lhs_j`, A.`rhs_j`, "
"A.`background`, A.`lpu`, A.`upu`, "
"D.`long_name`, E.`long_name`, "
"F.`long_name`, G.`long_name`, "
"C.`lhs_e`, C.`rhs_e`, "
"C.`lhs_hv`, C.`rhs_hv`, "
"C.`lhs_v`, C.`rhs_v`, "
"C.`lhs_j`, C.`rhs_j` "
"FROM `cs` AS A "
"LEFT JOIN `processes` AS C "
"ON C.`id` = A.`process_id` "
"LEFT JOIN `states` AS D "
"ON D.`id` = A.`lhsA_id` "
"LEFT JOIN `states` AS E "
"ON E.`id` = A.`lhsB_id` "
"LEFT JOIN `states` AS F "
"ON F.`id` = A.`rhsA_id` "
"LEFT JOIN `states` AS G "
"ON G.`id` = A.`rhsB_id` "
"WHERE A.`cs_id` = " + str(cs_id))
return list(cursor.fetchall()[0])
|
57262d8e22be7cf9166d4edd0a658fe4662f68f6
| 14,715
|
import torch
from typing import Optional
def clamp(data: torch.Tensor, min: float, max: float, out: Optional[torch.Tensor] = None) -> torch.Tensor:
"""
Clamp tensor to minimal and maximal value
Args:
data: tensor to clamp
min: lower limit
max: upper limit
out: output tensor
Returns:
Tensor: clamped tensor
"""
return torch.clamp(data, min=float(min), max=float(max), out=out)
|
3e835f134fd9eedefb5ca7841d8c3a9960063389
| 14,716
|
def clean_command_level(text: str) -> str:
"""Remove parents from the command level"""
return text.replace("Command.", "")
|
0fbe2a82fa9f802e2e2118c5adb6dd5057a3f490
| 14,718
|
import json
def export_to_json(dict_data,e):
"""Fonction export au format json"""
try:
with open(e.path, "w") as outfile:
json.dump(dict_data, outfile)
return True
except IOError:
return False
|
b18d6c9ecebb83a4f69514af381e6a01f5a4529c
| 14,720
|
def rtiInputFixture(rtiConnectorFixture):
"""
This `pytest fixture <https://pytest.org/latest/fixture.html>`_
creates a session-scoped :class:`rticonnextdds_connector.Input` object
which is returned everytime this fixture method is referred.
The initialized Input object is cleaned up at the end
of a testing session.
``MySubscriber::MySquareReader`` `datareader
<https://community.rti.com/static/documentation/connext-dds/5.2.3/doc/api/connext_dds/api_cpp2/classdds_1_1sub_1_1DataReader.html>`_ in
``test/xml/TestConnector.xml`` `application profile
<https://community.rti.com/rti-doc/510/ndds.5.1.0/doc/pdf/RTI_CoreLibrariesAndUtilities_XML_AppCreation_GettingStarted.pdf>`_
is used for initializing the Input object.
:param rtiConnectorFixture: :func:`rtiConnectorFixture`
:type rtiConnectorFixture: `pytest.fixture <https://pytest.org/latest/builtin.html#_pytest.python.fixture>`_
:returns: session-scoped Input object for testing
:rtype: :class:`rticonnextdds_connector.Input`
"""
return rtiConnectorFixture.get_input("MySubscriber::MySquareReader")
|
b9fbd841e1640fb77b2991583a2b167e714a0a7b
| 14,721
|
from functools import reduce
from operator import mul
def multiply(*args):
"""\
    Doctests (unit tests) can go here:
>>> multiply(2,3)
6
>>> multiply('baka~',3)
'baka~baka~baka~'
"""
return reduce(mul,args)
|
c09f240ce67192e5d079569c75e2c1d852b6d0e7
| 14,723
|
def unfreeze_net(net):
"""
Unfreezing all net to make all params trainable
Args:
net: net to unfreeze
Returns:
unfreezed net
"""
print("SystemLog: Unfreezing all net to make all params trainable")
for p in net.parameters():
p.requires_grad = True
return net
|
a3deb31271c34c072c59f609f6520a29be8979ed
| 14,724
|
import binascii
def base64(data):
"""
Get base64 string (1 line) from binary "data".
:param (bytes, bytearray) data: Data to format.
:raises TypeError: Raises an error if the data is not a bytearray of bytes
instance
:return str: Empty string if this failed, otherwise the base64 encoded
string.
"""
if isinstance(data, (bytearray, bytes)):
b64_data = binascii.b2a_base64(data).decode('ascii')
else:
raise TypeError('Data passed to base64 function is of the wrong type')
    # Remove any new lines and carriage returns
    b64_data = b64_data.replace("\n", "").replace("\r", "")
    return b64_data
|
a51e138beddb788c248c5f2423b3fde2faacdb63
| 14,725
|
def split_name_version(script_name):
    """Return the script name and version split from a "name-version" string."""
    name_version = script_name.rsplit('-', 1)
    if len(name_version) == 1:
        name, version = name_version[0], None
    else:
        name, version = name_version
    try:
        version = float(version)
    except (TypeError, ValueError):
        # If the suffix cannot be parsed as a float (or there is no suffix),
        # the part after the last '-' is not a version number.
        name = script_name
        version = None
    return name, version
|
7ca30aa26eb07faa75f206574d083c7f13f89625
| 14,728
|
import time
def set_time_stamp(actual_log_size, log_file):
"""
    Write the time stamp for each sample of cpu performance to the file
    :param actual_log_size: Number of samples logged so far
    :param log_file: log file pointer
    :return: the updated actual_log_size
    """
    if log_file:
time_stamp = "[ " + str(time.time()) + " ]\n"
log_file.write(time_stamp)
actual_log_size += 1
return actual_log_size
|
668b40b3a3b689cb96e483b74dd38f517b3cc0e1
| 14,729
|
def RotorAndMagnetsDesign(main):
"""Definition of the rotor and magnets design on ANSYS.
Args:
main (Dic): Contains the parameters of the rotor and magnets design.
Returns:
Dic: The same input dont modified only is readed by the following lines.
"""
# oEditor definition
oEditor = main['ANSYS']['oEditor']
# Stator and PM Names
Names = main['ANSYS']['Rotor&Magnets']['Name']
# Stator and rotor names
Colors = main['ANSYS']['Rotor&Magnets']['Color']
# Rotor material definition
RotorMaterial = main['ANSYS']['Materials']['Stator']['StatorMaterialName']
# Arrange material names
material = [RotorMaterial, 'vacuum']
# Poles
Poles = main['ANSYS']['FixedVariables']['Poles']
for k in range(2):
# Part definition
oEditor.CreateUserDefinedPart(
[
"NAME:UserDefinedPrimitiveParameters",
"DllName:=", "RMxprt/PMCore.dll",
"Version:=", "12.0",
"NoOfParameters:=", 13,
"Library:=", "syslib",
[
"NAME:ParamVector",
[
"NAME:Pair",
"Name:=", "DiaGap",
"Value:=", "DiaGap"
],
[
"NAME:Pair",
"Name:=", "DiaYoke",
"Value:=", 'DiaYokeR'
],
[
"NAME:Pair",
"Name:=", "Length",
"Value:=", "0mm"
],
[
"NAME:Pair",
"Name:=", "Poles",
"Value:=", str(int(Poles))
],
[
"NAME:Pair",
"Name:=", "PoleType",
"Value:=", '1'
],
[
"NAME:Pair",
"Name:=", "Embrace",
"Value:=", 'Embrace'
],
[
"NAME:Pair",
"Name:=", "ThickMag",
"Value:=", 'ThickMag'
],
[
"NAME:Pair",
"Name:=", "WidthMag",
"Value:=", '45mm'
],
[
"NAME:Pair",
"Name:=", "Offset",
"Value:=", '0deg'
],
[
"NAME:Pair",
"Name:=", "Bridge",
"Value:=", '2mm'
],
[
"NAME:Pair",
"Name:=", "Rib",
"Value:=", '3mm'
],
[
"NAME:Pair",
"Name:=", "LenRegion",
"Value:=", '200'
],
[
"NAME:Pair",
"Name:=", "InfoCore",
"Value:=", str([0, 1][k])
]
]
],
[
"NAME:Attributes",
"Name:=", str(Names[k]),
"Flags:=", "",
"Color:=", str(Colors[k]).replace(',', ' '),
"Transparency:=", 0,
"PartCoordinateSystem:=", "Global",
"UDMId:=", "",
"MaterialValue:=", "\"vacuum\"".replace(
'vacuum', str(material[k])),
"SurfaceMaterialValue:=", "\"\"",
"SolveInside:=", True,
"ShellElement:=", False,
"ShellElementThickness:=", "0mm",
"IsMaterialEditable:=", True,
"UseMaterialAppearance:=", False,
"IsLightweight:=", False
]
)
return main
|
03c83eaf8ca28514cf836119ee45c7befd4fed7d
| 14,730
|
def get_exp_data(diff_fname):
"""
Parse genes with differential expression score.
The file expects genes with a score in a tab delimited format.
"""
    gene_exp_rate = dict()
    with open(diff_fname, "r") as dfh:
        for line in dfh:
            parts = line.strip('\n\r').split('\t')
            try:
                score = float(parts[1])
            except (IndexError, ValueError):
                continue
            gene_exp_rate[parts[0]] = score
    return gene_exp_rate
|
b376f9e8f42ed2e1f3c0209696de7e85b8bed082
| 14,732
|
import time
def timestamp() -> float:
"""
Returns fractional seconds of a performance counter.
It does include time elapsed during sleep and is system-wide
Note: The reference point of the returned value is undefined,
so that only the difference between the results of two calls is valid.
Returns:
float: fractional seconds
"""
return time.perf_counter()
|
e2c4d8c06d8d07ca67ec4f857a464afde19b6c2a
| 14,734
|
def verify_user_prediction(user_inputs_dic: dict, correct_definition_dic: dict):
"""
Verifies user prediction json against correct json definition
returns true if correct format, false if not
"""
if user_inputs_dic.keys() != correct_definition_dic.keys():
return False
for user_key, user_value in user_inputs_dic.items():
possible_values = correct_definition_dic[user_key].keys()
if user_value not in possible_values:
return False
return True
|
de1d2b579ab312929f64196467a3e08874a5be42
| 14,735
|
def _maybe_format_css_class(val: str, prefix: str = ""):
"""
Create a CSS class name for the given string if it is safe to do so.
Otherwise return nothing
"""
if val.replace("-", "_").isidentifier():
return f"{prefix}{val}"
return ""
|
c5c615b9e0894807a020186cdade9c031f904c06
| 14,736
|
import string
def rhyme_codes_to_letters(rhymes, unrhymed_verse_symbol="-"):
"""Reorder rhyme letters so first rhyme is always an 'a'."""
sorted_rhymes = []
letters = {}
for rhyme in rhymes:
if rhyme < 0: # unrhymed verse
rhyme_letter = unrhymed_verse_symbol
else:
if rhyme not in letters:
letters[rhyme] = len(letters)
rhyme_letter = string.ascii_letters[letters[rhyme]]
sorted_rhymes.append(rhyme_letter)
return sorted_rhymes
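A usage sketch; negative codes mark unrhymed verses and the first rhyme seen becomes 'a':
print(rhyme_codes_to_letters([7, 3, 7, -1, 3]))
# ['a', 'b', 'a', '-', 'b']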
|
f0cbeac514be1f67e2487cf1fe267445a89d1485
| 14,737
|
def to_days(astring, unit):
""" No weeks in QUDT unit library: days or years """
factors = {
"week": 7.0, "weeks": 7.0,
"Week": 7.0, "Weeks": 7.0,
"day": 1.0, "Day": 1.0,
"days": 1.0, "Days": 1.0,
"hour": 1.0/24.0, "hours": 1.0/24.0,
"Hour": 1.0/24.0, "Hours": 1.0/24.0,
"minute": 1.0/1440.0, "Minute": 1.0/1440.0,
"minutes": 1.0/1440.0, "Minutes": 1.0/1440.0,
"s": 1.0/86400.0
}
factor = factors.get(unit, 0.0)
try:
value = float(astring)
except ValueError:
return None
return factor * value
|
b3cb66d0db77c394aa932aba8200c116f7eae6ae
| 14,738
|
def get_text(soup, file_path):
"""Read in a soup object and path to the soup source file,
and return an object with the record's text content"""
text = ""
text_soup = soup.find('div', {'id': 'Text2'})
for o in text_soup.findAll(text=True):
text += o.strip() + "\n"
return {
"text": text
}
|
764d3751742584c657613f5c2d00ef544a060a1d
| 14,740
|
def get_item_from_list_by_key_value(items, key, value):
"""Get the item from list containing sequence of dicts."""
for item in items:
if item[key] == value:
return item
return None
|
fcf70b8fccebfc2995fb4fec903559bc587b4576
| 14,742