content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
def requirements():
    """Read requirements.txt and return its entries as a stripped list."""
    with open('requirements.txt', 'r') as f:
        return [entry.strip() for entry in f]
|
e40af4819eac602c4af4845e2f59233dc3fa8a62
| 699,176
|
def _query_pkey(query, args):
"""Append 'query' and 'args' into a string for use as a primary key
to represent the query. No risk of SQL injection as memo_dict will
simply store memo_dict['malicious_query'] = None.
"""
return query + '.' + str(args)
|
d524697af247879f6aca5fa3918d80199b6148d5
| 699,177
|
from typing import List
from typing import Tuple
def color_map_generate(
    colors: List[Tuple[int, int, int]], add_black_if_not_exist: bool = False
) -> List[Tuple[int, Tuple[int, int, int]]]:
    """Build an [(id, color)] list from a list of BGR colors.

    If ``(0, 0, 0)`` is present it is always assigned id 0.  With
    ``add_black_if_not_exist`` black is inserted with id 0 even when it is
    missing from the input.

    Parameters
    ----------
    colors : List[Tuple[int, int, int]]
        List of BGR colors.
    add_black_if_not_exist : bool
        Add black with id 0 when it is absent from ``colors``.

    Returns
    -------
    List[Tuple[int, Tuple[int, int, int]]]
        List of (id, BGR color) pairs.
    """
    black = (0, 0, 0)
    mapping: List[Tuple[int, Tuple[int, int, int]]] = []
    if black in colors:
        remaining = [c for c in colors if c != black]
        mapping.append((0, black))
    else:
        remaining = colors
        if add_black_if_not_exist:
            mapping.append((0, black))
    # Non-black colors start at 1 whenever black occupies slot 0.
    offset = 1 if mapping else 0
    for idx, color in enumerate(remaining):
        mapping.append((idx + offset, color))
    return mapping
|
38175b00d3f0bd5ba232cfcfd4b2e4bdb5ee7440
| 699,178
|
def collate_question(query, template, slot):
    """
    Build a clarification question from a template and a slot value.
    """
    T1 = "select one to refine your search"
    T2 = "what (do you want | would you like) to know about (.+)?"
    T3 = "(which | what) (.+) do you mean?"
    T4 = "(what | which) (.+) are you looking for?"
    T5 = "what (do you want | would you like) to do with (.+)?"
    T6 = "who are you shopping for?"
    T7 = "what are you trying to do?"
    T8 = "do you have any (specific | particular) (.+) in mind?"
    # "<QUERY>" is a placeholder meaning "use the raw query as the slot".
    if slot == "<QUERY>":
        slot = query
    if template in (T1, T6, T7):
        # These templates carry no slot and are returned verbatim.
        return template
    if template == T2:
        if slot == query:
            return "what do you want to know about %s?" % slot
        return "what do you want to know about this %s?" % slot
    if template == T3:
        return "which %s do you mean?" % slot
    if template == T4:
        return "what %s are you looking for?" % slot
    if template == T5:
        return "what do you want to do with %s?" % slot
    if template == T8:
        return "do you have any specific %s in mind?" % slot
    raise ValueError("Error of template!")
|
1d615aa6b9b923b0f4c5dff0f01b9e04e45c41f5
| 699,179
|
import re
def paragraph_newline(paragraphs):
    """Format newlines for paragraphing.

    Joins ``paragraphs`` into one string: poetic lines are suffixed with
    ' /\\n', the first footnote gets a single trailing newline, ordinary
    paragraphs are separated by a blank line, and the final paragraph is
    appended bare.  The result is stripped of surrounding whitespace.
    """
    # Put new paragraphs here; will be joined on ''
    newlined_ps = ['']
    first_footnote = True
    # Bug fix: raw strings — '\(' and '\d' are invalid escape sequences in
    # plain literals (DeprecationWarning today, an error in future Pythons).
    line = re.compile(r'\(\d\d*\)')               # e.g. "(12)" line marker
    poet_line = re.compile(r'/\n')
    footnote_line = re.compile(r'\[\^\d\d*\]:')   # e.g. "[^3]:" footnote
    for i, para in enumerate(paragraphs):
        next_p = paragraphs[i+1] if i+1 < len(paragraphs) else ''
        next2_p = paragraphs[i+2] if i+2 < len(paragraphs) else ''
        # handle poetic blocks with line indicators with slash and newline
        if (line.match(para)
                and not line.match(next_p)
                and not footnote_line.match(next_p)
                and next_p):
            newlined_ps.append(para + ' /\n')
        # add poetic line break situation 2
        elif (not line.match(para)
                and not footnote_line.match(para)
                and line.match(next_p)
                and not line.search(next2_p)
                and next2_p):
            newlined_ps.append(para + ' /\n')
        # add poetic line break sit 3
        elif (not line.match(para)
                and not footnote_line.match(para)
                and not line.match(next_p)
                and next_p):
            newlined_ps.append(para + ' /\n')
        # distinguish footnote block with double newlines
        elif footnote_line.match(para) and first_footnote:
            newlined_ps.append(para + '\n')
            first_footnote = False
        # handle all other paragraphs
        elif i+1 != len(paragraphs):
            newlined_ps.append(para + '\n\n')
        # attach last element without newlines
        else:
            newlined_ps.append(para)
    return ''.join(newlined_ps).strip()
|
d43ef0de1f95945a747d44957bb86128e1281c5c
| 699,180
|
from typing import Dict
def alias(alias: str) -> Dict[str, str]:
    """Wrap a single alias name in its selection dict."""
    return dict(alias=alias)
|
9321f00118658dc74fbc9704f1ea0e4a33e1f7aa
| 699,181
|
import subprocess
def pdf(filename):
    """Generate a .pdf from ``filename``.tex via pdflatex (must be installed).

    Returns the captured stdout of the pdflatex run.
    """
    proc = subprocess.Popen(
        ['/usr/bin/pdflatex', filename + ".tex"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout, _stderr = proc.communicate()
    return stdout
|
7cd8809134bb75ac37caca79f560ca4c600e2348
| 699,182
|
def fix_hardmix():
    """Fixture for a hard mixture"""
    background = {"markov:rmin_500_rmax_1000_thresh_0.8_priceVarEst_1e9": 1}
    hft = {"noop": 1}
    return {"background": background, "hft": hft}
|
dee9b13b6b7f14ae334a6e1eda6c1fd2a362ba32
| 699,183
|
def max_digits(x):
    """
    Return the maximum integer that has at most ``x`` digits:
    >>> max_digits(4)
    9999
    >>> max_digits(0)
    0
    """
    return 10 ** x - 1
|
3f0ffdfbbb3fdaec8e77889b3bfa14c9b9829b2e
| 699,184
|
def to_numpy(t):
    """
    If t is a Tensor, convert it to a NumPy array; otherwise return t unchanged.
    """
    try:
        return t.numpy()
    # Bug fix: the bare `except:` also swallowed KeyboardInterrupt/SystemExit
    # and masked real errors raised *inside* numpy().  Only the absence of a
    # .numpy attribute means "not a tensor".
    except AttributeError:
        return t
|
3ac3ef0efd9851959f602d42e7f38bdd7e5da21a
| 699,185
|
def get_name_from_choice(value, choice):
    """Look up the display name for ``value`` in a choices sequence.

    :param value: stored value to look up
    :param choice: iterable of (value, display-name) pairs
    :return: the matching display name, or None when not found
    """
    return next((name for key, name in choice if key == value), None)
|
00055bc53aeba244a751f9dffe5c1fa2a7c71383
| 699,186
|
def var_series(get_var_series, n) -> list:
    """
    :param get_var_series: func returning one random value per call
    :param n: number of elements to draw
    :return: variation series (the n draws, sorted ascending)
    """
    return sorted(get_var_series() for _ in range(n))
|
95400d8f3368d7777ed9bf6d1fb42bc06aa3f17d
| 699,187
|
import argparse
def get_parser():
    """Build the command-line parser for H-UCRL experiments.

    Returns:
        argparse.ArgumentParser: the configured parser.
    """
    parser = argparse.ArgumentParser(description="Parameters for H-UCRL.")
    parser.add_argument(
        "--agent",
        type=str,
        default="RHUCRL",
        choices=["RARL", "RAP", "MaxiMin", "BestResponse", "RHUCRL"],
    )
    parser.add_argument("--base-agent", type=str, default="BPTT")
    parser.add_argument("--config-file", type=str, default="config/bptt.yaml")
    parser.add_argument("--environment", type=str, default="MBHalfCheetah-v0")
    parser.add_argument("--alpha", type=float, default=2.0)
    parser.add_argument("--action-cost", type=float, default=0.1)
    # Bug fix: `type=bool` is invalid together with action="store_true" —
    # argparse raises TypeError at parser-construction time.
    parser.add_argument("--hallucinate", action="store_true", default=False)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--num-threads", type=int, default=2)
    parser.add_argument("--max-steps", type=int, default=1000)
    parser.add_argument("--train-episodes", type=int, default=200)
    parser.add_argument("--antagonist-episodes", type=int, default=200)
    parser.add_argument("--render", action="store_true", default=False)
    return parser
|
9d815542ee18766da7ca6dc8cb298dc964b58a9a
| 699,188
|
def get_img_channel_num(img, img_channels):
    """Record the image's channel count (last shape dimension).

    Appends the count to ``img_channels`` only when not already present,
    then returns the (mutated) list.
    """
    channels = img.shape[-1]
    if channels not in img_channels:
        img_channels.append(channels)
    return img_channels
|
0627cffbb3e80fddccc47a4cc8b697a59cdda077
| 699,189
|
import pandas as pd
def keep_common_genes(rna_df, sig_df):
    """
    Restrict two gene-indexed dataframes to the genes present in both.
    Both dataframes must be indexed with the same gene identifier.

    Inputs:
        - rna_df: pandas df. Rows are genes (indexed by 'Hugo_Symbol') and columns are patients
        - sig_df: pandas df. Rows are genes (indexed by 'Hugo_Symbol') and columns are cell types
    Output:
        - rna_df_red: rna_df reduced to the genes also present in sig_df
        - sig_df_red: sig_df reduced to the genes also present in rna_df
    """
    # The inner merge's index is exactly the set of shared genes.
    common = pd.merge(rna_df, sig_df, how='inner',
                      left_index=True, right_index=True).index
    # Sort both so the genes line up in the same order.
    rna_df_red = rna_df.loc[common].sort_index()
    sig_df_red = sig_df.loc[common].sort_index()
    return rna_df_red, sig_df_red
|
f934a351a8c5d8f158b33bb9233dbd62f9dd4c8a
| 699,190
|
def circulate(number, lower, upper):
    """Wrap ``number`` into the inclusive range [lower, upper].

    A value that falls off one end of the range re-enters from the other
    end, like positions on a dial.

    Params:
        number = value to wrap.
        lower = inclusive lower bound.
        upper = inclusive upper bound.
    Returns: the wrapped value.
    """
    if number < lower:
        # Undershoot: re-enter from the top.
        return circulate(upper + lower + number + 1, lower, upper)
    if number > upper:
        # Overshoot: re-enter from the bottom.
        return circulate(lower + number - upper - 1, lower, upper)
    return number
|
382b82768cc170b27b810c54d0ce753277d1a6f4
| 699,192
|
def auto_str(cls):
    """Class decorator attaching an auto-generated __str__.

    Args:
        cls: Class for which to generate the __str__ method.
    """
    def __str__(self):
        attrs = ", ".join("%s=%s" % kv for kv in vars(self).items())
        return "%s(%s)" % (type(self).__name__, attrs)
    cls.__str__ = __str__
    return cls
|
71a95cfeecb00146898656a0150d97fc346ddad3
| 699,193
|
import argparse
def get_train_input_args():
    """
    Define, parse and echo the training command line arguments.

    Command Line Arguments:
    1. Data Folder as data_dir
    2. Save Folder for checkpoints as --save_dir with default value 'checkpoints'
    3. CNN Model Architecture as --arch with default value 'vgg16'
    4. Learning rate as --learning_rate with default value 0.001
    5. Training epochs as --epochs with default value 1
    6. Hidden units of the first layer as --hidden_units_01 with default value 4096
    7. Hidden units of the second layer as --hidden_units_02 with default value 1024
    8. Path of a checkpoint as --checkpoint_path
    9. Use gpu if available as -g or --gpu
    This function parses sys.argv and returns the resulting Namespace.
    Parameters:
        None - simply using argparse module to create & store command line arguments
    Returns:
        parse_args() - data structure that stores the command line arguments object
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('data_dir', type = str, help = 'path to the folder of flower images')
    parser.add_argument('--save_dir', type = str, default = 'checkpoints', help = 'Save folder for model checkpoints')
    parser.add_argument('--arch', type = str, default = 'vgg16', choices=['vgg16', 'densenet121'], help = 'CNN Model Architecture')
    parser.add_argument('-l', '--learning_rate', type = float, default = 0.001, help = 'Learning rate')
    parser.add_argument('-e', '--epochs', type = int, default = 1, help = 'Epochs to train the model')
    parser.add_argument('-h1', '--hidden_units_01', type = int, default = 4096, help = 'Hidden units of the first layer')
    parser.add_argument('-h2', '--hidden_units_02', type = int, default = 1024, help = 'Hidden units of the second layer')
    parser.add_argument('-cp', '--checkpoint_path', type = str, help = 'Path of a checkpoint')
    parser.add_argument('-g', '--gpu', action='store_true', required=False, help = 'Use gpu if available')
    in_args = parser.parse_args()
    # NOTE(review): parse_args() never returns None, so this branch is dead
    # code -- the else branch always runs.
    if in_args is None:
        print("* Doesn't Check the Command Line Arguments because 'get_input_args' hasn't been defined.")
    else:
        print("Command Line Arguments:\n dir =", in_args.data_dir, "\n save_dir =", in_args.save_dir, "\n arch =", in_args.arch, "\n learning_rate =", in_args.learning_rate, "\n epochs =", in_args.epochs, "\n hidden_units_01 =", in_args.hidden_units_01, "\n hidden_units_02 =", in_args.hidden_units_02)
        if in_args.checkpoint_path is not None:
            print("\n checkpoint_path =", in_args.checkpoint_path)
        # NOTE(review): with action='store_true', in_args.gpu is True/False,
        # never None -- this message prints unconditionally.
        if in_args.gpu is not None:
            print("\n Use gpu if available")
    return in_args
|
1631d63783905f421ed2dc9c702673ff3866153a
| 699,194
|
import json
def _jsonToDict(json_file):
"""Reads in a JSON file and converts it into a dictionary.
Args:
json_file (str): path to the input file.
Returns:
(dict) a dictionary containing the data from the input JSON file.
"""
with open(json_file, 'r') as fid:
dout = json.loads(fid.read())
return dout
|
6e4b5996d6aaf2982012c6c2f135d7876ee5b3ca
| 699,195
|
def _matches_app_id(app_id, pkg_info):
"""
:param app_id: the application id
:type app_id: str
:param pkg_info: the package description
:type pkg_info: dict
:returns: True if the app id is not defined or the package matches that app
id; False otherwise
:rtype: bool
"""
return app_id is None or app_id in pkg_info.get('apps')
|
4d6cd0d652a2751b7a378fd17d5ef7403cfdc075
| 699,196
|
def quad_pvinfo(tao, ele):
    """
    Return a dict of PV information for element ``ele`` for use in a DataMap.
    """
    # The device name is the element's alias from its header record.
    device = tao.ele_head(ele)['alias']
    attrs = tao.ele_gen_attribs(ele)
    return {
        'bmad_name': ele,
        'pvname_rbv': device + ':BACT',
        'pvname': device + ':BDES',
        # Sign/scale conversion from device units to b1_gradient.
        'bmad_factor': -1 / attrs['L'] / 10,
        'bmad_attribute': 'b1_gradient',
    }
|
b35e04d78d419673afbc0711f9d454e01bc1dd5d
| 699,198
|
import os
def _mkdir(dir_path):
    """Safely make directory.

    Creates ``dir_path`` (including parents) and returns the path.
    Failure is silent, so an existing directory is not an error -- but
    neither is any other OS-level failure.
    """
    try:
        os.makedirs(dir_path)
    except OSError:
        # NOTE(review): swallows *all* OSErrors, not just "already exists" --
        # a permission failure is also silently ignored; confirm this is
        # intentional before narrowing to FileExistsError.
        pass
    return dir_path
|
f02ed083c90c25eb04c46e694da617b94eaacb29
| 699,199
|
import json
def fix_hardprof_str(hardprof):
    """Fixture: the hard profile serialized as a JSON string."""
    serialized = json.dumps(hardprof)
    return serialized
|
7cf08fceb561ebadcb44d6b13bcb7ad52198934d
| 699,200
|
def Imp(x, y):
    """Return X bitwise-implies Y.

    Performs a bitwise comparison of identically positioned bits and sets
    the corresponding bit in the output per this truth table:
    X Y Output
    F F T
    T F F
    F T T
    T T T
    https://docs.microsoft.com/en-us/office/vba/language/reference/user-interface-help/imp-operator

    Bug fix: the previous version shifted the accumulated result left on
    every iteration while consuming the inputs' LOW bits first, so the
    output bits came out reversed for multi-bit integers (e.g. Imp(2, 1)
    returned 2 instead of 1).  Each result bit is now placed at the same
    position as the input bits it was computed from.
    """
    ix, iy = int(x), int(y)
    result = 0
    if not (ix or iy):
        # 0 Imp 0: F -> F is True, so the single bit is set.
        result = 1
    position = 0
    while ix or iy:
        # Implication is false only for X=1, Y=0.
        if not ((ix & 1) and not (iy & 1)):
            result |= 1 << position
        position += 1
        ix >>= 1
        iy >>= 1
    # Preserve boolean in / boolean out.
    if isinstance(x, bool) and isinstance(y, bool):
        return bool(result)
    return result
|
68ac0456879d0aedec58988de2528bd9c6141941
| 699,201
|
from typing import Callable
from typing import Set
from typing import Type
def is_quorum(is_slice_contained: Callable[[Set[Type], Type], bool], nodes_subset: set):
    """
    Check whether nodes_subset is a quorum in FBAS F (implicitly the
    is_slice_contained predicate): every node in the subset must have one
    of its slices contained in the subset.
    """
    for node in nodes_subset:
        if not is_slice_contained(nodes_subset, node):
            return False
    return True
|
4a4af3af8ca5a3f969570ef3537ae75c17a8015c
| 699,202
|
import os
def get_maps_dir():
    """
    Get the exploration/maps dir
    :return str: exploration/maps dir
    """
    # Two dirname() hops: this module's dir, then its parent package dir.
    package_root = os.path.dirname(os.path.dirname(__file__))
    return os.path.join(package_root, "maps")
|
d30410b4fe5e87f9183d18a7d5ee82d6f3cc0b7a
| 699,203
|
import os
def _split_by_regions(regions, dirname, out_ext, in_key):
    """Split a BAM file data analysis into chromosomal regions.

    Returns a closure to be applied to per-sample ``data`` dicts; the
    closure yields (combined_out_file, [(region, region_outfile), ...]).

    :param regions: dict with 'analysis' (list of region tuples) and
        'noanalysis' entries -- assumed schema; TODO confirm against callers.
    :param dirname: output subdirectory under the sample's work directory.
    :param out_ext: suffix (including extension) for output file names.
    :param in_key: key in ``data`` holding the input BAM path.
    """
    def _do_work(data):
        # ``data`` is assumed to provide data[in_key], data["dirs"]["work"]
        # and data["name"] (last element = sample name) -- confirm schema.
        bam_file = data[in_key]
        if bam_file is None:
            # Nothing to split: no output file, no parts.
            return None, []
        part_info = []
        base_out = os.path.splitext(os.path.basename(bam_file))[0]
        # Pseudo-regions for reads without a chromosome and excluded areas.
        nowork = [["nochrom"], ["noanalysis", regions["noanalysis"]]]
        for region in regions["analysis"] + nowork:
            out_dir = os.path.join(data["dirs"]["work"], dirname, data["name"][-1], region[0])
            if region[0] in ["nochrom", "noanalysis"]:
                region_str = region[0]
            else:
                # Real regions become e.g. "chr1_0_1000000".
                region_str = "_".join([str(x) for x in region])
            region_outfile = os.path.join(out_dir, "%s-%s%s" %
                                          (base_out, region_str, out_ext))
            part_info.append((region, region_outfile))
        # Path of the combined (whole-genome) output file.
        out_file = os.path.join(data["dirs"]["work"], dirname, data["name"][-1],
                                "%s%s" % (base_out, out_ext))
        return out_file, part_info
    return _do_work
|
71007f0da5934aad0b3ed38ad96efd33707bdbe3
| 699,205
|
from re import match
def extrakey(key):
    """Return True if key is not a boring standard FITS keyword.

    To make the data model more human readable, we don't overwhelm the
    output with keywords required by the FITS standard anyway, or cases
    where the number of headers might change over time.  This list isn't
    exhaustive.

    Parameters
    ----------
    key : :class:`str`
        A FITS keyword.

    Returns
    -------
    :class:`bool`
        ``True`` if the keyword is not boring.

    Examples
    --------
    >>> extrakey('SIMPLE')
    False
    >>> extrakey('DEPNAM01')
    False
    >>> extrakey('BZERO')
    True
    """
    # Don't drop NAXIS1/NAXIS2: we want to document which is which.
    boring = ('BITPIX', 'NAXIS', 'PCOUNT', 'GCOUNT', 'TFIELDS', 'XTENSION',
              'SIMPLE', 'EXTEND', 'COMMENT', 'HISTORY', 'EXTNAME', '')
    if key in boring:
        return False
    boring_patterns = (
        # Table-specific keywords
        r'T(TYPE|FORM|UNIT|COMM|DIM)\d+',
        # Compression-specific keywords
        r'Z(IMAGE|TENSION|BITPIX|NAXIS|NAXIS1|NAXIS2|PCOUNT|GCOUNT|TILE1|TILE2|CMPTYPE|NAME1|VAL1|NAME2|VAL2|HECKSUM|DATASUM)',
        # Dependency list
        r'DEP(NAM|VER)\d+',
    )
    return not any(match(pattern, key) is not None for pattern in boring_patterns)
|
d4c36c3a5408d7056e55d5e67ebebe0b960e5b72
| 699,206
|
def P(N_c, N_cb, e_t, t, A, N_bb):
    """
    Returns the points :math:`P_1`, :math:`P_2` and :math:`P_3`.

    Parameters
    ----------
    N_c : numeric
        Surround chromatic induction factor :math:`N_{c}`.
    N_cb : numeric
        Chromatic induction factor :math:`N_{cb}`.
    e_t : numeric
        Eccentricity factor :math:`e_t`.
    t : numeric
        Temporary magnitude quantity :math:`t`.
    A : numeric
        Achromatic response :math:`A` for the stimulus.
    N_bb : numeric
        Chromatic induction factor :math:`N_{bb}`.

    Returns
    -------
    tuple
        Points :math:`P`.

    Examples
    --------
    >>> N_c = 1.0
    >>> N_cb = 1.00030400456
    >>> e_t = 1.1740054728519145
    >>> t = 0.149746202921
    >>> A = 23.9394809667
    >>> N_bb = 1.00030400456
    >>> P(N_c, N_cb, e_t, t, A, N_bb)  # doctest: +ELLIPSIS
    (30162.8908154..., 24.2372054..., 1.05)
    """
    p1 = ((50000 / 13) * N_c * N_cb * e_t) / t
    p2 = A / N_bb + 0.305
    p3 = 21 / 20
    return p1, p2, p3
|
82e444ca0e7563f061b69daedd82ff3fc771f190
| 699,208
|
import random
import string
def randomStringGenerator(length=13):
    """Create a random string of ASCII letters with the given length."""
    letters = [random.choice(string.ascii_letters) for _ in range(length)]
    return ''.join(letters)
|
5f4406f67d953cc1198ae79ae049dbd6e9b678b4
| 699,209
|
import os
import pickle
def _load_static_data(module_path):
    """
    Load the static grid data; an error propagates if latlongrid.dat
    cannot be opened or unpickled.

    Parameters
    ----------
    module_path : string
        mainpath of the LatLonGrid module

    Returns
    -------
    latlondata : dict
        dictionary containing for each subgrid...
        a) the multipolygon 'zone_extent'
        b) the WKT-string 'projection'
        c) the sets for T18/T6/T3/T1-tiles covering land 'coverland'
        d) the LatLonGrid version 'version'
    """
    latlon_data = None
    # The data file ships alongside the module in its "data" directory.
    fname = os.path.join(os.path.dirname(module_path), "data", "latlongrid.dat")
    # NOTE(review): pickle.load executes arbitrary code from the file; safe
    # only because latlongrid.dat is packaged with this library -- never
    # point this at untrusted input.
    with open(fname, "rb") as f:
        latlon_data = pickle.load(f)
    return latlon_data
|
e3ec215abb3bf7389c18583375d8a3c234af8cbf
| 699,211
|
import os
def report_directory_path():
    """Path to directory where monitor write report files to.

    This must point to the directory which is mounted to the monitor service
    container to read issued reports.
    """
    # Resolve this module's parent directory, then append "reports".
    parent = os.path.join(os.path.dirname(__file__), os.pardir)
    return os.path.join(os.path.realpath(parent), "reports")
|
555a901e0c665d753ec38d736264a7b37eb11a1f
| 699,212
|
def forensic():
    """Brute force / understand the problem ...

    Prints every "strange" number in [100, 100000): a number is strange
    when it is repeatedly divisible by its own digit count down to a
    one-digit number.  For each hit, prints a per-digit-length counter,
    the number, and its construction chain.
    """
    def is_strange(i):
        # Strange: len(str(i)) divides i, recursively until one digit left.
        l = len(str(i))
        if l == 1:
            return True
        if i % l != 0:
            return False
        return is_strange(i // l)
    def construction(i):
        # Human-readable chain of the divisors used, ending "1 (<digit>)".
        s = str(i)
        l = len(s)
        if l == 1: return "1" + " (" + s + ")"
        return str(l) + " " + construction(i // l)
    n = 0
    l = 0
    for i in range(100, 100000):
        if is_strange(i):
            # Restart the counter each time the digit length increases.
            if l != len(str(i)):
                l = len(str(i))
                n = 0
            n += 1
            print("{:3} {:10} {}".format(n, i, construction(i)))
|
02acb3a92aa2fc110ab356dc5786dac247c3c63f
| 699,213
|
import re
def trim_http(url: str) -> str:
    """
    Discard an "http://" or "https://" prefix from an URL (case-insensitive).

    Args:
        url: A str containing an URL.
    Returns:
        The str corresponding to the trimmed URL.
    """
    stripped = re.sub("https?://", "", url, flags=re.IGNORECASE)
    return stripped
|
472e71a646de94a1ad9b84f46fc576ed5eb5a889
| 699,214
|
def changeArc(G, i, j):
    """
    Change statistic for Arc: adding any arc always contributes 1.
    """
    return 1
|
90b33be8ac87a67362b43185e688e8fdff218e64
| 699,215
|
def _jq_format(code):
    """
    DEPRECATED - Use re.escape() instead, which performs the intended action.

    Use before throwing raw code such as 'div[tab="advanced"]' into jQuery.
    Selectors with quotes inside of quotes would otherwise break jQuery.
    If you just want to escape quotes, there's escape_quotes_if_needed().
    This is similar to "json.dumps(value)", but with one less layer of quotes.
    """
    # Order matters: backslashes must be doubled FIRST, before any of the
    # replacements below introduce new backslashes of their own.
    code = code.replace('\\', '\\\\').replace('\t', '\\t').replace('\n', '\\n')
    code = code.replace('\"', '\\\"').replace('\'', '\\\'')
    code = code.replace('\v', '\\v').replace('\a', '\\a').replace('\f', '\\f')
    # NOTE(review): replace(r'\u', '\\u') maps the two characters "\u" back
    # to the same two characters -- effectively a no-op; confirm intent.
    code = code.replace('\b', '\\b').replace(r'\u', '\\u').replace('\r', '\\r')
    return code
|
3c46caa1a02798a7b8539a92a6b68f29e3dcd63c
| 699,216
|
import codecs
import os
def fetch_data(cand, ref):
    """Store each reference and candidate sentence list.

    Args:
        cand: path to the candidate file.
        ref: path to a reference file (name containing '.txt' or '.en'),
            or a directory of reference files.

    Returns:
        (candidate, references): candidate is a list of lines; references
        is a list of line-lists, one per reference file.
    """
    references = []
    # Bug fix: the original condition `'.txt' or '.en' in ref` was always
    # truthy ('.txt' is a non-empty string), so the directory branch was
    # unreachable.
    if '.txt' in ref or '.en' in ref:
        reference_file = codecs.open(ref, 'r', 'utf-8')
        references.append(reference_file.readlines())
    else:
        for root, dirs, files in os.walk(ref):
            for f in files:
                reference_file = codecs.open(os.path.join(root, f), 'r', 'utf-8')
                references.append(reference_file.readlines())
    candidate_file = codecs.open(cand, 'r', 'utf-8')
    candidate = candidate_file.readlines()
    return candidate, references
|
a72bce7f54ba93773425ae15081ea34b0be33f4e
| 699,218
|
import random
def make_batch_pay_data(busi_type, client_tp):
    """Pick the batch-payout flag; empty for prepaid-card business ("03").

    :param busi_type: business type code ("03" = prepaid card, no flag).
    :param client_tp: client type; "1" forces flag "2", otherwise random.
    :return: "" | "1" | "2"
    """
    if busi_type == "03":
        return ""
    if client_tp == "1":
        return "2"
    return random.choice(["1", "2"])
|
28992bfe369f0befd3fc78022a2f1035174243c8
| 699,219
|
from pathlib import Path
import os
def clean(record: str) -> int:
    """
    Clean up all cached data related to record.

    Deletes every file under ``<cwd>/cache`` whose name matches ``record``
    (treated as a glob pattern, searched recursively).

    :param (str) record: User-supplied record file name / glob pattern.
    :return: the number of files removed (0 when nothing matched).
    :rtype: int
    """
    counter = 0
    # rglob searches all subdirectories; a missing cache dir simply yields
    # no matches.
    for file in Path(os.getcwd() + "/cache").rglob(record):
        os.remove(file)
        counter += 1
    return counter
|
bd0e48d35ab5a6fdf0a82d5a9b1ed81a404c600b
| 699,220
|
def remove_recorders(osi):
    """Remove all recorders registered on the ``osi`` instance."""
    result = osi.to_process('remove', ['recorders'])
    return result
|
c63f23a031a5c957fd8667fc5be87fbd96b26a1e
| 699,221
|
def removeCutsFromFootprint(footprint):
    """
    Remove all Edge.Cuts graphical items from the footprint and return
    them as a list; graphical items on other layers are left untouched.
    """
    edges = []
    # NOTE(review): removes items from the container while iterating it;
    # assumes pcbnew's GraphicalItems() tolerates this -- confirm.
    for edge in footprint.GraphicalItems():
        if edge.GetLayerName() != "Edge.Cuts":
            continue
        footprint.Remove(edge)
        edges.append(edge)
    return edges
|
ddb047e00f98d73037abb5d9dc0a2e00de457f56
| 699,222
|
from typing import OrderedDict
def to_options(split_single_char_options=True, **kwargs):
    """Transform keyword arguments into a list of cmdline options

    Imported from GitPython.
    Original copyright:
        Copyright (C) 2008, 2009 Michael Trier and contributors
    Original license:
        BSD 3-Clause "New" or "Revised" License

    Parameters
    ----------
    split_single_char_options: bool
        Emit short options as ["-x", "val"] instead of ["-xval"].
    kwargs:
        Option name/value pairs; True becomes a bare flag, False/None are
        omitted, lists/tuples repeat the option, underscores become dashes.

    Returns
    -------
    list
    """
    def render(name, value):
        # Render one name/value pair as zero, one or two argv tokens.
        if len(name) == 1:
            if value is True:
                return ["-%s" % name]
            if value in (False, None):
                return []
            if split_single_char_options:
                return ["-%s" % name, "%s" % value]
            return ["-%s%s" % (name, value)]
        if value is True:
            return ["--%s" % name.replace('_', '-')]
        if value is False or value is None:
            return []
        return ["--%s=%s" % (name.replace('_', '-'), value)]

    args = []
    # Deterministic output: options sorted by name.
    for key in sorted(kwargs):
        value = kwargs[key]
        values = value if isinstance(value, (list, tuple)) else [value]
        for item in values:
            args += render(key, item)
    return args
|
b5835d8d9f52fed1ba34799c319be06e9913a1e3
| 699,223
|
from typing import Match
def gen_ruby_html(match: Match) -> str:
    r"""Convert matched ruby tex code into plain HTML for bbs usage.

    \ruby{A}{B} -> <ruby><rb>A</rb><rp>(</rp><rt>B</rt><rp>)</rp></ruby>

    Also supports |-split ruby, pairing each base segment with its reading:
    \ruby{椎名|真昼}{しいな|まひる} ->
    <ruby><rb>椎名</rb><rp>(</rp><rt>しいな</rt><rp>)</rp><rb>真昼</rb><rp>(</rp><rt>まひる</rt><rp>)</rp></ruby>
    """
    bases = match['word'].split('|')
    readings = match['ruby'].split('|')
    segments = []
    for base, reading in zip(bases, readings):
        segments.append(
            '<ruby><rb>{}</rb><rp>(</rp><rt>{}</rt><rp>)</rp></ruby>'.format(base, reading))
    return ''.join(segments)
|
bd603b67a61812152f7714f3a13f48cac41db277
| 699,224
|
import time
def _generate_request_id():
"""
生成一个请求 id,暂时用 (时间戳 % 60 分钟)
:return:
"""
return int(time.time() * 1000 % (60 * 60 * 1000))
|
b334265d41b68fc3455297bbc21065e7cb064920
| 699,225
|
def SkipIf(should_skip):
    """Decorator which allows skipping individual test cases."""
    def decorate(func):
        # Replacing the function with None makes the test runner ignore it.
        return None if should_skip else func
    return decorate
|
135486c94640231328089115d61ba2d1eb87e46a
| 699,226
|
def norm(x, train_stats):
    """
    Standardize ``x`` with the mean/std recorded in ``train_stats``.
    """
    mean = train_stats['mean']
    std = train_stats['std']
    return (x - mean) / std
|
c7791c39b64a5041e537bb7554c0c4a7267d180c
| 699,227
|
import random
import string
def randomstring(size=20):
    """Create a random string of uppercase letters and digits.

    Args:
        size: desired length (default 20)
    Returns:
        result: the generated string
    """
    rng = random.SystemRandom()
    alphabet = string.ascii_uppercase + string.digits
    result = ''.join(rng.choice(alphabet) for _ in range(size))
    return result
|
64ecd935130f6308367d02a5d699ea05c906cbb2
| 699,228
|
def has_seven(k):
    """Returns True if at least one of the digits of k is a 7, False otherwise.
    >>> has_seven(3)
    False
    >>> has_seven(7)
    True
    >>> has_seven(2734)
    True
    >>> has_seven(2634)
    False
    >>> has_seven(734)
    True
    >>> has_seven(7777)
    True
    """
    # Strip digits from the right until one remains, checking each.
    while k >= 10:
        if k % 10 == 7:
            return True
        k = k // 10
    return k % 10 == 7
|
8c0b7b02e36247b3685a6a0142ebd81dd63e220c
| 699,229
|
def get_subreport(binary_list, least_most_common_bit, bit_position):
    """Select the report subset matching a bit at the given position.

    Args:
        binary_list: list of bit strings.
        least_most_common_bit: bit value ('0'/'1') the entries must have.
        bit_position: index within each bit string to inspect.
    Returns:
        subreport (list): entries fulfilling the constraint.
    """
    return [bits for bits in binary_list
            if bits[bit_position] == least_most_common_bit]
|
ca69762a416ead2dd3ba177150dc6a7ae6a6f494
| 699,231
|
def get_grid_size(grid):
    """Return grid edge size (character count before the first newline).

    An empty grid has size 0; a grid with no newline counts as size 1.
    """
    if not grid:
        return 0
    newline_at = grid.find('\n')
    return newline_at if newline_at >= 0 else 1
|
9ff60184bb0eed7e197d015d8b330dcf615b4008
| 699,233
|
def ts_code(code: str) -> str:
    """Normalize a security code to the tushare standard format.

    Accepts "000001.SZ" / "000001.SH" (already standard) or the prefixed
    "SZ.000001" / "SH.000001" form, case-insensitively.

    :param code: 9-character security code.
    :return: code as "XXXXXX.SZ" or "XXXXXX.SH".
    :raises ValueError: if the code has the wrong length or format.
        (ValueError subclasses Exception, so existing broad handlers
        keep working.)
    """
    if len(code) != 9:
        raise ValueError('无效的证券代码: 长度不符')
    stock_code = code.upper()
    if stock_code.endswith('.SZ') or stock_code.endswith('.SH'):
        return stock_code
    elif stock_code.startswith('SZ.') or stock_code.startswith('SH.'):
        # Move the exchange prefix to the suffix position.
        return '%s.%s' % (stock_code[3:], stock_code[0:2])
    else:
        raise ValueError('无效的证券代码: %s' % code)
|
86f568553955ab66bcea7f86ecc341329f8931e3
| 699,234
|
def no_cache(func):
    """
    Decorator made to modify http-response to make it no_cache
    """
    def disable_caching(request_handler, *args, **kwargs):
        # Forbid caching at every layer: HTTP/1.1, HTTP/1.0 and proxies.
        headers = (
            ('Cache-Control', 'no-cache, no-store, must-revalidate'),
            ('Pragma', 'no-cache'),
            ('Expires', 0),
        )
        for name, value in headers:
            request_handler.set_header(name, value)
        return func(request_handler, *args, **kwargs)
    return disable_caching
|
f5f5b9c8e78a883395ad6cbf263c1c82b1aafdc9
| 699,235
|
def index() -> str:
    """
    Home page: a static greeting string.
    """
    greeting = "Hello, Python!!!"
    return greeting
|
7bf6b684e8404d0b2d52bb7578e1f44e37bdf565
| 699,236
|
def get_unique_coords(df, lat_field, lon_field):
    """
    Extract the unique (lat, lon) pairs from a dataframe for plotting.

    A groupby collapses duplicate coordinates so fewer points need to be
    plotted; the lat values and lon values are returned as two arrays.
    """
    deduped = df.groupby([lat_field, lon_field]).size().reset_index()
    return deduped[lat_field].values, deduped[lon_field].values
|
3914e59a9263e07a18833135b0d30a3e5b1d4ce6
| 699,237
|
import os
def scanForFiles(folder_name, extensions=('.frm', '.bas', '.cls', '.vb')):
    """Return all suitable VB files in a folder and subfolders"""
    matches = []
    for subdir, dirs, files in os.walk(folder_name):
        for name in files:
            path = os.path.join(subdir, name)
            # Extension match is case-insensitive.
            if os.path.splitext(path)[1].lower() in extensions:
                print('Creating tests for %s' % path)
                matches.append(path)
    return matches
|
685043454a105adeabe2768973d0a728f29fd6e4
| 699,238
|
def unhex(value):
    """Parse ``value`` as a base-16 string and return the integer."""
    return int(value, base=16)
|
0a8c297a93f484c8a1143511ef8511521c887a78
| 699,239
|
def make_00a6(sucess):
    """Character-deletion result packet: "\\x00" on success, "\\x9c" on failure."""
    if sucess:
        return "\x00"
    return "\x9c"
|
2c89d1ba457a3d550742721ddfa9c6f6e7f391c7
| 699,240
|
def puzzles():
    """Import a list of Sudoku puzzles from a text file and return it.

    Puzzles credit:
        http://norvig.com/top95.txt
        http://norvig.com/hardest.txt
    """
    with open('sudoku_puzzles.txt') as file:
        # Strip the end-of-line character from every puzzle line.
        return [row.replace('\n', '') for row in file.readlines()]
|
e4f876c52b21756a9a97c6f1bc3a0cd2c00fca61
| 699,241
|
import threading
import os
def concurrency_safe_write(object_to_write, filename, write_func):
    """Writes an object into a unique file in a concurrency-safe way.

    The target name is suffixed with the current thread id and pid so
    concurrent writers never collide; the temporary filename is returned.
    """
    unique_suffix = 'thread-{}-pid-{}'.format(
        id(threading.current_thread()), os.getpid())
    temporary_filename = '{}.{}'.format(filename, unique_suffix)
    write_func(object_to_write, temporary_filename)
    return temporary_filename
|
a2ef6dc6cb69523bc7fd487c2d1298a1c59d5323
| 699,242
|
import random
def genRandomStructure():
    """
    Generates a random neural network structure.

    Returns a list of layer sizes: an input layer of 2 or 3 nodes, at
    least one hidden layer of 1-10 nodes (more with probability 0.5 each),
    and a single output node.
    """
    # Input layer has either 2 or 3 nodes.
    structure = [3 if random.uniform(0, 1) > 0.5 else 2]
    while True:
        # Add a hidden layer of random size (max 10 nodes).
        structure.append(random.randint(1, 10))
        if random.uniform(0, 1) <= 0.5:
            break
    # Output layer has a single node.
    structure.append(1)
    return structure
|
efd1ea11e27cdea9222052ac7f12dbed89f5ac44
| 699,243
|
def filter_errors(seq, errors):
    """Helper for filter_keys.

    If ``seq`` consists solely of key-error (resp. value-error) markers,
    collapse it to the singleton error-string list; otherwise drop all
    error markers and keep everything else.

    Args
        seq: list of strings
        errors: dict with 'key', 'val', 'key_str', 'val_str' entries
    Returns
        List of filtered items.
    """
    key_err = errors['key']
    val_err = errors['val']
    if set(seq) == {key_err}:
        return [errors['key_str']]
    if set(seq) == {val_err}:
        return [errors['val_str']]
    return [item for item in seq if item not in (key_err, val_err)]
|
93f44cd933e48974c393da253b7f37057ea7de19
| 699,244
|
def _offset(x, y, xoffset=0, yoffset=0):
"""Return x + xoffset, y + yoffset."""
return x + xoffset, y + yoffset
|
a6c88b2ee3172e52f7a538d42247c0fdc16352f8
| 699,245
|
def filter_to_be_staged(position):
    """Position filter for Experiment.filter() to include only worms that
    still need to be stage-annotated fully.

    A position is included when any timepoint lacks a truthy 'stage'
    annotation, or when the final stage is not 'dead'.
    """
    stages = [tp.annotations.get('stage') for tp in position]
    # all(stages) is True iff every stage is non-None and non-empty.
    fully_annotated = all(stages)
    return (not fully_annotated) or stages[-1] != 'dead'
|
df771b0856c91c8663ad06cacf86bf3389df04c5
| 699,246
|
import sys
def import_module(module_name):
    """
    Import ``module_name`` by name and return the module object.
    """
    # __import__ registers the module in sys.modules even for dotted names.
    __import__(module_name)
    return sys.modules[module_name]
|
5b74e1c4f0442b8bf37a3e4145d212698fd49c60
| 699,247
|
def leja_growth_rule(level):
    """
    The number of samples in the 1D Leja quadrature rule of a given level.

    Most Leja rules produce two-point quadrature rules which assign zero
    weight to one point; this is avoided by jumping from the one-point rule
    straight to a three-point rule and then incrementing by one.

    Parameters
    ----------
    level : integer
        The level of the quadrature rule

    Return
    ------
    num_samples_1d : integer
        The number of samples in the quadrature rule
    """
    return 1 if level == 0 else level + 2
|
b93fe05a3bf9b8c016b92a00078dce8ef156a8c4
| 699,248
|
def nastran_replace_inline(big_string, old_string, new_string):
    """Find a string and replace its 8-character field in one big string.

    In ``big_string`` (probably a line), find ``old_string``; because a lot
    of Nastran is based on 8-character blocks, the entire 8-character block
    currently occupied by ``old_string`` is replaced with ``new_string``
    left-justified to 8 characters.

    big_string, old_string, new_string: str
    Returns ``big_string`` unchanged when ``old_string`` is absent.
    """
    index = big_string.find(old_string)
    if index < 0:
        # Bug fix: find() returning -1 previously corrupted the string tail.
        return big_string
    # Bug fix: '/' is float division in Python 3 and made the slice
    # indices below raise TypeError; floor division is intended.
    block = index // 8
    return big_string[:8 * block] + new_string.ljust(8) + \
        big_string[8 * block + 8:]
|
f9955d40a74fc57674e79dd6aeea3872ffc2c87d
| 699,249
|
import re
def does_support_ssl(ip):
    """Check if IP supports SSL.

    Has an aba sequence outside of the bracketed sections and a
    corresponding bab sequence inside a bracketed section.
    Examples:
    - aba[bab]xyz supports SSL (aba outside square brackets with
      corresponding bab within square brackets).
    - xyx[xyx]xyx does not support SSL (xyx, but no corresponding yxy).
    - aaa[kek]eke supports SSL (eke in supernet with corresponding kek in
      hypernet; the aaa sequence is not related, because the interior
      character must be different).
    - zazbz[bzb]cdb supports SSL (zaz has no corresponding aza, but zbz has
      a corresponding bzb, even though zaz and zbz overlap).
    """
    regex = r'''(\w)(\w)\1 # aba sequence
                [a-z]* # separating characters
                (\[[a-z]+\][a-z]+)* # zero or more bracketed sections
                                    # followed by characters
                \[[a-z]*\2\1\2[a-z]*\] # bracketed bab
                '''
    regex2 = r'''\[[a-z]*(\w)(\w)\1[a-z]*\] # bracketed aba
                ([a-z]+\[[a-z]+\])* # zero or more bracketed sections
                [a-z]* # separating characters
                \2\1\2 # bab sequence
                '''
    # Bug fix: examine every match, not just the first. The first aba found
    # may be degenerate (interior == exterior char, e.g. "aaa"/"[aaa]"),
    # which wrongly rejected addresses containing a later valid aba/bab pair.
    for pattern in (regex, regex2):
        for match in re.finditer(pattern, ip, re.VERBOSE):
            if match.group(1) != match.group(2):
                return True
    return False
|
f46dda13d53717352fe27446d9fa3d0292d75c26
| 699,250
|
import six
import json
def meta_serialize(metadata):
    """
    Serialize non-string metadata values before sending them to Nova.

    String values are passed through unchanged; any other value is
    JSON-encoded, so the result maps every key to a string.
    """
    # Modernization: on Python 3, six.string_types == (str,), so the six
    # compatibility shim is unnecessary and isinstance(value, str) is
    # equivalent.
    return {key: value if isinstance(value, str) else json.dumps(value)
            for key, value in metadata.items()}
|
a2ef8762c9d1b3d78dc401996392df0985c1c226
| 699,251
|
def keep_merge(walkers, keep_idx):
    """Merge walkers, keeping the state of the walker at *keep_idx*.

    Parameters
    ----------
    walkers : list of objects implementing the Walker interface
        The walkers that will be merged together.
    keep_idx : int
        Index into *walkers* of the walker whose state the merged walker
        inherits.

    Returns
    -------
    merged_walker : object implementing the Walker interface
    """
    # The merged walker carries the combined weight of all inputs.
    total_weight = sum(w.weight for w in walkers)
    walker_cls = type(walkers[0])
    return walker_cls(walkers[keep_idx].state, total_weight)
|
b029bd14cab4aec2cdf8edf27a9d63c2ffbbe61e
| 699,253
|
def get_md5_checksum_from_response(upload_response):
    """
    Extract the MD5 checksum from an upload response's Etag header.

    The Etag value is returned with its surrounding double quotes stripped,
    so it can be compared against a locally computed checksum; an empty
    string is returned when no Etag header is present.
    """
    return upload_response.headers.get('Etag', '').replace('"', '')
|
ffb9287066aed803f79a406224c54d9035e57fc3
| 699,254
|
def truncate_string(s, length=200, end="…"):
    """A simplified version of Jinja2's truncate filter.

    Strings no longer than *length* are returned untouched; otherwise the
    string is cut at the last word boundary that fits and *end* is appended.
    """
    if len(s) > length:
        head = s[: length - len(end)]
        return head.rsplit(" ", 1)[0] + end
    return s
|
4d97b31d0b9e6ee9eb0c14be88318271ebd1b3bb
| 699,255
|
import re
import string
def remove_more_punct(text):
    """
    Lowercase *text* and strip ASCII and common typographic punctuation.

    Parameters
    ----------
    text : str
        String to lowercase and strip of punctuation.

    Returns
    -------
    str
        The cleaned, whitespace-normalized string.  NOTE(review): the
        original docstring said "tokenized list", but the code joins the
        tokens back into a single space-separated string — confirm callers
        expect a string.
    """
    # ASCII punctuation characters from string.punctuation.
    punct = set(string.punctuation)
    text = text.lower()
    # Drop ASCII punctuation character by character.
    text = "".join([c for c in text if c not in punct])
    # Remove typographic/currency symbols not covered by string.punctuation.
    text = re.sub(r"""[()\’°""#/@;¢€:£<“>{}«®`©”+=~‘|.!?,]""", "", text)
    # NOTE(review): the leading "/" makes this match "/ followed by one
    # non-letter", not "any non-letter" — possibly an unintended leftover;
    # confirm intent before changing.
    text = re.sub(r'/[^a-zA-Z]',"",text)
    # Collapse runs of whitespace into single spaces.
    text = ' '.join(text.split())
    return text
|
992824627b35ef8a572d4e7e722cc2385cbd22c4
| 699,256
|
def is_nan(var) -> bool:
    """
    Check whether *var* is a NaN value.

    Exploits the IEEE-754 property that NaN is the only value that does not
    compare equal to itself, avoiding any math/numpy dependency.
    """
    return var != var
|
0f91829f9b5ee3dfb22944d956066069b4a3f606
| 699,259
|
def load_instances(model, primary_keys):
    """Fetch model instances in bulk, preserving the order of *primary_keys*."""
    by_pk = model.objects.in_bulk(primary_keys)
    return [by_pk[key] for key in primary_keys]
|
21f26f66047fc2b2871f77447035ba15fbf78fa7
| 699,260
|
def _merge_ns(base, override):
"""we can have several namespaces in properties and in call"""
new_ns = {}
for ns in base:
new_ns[ns] = base[ns]
for ns in override:
new_ns[ns] = override[ns]
return new_ns
|
935a5703b42c92104bff543da57183895cd11bf2
| 699,262
|
def GetHotlistRoleName(effective_ids, hotlist):
    """Determines the name of the role a member has for a given hotlist."""
    # Checked in priority order: Owner beats Editor beats Follower.
    role_checks = (
        ('Owner', hotlist.owner_ids),
        ('Editor', hotlist.editor_ids),
        ('Follower', hotlist.follower_ids),
    )
    for role_name, member_ids in role_checks:
        if not effective_ids.isdisjoint(member_ids):
            return role_name
    return None
|
0c03460d3e4190d4964a60a2753d31e1732b6e44
| 699,263
|
from unittest.mock import Mock
from unittest.mock import patch
def mock_gateway_fixture(gateway_id):
    """Mock a Tradfri gateway."""
    gateway_info = Mock(id=gateway_id, firmware_version="1.2.1234")
    # Accessors resolve lazily against the mock so tests may mutate
    # mock_devices / mock_groups after the fixture is created.
    gateway = Mock(
        get_devices=lambda: gateway.mock_devices,
        get_groups=lambda: gateway.mock_groups,
        get_gateway_info=lambda: gateway_info,
        mock_devices=[],
        mock_groups=[],
        mock_responses=[],
    )
    patch_component = patch(
        "homeassistant.components.tradfri.Gateway", return_value=gateway
    )
    patch_flow = patch(
        "homeassistant.components.tradfri.config_flow.Gateway", return_value=gateway
    )
    with patch_component, patch_flow:
        yield gateway
|
6404cb18ce9dd0e97b33958958d7851ee5739bed
| 699,264
|
import re
def create_contents(text, obsidian=False):
    """Build a markdown contents table from the headers parsed out of *text*.

    The *obsidian* flag indicates single-file mode (see file_converter):
    internal ``[[#...]]`` links are emitted and the table gets a level-2
    heading instead of a level-1 one (markdown over PDF format).
    """
    lines = ["## Contents" if obsidian else "# Contents"]
    for header in re.findall(r'(\#+ .*)', text):
        depth = header.count('#')
        # Header text starts two characters past the final "# ".
        title = header[header.index("# ") + 2:]
        if obsidian:
            # Obsidian internal link: keep the title verbatim.
            link = "[[#" + title + "]]"
        else:
            anchor = title.lower().replace(" ", "-")
            link = "[" + title + "](#" + anchor + ")"
        lines.append(" " * (depth - 1) + "- " + link)
    return "\n".join(lines)
|
8011d0e2118497917e99a217fb7bfa025658cc35
| 699,265
|
import argparse
def parse_args():
    """Parse and return the command-line arguments for this script."""
    formatter = argparse.ArgumentDefaultsHelpFormatter
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=formatter)
    # Positional: the VCF file to read.
    parser.add_argument('vcf', metavar='V', help="Input VCF file")
    parser.add_argument('--meta', '-m', default='',
                        help="File giving strain details to convert strain names to \
standardized names used in VCF")
    # Genotypes below this frequency are filtered out.
    parser.add_argument('--filter', '-f', default=0.0, type=float,
                        help="Filter to genotypes at frequencies lower than the given level")
    return parser.parse_args()
|
b047d53e73f882f43bc4cde773273817c7420fef
| 699,266
|
from typing import Any
def function_sig_key(
        name: str,
        arguments_matter: bool,
        skip_ignore_cache: bool,
        *args: Any,
        **kwargs: Any,
) -> int:
    """Return a unique int identifying a function's call signature.

    When *arguments_matter* is True the signature incorporates the given
    positional and keyword argument values; when *skip_ignore_cache* is
    True the ``ignore_cache`` keyword argument is excluded from the
    calculation.
    """
    parts = [name]
    if arguments_matter:
        parts.extend(str(arg) for arg in args)
        parts.extend(
            str(value) for argname, value in kwargs.items()
            if not (skip_ignore_cache and argname == 'ignore_cache')
        )
    return hash(''.join(parts))
|
bfec647db5d819fb87cf3a227927acd5cd6dda10
| 699,267
|
def tenary_pass_through(*args):
    """Pass-through method for a list of argument values.

    Parameters
    ----------
    args : list of scalar
        Argument values.

    Returns
    -------
    tuple
        The arguments, unchanged.
    """
    return args
|
edbf7123bb2088b51aa27e72e0007c2f30f24d7b
| 699,268
|
import numpy
def scores_vs_time(timeseries, numerator = 'fractionmatched'):
    """Process a timeseries as read by load_trajectory and return the fraction of each reference atom type found at each time.

    Parameters
    ----------
    timeseries : dict
        Iteration-indexed trajectory information as output by load_trajectory;
        keys are assumed to be integer iteration indices.
    numerator : str, optional
        Per-type key summed as the numerator of the aggregate score
        (default 'fractionmatched').

    Returns
    -------
    time_fractions : dict
        Dictionary of NumPy arrays, keyed by reference type.
        The full score across all types is under 'all'.
        'all' is from the total list if available or calculated from other references.
    """
    # How many iterations are present?
    # NOTE(review): range(max_its) below never visits iteration max_its
    # itself — confirm whether iterations are 0- or 1-indexed upstream.
    max_its = numpy.max([k for k in timeseries])
    # Collect the union of reference types seen at any iteration.
    reftypes = set()
    for it in timeseries:
        for reftype in timeseries[it]:
            if reftype not in reftypes:
                reftypes.add(reftype)
    # Allocate one per-type array plus the aggregate 'all' array.
    time_fractions = {}
    time_fractions['all'] = numpy.zeros( max_its, float)
    for reftype in reftypes:
        time_fractions[reftype] = numpy.zeros( max_its, float)
    # Update with data
    for it in range(max_its):
        # Accumulate numerator/denominator across the reference types
        # present at this iteration to compute the aggregate score.
        denom = 0
        numer = 0
        for reftype in reftypes:
            if reftype in timeseries[it]:
                try:
                    time_fractions[reftype][it] = timeseries[it][reftype]['fraction']
                except KeyError:
                    print("Can't find key set %s, %s, %s for timeseries." % (it, reftype, 'fraction'))
                    print("Available keys:", timeseries[it][reftype])
                denom += timeseries[it][reftype]['denominator']
                numer += timeseries[it][reftype][numerator]
        # Any reference type which does not appear at this time point has zero matches so we just leave the value at zero
        # Handle 'all' case last: only compute the aggregate when an 'all'
        # entry in the data did not already fill this slot above.
        if time_fractions['all'][it] == 0:
            time_fractions['all'][it] = numer/float(denom)
    return time_fractions
|
17ec9d243425366ae92b15c0d4894379a9d8f2dc
| 699,269
|
import time
def date_file_name(name):
    """Append a "YYYYMMDD-HHMMSS" timestamp suffix to *name* and return it."""
    suffix = time.strftime("%Y%m%d-%H%M%S")
    return "{}_{}".format(name, suffix)
|
40e37e2a5dbc0208421f797bc2ae349414b05e4e
| 699,271
|
def compute_efficiency_gap(partition):
    """
    Compute the efficiency gap of a districting plan (GOP perspective).

    *partition* is a gerrychain.Partition object with voteshare and
    population data; ``add_population_data`` and ``add_voteshare_data`` are
    assumed to have been called on its GeoDataFrame.

    Returns (gop_wasted_votes - dem_wasted_votes) / total_votes.

    Reference: Stephanopoulos and McGhee.
    "Partisan gerrymandering and the efficiency gap." 2015.
    """
    num_districts = len(partition.parts)
    gop_wasted = 0
    dem_wasted = 0
    for district in range(1, num_districts + 1):
        gop = partition['gop_votes'][district]
        dem = partition['dem_votes'][district]
        # Votes needed to win; the winner wastes everything above this,
        # the loser wastes all of its votes.
        threshold = 0.5 * (gop + dem)
        if gop > dem:
            gop_wasted += gop - threshold
            dem_wasted += dem
        else:
            gop_wasted += gop
            dem_wasted += dem - threshold
    # Total population stands in for total votes for simplicity.
    total_votes = partition.graph.data.population.sum()
    return (gop_wasted - dem_wasted) / total_votes
|
2982fe1b2f570c8eca27fade8915e9b117397cd1
| 699,272
|
import requests
import json
def updateData(url):
    """
    Return the symbol and the price of an asset.

    Parameters
    -----------
    url : str
        URL used for information retrieval.

    Returns
    -----------
    tuple
        (symbol name, symbol price)
    """
    response = requests.get(url)
    # The feed prepends 5 junk characters before the JSON payload.
    payload = json.loads(response.text[5:])
    update = payload["PriceUpdate"][0][0][0][17]
    symbolName = update[1]
    symbolPrice = update[4]
    print('{0} - {1}'.format(symbolName, symbolPrice))
    return (symbolName, symbolPrice)
|
b458dee9da7700f5c4f82a182724bba007c7a05f
| 699,273
|
def formatPublisher(publisher):
    """Return the publisher's name unchanged (identity formatter)."""
    return publisher
|
713cede34a67351d249439d83982331c60b26dcc
| 699,274
|
import os
import sys
def get_file_path(file):
    """Return the path of *file* inside the "res" directory next to the script.

    :param file: the filename
    """
    script_dir = os.path.dirname(sys.argv[0])
    return os.path.join(script_dir, "res", file)
|
015aa3a1b5d7c101c711bee23017cd926d6c9cdb
| 699,275
|
def has_phrase(message: str, phrases: list):
    """Return True if *message* contains any of the given phrases.

    Each phrase is a list of words, e.g. [["hey", "world"], ["hello", "world"]];
    the words are joined with single spaces before the containment check.
    """
    return any(" ".join(words) in message for words in phrases)
|
f56fcc261e21c538f1f23b811e14ae29d92f3038
| 699,276
|
from typing import Optional
def substringBetween(s: str, left: str, right: str) -> Optional[str]:
    """Return the substring of *s* after the first *left*, up to the next
    *right* (or to the end if *right* is absent); None if *left* is absent."""
    pieces = s.split(left)
    if len(pieces) < 2:
        # *left* not found — mirrors the original IndexError path.
        return None
    return pieces[1].split(right)[0]
|
5cda5bc57fe1f65053dffb047bef0b34e0a39a2b
| 699,277
|
def check_output(solution: str, output: str) -> bool:
    """Check if program's output is correct.

    Parameters
    ----------
    solution
        Sample output taken from BOJ.
    output
        Output from program run.

    Returns
    -------
    bool
        True if correct, False if wrong.
    """
    solution_lines = [x.rstrip() for x in solution.rstrip().split("\n")]
    output_lines = [x.rstrip() for x in output.rstrip().split("\n")]
    # Bug fix: zip() silently truncates to the shorter list, so output with
    # missing or extra lines was accepted. Compare lengths explicitly first.
    if len(solution_lines) != len(output_lines):
        return False
    return all(x == y for x, y in zip(solution_lines, output_lines))
|
66d35952d892842bd1b47329bfa7b77e0a878a51
| 699,278
|
from typing import Sequence
def bit_array_to_int(bit_array: Sequence[int]) -> int:
    """
    Convert a bit array into an integer, right-most bit least significant.

    :param bit_array: an array of bits with right-most bit considered least significant.
    :return: the integer corresponding to the bitstring.
    """
    value = 0
    # OR each bit into its position, counting up from the right-most bit.
    for position, bit in enumerate(reversed(bit_array)):
        value |= bit << position
    return value
|
d902a8e3db7b65ad348ef86cbfac371361aedd58
| 699,279
|
def clip_args(func, arg1, arg2, bounds=(0., 1.)):
    """
    Clip both arguments to *bounds* and apply *func* to the clipped copies.

    Values below the lower bound are replaced by the lower bound and values
    above the upper bound by the upper bound; the input arrays themselves
    are left unmodified.

    Parameters
    ----------
    func : func(arg1, arg2)
        The function the clipped arguments are passed to.
    arg1 : ndarray
        1D array of floats.
    arg2 : ndarray
        1D array of floats.
    bounds : tuple, optional
        (lower, upper) limits applied to both arguments.

    Returns
    -------
    clipped_func : func(arg1_clipped, arg2_clipped)
        The result of *func* on the clipped arguments.

    Notes
    -----
    This function does not do any type of typechecking.
    """
    lower, upper = bounds
    def _clip(values):
        # Work on a copy so the caller's array is untouched.
        clipped = values.copy()
        clipped[clipped < lower] = lower
        clipped[clipped > upper] = upper
        return clipped
    return func(_clip(arg1), _clip(arg2))
|
249b5fed30fd050da570ef5276f826c071457935
| 699,280
|
import math
def euclidean_distance(p1, p2):
    """Calculate the euclidean distance of two 2D points.

    >>> euclidean_distance({'x': 0, 'y': 0}, {'x': 0, 'y': 3})
    3.0
    >>> euclidean_distance({'x': 0, 'y': 0}, {'x': 0, 'y': -3})
    3.0
    >>> euclidean_distance({'x': 0, 'y': 0}, {'x': 3, 'y': 4})
    5.0
    """
    dx = p1["x"] - p2["x"]
    dy = p1["y"] - p2["y"]
    return math.sqrt(dx * dx + dy * dy)
|
8cdc0e0534b2ed9272832fc0f977b9aa0e594c65
| 699,281
|
import re
def read_raw_dictfile(filename, language):
    """Read an unformatted dict file and return a list of (word, pron) pairs.

    Each line is split on whitespace; the first token is the word and the
    remaining tokens are its pronunciation symbols.  The *language*
    parameter is unused here but kept for interface compatibility.
    """
    entries = []
    with open(filename) as handle:
        for raw_line in handle:
            tokens = re.split(r"\s+", raw_line.rstrip())
            entries.append((tokens[0], tokens[1:]))
    return entries
|
a8c95c87551119a89bd2c686be35b3a3aba3f567
| 699,282
|
import zlib
def zlib_handler(method):
    """
    Select the hash function and initial seed for CRC32/Adler32.

    :param method: "crc32" or "adler32".
    :type method: str
    :return: (hash function, seed) — seed 0 for CRC32, 1 for Adler32.
    """
    if method == "crc32":
        return zlib.crc32, 0
    # Anything other than "crc32" falls through to Adler32, as before.
    return zlib.adler32, 1
|
46e9d91e5ebe92f3b5b24f138ae693368128ce09
| 699,283
|
def hex_dump(string):
    """Render each character of *string* as a two-digit uppercase hex code,
    space separated."""
    return ' '.join('%0.2X' % ord(char) for char in string)
|
7c0286c57387c5b8f6a79e2a9350eb54e480465a
| 699,284
|
def get_operands_dtype(operands):
    """
    Return the common data type of the tensors, raising if they differ.
    """
    dtypes = set(operand.dtype for operand in operands)
    if len(dtypes) > 1:
        raise ValueError(f"All tensors in the network must have the same data type. Data types found = {dtypes}.")
    return operands[0].dtype
|
cda67513ea8ca9f7ea663d232a5358fad8f8597d
| 699,286
|
def get_j(xi, j=0):
    """
    Return the decimal-scaling exponent j for normalization.

    Finds the smallest j (starting from the given one) such that
    xi / 10**j <= 1, as used in decimal-scaling normalization.
    """
    # Bug fix: "^" is bitwise XOR in Python, not exponentiation; the
    # original computed 10 ^ j instead of the intended 10 ** j.
    if xi / float(10 ** j) <= 1:
        return j
    return get_j(xi, j + 1)
|
975a74d70ceeba80debfadf171f6c32316cd7d7b
| 699,287
|
def get_shortest_path_waypoints_old(planner, origin, destination):
    """Return the waypoints along the shortest path from origin to destination.

    Uses the planner's A* search and maps the resulting graph nodes back to
    their (x, y) vertices — useful for trajectory planning and control or
    for drawing the waypoints.

    Args:
        planner: carla.macad_agents.navigation's global route planner object.
        origin (tuple): Origin (x, y) position on the map.
        destination (tuple): Destination (x, y) position on the map.

    Returns:
        A list of (x, y) waypoints connecting the origin and the destination
        on the map along the shortest path.
    """
    graph, _xy_id_map = planner.build_graph()
    node_path = planner.path_search(origin, destination)
    return [graph.nodes[node]["vertex"] for node in node_path]
|
bb6cfdeb139c6c10cbd4fd8c4d0dc31083ecc2d6
| 699,288
|
def delete_container(client, resource_group_name, name, **kwargs):
    """Delete a container group via the client; extra kwargs are ignored."""
    return client.delete(resource_group_name, name)
|
d69c3fbd89ce684942af4980ca9931908b12db6a
| 699,289
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.