content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def render_generic_exception(e):
    """
    Return a human-readable, one-line description of a generic exception.

    Parameters:
        e: the exception to render
    """
    message_template = 'Details: {0}'
    return message_template.format(e)
import os
def getRecordIdFromFilename(Filename):
    """
    Get the record ID from a filename: everything before the final
    extension (as split by os.path.splitext).
    """
    record_id = os.path.splitext(Filename)[0]
    return record_id
import os
def get_class_types_from_data(TRAIN_DATA_PATH):
    """
    Given the training-set path, determine how many and which classes
    exist: every sub-directory of the path is treated as one class.

    :param TRAIN_DATA_PATH: path of the training data directory
    :return: list with the names of the class sub-directories
    """
    classes = []
    for entry in os.listdir(TRAIN_DATA_PATH):
        # os.path.join fixes the original string concatenation, which broke
        # whenever TRAIN_DATA_PATH lacked a trailing separator
        if os.path.isdir(os.path.join(TRAIN_DATA_PATH, entry)):
            classes.append(entry)
    return classes
def make_crc_tab_fwd(poly):
    """Build the 256-entry forward CRC-32 lookup table for `poly`.

    Entry 0 is always 0; entry i is the CRC of the single byte i using the
    MSB-first (forward) shift convention.
    """
    table = []
    for byte_value in range(256):
        crc = byte_value << 24
        for _ in range(8):
            crc <<= 1
            if crc & 0x100000000:
                # the bit shifted out of position 31 was set: fold in poly
                crc ^= poly
        table.append(crc & 0xFFFFFFFF)
    return table
def PyObject_SetItem(space, w_obj, w_key, w_value):
    """Map the object key to the value v. Returns -1 on failure. This is the
    equivalent of the Python statement o[key] = v."""
    # Delegates to the object-space's setitem. NOTE(review): failures appear
    # to propagate as exceptions from space.setitem rather than via an
    # explicit -1 return here -- verify against the CPyExt error machinery.
    space.setitem(w_obj, w_key, w_value)
    return 0 | a77f23bc803090027fdf8249404f10200fe0992a | 38,931 |
def Claret_LD_law(mu, c1, c2, c3, c4):
    """
    Claret 4-parameter limb-darkening law.

    Evaluates 1 - sum_k c_k * (1 - mu**(k/2)) for k = 1..4 and scales the
    result by mu.
    """
    limb = 1
    for coeff, power in ((c1, 0.5), (c2, 1), (c3, 1.5), (c4, 2)):
        limb -= coeff * (1 - mu**power)
    return limb * mu
def conv_ext_to_proj(P):
    """
    Convert a point from extended coordinates to projective coordinates by
    dropping the auxiliary fourth coordinate.

    :param P: point as a sequence (X, Y, Z, T)
    :return: tuple (X, Y, Z)
    """
    X, Y, Z = P[0], P[1], P[2]
    return X, Y, Z
def _ExtractKeyValuePairsFromLabelsMessage(labels):
"""Extracts labels as a list of (k, v) pairs from the labels API message."""
labels = [] if labels is None else labels
return [(label.key, label.value) for label in labels] | 3e26f60594bb1050cf5c701836754a31ce3defdd | 38,935 |
def get_run_name_nr(_run_name, _run_nr):
    """
    Join a run name and run number with a dash.

    :param _run_name: [str], e.g. 'runA'
    :param _run_nr: [int], e.g. 1
    :return: [str], e.g. 'runA-1'
    """
    return "{}-{}".format(_run_name, _run_nr)
def merge_json(data1, data2):
    """Merge the 'list' entries of two json objects together.

    Args:
        data1 (json or None): first json data; when falsy, data2 is
            returned unchanged
        data2 (json): 2nd json data
    Returns:
        TYPE: merged data (data1 mutated in place, or data2)
    """
    if not data1:
        return data2
    source = data2['list']
    for key in source:
        data1['list'][key] = source[key]
    return data1
import random
def get_players_names(nb_player):
    """
    Pick random names for the players from a pre-defined list of names.

    :param nb_player: number of players in need of a name.
    :return: list of `nb_player` distinct names.
    :raises ValueError: when nb_player exceeds the number of available
        names (raised by random.sample after the warning is printed).
    """
    name_list = [
        "Bobby",
        "Johny",
        "Suzan",
        "Karen",
        "Lauren",
        "Anthony",
        "Isa",
        "Matthew",
        "Pablo",
        "Sofia",
    ]
    if nb_player > len(name_list):
        # warn before random.sample raises ValueError below
        # (message typos and the unbalanced quote of the original fixed)
        print(
            "Too many players, not enough names: please choose a number of "
            "players inferior to {} or add new names in the name_list of "
            "the function 'get_players_names' in the file "
            "'utils/game_utils.py'".format(len(name_list))
        )
    player_name = random.sample(name_list, nb_player)
    return player_name
def get_next_arguments(action, type="input"):
    """
    Get a tuple of required/nonrequired inputs or outputs for each method

    Parameters
    ----------
    action : Qiime2.action
    type : {"input", "param", "output"}
        Delineates if getting the action input, param, or output types

    Returns
    -------
    List of tuples containing name and required semantic types
    List of tuples containing name and optional semantic types
    """
    # Select the signature collection once instead of repeating the same
    # loop body in three branches (the original was copy-pasted thrice).
    if type == "input":
        items = action.signature.inputs
    elif type == "param":
        items = action.signature.parameters
    else:
        items = action.signature.outputs
    req = []
    non_req = []
    for k, v in items.items():
        if not v.has_default():
            req.append([k, v.qiime_type])
        else:
            # optional arguments are marked with a leading "."
            non_req.append(["." + k, v.qiime_type])
    return req, non_req
from distutils.spawn import find_executable
def get_bzipper():
    """Return "pbzip2" when it is on PATH (parallel bzip2), else "bzip2"."""
    # shutil.which replaces distutils.spawn.find_executable; distutils was
    # removed from the standard library in Python 3.12.
    from shutil import which
    return "pbzip2" if which("pbzip2") else "bzip2"
import random
def miller_rabin(n, k):
    """Run the Miller-Rabin probabilistic primality test on n with k rounds.

    Arguments:
        n (int): number whose primality is to be tested (must be > 3)
        k (int): number of random witnesses to try

    Returns:
        bool: True if n is probably prime. A composite n is wrongly
        reported prime with probability less than 4**-k.

    See <https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test>
    """
    assert n > 3
    # write n-1 as 2**exponent * odd_part with odd_part odd
    odd_part = n - 1
    exponent = 0
    while odd_part % 2 == 0:
        odd_part //= 2
        exponent += 1
    assert n - 1 == odd_part * 2**exponent
    assert odd_part % 2 == 1
    for _ in range(k):  # each round divides the false-prime risk by 4
        witness = random.randint(2, n - 2)
        x = pow(witness, odd_part, n)
        if x in (1, n - 1):
            continue  # this witness is inconclusive
        looks_composite = True
        for _ in range(1, exponent):
            x = x * x % n
            if x == n - 1:
                looks_composite = False  # inconclusive after all
                break
        if looks_composite:
            return False
    return True
def joiner(text):
    """
    Join a list of strings into one space-separated string.
    """
    return " ".join(text)
def imatch(a: str, b: str) -> bool:
    """
    Return True if the given strings are identical without regard to case.

    Uses str.casefold(), the documented way to do caseless matching; it
    handles cases like German 'ß' vs 'SS' that lower() misses, and agrees
    with lower() on ASCII input.
    """
    return a.casefold() == b.casefold()
import argparse
def create_tc_arg_parser():
    """Build the default command line argument parser for App Engine apps.

    Required flags: --api_access_id, --api_secret_key, --api_default_org.
    The ThreatConnect path flags default to /log, /tmp, /out and the public
    API endpoint.
    """
    parser = argparse.ArgumentParser()
    required_flags = (
        ('--api_access_id', 'API Access ID'),
        ('--api_secret_key', 'API Secret Key'),
        ('--api_default_org', 'API Default Org'),
    )
    for flag, help_text in required_flags:
        parser.add_argument(flag, help=help_text, required=True)
    defaulted_flags = (
        ('--tc_log_path', 'ThreatConnect log path', '/log'),
        ('--tc_temp_path', 'ThreatConnect temp path', '/tmp'),
        ('--tc_out_path', 'ThreatConnect output path', '/out'),
        ('--tc_api_path', 'ThreatConnect api path',
         'https://api.threatconnect.com'),
    )
    for flag, help_text, default in defaulted_flags:
        parser.add_argument(flag, help=help_text, default=default)
    return parser
import sys
def load_evaluation_where_filenames(experiment):
    """
    Look up the WHERE-evaluation result files and drift dimensions for a
    named experiment.

    Parameters
    ----------
    experiment : str
        Identifier of the experiment for evaluation (e.g.
        '50Dim_Broken_NAE-IAW'); unknown identifiers abort the process.

    Returns
    -------
    tuple
        (where_file_names, drift_dims): a dict with keys
        "FILE_NAME_re_all" and "FILE_NAME_re_old_pattern" holding result
        file names, and a slice selecting the drift-affected dimensions.
    """
    where_file_names = {}
    # Experiments NAE-IAW
    if experiment == '1Dim_Broken_NAE-IAW':
        where_file_names["FILE_NAME_re_all"] = "SAW_Autoencoder_ADWIN_Training_RandomNumpyRandomNormalUniform_1DR_100Dims_1MinDimBroken_300MinL_2000MaxL_2021-08-31_13.25.pickle_2021-09-01_14.58_WHERE_ALL_fitNewAETrue_fitFalse_singleGDFalse_initNewADWINTrue_feedNewADWINFalse"
        where_file_names["FILE_NAME_re_old_pattern"] = "SAW_Autoencoder_ADWIN_Training_RandomNumpyRandomNormalUniform_1DR_100Dims_1MinDimBroken_300MinL_2000MaxL_2021-08-31_13.25.pickle_2021-09-01_14.58_WHERE_OLDPATTERN_fitNewAETrue_fitFalse_singleGDFalse_initNewADWINTrue_feedNewADWINFalse"
        drift_dims = slice(40, 41)
    elif experiment == '10Dim_Broken_NAE-IAW':
        where_file_names["FILE_NAME_re_all"] = "SAW_Autoencoder_ADWIN_Training_RandomNumpyRandomNormalUniform_1DR_100Dims_10MinDimBroken_300MinL_2000MaxL_2021-08-31_13.26.pickle_2021-08-31_16.50_WHERE_ALL_fitNewAETrue_fitFalse_singleGDFalse_initNewADWINTrue_feedNewADWINFalse"
        where_file_names["FILE_NAME_re_old_pattern"] = "SAW_Autoencoder_ADWIN_Training_RandomNumpyRandomNormalUniform_1DR_100Dims_10MinDimBroken_300MinL_2000MaxL_2021-08-31_13.26.pickle_2021-08-31_16.50_WHERE_OLDPATTERN_fitNewAETrue_fitFalse_singleGDFalse_initNewADWINTrue_feedNewADWINFalse"
        drift_dims = slice(40, 50)
    elif experiment == '50Dim_Broken_NAE-IAW':
        where_file_names["FILE_NAME_re_all"] = "SAW_Autoencoder_ADWIN_Training_RandomNumpyRandomNormalUniform_1DR_100Dims_50MinDimBroken_300MinL_2000MaxL_2021-08-31_13.26.pickle_2021-08-31_17.03_WHERE_ALL_fitNewAETrue_fitFalse_singleGDFalse_initNewADWINTrue_feedNewADWINFalse"
        where_file_names["FILE_NAME_re_old_pattern"] = "SAW_Autoencoder_ADWIN_Training_RandomNumpyRandomNormalUniform_1DR_100Dims_50MinDimBroken_300MinL_2000MaxL_2021-08-31_13.26.pickle_2021-08-31_17.03_WHERE_OLDPATTERN_fitNewAETrue_fitFalse_singleGDFalse_initNewADWINTrue_feedNewADWINFalse"
        drift_dims = slice(40, 90)
    elif experiment == '50Dim_LongAETraining_Broken_NAE-IAW':
        where_file_names["FILE_NAME_re_all"] = "SAW_Autoencoder_ADWIN_Training_RandomNumpyRandomNormalUniform_1DR_100Dims_50MinDimBroken_300MinL_2000MaxL_2021-08-31_13.26.pickle_2021-09-01_15.26_WHERE_ALL_fitNewAETrue_fitFalse_singleGDFalse_initNewADWINTrue_feedNewADWINFalse"
        where_file_names["FILE_NAME_re_old_pattern"] = "SAW_Autoencoder_ADWIN_Training_RandomNumpyRandomNormalUniform_1DR_100Dims_50MinDimBroken_300MinL_2000MaxL_2021-08-31_13.26.pickle_2021-09-01_15.26_WHERE_OLDPATTERN_fitNewAETrue_fitFalse_singleGDFalse_initNewADWINTrue_feedNewADWINFalse"
        drift_dims = slice(40, 90)
    elif experiment == '50Dim_BigBatchTraining_Broken_NAE-IAW':
        where_file_names["FILE_NAME_re_all"] = "SAW_Autoencoder_ADWIN_Training_RandomNumpyRandomNormalUniform_1DR_100Dims_50MinDimBroken_300MinL_2000MaxL_2021-08-31_13.26.pickle_2021-09-01_15.40_WHERE_ALL_fitNewAETrue_fitFalse_singleGDFalse_initNewADWINTrue_feedNewADWINFalse"
        where_file_names["FILE_NAME_re_old_pattern"] = "SAW_Autoencoder_ADWIN_Training_RandomNumpyRandomNormalUniform_1DR_100Dims_50MinDimBroken_300MinL_2000MaxL_2021-08-31_13.26.pickle_2021-09-01_15.40_WHERE_OLDPATTERN_fitNewAETrue_fitFalse_singleGDFalse_initNewADWINTrue_feedNewADWINFalse"
        drift_dims = slice(40, 90)
    elif experiment == '50Dim_NewDesign_Broken_NAE-IAW':
        where_file_names["FILE_NAME_re_all"] = "SAW_Autoencoder_ADWIN_Training_RandomNumpyRandomNormalUniform_1DR_100Dims_50MinDimBroken_300MinL_2000MaxL_2021-08-31_13.26.pickle_2021-09-02_14.42_WHERE_ALL_fitNewAETrue_fitFalse_initNewADWINTrue_feedNewADWINFalse"
        where_file_names["FILE_NAME_re_old_pattern"] = "SAW_Autoencoder_ADWIN_Training_RandomNumpyRandomNormalUniform_1DR_100Dims_50MinDimBroken_300MinL_2000MaxL_2021-08-31_13.26.pickle_2021-09-02_14.42_WHERE_OLDPATTERN_fitNewAETrue_fitFalse_initNewADWINTrue_feedNewADWINFalse"
        drift_dims = slice(40, 90)
    elif experiment == '100Dim_Broken_NAE-IAW':
        where_file_names["FILE_NAME_re_all"] = "SAW_Autoencoder_ADWIN_Training_RandomNumpyRandomNormalUniform_1DR_100Dims_100MinDimBroken_300MinL_2000MaxL_2021-08-31_13.27.pickle_2021-08-31_16.00_WHERE_ALL_fitNewAETrue_fitFalse_singleGDFalse_initNewADWINTrue_feedNewADWINFalse"
        where_file_names["FILE_NAME_re_old_pattern"] = "SAW_Autoencoder_ADWIN_Training_RandomNumpyRandomNormalUniform_1DR_100Dims_100MinDimBroken_300MinL_2000MaxL_2021-08-31_13.27.pickle_2021-08-31_16.00_WHERE_OLDPATTERN_fitNewAETrue_fitFalse_singleGDFalse_initNewADWINTrue_feedNewADWINFalse"
        drift_dims = slice(0, 100)
    # Experiments RAE-IAW
    elif experiment == '1Dim_Broken_RAE-IAW':
        where_file_names["FILE_NAME_re_all"] = "SAW_Autoencoder_ADWIN_Training_RandomNumpyRandomNormalUniform_1DR_100Dims_1MinDimBroken_300MinL_2000MaxL_2021-08-31_13.25.pickle_2021-08-31_13.37_WHERE_ALL_fitNewAEFalse_fitTrue_singleGDFalse_initNewADWINTrue_feedNewADWINFalse"
        where_file_names["FILE_NAME_re_old_pattern"] = "SAW_Autoencoder_ADWIN_Training_RandomNumpyRandomNormalUniform_1DR_100Dims_1MinDimBroken_300MinL_2000MaxL_2021-08-31_13.25.pickle_2021-08-31_13.37_WHERE_OLDPATTERN_fitNewAEFalse_fitTrue_singleGDFalse_initNewADWINTrue_feedNewADWINFalse"
        drift_dims = slice(40, 41)
    elif experiment == '10Dim_Broken_RAE-IAW':
        where_file_names["FILE_NAME_re_all"] = "SAW_Autoencoder_ADWIN_Training_RandomNumpyRandomNormalUniform_1DR_100Dims_10MinDimBroken_300MinL_2000MaxL_2021-08-31_13.26.pickle_2021-08-31_13.55_WHERE_ALL_fitNewAEFalse_fitTrue_singleGDFalse_initNewADWINTrue_feedNewADWINFalse"
        where_file_names["FILE_NAME_re_old_pattern"] = "SAW_Autoencoder_ADWIN_Training_RandomNumpyRandomNormalUniform_1DR_100Dims_10MinDimBroken_300MinL_2000MaxL_2021-08-31_13.26.pickle_2021-08-31_13.55_WHERE_OLDPATTERN_fitNewAEFalse_fitTrue_singleGDFalse_initNewADWINTrue_feedNewADWINFalse"
        drift_dims = slice(40, 50)
    elif experiment == '50Dim_Broken_RAE-IAW':
        where_file_names["FILE_NAME_re_all"] = "SAW_Autoencoder_ADWIN_Training_RandomNumpyRandomNormalUniform_1DR_100Dims_50MinDimBroken_300MinL_2000MaxL_2021-08-31_13.26.pickle_2021-08-31_14.02_WHERE_ALL_fitNewAEFalse_fitTrue_singleGDFalse_initNewADWINTrue_feedNewADWINFalse"
        where_file_names["FILE_NAME_re_old_pattern"] = "SAW_Autoencoder_ADWIN_Training_RandomNumpyRandomNormalUniform_1DR_100Dims_50MinDimBroken_300MinL_2000MaxL_2021-08-31_13.26.pickle_2021-08-31_14.02_WHERE_OLDPATTERN_fitNewAEFalse_fitTrue_singleGDFalse_initNewADWINTrue_feedNewADWINFalse"
        drift_dims = slice(40, 90)
    elif experiment == '100Dim_Broken_RAE-IAW':
        where_file_names["FILE_NAME_re_all"] = "SAW_Autoencoder_ADWIN_Training_RandomNumpyRandomNormalUniform_1DR_100Dims_100MinDimBroken_300MinL_2000MaxL_2021-08-31_13.27.pickle_2021-08-31_15.50_WHERE_ALL_fitNewAEFalse_fitTrue_singleGDFalse_initNewADWINTrue_feedNewADWINFalse"
        where_file_names["FILE_NAME_re_old_pattern"] = "SAW_Autoencoder_ADWIN_Training_RandomNumpyRandomNormalUniform_1DR_100Dims_100MinDimBroken_300MinL_2000MaxL_2021-08-31_13.27.pickle_2021-08-31_15.50_WHERE_OLDPATTERN_fitNewAEFalse_fitTrue_singleGDFalse_initNewADWINTrue_feedNewADWINFalse"
        drift_dims = slice(0, 100)
    else:
        # unknown identifier: report and abort the whole process
        print("Error: Experiment identifier does not exist!")
        sys.exit()
    return where_file_names, drift_dims | c6f1ed5aac939c621b6c7bd50cae2221ed26fd7c | 38,952 |
def get_books_by_years(start_year, end_year, books_list):
    """
    Get a dictionary of books and their published year based on a range of
    years.

    Parameters:
        start_year: The lower bound of the search range (inclusive).
        end_year: The upper bound of the search range (inclusive).
        books_list: The list of books to search in.
    Returns:
        books_with_years: A dictionary mapping book titles to their
        published year.
    """
    print("You search for books published from " + str(start_year) + " to " + str(end_year))
    # convert the bounds once instead of once per book (the original
    # called int() inside the loop on every iteration)
    low, high = int(start_year), int(end_year)
    return {
        book["title"]: book["published_year"]
        for book in books_list
        if low <= book["published_year"] <= high
    }
import argparse
import sys
def parse_args(path):
    """
    Parse and validate command line arguments for training.

    Parameters
    ----------
    path : str
        Base directory used to build the default tfrecord file and model
        folder locations.

    Returns
    -------
    argparse.Namespace
        The validated arguments. The process exits with status 1 when a
        numeric argument is out of range.
    """
    parser = argparse.ArgumentParser('Training.py')
    parser.add_argument('--TrainFile', dest='TrainFile', type=str,
                        default=path + '/dataset/tf/train.tfrecords',
                        help='Please enter train tfrecord file')
    parser.add_argument('--ValidFile', dest='ValidFile', type=str,
                        default=path + '/dataset/tf/valid.tfrecords',
                        help='Please enter valid tfrecord file')
    parser.add_argument('--ModelDir', dest='ModelDir', type=str,
                        default=path + '/logs',
                        help='(optimal) folder to store models')
    ## parameters for network
    parser.add_argument('--ClassNum', dest='ClassNum', type=int, default=38,
                        help='(optimal) class number, default is 38')
    parser.add_argument('--BatchSize', dest='BatchSize', type=int, default=64,
                        help='(optimal) batch size, default is 64')
    parser.add_argument('--ValidNum', dest='ValidNum', type=int, default=300,
                        help='(optimal) valid image number, default is 300')
    parser.add_argument('--TrainNum', dest='TrainNum', type=int,
                        default=111244,
                        help='(optimal) train image number, default is 300')
    parser.add_argument('--PoolSize', dest='PoolSize', type=int, default=2,
                        help='(optimal) pool size, default is 2')
    parser.add_argument('--NetworkType', dest='NetworkType', type=str,
                        default='BatNet',
                        help='(optimal) BatNet or VggNet')
    parser.add_argument('--LearningRate', dest='LearningRate', type=float,
                        default=1e-5,
                        help='(optimal) learning rate, default is 1e-5')
    parser.add_argument('--MaxStep', dest='MaxStep', type=int, default=20,
                        help='(optimal) max train step, default is 100000')
    parser.add_argument('--SaveStep', dest='SaveStep', type=int, default=0,
                        help='(optimal) max train step, default 0 means '
                             'it is TrainNum divide BatchSize')
    parser.add_argument('--TFImageWidth', dest='TFImageWidth', type=int,
                        default=64,
                        help='(optimal) image width, default is 64')
    parser.add_argument('--TFImageHeight', dest='TFImageHeight', type=int,
                        default=64,
                        help='(optimal) image height, default is 64')
    parser.add_argument('--Channel', dest='Channel', type=int, default=3,
                        help='(optimal) image channel, default is 3')
    parser.add_argument('--MinAfterQueue', dest='MinAfterQueue', type=int,
                        default=100,
                        help='(optimal) min after queue, default is 500')
    parser.add_argument('--Keep', dest='Keep', type=int, default=100,
                        help='(optimal) keep rate, default is 100')
    args = parser.parse_args()

    if args.NetworkType not in ['BatNet', 'VggNet']:
        # NOTE: the original only warned here without exiting; preserved.
        print('Invalid feature: ', args.NetworkType)
        print("Please input a action: BatNet or VggNet")

    # (failed?, message) pairs replace the thirteen copy-pasted validation
    # blocks of the original; the first failing check aborts the process.
    checks = [
        (args.ClassNum < 1, 'Error! class number must be an integer that >= 1'),
        (args.BatchSize < 1, 'Error! batch size must be an integer that >= 1'),
        (args.ValidNum < 1, 'Error! valid image number must be an integer that >= 1'),
        (args.TrainNum < 1, 'Error! total train image number must be an integer that >= 1'),
        (args.PoolSize < 1, 'Error! pool size must be an integer that >= 1'),
        # message fixed: the original printed "pool size" for learning rate
        (args.LearningRate > 1, 'Error! learning rate must be <= 1'),
        (args.MaxStep < 1, 'Error! max train step must be an integer that >= 1'),
        (args.SaveStep < 0, 'Error! max save step must be an integer that >= 1'),
        (args.TFImageWidth < 1, 'Error! image width must be an integer that >= 1'),
        (args.TFImageHeight < 1, 'Error! image height must be an integer that >= 1'),
        # bug fix: the original tested TFImageHeight twice and never Channel
        (args.Channel < 1, 'Error! image channel must be an integer, colorful image is 3'),
        (args.MinAfterQueue < 1, 'Error! min after queue be an integer that >= 1'),
        (args.Keep < 1, 'Error! keep rate be an integer that >= 1'),
    ]
    for failed, message in checks:
        if failed:
            print('\n-------------->')
            print(message)
            sys.exit(1)
    return args
def filter_records_by_victory(records: list) -> list:
    """Return only the records whose event ended in a victory."""
    return [record for record in records if record["event"]["victory"]]
import ftplib
def get_client(host, port, username, password):
    """
    Create and log in an FTP client at the specified host.

    Parameters:
        host: FTP server hostname or IP address
        port: server port (anything int() accepts)
        username: login user
        password: login password
    Returns:
        ftplib.FTP: a connected, authenticated client
    Raises:
        Re-raises whatever ftplib/socket error occurred while connecting
        or logging in, after printing a diagnostic message.
    """
    try:
        client = ftplib.FTP()
        client.connect(host, int(port))
        client.login(username, password)
        return client
    except Exception:
        # security fix: the original echoed the password into the log line
        print(f'Error accessing the FTP server with the specified credentials'
              f' {host}:{port} as user {username!r}')
        raise  # bare re-raise preserves the original traceback
def join(*, left, right, how="inner", f=None):
    """SQL-like join of two dictionaries.

    Keys are taken from `left`, `right`, their intersection or their union
    depending on `how` ("left", "right", "inner"; anything else = outer).
    Each value is a (left_value, right_value) tuple; a missing side is
    None, and an optional callable `f` is applied to present values.
    """
    assert isinstance(left, dict) and isinstance(right, dict)
    if how == "left":
        keys = left.keys()
    elif how == "right":
        keys = right.keys()
    elif how == "inner":
        keys = left.keys() & right.keys()
    else:  # outer join
        keys = left.keys() | right.keys()

    def transform(value):
        if value is None:
            return None
        return f(value) if callable(f) else value

    return {k: (transform(left.get(k)), transform(right.get(k))) for k in keys}
def sphire_header_magic(tag, special_keys=None):
    """
    Return the star_format key matching a sphire_format key.

    :param tag: key to be checked
    :param special_keys: iterable of keys that get dedicated handling
        elsewhere; may be None
    :return: the star_format key, or 0 when the tag is special, unknown,
        or special_keys is None
    """
    star_translation_dict = {
        "ptcl_source_image": "_rlnMicrographName",
        "phi": "_rlnAngleRot",
        "theta": "_rlnAngleTilt",
        "psi": "_rlnAnglePsi",
        "voltage": "_rlnVoltage",
        "cs": "_rlnSphericalAberration",
        "bfactor": "_rlnCtfBfactor",
        "ampcont": "_rlnAmplitudeContrast",
        "apix": "_rlnDetectorPixelSize",
        "tx": "_rlnOriginX",
        "ty": "_rlnOriginY",
        "_rlnMagnification": "_rlnMagnification",
        "_rlnDefocusU": "_rlnDefocusU",
        "_rlnDefocusV": "_rlnDefocusV",
        "_rlnDefocusAngle": "_rlnDefocusAngle",
        "_rlnCoordinateX": "_rlnCoordinateX",
        "_rlnCoordinateY": "_rlnCoordinateY",
        "ISAC_class_id": "_rlnClassNumber",
    }
    # star-format keys also map to themselves
    for value in list(star_translation_dict.values()):
        star_translation_dict[value] = value
    known_special = {'ctf', 'xform.projection', 'ptcl_source_coord',
                     'xform.align2d', 'data_path'}
    key_value = 0
    try:
        if tag in special_keys:
            # special keys are handled elsewhere; just guard against an
            # unknown special tag slipping through
            assert tag in known_special, 'Missing rule for {}'.format(tag)
        else:
            key_value = star_translation_dict[tag]
    except (KeyError, TypeError):
        # bug fix: the original `except KeyError or TypeError` only caught
        # KeyError, so `tag in None` (special_keys=None) raised TypeError.
        # KeyError: tag has no star_format equivalent.
        # TypeError: special_keys is None.
        return key_value
    return key_value
def _clean_ylabel(feature, max_length=20):
"""
Replaces underscores with spaces and splits `feature` based on line length
Parameters
----------
feature : str
String to be cleaned
max_length : str
Maximum length (in characters) of each line. If `feature` is longer
than this length it will be split with a newline character. Default: 20
Returns
-------
feature : str
Cleaned input `feature`
"""
feature = feature.replace('_', ' ')
if len(feature) > max_length:
ylabel = feature.split(' ')
idx = len(ylabel) // 2
feature = '\n'.join([' '.join(ylabel[:idx]), ' '.join(ylabel[idx:])])
return feature | 31c2e57cc57c48f5940d2b57f0fce7aef0600e32 | 38,960 |
def get_text_from_file(fn):
    """
    Return the entire contents of the file `fn` as a single string.

    Note: despite its history (the original docstring described filtering
    for today's date), this reads and returns the whole file unfiltered.
    """
    with open(fn) as handle:
        return handle.read()
def dfs_recursive(graph, start, visited=None):
    """
    Recursive depth-first traversal of `graph` (a mapping from node to a
    set of neighbour nodes), starting at `start`.

    Returns the set of all reachable nodes (including `start`).
    """
    visited = set() if visited is None else visited
    visited.add(start)
    for neighbour in graph[start] - visited:
        dfs_recursive(graph, neighbour, visited)
    return visited
from typing import Any
import re
def regex_method(term: str, key: str, value: Any) -> str:
    """ Map file search method 'regex': return `value` when the regex
    pattern `term` matches anywhere in `key`, otherwise the empty string.

    >>> regex_method(r"Hello (?:World|Werld|Squidward)", "Hello World", "squaids")
    'squaids'
    >>> regex_method("xxx", "Hello World", "squaids")
    ''
    """
    if re.search(term, key):
        return value
    return ""
def get_settings(args):
    """Build the settings dictionary from parsed commandline arguments."""
    return {
        "analysis_directory": args.input,
        "report_directory": args.output,
        "tokens": args.tokens,
        "language": args.language,
    }
def _arg_insert(args, arg, pos=0):
"""
Insert arg in args at given position.
:param tuple args: Some function arguments
:param any arg: Some function argument
:param int pos: Insert position. If None argument is appended.
:return: List with arguments where arg is inserted
:rtype: list
"""
args = list(args)
if pos is None:
args.append(arg)
else:
args.insert(pos, arg)
return args | badb8e55f630bda12effaa3467a7a1d37c231771 | 38,967 |
def GenerateSingleQueryParameter(name, param_type, param_value):
    """Generates a single valued named parameter.

    Args:
        name: name of the parameter.
        param_type: Type of the parameter. E.g. STRING, INT64, TIMESTAMP, etc.
        param_value: Value of this parameter.

    Returns:
        dict in the BigQuery named-query-parameter shape.
    """
    parameter = {'name': name}
    parameter['parameterType'] = {'type': param_type}
    parameter['parameterValue'] = {'value': param_value}
    return parameter
def decode_utf8(text: bytes) -> str:
    """Decode `text` as a UTF-8 string.

    Arguments:
        bytes {text} -- UTF-8 encoded bytes

    Returns:
        str -- decoded text
    """
    return str(text, 'utf-8')
def _preprocess_segmentation_dict(segmentation_dict):
    """Collapse each key's sorted index list into [start, end] run pairs.

    For every key, the list of indexes is sorted *in place* (side effect on
    the caller's data) and rewritten as a list of runs: a run of consecutive
    indexes becomes a two-element [first, last] pair; an isolated index
    becomes a single-element list.

    NOTE(review): the "isolated index mid-scan" branch also appends an empty
    list that the next iteration is expected to fill -- verify with callers
    that a trailing empty list cannot leak into the result.
    """
    final_dict = {}
    for k in segmentation_dict:
        final_dict[k] = [[]]
        final_part_indexes = final_dict[k]
        part_indexes = segmentation_dict[k]
        part_indexes.sort()  # NB: mutates the caller's list
        for index in range(len(part_indexes)):
            if len(final_part_indexes[-1]) == 0:
                # empty run slot: start a new run with this index
                final_part_indexes[-1].append(part_indexes[index])
            elif len(final_part_indexes[-1]) == 2:
                # previous run already closed: open a new one
                final_part_indexes.append([part_indexes[index]])
            elif len(final_part_indexes[-1]) == 1:
                # open run: decide whether this index continues or closes it
                if index != len(part_indexes) - 1:
                    this_index = part_indexes[index]
                    last_index = part_indexes[index - 1]
                    next_index = part_indexes[index + 1]
                    if (this_index == last_index + 1) and (this_index
                                                          == next_index - 1):
                        # interior of a run: keep scanning
                        pass
                    elif (this_index == last_index +
                          1) and (this_index != next_index - 1):
                        # run ends here: close the open pair
                        final_part_indexes[-1].append(this_index)
                    elif (this_index != last_index + 1) and (this_index !=
                                                             next_index - 1):
                        # isolated index: emit singleton plus an empty slot
                        final_part_indexes.append([this_index])
                        final_part_indexes.append([])
                    elif (this_index !=
                          last_index + 1) and (this_index == next_index - 1):
                        # start of a new run
                        final_part_indexes.append([this_index])
                else:
                    # last element: either closes the open run or is isolated
                    this_index = part_indexes[index]
                    last_index = part_indexes[index - 1]
                    if (this_index == last_index + 1):
                        final_part_indexes[-1].append(this_index)
                    else:
                        final_part_indexes.append([this_index])
    return final_dict | e3eea02380f98e53c848923ccc4b1261a68e3cc7 | 38,970 |
def DivideIfPossibleOrZero(numerator, denominator):
    """Return numerator/denominator, or 0.0 when the denominator is falsy."""
    return numerator / denominator if denominator else 0.0
import re
def _parse_proplist(data):
"""
Parse properties list.
"""
out = {}
for line in data.split("\n"):
line = re.split(r"\s+", line, 1)
if len(line) == 2:
out[line[0]] = line[1]
return out | 971b26aefab8d67cc7ee6ad66306e9f1e8de82d2 | 38,974 |
def ipset_to_cidr_list(ip_set_from):
    """Convert an IP Set into a list of CIDR strings.

    Args:
        ipSetFrom (IPSet): The IPSet to convert from
    Returns:
        list: List of IP CIDRs
    """
    return [str(cidr) for cidr in ip_set_from.iter_cidrs()]
def loadDataSet(fileName):
    """
    Load a tab-separated numeric data file.

    Parameters:
        fileName - path of the file to load
    Returns:
        dataMat - list of rows, each a list of floats
    """
    dataMat = []
    # 'with' guarantees the handle is closed (the original leaked it)
    with open(fileName) as fr:
        for line in fr:
            curLine = line.strip().split('\t')
            dataMat.append([float(token) for token in curLine])
    return dataMat
def _r_long(int_bytes):
"""Convert 4 bytes in little-endian to an integer."""
return int.from_bytes(int_bytes, 'little') | 160a4fcf1acb9831baed8f9ee9307359a9690965 | 38,977 |
def search_for_transcript(edge_IDs, transcript_dict):
    """ Given the edge IDs (hashable, e.g. a frozenset) that make up a
        query transcript, look for a match in the transcript dict.
        Return (gene_ID, transcript) if found, and (None, None) if not. """
    try:
        transcript = transcript_dict[edge_IDs]
        return transcript["gene_ID"], transcript
    except (KeyError, TypeError):
        # narrowed from the original bare `except:`:
        # KeyError  -> no such transcript (or entry lacks "gene_ID")
        # TypeError -> edge_IDs is unhashable (e.g. a plain set)
        return None, None
def coinify(atoms):
    """
    Convert the smallest unit of a coin into its coin value.

    Args:
        atoms (int): 1e8 division of a coin.
    Returns:
        float: The coin value, rounded to 8 decimal places.
    """
    whole_coins = atoms / 1e8
    return round(whole_coins, 8)
def transform_input_data(data):
    """Wrap every value of the input dict in a single-element list, the
    format expected by model.predict."""
    transformed = {}
    for key, value in data.items():
        transformed[key] = [value]
    return transformed
def docstrings_disabled():
    """Probe docstring -- stripped when docstrings are disabled."""
    # Under `python -OO` the interpreter removes all docstrings, so this
    # function's own __doc__ becomes None.
    return docstrings_disabled.__doc__ is None
def is_explicitly_view_dependent(df):
    """
    :param df: pandas dataframe with "tokens" columns
    :return: a boolean mask, True where the tokens mention a view-dependent
        spatial word (front/behind/left/right/...)
    """
    target_words = {'front', 'behind', 'back', 'right', 'left', 'facing', 'leftmost', 'rightmost',
                    'looking', 'across'}

    def mentions_target(tokens):
        return bool(target_words.intersection(tokens))

    return df.tokens.apply(mentions_target)
import os
def read_coro_floor_from_txt(datadir):
    """
    Read the coronagraph floor as a float from 'coronagraph_floor.txt' in
    the data directory.

    The file is expected to start with a 19-character label (e.g.
    "coronagraph_floor: ") followed by the value.

    :param datadir: str, path to data directory
    :return: coronagraph floor as float
    """
    floor_path = os.path.join(datadir, 'coronagraph_floor.txt')
    with open(floor_path, 'r') as handle:
        contents = handle.read()
    return float(contents[19:])
def huffman_decoding(data, tree):
    """
    Decode a bit-string using the Huffman tree built by huffman_encoding.

    Internal tree nodes have char == None and expose left_child /
    right_child; leaves carry the decoded character.

    :param data: string of '0'/'1' characters
    :param tree: root node of the Huffman tree
    :return: the decoded text
    """
    pieces = []
    node = tree
    for bit in data:
        child = node.left_child if bit == '0' else node.right_child
        if child.char is None:
            # internal node: keep walking down
            node = child
        else:
            # leaf reached: emit its symbol and restart from the root
            pieces.append(child.char)
            node = tree
    return ''.join(pieces)
import requests
import configparser
def conf_from_url(url):
    """Fetch and parse an INI-style conf file from an URL.

    Parameters
    ----------
    url : str
        conf file url (in a repo, make sure the "raw" url is passed)

    Returns
    -------
    configparser.ConfigParser
        The parsed conf object.
    """
    response_text = requests.get(url).text
    parsed = configparser.ConfigParser()
    parsed.read_string(response_text)
    return parsed
def tslash(apath):
    """
    Add a trailing slash (``/``) to a path if it lacks one.

    It doesn't use ``os.sep`` because you end up in trouble on windoze,
    when you want separators for URLs.
    """
    needs_slash = (
        apath
        and apath != '.'
        and not apath.endswith(('/', '\\'))
    )
    return apath + '/' if needs_slash else apath
def pad(msg, length, byte=b'\x00'):
    """
    Pad `msg` with `byte` up to `length` bytes for our USB messages
    (a no-op when msg is already long enough).
    """
    missing = length - len(msg)
    return msg + byte * missing
import re
def dehtml(html):
    """
    convert html to somewhat readable text.
    """
    # paragraph ends / line breaks become newlines, all other tags vanish
    html = re.sub(r"</p>|<br>", "\n", html)
    html = re.sub(r"</?\w+[^>]*>", "", html)
    # unescape the handful of entities we handle; &amp; must come last so
    # freshly produced ampersands are not re-interpreted
    for entity, char in (("&nbsp;", " "), ("&gt;", ">"), ("&lt;", "<"), ("&amp;", "&")):
        html = html.replace(entity, char)
    return html
def _prune_dockerfile(string, comment_char="#"):
"""Remove comments, emptylines, and last layer (serialize to JSON)."""
string = string.strip() # trim white space on both ends.
json_removed = "\n\n".join(string.split("\n\n")[:-1])
json_removed = "".join(json_removed.split())
return "\n".join(
row
for row in json_removed.split("\n")
if not row.startswith(comment_char) and row
) | 1db3646635c79ea06dfa1a133348ebc53fc19df6 | 38,993 |
def _read_words(fname, open_encoding='utf-8'):
"""
read all distinct words
:param fname:
:return: set, {'apple', 'banana', ...}
"""
ret_words = set()
with open(fname, 'r', encoding=open_encoding) as file:
for line in file:
field_list = line.split('\t')
source1, source2 = field_list[0], field_list[1]
for word in source1.split(' ')+source2.split(' '):
if word:
ret_words.add(word)
return ret_words | e26172ef55d967f66d3286d8369012302876a43d | 38,994 |
import importlib
def load_class_by_name(name: str):
    """Resolve a dotted path like ``package.module.Class`` to the class object."""
    module_path, _, class_name = name.rpartition('.')
    module = importlib.import_module(module_path)
    return getattr(module, class_name)
import hashlib
def _fingerprint(path):
"""Fingerprint a file."""
with open(path) as fil:
return hashlib.md5(fil.read().encode('utf-8')).hexdigest() | 65d200af8f8e2425f44cff5deeb60656ff572eb9 | 38,996 |
def movieTitle_to_id(movies_titles_list, movies):
    """
    Map a list of movie titles to the corresponding movie ids.

    ``movies`` is a DataFrame-like object with ``movieId`` and ``title``
    columns; the first id whose title matches is used for each entry.
    """
    ids = []
    for title in movies_titles_list:
        matches = list(movies['movieId'][movies['title'] == title])
        ids.append(matches[0])
    return ids
def retornar_caminho_imagem_post(instance, filename):
    """Return the upload path for images attached to a post.

    The uploaded file keeps its original name under the post's folder.
    (The previous body ignored *filename* and emitted a literal
    ``(unknown)`` placeholder in the path.)
    """
    return f'posts/{instance.id}/imagens/{filename}'
def validate_submission(y_pred_file):
    """
    Validate that y_pred file is a valid prediction file.
    Args:
        y_pred_file: Predicted values object in sample_submission format
    Returns:
        bool: Validation succeeded, if false a detailed error message will be
              supplied
    Raises:
        Any exception will be caught and str(exc) presented to the students
    """
    # Exactly the two expected columns, in order (covers both the count and
    # the name checks of the original without the `all(...) != True` idiom).
    if list(y_pred_file.columns) != ['User-ID', 'ISBN']:
        raise ValueError("Make sure you have only two columns in your dataset "
                         "with names User-ID and ISBN!")
    # Every user must have exactly 10 recommendations.
    counts = y_pred_file.groupby('User-ID').count().ISBN.unique()
    if len(counts) != 1 or counts[0] != 10:
        raise ValueError("You have to submit 10 (and only 10) books per user!")
    if len(y_pred_file['User-ID'].unique()) != 589:
        raise ValueError("Make sure you have all test users in your submission!")
    return True
def getGlobalRadixInfo(n):
    """
    Decompose *n* for a multi-kernel global FFT using 128 as the base radix.

    For n larger than what a local-memory FFT can handle, multiple kernel
    launches are needed; n is decomposed into large base radices, e.g.
    262144 = 128 x 128 x 16, one kernel launch per factor.  Each base radix
    B is further split as B = r1 * r2 (r1 a power of two, r2 <= r1) so the
    corresponding base FFT fits into one kernel launch using in-register
    FFTs plus local-memory transposes.  Different base radices and splits
    produce different kernels with different performance; this helper is
    fixed to a base radix of 128.

    Returns (radix, R1, R2): the list of base radices and their splits.
    """
    base_radix = min(n, 128)

    # Peel off base_radix factors until the remainder fits.
    radix = []
    remaining = n
    while remaining > base_radix:
        remaining //= base_radix
        radix.append(base_radix)
    radix.append(remaining)

    # Split each base radix B into r1 * r2 with r1 a power of two, r2 <= r1.
    R1, R2 = [], []
    for B in radix:
        if B <= 8:
            R1.append(B)
            R2.append(1)
        else:
            r1, r2 = 2, B // 2
            while r2 > r1:
                r1 *= 2
                r2 = B // r1
            R1.append(r1)
            R2.append(r2)
    return radix, R1, R2
def play_name(obj):
    """Given a task or block, return the name of its parent play.

    Iteratively follows ``_parent`` links until an object carrying a
    ``_play`` attribute is reached.  Loosely inspired by
    ansible.playbook.base.Base.dump_me.
    """
    # pylint: disable=protected-access; Ansible gives us no sufficient public
    # API to implement this.
    while obj and not hasattr(obj, '_play'):
        obj = getattr(obj, '_parent')
    return obj._play.get_name() if obj else ''
def digitSum(number):
    """
    Return the sum of the decimal digits of *number*.

    The sign is ignored, so negative inputs work too.  (Previously a
    negative input never terminated: in Python ``-1 // 10 == -1`` and
    ``-1 % 10 == 9``, so the loop spun forever.)
    """
    number = abs(number)
    total = 0
    while number:
        total += number % 10  # take the last digit
        number //= 10         # drop the last digit
    return total
def data_calculation(sites):
    """
    param sites: list of website data dicts.
    Returns the sum of every item's ``'value'`` entry.
    """
    return sum(item['value'] for item in sites)
def to_csv(data):
    """Render a list of flat dictionaries as a CSV string.

    Column order follows the first row's keys.  Numbers are emitted bare,
    lists are comma-joined inside one quoted cell, everything else is
    double-quoted.  Returns "" for empty or non-list input.

    NOTE(review): values containing double quotes are not escaped —
    presumably upstream data never contains them; confirm.
    """
    if not data or not isinstance(data, list):
        return ""
    header = list(data[0].keys())
    lines = ['"' + '","'.join(header) + '"']
    for element in data:
        cells = []
        for key in header:
            value = element[key]
            if isinstance(value, (int, float)):
                cells.append('{}'.format(value))
            elif isinstance(value, list):
                cells.append('"' + ','.join(value) + '"')
            else:
                cells.append('"{}"'.format(value))
        lines.append(','.join(cells))
    return '\n'.join(lines) + '\n'
def extract_indices_from_dependencies(dependencies):
    """ Extract all token positions from dependency triples.
    Input example:
        [[8, 'cop', 7], [8, 'nsubj', 6]]
    Output example:
        [6, 7, 8]
    """
    # sorted() already returns a list — the original wrapped it in list()
    # again; a set comprehension replaces the two-add loop.
    positions = {pos for governor, _, dependent in dependencies
                 for pos in (governor, dependent)}
    return sorted(positions)
def _qs_concatenate(*args):
"""Returns a list containing the extension of each list in args and the
appension of each non-list item in args"""
final = [] # k7
for item in args: # 3
# k8
final.extend(item) if isinstance(item, list) else final.append(item)
return final # k9 | ecacac7389c6fd5dfbd7762a57e03eb1b0e4d20e | 39,007 |
def dfdb(B, E):
    """
    Partial derivative of f = B**E with respect to the base B:
    df/dB = E * B**(E - 1).
    """
    return E * B ** (E - 1)
from bs4 import BeautifulSoup
def parse_htmllist(baseurl, extension, content):
    """Return the URLs from an HTML directory listing whose href ends
    with *extension*, prefixed with *baseurl*."""
    soup = BeautifulSoup(content, "html.parser")
    links = []
    for anchor in soup.find_all("a"):
        href = anchor.get("href")
        if href.endswith(extension):
            links.append(baseurl + "/" + href)
    return links
def insert_doc(doc, new_items):
    """Insert ``new_items`` into the beginning of the ``doc``.

    The strings in ``new_items`` are placed right after the two-line
    *Parameters* header but before the existing documentation.

    Parameters
    ----------
    doc : str
        The existing docstring being extended.
    new_items : list
        Strings to insert into ``doc``.
    """
    lines = doc.split('\n')
    # Keep the two header lines, then the new entries, then the rest;
    # joining with newlines reproduces the original append/pop dance.
    merged = lines[:2] + list(new_items) + lines[2:]
    return '\n'.join(merged)
def _get_encoder_dimension(encoder):
"""Find dimensionality of encoded vectors.
Args:
encoder: Object implementing the encode() method which takes
a list of strings as input and returns a list of
numpy vectors as output.
Returns:
dimension: Integer size of the encoded vectors.
"""
vector = encoder.encode(['test sentence'])
dimension = vector[0].shape[0]
return dimension | cef789252f4dd9975f1e1bddc5175bbdd49d9220 | 39,013 |
def _capture_permutation_importance(perm_importance_df, n_important_features=None):
"""
"""
top_imp_features_df = (
perm_importance_df[["feature", "weight"]]
.groupby("feature")
.mean()
# .sort_values(by="importance", ascending=False)[:n_important_features]
.sort_values(by="weight", ascending=False)
.reset_index()
)
return top_imp_features_df | c9bd47f9f17541f5ab71222ac77e71a3956f6ae5 | 39,015 |
import json
def manifest():
    """Read and return the test manifest at ``tests/manifest.json``."""
    with open("tests/manifest.json") as handle:
        return json.load(handle)
def bounding_box_pt(coords):
    """
    Given a set of coordinates of points, find the bounding box.
    Then calculate, in this bounding box, for each cell, the point at the minimum Manhattan distance from the
    point. If two or more points are at the same minimum Manhattan distance from the point, then it is set to -1.
    Finally, count the number of cells per point. We are only interested in finite areas, so if a point falls on the
    boundary of the bounding box, it is infinite and we set its area to -1.
    :param coords: the coordinates of the points
    :return: the size (cell count) of the largest finite area, or None if every area is infinite.
        NOTE: despite the original description, this is the area *size*, not the index of the winning point.
    # This shows how to consider the last element, we need to add one to the point count.
    # We demonstrate this through the explicit case and then a couple of shuffles.
    >>> bounding_box_pt([(1, 1), (6, 1), (3, 8), (4, 3), (5, 5), (9, 8)])
    17
    >>> bounding_box_pt([(1, 1), (6, 1), (3, 8), (4, 3), (9, 8), (5, 5)])
    17
    >>> A = [(1, 1), (6, 1), (3, 8), (4, 3), (9, 8), (5, 5)]
    >>> from random import shuffle
    >>> shuffle(A)
    >>> bounding_box_pt(A)
    17
    >>> shuffle(A)
    >>> bounding_box_pt(A)
    17
    """
    # The number of points, and split the coordinates into x-values and y-values.
    # The min/max of the x-values and y-values give the corners of the bounding box.
    numps = len(coords)
    xs, ys = list(zip(*coords))
    xmin, xmax = min(xs), max(xs)
    ymin, ymax = min(ys), max(ys)
    # Shift the coordinates so they fall within the bounding box anchored at (0, 0).
    coords_adj = [(x - xmin, y - ymin) for (x, y) in coords]
    # Set up an array (x, y) for all points in the bounding box, holding a pair (p, d) such that:
    # - p indicates the index of the point closest to adjusted position (x, y); and
    # - d indicates the distance via the Manhattan metric from point p to adjusted position (x, y).
    # If two points are minimally equidistant to (x, y), then we set its p to -1, indicating that it is
    # out of play.
    # minpts[x][y] holds pair (p, d) representing the minimum distance seen so far, as point p having distance d.
    # xmax + ymax + 2 exceeds any possible Manhattan distance inside the box, so it acts as "infinity".
    minpts = [[(0, xmax + ymax + 2)] * (ymax - ymin + 1) for _ in range(xmax - xmin + 1)]
    # Iterate over all the adjusted coordinates, find their distances to the (x, y) positions, and determine if
    # this is better, or minimally equidistant, in which case, we set the point to -1.
    # (zip truncates at len(coords_adj), so the +1 in the range is harmless here.)
    for (p, c) in zip(range(len(coords_adj)+1), coords_adj):
        # p is the point index, and c are the adjusted coordinates.
        cx, cy = c
        # Iterate over all the points in the bounding box and check to see if we should modify minpts by the above
        # stated rules.
        for (x,y) in [(x, y) for x in range(xmax - xmin + 1) for y in range(ymax - ymin + 1)]:
            # The Manhattan distance from p to (x, y).
            dist = abs(cx - x) + abs(cy - y)
            # Minimally equidistant: set the point idx to -1.
            if dist == minpts[x][y][1]:
                minpts[x][y] = (-1, dist)
            elif dist < minpts[x][y][1]:
                minpts[x][y] = (p, dist)
    # We are no longer interested in distances, so drop the distance parameter and keep only the point.
    pts = [[e[0] for e in L1] for L1 in minpts]
    # Count the number of cells "won" by each point.
    # If a point lands on the outer border, it falls in an infinite area and is not a viable candidate.
    # It seems like we need the + 1 here to consider the last point for its area.
    # See the example above in bounding_box_pt doctests.
    pcounts = [0] * (numps + 1)
    for (x, y) in [(x, y) for x in range(xmax - xmin + 1) for y in range(ymax - ymin + 1)]:
        p = pts[x][y]
        if x == 0 or y == 0 or x == xmax - xmin or y == ymax - ymin:
            pcounts[p] = -1
        elif pcounts[p] >= 0:
            pcounts[p] += 1
    # The largest finite area size (the variable name is historical; it is a cell count, not a distance).
    maxdist = max(pcounts)
    # If the max dist is -1, then no point wins. All are infinitely large.
    if maxdist == -1:
        return None
    else:
        return maxdist
def selectionSort(values: list) -> list:
    """Sort *values* in place via selection sort (repeatedly selecting the
    smallest remaining element) and return the same list."""
    for target in range(len(values)):
        smallest = target
        for candidate in range(target + 1, len(values)):
            if values[candidate] < values[smallest]:
                smallest = candidate
        values[target], values[smallest] = values[smallest], values[target]
    return values
def max_nonadjacent_sum(values):
    """Finds the maximum sum of nonadjacent values.

    Classic DP: ``taken`` is the best sum ending with the current value,
    ``skipped`` the best sum that excludes it.  Starts at 0, so an empty
    or all-negative input yields 0.
    """
    taken, skipped = 0, 0
    for val in values:
        best_without = max(taken, skipped)
        taken = skipped + val
        skipped = best_without
    return max(taken, skipped)
import struct
def floatBitsToInt(b):
    """
    Type-pun a float into the unsigned integer holding its big-endian
    IEEE-754 single-precision bit pattern.
    """
    packed = struct.pack('>f', b)
    (bits,) = struct.unpack('>L', packed)
    return bits
import secrets
def _server_cookie_secret() -> str:
"""Symmetric key used to produce signed cookies. If deploying on multiple replicas, this should
be set to the same value across all replicas to ensure they all share the same secret.
Default: randomly generated secret key.
"""
return secrets.token_hex() | 18bb523a62f26c2a4620c280390b4e99e1743bf6 | 39,024 |
def lookup(obj):
    """Return the list of attribute and method names available on *obj*."""
    return dir(obj)
import os
def get_existing_labels(api_instance, key):
    """
    This service maintains its keyed namespace, so keys currently set on the
    node that are no longer honored need to be cleaned up.  This helper
    queries <api_instance> for the labels already present on the node named
    by the NODE_NAME environment variable.

    Returns:
        - a set of the label keys that start with ``key``.
    """
    node = api_instance.read_node(os.environ['NODE_NAME']).to_dict()
    labels = node['metadata']['labels']
    return {label for label in labels if label.startswith(key)}
import re
def json_lookup(k, path):
    """ Return the value at key k in file path.
    :param str k: json key
    :param str path: filepath to json file
    """
    # Because we know we will be looking up values in non-conforming json
    # files, we don't use the json module; we just scan the text line by line.
    # Compile once (the original rebuilt and compiled the pattern on every
    # line) and escape the key so regex metacharacters in it match literally.
    pattern = re.compile("^\\s*\"" + re.escape(k) + "\": \"(?P<v>.*)\".*$")
    with open(path, 'r') as f:
        for line in f:
            match = pattern.match(line)
            if match:
                return match.group('v')
    return None
import logging
def get_record(params, record_uid):
    """Return the referenced record from the cache, or None with a warning
    when the UID is blank, the cache is empty, or the UID is unknown."""
    record_uid = record_uid.strip()
    if not record_uid:
        logging.warning('No record UID provided')
        return
    if not params.record_cache:
        logging.warning('No record cache. Sync down first.')
        return
    try:
        return params.record_cache[record_uid]
    except KeyError:
        logging.warning('Record UID %s not found in cache.' % record_uid)
        return
def change_current_lang_and_return(lang):
    """Store *lang* as the module-level current language for text/speech
    recognition and return the value just stored."""
    global CURRENT_LANG
    CURRENT_LANG = lang
    return lang
def generate_extension_to_string_mapping(extensions):
    """Returns mapping function from extensions to corresponding strings."""
    case_template = ' case Extension::k{extension}:\n' \
                    ' return "{extension}";\n'
    cases = ''.join(case_template.format(extension=ext) for ext in extensions)
    return ('const char* ExtensionToString(Extension extension) {\n'
            ' switch (extension) {\n'
            + cases +
            ' };\n\n return "";\n}')
def long_of_the_tropical_mean_sun(x, val):
    """
    Longitude of the tropical mean sun.
    :param x: elapsed time in days
    :param val: daily rate (value for 1 day)
    :return: the accumulated value ``val * x``
    """
    return x * val
def make_report(df_station_geo, df_hourly, df_station_ranking):
    """
    Assemble the report mapping for JSON serialization.

    Keys of each contained frame should be station_id.
    """
    # A plain dict literal — wrapping one in dict() was redundant.
    return {
        "geo": df_station_geo,
        "hourly_breakdown": df_hourly,
        "ranking": df_station_ranking,
    }
def is_pixel_equal(img1, img2, x, y):
    """
    Decide whether the pixel at (x, y) is approximately equal in both
    images: each of the first three (RGB) channels must differ by less
    than the threshold.

    :param img1: first image
    :param img2: second image
    :param x: x position
    :param y: y position
    :return: True when the pixels match within the threshold
    """
    # Fetch the pixel from each image.
    pix1 = img1.load()[x, y]
    pix2 = img2.load()[x, y]
    threshold = 60
    # Bug fix: the original wrote abs(a - b < threshold) — abs() of a
    # *boolean* — so the check was effectively always true.  The abs()
    # must wrap the channel difference itself.
    return all(abs(c1 - c2) < threshold
               for c1, c2 in zip(pix1[:3], pix2[:3]))
def create_ngrams(kw_iterable, max_n=False):
    """Compute all ngrams (longest first) from a list of keywords, e.g.
    in> ['nice', 'red', 'wine']
    out> [
        ('nice', 'red', 'wine'),
        ('nice', 'red'),
        ('red', 'wine'),
        ('nice',),
        ('red',),
        ('wine',)
    ]

    ``max_n`` caps the ngram length; a falsy value (the default) means no
    cap.  (Previously the default made ``n <= max_n`` fail for every n, so
    the function always returned [] unless a cap was given — contradicting
    the example above.)
    """
    kw_count = len(kw_iterable)
    output = []
    for n in range(kw_count, 0, -1):
        if max_n and n > max_n:
            continue
        for start in range(kw_count - n + 1):
            output.append(tuple(kw_iterable[start:start + n]))
    return output
def arr_to_json_fixturen(array):
    """Convert given iterable to dict for json transformation.
    Desired format for django:
    {
        "id" : None,
        "model" : "ocdb.Sector",
        "fields" : {
            "name" : <mandatory>
            "fk_sector" : <optional> *
        }
    }
    * omitted for elements with no parents (e.g. continents)
    """
    data = {
        "id": None,
        "model": "ocdb.Sector",
        "fields": {"name": array[0]},
    }
    parent = array[1:]
    # Compare as a tuple: the original compared a *list* slice against the
    # tuple (None, None), which is never equal, so fk_sector was always set
    # even for parentless elements.
    if tuple(parent) != (None, None):
        data["fields"]["fk_sector"] = parent
    return data
def get_proposal_terms(search, tokens):
    """
    Walk the *search* dictionary of completion terms.  Keys are term
    strings; values are either sub-dictionaries of the same shape or None
    for leaf terms.

    :param dict search: The dictionary to iterate through looking for proposals.
    :param tokens: List of tokens split on the hierarchy delimiter.
    :type tokens: list, str
    :return: A list of strings to be used for completion proposals.
    :rtype: list
    """
    if isinstance(tokens, str):
        tokens = [tokens]
    head, rest = tokens[0], tokens[1:]
    subtree = search.get(head, {})
    if subtree:
        # Exact (non-leaf) match at this level: descend when tokens remain,
        # otherwise there is nothing further to propose.
        return get_proposal_terms(subtree, rest) if rest else []
    # No matching subtree: propose keys that extend the partial token.
    return [term for term in search if term.startswith(head) and term != head]
import aiohttp
async def download_file(url) -> bytes:
    """
    Asynchronously download the resource at *url* and return its raw bytes.
    """
    async with aiohttp.ClientSession() as http:
        async with http.get(url) as resp:
            return await resp.read()
import os
def system(cmd):
    """Echo *cmd* to stdout, then run it via os.system and return its status."""
    print(cmd)
    return os.system(cmd)
def soften(lbc):
    """
    Nonlinear softening function: piecewise power laws over decade bands,
    with each band's multiplier chosen to continue where the previous one
    maxes out.
    """
    if lbc < 1E2:
        return lbc / 1E2                                        # maxes out at 1
    if lbc < 1E3:
        return (lbc / 1E2) ** 0.5                               # maxes out at 10^(1/2)
    if lbc < 1E4:
        return 3.1622776601683795 * (lbc / 1E3) ** 0.33333333   # maxes out at 10^(5/6)
    if lbc < 1E5:
        return 6.812920690579613 * (lbc / 1E4) ** 0.25          # maxes out at 10^(13/12)
    if lbc < 1E6:
        return 12.115276586285882 * (lbc / 1E5) ** 0.2          # maxes out at 10^(77/60)
    return 19.20141938638802 * (lbc / 1E6) ** 0.16666667
import os
def file_size(fname: str) -> float:
    """
    :param fname: path to the file
    :return: its size in bytes (``st_size``)
    """
    return os.stat(fname).st_size
def get_station_daily_path(station_id):
    """
    Build the FTP path of a GHCN-Daily station file.

    :param station_id: station identifier
    :return: path string ending in ``<station_id>.dly``
    """
    return "/pub/data/ghcn/daily/all/" + str(station_id) + ".dly"
def DetectSite(source_area):
    """Identify the source website of the file.

    Explicit ``{{from ...}}`` attribution templates are checked first,
    then recognizable host names.  Returns None when nothing matches.
    """
    # `in` replaces the unidiomatic `.find(...) != -1` checks; the order of
    # the tests is significant and is preserved from the original.
    if '{{from vimeo' in source_area:
        return "Vimeo"
    elif '{{from youtube' in source_area:
        return "YouTube"
    elif 'videowiki.wmflabs.org' in source_area:
        return "VideoWiki"
    elif 'flickr.com/photos' in source_area:
        return "Flickr"
    elif 'vimeo.com' in source_area:
        return "Vimeo"
    elif 'youtube.com' in source_area:
        return "YouTube"
    return None
import re
def load_utterInfo(inputFile):
    """
    Load utterance annotations from an original IEMOCAP transcript file.

    Each matched record has the shape
    [START_TIME - END_TIME] TURN_NAME EMOTION [V, A, D]
    where [V, A, D] is [Valence, Arousal, Dominance]; the fields of every
    record are returned as a flat list of strings.
    """
    pattern = re.compile(
        "[\[]*[0-9]*[.][0-9]*[ -]*[0-9]*[.][0-9]*[\]][\t][a-z0-9_]*[\t][a-z]{3}[\t][\[][0-9]*[.][0-9]*[, ]+[0-9]*[.][0-9]*[, ]+[0-9]*[.][0-9]*[\]]",
        re.IGNORECASE,
    )  # noqa
    with open(inputFile, "r") as myfile:
        data = myfile.read().replace("\n", " ")
    records = []
    for match in pattern.findall(data):
        # Strip the brackets and turn every separator into a tab, then split.
        cleaned = match
        for old, new in (("[", ""), (" - ", "\t"), ("]", ""), (", ", "\t")):
            cleaned = cleaned.replace(old, new)
        records.append(cleaned.split("\t"))
    return records
def map_stype(mode: str):
    """Map the scattering type in PDFConfig to the stype in the meta in the parser."""
    if mode == 'neutron':
        return 'N'
    if mode in ('xray', 'sas'):
        return 'X'
    raise ValueError(
        "Unknown: scattering type: {}. Allowed values: 'xray', 'neutron', 'sas'.".format(mode)
    )
def sites_difference(site1, site2):
    """Return minimal sequence of nucleotides that should be added at the end
    of site1 to make site2 appear."""
    # Find the longest prefix of site2 that is already a suffix of site1.
    overlap = len(site2)
    while overlap > 0 and not site1.endswith(site2[:overlap]):
        overlap -= 1
    return site2[overlap:]
def _deserialize_obj(obj):
"""
Given a DeserializationObject, deserialise any tag fields
"""
# Convert any tag fields from m2m to string assignments
Model = obj.object.__class__
if hasattr(Model, '_retag_to_original'):
obj.object = obj.object._retag_to_original()
return obj | 8788b9b90e71459adac5d13f62f88be327957364 | 39,058 |
def to_alternating_case(string: str) -> str:
    """
    Swap the case of every letter: lowercase becomes uppercase and
    uppercase becomes lowercase; other characters pass through.
    :param string: input text
    :return: the case-swapped text
    """
    swapped = []
    for char in string:
        swapped.append(char.upper() if char.islower() else char.lower())
    return ''.join(swapped)
from calendar import month_abbr
def b_main(m):
    """
    Reformat a date regex match (groups: month number, day, year) into
    ``"<year> <Mon> <day>"`` using the abbreviated month name.
    """
    month, day, year = m.group(1), m.group(2), m.group(3)
    return '{} {} {}'.format(year, month_abbr[int(month)], day)
def _rexpr(expr, lev=0):
"""Convert rhythmic expression into prefix notation for evaluation."""
ops = ['+', '-', '*']
if lev < len(ops):
expr = expr.split(ops[lev])
for i, e in enumerate(expr):
expr[i] = _rexpr(e, lev+1)
if len(expr)>1:
# convert to prefix notation: 'q+q+q' => ['+', 'q', 'q', 'q']
expr = [ops[lev]] + expr
else:
expr = expr[0]
return expr | 07e50e43f7513f251c73a0a85e5ffd28993ebe26 | 39,061 |
import os
import io
def readfile(*parts):
    """Return contents of the file at *parts*, relative to this script's directory."""
    here = os.path.dirname(os.path.abspath(__file__))
    with io.open(os.path.join(here, *parts), 'r') as handle:
        return handle.read()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.