content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def daylight_saving(m, wd, d):
    """
    Determine whether the clock must be adjusted for DST.

    Assumes DST starts on the last Sunday of March and ends on the
    last Sunday of October.

    :param m: month (1-12)
    :param wd: week day (1-7, Sunday == 7)
    :param d: day of month (1-31)
    :return: 1 to shift the clock 1 hour forward, -1 to shift it
        1 hour backward, 0 when no adjustment is needed
    """
    # The last Sunday of a 31-day month must fall on day 25..31.
    last_sunday = wd == 7 and 25 <= d <= 31
    if m == 3 and last_sunday:
        return 1
    if m == 10 and last_sunday:
        return -1
    # The original fell through and returned None for e.g. m == 3 with
    # wd != 7; per the documented contract every other case is 0.
    return 0
import importlib
def dynamic_load(module_or_member):
    """
    Dynamically loads a module or a member of a module.

    If ``module_or_member`` is something like ``"a.b.c"``, performs
    ``from a.b import c``. If it is a plain name like ``"a"``,
    performs ``import a``.

    :param module_or_member: dotted name of a module or module member.
    :return: the imported module, or the requested member of a module.
    :raises AttributeError: if the trailing name is not an attribute of
        the imported module.
    """
    module_path, _, member_name = module_or_member.rpartition(".")
    if not module_path:
        return importlib.import_module(module_or_member)
    module = importlib.import_module(module_path)
    try:
        # The original tested "if not to_return", which also (wrongly)
        # rejected falsy members such as 0, "" or None.
        return getattr(module, member_name)
    except AttributeError:
        raise AttributeError(
            "{} has no attribute {}".format(module, member_name))
def get_site_id(file_name):
    """Extract the site-id portion of an image file name.

    Example name: ``Vid-000351067-00-06-2014-10-29-13-20.jpg`` -- the
    site id is the fixed-width field right after the ``Vid-`` prefix.

    :param file_name: image file name string
    :return: site id substring
    """
    start, stop = 4, 19
    return file_name[start:stop]
def validate_chunk_width(chunk_width):
    """Validate a chunk-width string; return a boolean.

    The value is expected to be a string holding either a single
    positive integer like ``'20'`` or a comma-separated list of
    positive integers like ``'20,30,16'``.

    :param chunk_width: candidate value (any type accepted)
    :return: True if the value is a valid chunk-width spec
    """
    if not isinstance(chunk_width, str):
        return False
    # str.split(',') always yields at least one element, so the
    # original's emptiness assertion was dead code.
    for elem in chunk_width.split(","):
        try:
            value = int(elem)
        except ValueError:
            # Narrowed from a bare "except", which would also have
            # swallowed KeyboardInterrupt / SystemExit.
            return False
        if value < 1:
            return False
    return True
def chop_layer(pretrained_dict, layer_name="logits"):
    """Drop all state-dict entries whose key mentions a given layer.

    See https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/2

    :param pretrained_dict: state dictionary of the pretrained model
    :param layer_name: substring naming the (terminal) layer to remove
    :return: a new dict without the matching keys (e.g. to avoid size
        mismatches when loading)
    """
    chopped_dict = {}
    for key, weight in pretrained_dict.items():
        if layer_name not in key:
            chopped_dict[key] = weight
    return chopped_dict
def update_odds_json_event(json, event):
    """Update the odds inside ``event`` from a fresh ``json`` payload.

    Selections are matched by ``id`` within the first market of each
    object; matched event selections get their ``odds`` overwritten.

    :param json: payload dict carrying the current odds
    :param event: event dict, updated in place
    :return: the modified event
    """
    # Index the incoming odds by selection id once instead of scanning
    # the payload for every event selection (was O(n*m)). For duplicate
    # ids the last payload occurrence wins, same as the original loop.
    odds_by_id = {s['id']: s['odds'] for s in json['markets'][0]['selections']}
    for event_selection in event['markets'][0]['selections']:
        if event_selection['id'] in odds_by_id:
            event_selection['odds'] = odds_by_id[event_selection['id']]
    return event
def GetId(datas: bytes, length: int):
    """Extract the substation address (ID) from a protocol frame.

    :param datas: raw frame bytes
    :param length: address length in bytes
    :return: little-endian integer address, or 0 for unknown frame types
    """
    start_byte = datas[0]
    if start_byte == 0x10:
        # Fixed-length frame: address field starts at offset 2.
        offset = 2
    elif start_byte == 0x68:
        # Variable-length frame: address field starts at offset 5.
        offset = 5
    else:
        return 0
    return int.from_bytes(datas[offset:offset + length], byteorder='little')
def process_file_list(file_list, verbose=False, **kwargs):
    """
    Function that takes a list containing the paths of the mesa models to process, and potentially some relevant
    parameters necessary for the processing. Check if the setup parameters provided in the kwargs are included in
    the list, and if not, add them.
    if ce_parameters is included in the file_list, it will convert the provided ce_parameters to dictionary.
    :param file_list: Pandas DataFrame containing at least 1 column with the path of the MESA models to process
    :param verbose: If True, print which parameters were added
    :param kwargs: all parameters that are required for processing the MESA models and their default values.
    :return: Pandas DataFrame containing the path to all models and the necessary parameters.
    """
    # ce_parameters is pulled out so the generic loop below does not handle it.
    ce_parameters = kwargs.pop('ce_parameters', None)
    # add extraction parameters to the file_list
    for setup_par in kwargs.keys():
        if setup_par not in file_list.columns:
            file_list[setup_par] = kwargs[setup_par]
            if verbose:
                print('Set default parameter: {} = {} to all models'.format(setup_par, kwargs[setup_par]))
    # ce_parameters is treated separately because it should be converted from string to dictionary if already included
    if ce_parameters is not None and 'ce_parameters' not in file_list.columns:
        # NOTE(review): every row shares the same dict instance -- mutating one
        # row's ce_parameters would affect all rows. Confirm downstream usage.
        file_list['ce_parameters'] = [ce_parameters for i in file_list['path']]
        if verbose:
            print('Set default parameter: ce_parameters = {} to all models'.format(ce_parameters))
    else:
        # SECURITY NOTE(review): eval() on stored strings executes arbitrary
        # code; if the values are plain dict literals, ast.literal_eval would
        # be safer. Also note this branch runs (and raises KeyError) when
        # ce_parameters is None AND the column is absent -- confirm intent.
        file_list['ce_parameters'] = [eval(p) for p in file_list['ce_parameters']]
    return file_list
def is_filename_char(x):
    """Return True if x is an acceptable filename character."""
    return x.isalnum() or x in ('-', '_')
def _infer_coreml_output_shape(tf_shape):
"""Infer CoreML output shape from TensorFlow shape.
"""
shape = []
if len(tf_shape) == 1:
shape = [tf_shape[0], 1, 1]
elif len(tf_shape) == 2:
if tf_shape[0] == 1:
# (B,C)
shape = [tf_shape[1]]
else:
shape = None
elif len(tf_shape) == 3:
# since output shape is not required by CoreML and rank-3 tensor in TF is ambiguous, we do not assign a shape
shape = None
elif len(tf_shape) == 4:
assert tf_shape[0] == 1, "Output 4D tensor's first dimension (Batch) " + \
"must be 1."
shape = [tf_shape[3], tf_shape[1], tf_shape[2]] #(C,H,W)
elif len(tf_shape) == 0: # scalar
shape = [1]
else:
raise ValueError('Unrecognized TensorFlow output shape ' + str(tf_shape))
return shape | 67bd6e366c10d772b027326c77fc9907d6048240 | 45,326 |
def carousel(app):
    """Fixture returning the carousel widget of the test app."""
    widget = app.carousel
    return widget
def mergeOverlap(ra, rb):
    """
    Merge two overlapping paired anchors into one.

    Each anchor is a 6-element record -- presumably
    [chrom, left_start, left_end, field3, right_start, right_end];
    the code indexes up to ra[5], so the original 5-field description
    was wrong. Index 3 is carried over from ``ra`` unchanged -- TODO
    confirm its meaning. The left interval (indices 1-2) and right
    interval (indices 4-5) are unioned; returns None when a merged
    interval collapses (end <= start).
    """
    nr = [
        ra[0],
        min([ra[1], rb[1]]),
        max([ra[2], rb[2]]),
        ra[3],
        min([ra[4], rb[4]]),
        max([ra[5], rb[5]])
    ]
    # NOTE(review): since the merged bounds are a union (min of starts,
    # max of ends), this collapse check can only fire for degenerate
    # inputs -- verify whether max(starts)/min(ends) was intended.
    if nr[2] <= nr[1] or nr[5] <= nr[4]:
        return None
    return nr
import os
def config():
    """Build download configuration for the COCO datasets.

    Returns
    ----------
    dataset_path:
        local directory the datasets are downloaded into
    filenames:
        archive file names to be downloaded
    urls:
        matching shortened download urls
    """
    dataset_path = os.path.join(os.path.expanduser('~'), '.yolk/datasets/coco')
    base_url = 'http://bit.ly/'
    slugs = (
        'yolk_coco_test2017_zip',
        'yolk_coco_train2017_zip',
        'yolk_coco_val2017_zip',
        'yolk_coco_annotation2017_zip',
    )
    urls = [base_url + slug for slug in slugs]
    filenames = [
        'test2017.zip',
        'train2017.zip',
        'val2017.zip',
        'annotations2017.zip',
    ]
    return dataset_path, filenames, urls
from typing import Callable
def create_handler_decorator(func: Callable, add_handler: Callable, name: str):
    """ Utility method to create the on_* decorators for each type of event.

    :param func: must be None -- guards against using the factory
        directly as a decorator instead of calling it first.
    :param add_handler: callback that registers the decorated function.
    :param name: decorator name, interpolated into the error message.
    :return: a decorator that registers and returns the function.
    """
    assert func is None, (
        f"{name} must be called before being used as a decorator. "
        # The second line was missing its f-prefix and printed a
        # literal "{name}".
        f"Add parenthesis: {name}()"
    )
    def decorator(func):
        add_handler(func)
        return func
    return decorator
def filter_df_by_cluster(df, clusters, number):
    """
    Select only the members of a defined cluster.

    :param df: pandas dataframe with samples/subjects as index and features as columns.
    :param dict clusters: mapping of cluster number to its member index labels
        (e.g. from get_dendrogram with div_clusters enabled).
    :param int number: cluster number (key into ``clusters``).
    :return: dataframe restricted to the rows belonging to that cluster.
    """
    members = clusters[number]
    mask = df.index.isin(members)
    return df[mask]
def get_default_max_worker_restart(config):
    """Resolve the effective --max-worker-restart value.

    When the option was not given, falls back to ``numprocesses * 4``
    to keep crashing collections from restarting workers endlessly
    (#226); returns None when workers are not in use either.
    """
    raw = config.option.maxworkerrestart
    if raw is not None:
        return int(raw)
    if config.option.numprocesses:
        # Reasonable default when workers are in use (#226).
        return config.option.numprocesses * 4
    return None
def add_subtext(subtext, soup):
    """
    Append the ".subtext" nodes found in ``soup`` to ``subtext``.

    Used when scraping pages 2 and 3 of hacker news; mutates and
    returns the accumulator list.
    """
    subtext.extend(soup.select(".subtext"))
    return subtext
def set_bit(val, bitNo, bit):
    """ Return ``val`` with bit number ``bitNo`` forced to ``bit``
    (0 or 1); all other bits are left untouched. """
    mask = 1 << bitNo
    cleared = val & ~mask
    return cleared | mask if bit else cleared
import itertools
def get_strategy_map(strategy_list, teams):
    """
    Enumerate every strategy profile of the game.

    :param strategy_list: available strategies (each carrying a ``name``).
    :param teams: number of players/teams.
    :return: list of dicts ``{'name': ..., 'map': {index: strategy}}``,
        one per possible strategy profile.
    """
    strategy_maps = []
    for profile in itertools.product(strategy_list, repeat=teams):
        # Reversed to keep the order preferred by Gambit.
        ordered = list(reversed(list(profile)))
        profile_map = {index: strategy for index, strategy in enumerate(ordered)}
        name = "_".join(strategy.name for strategy in ordered)
        strategy_maps.append({'name': name, 'map': profile_map})
    return strategy_maps
import numpy
def erosion(state, n):
    """Erosion function for species *n*.

    Reads the physical constants from ``state.problem_data`` and returns
    a per-cell erosion rate array that is non-zero only where the Shields
    parameter exceeds its critical value.

    :param state: solver state; ``state.q[0]`` is used as depth and
        ``state.q[1]`` as momentum -- TODO confirm variable ordering.
    :param n: species index into the per-species parameter lists.
    """
    phi = state.problem_data['phi']
    g = state.problem_data['grav']
    dry_tol = state.problem_data['dry_tolerance']
    mannings_n = state.problem_data['mannings_n']
    s = state.problem_data['s'][n]
    d = state.problem_data['d'][n]
    theta_c = state.problem_data['theta_c'][n]
    # Velocity is zeroed in dry cells to avoid dividing by a tiny depth.
    u = numpy.where(state.q[0, :] > dry_tol,
                    state.q[1, :] / state.q[0, :],
                    numpy.zeros(state.q.shape[1]))
    # NOTE(review): shear-velocity expression taken as-is -- presumably a
    # Manning-friction based u*; verify against the model's reference.
    u_star = state.q[0, :] * numpy.sqrt(g * state.q[0, :]) * numpy.abs(mannings_n**2 * u**2 / state.q[0, :]**(4.0 / 3.0))
    # Shields parameter for this species.
    theta = u_star**2 / (s * g * d)
    # Erosion only where the critical Shields parameter is exceeded.
    return numpy.where(theta >= theta_c,
                       phi * (theta - theta_c) * u / (state.q[0, :] * d**(0.2)),
                       0.0)
from io import StringIO
def get_q2_comment_lines(md_file_loc):
    """Return a list of 0-indexed line numbers that start with "#q2:".

    These lines should be skipped when parsing the file outside of Q2
    (e.g. the result can be fed to pandas.read_csv() as ``skiprows``).

    Notes:
        - The first line of the file (assumed to be the header) is never
          checked.
        - Scanning stops at the first non-header line that doesn't start
          with "#q2:", so only a leading run of comment directives (such
          as "#q2:types") is detected.
        - ``md_file_loc`` may be a StringIO (iterated directly, then
          rewound so it can be re-read) or an actual filename that will
          be opened with open().
    """
    def iterate_over_file_obj_lines(file_obj):
        q2_lines = []
        # enumerate() replaces the original hand-maintained counter.
        for line_num, line in enumerate(file_obj):
            # Don't check for a #q2: comment on the first line of the
            # file, since the first line (should) define the file header.
            if line_num > 0:
                if line.startswith("#q2:"):
                    q2_lines.append(line_num)
                else:
                    # We assume that all #q2: lines occur at the start of
                    # the file; stop at the first other line.
                    break
        return q2_lines

    # isinstance instead of the original "type(...) == StringIO" so
    # StringIO subclasses are handled as well.
    if isinstance(md_file_loc, StringIO):
        q2_lines = iterate_over_file_obj_lines(md_file_loc)
        # Rewind so the caller can read through this StringIO again --
        # https://stackoverflow.com/a/27261215/10730311
        md_file_loc.seek(0)
        return q2_lines
    with open(md_file_loc, "r") as md_file_obj:
        return iterate_over_file_obj_lines(md_file_obj)
import string
import random
def generate():
    """
    Generate a random key of 45-50 characters drawn from ASCII letters,
    digits and punctuation, choosing characters with a cryptographic RNG.
    """
    alphabet = string.ascii_letters + string.digits + string.punctuation
    rng = random.SystemRandom()
    length = random.randint(45, 50)
    return ''.join(rng.choice(alphabet) for _ in range(length))
def list_found_duplicates(in_list):
    """
    Check list for duplicate entries. Return True if duplicates found,
    and False if no duplicates found.
    >>> list_found_duplicates(["hallo", "hello"])
    False
    >>> list_found_duplicates(["hallo", "hello", "hollo", "hello"])
    True
    """
    # A set collapses duplicates, so a size difference means repeats.
    return len(set(in_list)) != len(in_list)
def kappa(A: float, B: float, C: float):
    """
    Ray's asymmetry parameter from the A, B, C rotational constants.

    The value ranges between the prolate (+1) and oblate (-1) limits
    and measures how asymmetric a molecule is.

    Parameters
    ----------
    A, B, C: float
        Rotational constant in MHz for each respective axis

    Returns
    -------
    kappa: float
        Ray's asymmetry parameter
    """
    numerator = 2 * B - A - C
    denominator = A - C
    return numerator / denominator
def mark_coverage(percentage):
    """Return a mark from A to F based on the passed tests percentage.

    :param percentage: Percentage of passed unit tests (0-100).
    :type percentage: float
    :return: Mark from A to F.
    :rtype: str
    """
    mark_table = {
        "A": (90, 101),
        "B": (80, 90),
        "C": (70, 80),
        "D": (60, 70),
        # Upper bound 60 (exclusive): the original (0, 59) left
        # percentages in [59, 60) without any mark (returned None).
        "F": (0, 60),
    }
    for mark, (low, high) in mark_table.items():
        if low <= int(percentage) < high:
            return mark
def remove_fx_variables(cube):
    """
    Remove fx variables present as cell measures or ancillary variables in
    the cube containing the data.

    Parameters
    ----------
    cube: iris.cube.Cube
        Iris cube with data and cell measures or ancillary variables.

    Returns
    -------
    iris.cube.Cube
        The same cube, mutated in place, without cell measures or
        ancillary variables.
    """
    # NOTE(review): removal while iterating assumes cell_measures() /
    # ancillary_variables() return fresh lists (true for iris) -- confirm
    # if the iris version changes. Removal is keyed by standard_name,
    # which presumably is unique per cube.
    if cube.cell_measures():
        for measure in cube.cell_measures():
            cube.remove_cell_measure(measure.standard_name)
    if cube.ancillary_variables():
        for variable in cube.ancillary_variables():
            cube.remove_ancillary_variable(variable.standard_name)
    return cube
import requests
from bs4 import BeautifulSoup
def parsing_beautifulsoup(url):
    """
    Fetch a URL and parse the response with BeautifulSoup.

    :param url: URL to parse (here, a YES24 link)
    :return: BeautifulSoup soup object built with the 'html.parser' backend
    """
    # NOTE(review): no timeout or status-code check on the HTTP call --
    # a failed request still yields a .text body that gets parsed.
    data = requests.get(url)
    html = data.text
    return BeautifulSoup(html, 'html.parser')
import math
def _get_dct_norm_factor(n, inorm, dct_type=2):
"""Normalization factors for DCT/DST I-IV.
Parameters
----------
n : int
Data size.
inorm : {'none', 'sqrt', 'full'}
When `inorm` is 'none', the scaling factor is 1.0 (unnormalized). When
`inorm` is 1, scaling by ``1/sqrt(d)`` as needed for an orthogonal
transform is used. When `inorm` is 2, normalization by ``1/d`` is
applied. The value of ``d`` depends on both `n` and the `dct_type`.
dct_type : {1, 2, 3, 4}
Which type of DCT or DST is being normalized?.
Returns
-------
fct : float
The normalization factor.
"""
if inorm == 'none':
return 1
delta = -1 if dct_type == 1 else 0
d = 2 * (n + delta)
if inorm == 'full':
fct = 1 / d
elif inorm == 'sqrt':
fct = 1 / math.sqrt(d)
else:
raise ValueError('expected inorm = "none", "sqrt" or "full"')
return fct | b6f8e07b6d708f78d616f4a357e94ec3249676b4 | 45,347 |
import argparse
def getCommandArgs():
    """This function processes the sys.argv array to get command line arguments.

    Required: -f/--fasta input fasta file and -s/--sequence file of
    name/sequence pairs. Optional: -o output file, -b bed file of
    sequence locations, -n flag to skip the reverse complement.

    :return: the parsed argparse.Namespace
    """
    parser = argparse.ArgumentParser(description='Estimate the number of occurences of a given sequence in a fasta file.')
    parser.add_argument('-f','--fasta', metavar='fasta', dest='fasta', type=str, help='Input fasta file', required=True)
    parser.add_argument('-s','--sequence', metavar='fileOfSequences', dest='seqs', type=str, help='File with sequences (2 cols, name and sequence)', required=True)
    parser.add_argument('-o','--output', metavar='outFile', dest='out', type=str, help='Output file', default="")
    parser.add_argument('-b', '--make-bed', metavar="bedFile", dest='bed', type=str, default="", help="Output a bed file with locations of sequence.")
    parser.add_argument('-n', '--no-complement', dest="nocomp", action="store_true")
    # parse_args() reads sys.argv and exits the process on bad input.
    args = parser.parse_args()
    return (args)
from typing import Type
def class_to_header_name(type_: Type) -> str:
    """
    Infer the canonical HTTP header name from a class.

    Honors an explicit ``__override__`` attribute when set; otherwise
    derives the name from the class name, trimming a single leading or
    trailing underscore and inserting a dash before each inner capital
    (e.g. a class named ``XContentTypeOptions`` yields
    ``X-Content-Type-Options``).
    """
    override = getattr(type_, "__override__", None)
    if override is not None:
        return override
    raw_name = str(type_).split("'")[-2].split(".")[-1]
    if raw_name.endswith("_"):
        raw_name = raw_name[:-1]
    if raw_name.startswith("_"):
        raw_name = raw_name[1:]
    pieces = []
    for letter in raw_name:
        # Dash before every capital except the leading one.
        if letter.isupper() and pieces:
            pieces.append("-")
        pieces.append(letter)
    return "".join(pieces)
def resolve_time(delta: int, sep: str = "") -> str:
    """
    Convert a duration in seconds to a human-friendly string.

    :param delta: time in seconds (coerced to int if needed)
    :param sep: string separator placed between the fields
    :return: string such as ``"1y2d3h4m5s"``; zero fields other than
        seconds are omitted, and seconds are always shown.
    """
    if type(delta) is not int:
        delta = int(delta)
    # Use the same 365-day year for both the test and the subtraction;
    # the original tested against 31536000 but subtracted 31556926 (a
    # tropical year), which could drive the remainder negative.
    years, delta = divmod(delta, 60 * 60 * 24 * 365)
    days, delta = divmod(delta, 60 * 60 * 24)
    hours, delta = divmod(delta, 60 * 60)
    minutes, delta = divmod(delta, 60)
    # Form the calculation into a string, omitting zero fields.
    fields = []
    if years:
        fields.append(f"{years}y")
    if days:
        fields.append(f"{days}d")
    if hours:
        fields.append(f"{hours}h")
    if minutes:
        fields.append(f"{minutes}m")
    fields.append(f"{delta}s")
    return sep.join(fields)
import numpy
def _weighted_sum_rows(x_matrix, scaling_vector):
"""Return sum of rows in x_matrix, each row scaled by scalar in scaling_vector."""
return numpy.sum(x_matrix * scaling_vector[:, numpy.newaxis], axis=0) | 95533474833de454824facf14ed910b9261ae0b2 | 45,352 |
import uuid
def is_string_uuid(val):
    """
    Checks if the given value is a non-empty string holding a valid UUID.

    :param val: candidate value
    :return: boolean -- True only for str values that uuid.UUID accepts
    """
    if not val or not isinstance(val, str):
        return False
    try:
        uuid.UUID(val)
    except ValueError:
        return False
    return True
import typing
import subprocess
import os
def recover_process(pid: int, match_args: str) -> typing.Optional[str]:
    """Detect/kill a dangling process from an earlier devluster.

    :param pid: pid recorded by the earlier run.
    :param match_args: expected command-line prefix; the process is only
        killed when its command starts with this string.
    :return: a human-readable status line, or None when no such pid exists.
    """
    # ps args, formatting, and exit code all tested on mac and linux
    cmd = ["ps", "-p", str(pid), "-o", "command"]
    p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
    assert p.stdout
    if p.returncode != 0:
        # No such pid found
        return None
    # Ignore the header; there's no cross-platform way to not print it.
    command_found = p.stdout.splitlines()[1].strip().decode("utf8")
    if not command_found.startswith(match_args):
        # The pid matches but the args do not. Better not to kill it.
        return f"chose not to kill pid {pid} whose args don't match '{match_args}'\n"
    # Kill the process with SIGKILL (9): it gets no chance to clean up.
    os.kill(pid, 9)
    return f"killed old pid {pid} running '{match_args}'\n"
def secondsFromTime(tstr):
    """ Return the given time string in seconds.

    Accepts "H:M:S", "M:S" or a plain number; any fields beyond the
    first three are ignored. Returns 0.0 when the string cannot be
    parsed, matching the original best-effort behavior.
    """
    parts = tstr.split(':')
    try:
        if len(parts) >= 3:
            return float(parts[0]) * 3600 + float(parts[1]) * 60.0 + float(parts[2])
        if len(parts) == 2:
            return float(parts[0]) * 60 + float(parts[1])
        return float(tstr)
    except ValueError:
        # Narrowed from the original bare "except" clauses; any
        # malformed field falls back to 0.0.
        return 0.0
from typing import Counter
def most_common(list):
    """Returns the most common value in the given list."""
    # most_common(1) yields [(value, count)] for the top entry.
    ((value, _count),) = Counter(list).most_common(1)
    return value
def expected_vhost_files(plones, inventory_hostname):
    """ Given a webserver_virtualhosts list, return the set of
    filenames we expect to find in nginx/sites-enabled.

    Each file is named ``<protocol>_<hostname>`` with dots replaced by
    underscores; hostname defaults to the inventory hostname and
    protocol defaults to http.
    """
    return {
        "{0}_{1}".format(
            vhost.get('protocol', 'http'),
            vhost.get('hostname', inventory_hostname).replace('.', '_'),
        )
        for aplone in plones
        for vhost in aplone['webserver_virtualhosts']
    }
def _get_type(obj):
"""Return the type of an instance (old and new-style)"""
return getattr(obj, '__class__', None) or type(obj) | e354a397e573f89a998ee6a25b1971d1d8cd578b | 45,363 |
import json
def pretty_print_dict(model_params):
    """Render model parameters as an indented, Python-style string.

    Drops any 'pooling_func' entry before serializing, and rewrites
    JSON's ``null`` as ``None``. NOTE: the replacement is textual, so a
    value containing the substring "null" is rewritten too.

    Parameters
    ----------
    model_params : dict
        Model parameter mapping.

    Returns
    -------
    str
        Pretty-printed representation.
    """
    printable = {
        key: value for key, value in model_params.items()
        if key != 'pooling_func'
    }
    return json.dumps(printable, indent=4).replace('null', 'None')
def compute_all_squares(of_range):
    """Compute the squares of all the natural numbers in a range."""
    return [n * n for n in range(of_range)]
def get_folds(fold_dir):
    """
    Read a fold file and return the per-fold lists of .mol2 file names.

    Each line of the file describes one fold as a space-separated list
    of file names.

    :param fold_dir: path of the fold description file
    :return: list of folds, each a list of file-name strings
    """
    # Context manager: the original leaked the open file handle.
    with open(fold_dir) as fp:
        lines = fp.readlines()
    num_folds = len(lines)  # each line contains the files of one fold
    print('there are {} folds for {}'.format(num_folds, fold_dir))
    folds = []
    for line in lines:
        # rstrip('\n') instead of slicing off the last character, which
        # corrupted a final line that lacked a trailing newline.
        folds.append(line.rstrip('\n').split(' '))
    return folds
def evaluate_path(trace, next_state, maze_params):
    """
    Classify the path the agent took through the maze.

    :param trace: sequence of (state, action, reward) steps
    :param next_state: state reached after the trace
    :param maze_params: maze parameters (goal/door/exploration states,
        expected move counts, start state)
    :return: one of 'short path (open door)', 'long path',
        'short path (closed door)' or 'NA'
    :raises ValueError: when next_state matches no known category
    """
    # Agent found goal
    if next_state in maze_params.GOAL_STATES:
        # Walk backwards in the maze to see which route reached the goal.
        for state, action, reward in reversed(trace):
            if state in maze_params.EXPLORATION_STATES:
                if len(trace) == maze_params.MOVES_EXPLORE:
                    return 'short path (open door)'
                else:
                    return 'NA'
            else:
                if len(trace) == maze_params.MOVES_EXPLOIT:
                    return 'long path'
                else:
                    return 'NA'
    elif next_state in maze_params.CLOSED_DOOR_STATES or next_state == maze_params.START_STATE:
        return 'short path (closed door)'
    else:
        # The original built this exception but never raised it.
        raise ValueError('ERROR: CHECK PATH EVALUATION FUNCTION')
def NouS(txt = 'S ou N?'):
    """Prompt repeatedly until the user answers S or N (Yes or No).

    Args:
        txt (str, optional): Prompt text printed when asking. Defaults to 'S ou N?'.
    """
    while True:
        try:
            # First character of the answer, upper-cased.
            resp = str(input(txt)[0]).upper().strip()
            print(resp)
        except ValueError:
            # NOTE(review): an empty answer raises IndexError (from [0]),
            # which this clause does not catch -- confirm intent.
            print('Você não digitou uma String')
            continue
        if resp not in "SN":
            print('Digite apenas Sim ou Não')
            continue
        else:
            return(resp)
def channel_reshape(x, channel_shape):
    """ (B, *, H, W) to (B, custom, H, W) """
    batch = x.shape[:1]
    spatial = x.shape[-2:]
    return x.reshape(batch + channel_shape + spatial)
import torch
def get_num_level_proposals_inside(num_level_proposals, inside_flags):
    """
    Count the inside proposals per pyramid level.

    :param num_level_proposals: proposals per level (split sizes)
    :param inside_flags: flat tensor flagging which proposals are inside
    :return: list of per-level counts as Python ints
    """
    per_level_flags = torch.split(inside_flags, num_level_proposals)
    return [int(level_flags.sum()) for level_flags in per_level_flags]
def load_lfw_identities(lfw_identities_files):
    """Parse an identities file into per-identity and per-image mappings.

    input: lfw_identities_files - path to the file containing the IDs,
           one ``image_name person_id`` pair per line; lines with any
           other field count are skipped.
    output: identity_info - dict mapping person_id -> [image_name, ...]
            image_info - dict mapping image line index -> person_id
    """
    identity_info = {}
    image_info = {}
    with open(lfw_identities_files) as identities:
        for idx, raw_line in enumerate(identities.readlines()):
            fields = raw_line.strip().split()
            # Each usable line carries exactly an image name and an id.
            if len(fields) != 2:
                continue
            image_name = fields[0]
            person_id = int(fields[1])
            identity_info.setdefault(person_id, []).append(image_name)
            image_info[idx] = person_id
    return identity_info, image_info
def prep_data(csv_file, split_value, seed_number):
    """
    This function places the data into two lists, training set and test set.

    NOTE(review): currently a stub -- it ignores all arguments and
    returns 0 instead of the documented train/test split.
    """
    return 0
import os
def parse_project_data(name, config):
    """Return the normalized project name and its directory path.

    The name is trimmed, lower-cased, and spaces become underscores;
    the directory is placed under ``config.location``.
    """
    project_name = name.strip().lower().replace(' ', '_')
    return project_name, os.path.join(config.location, project_name)
def conditions_event(iq, cc, wv, time):
    """
    Format new sky conditions and the local time of the change.

    Parameters
    ----------
    iq : float
        Image quality
    cc : float
        Cloud condition
    wv : float
        Water vapor
    time : 'astropy.time.Time'
        local time of conditions change event (local time)

    Returns
    -------
    lines : list of strings
        Output info as a list of lines.
    """
    def _percent(value):
        # Fractions below 1 render as a rounded percent; a value of 1
        # means no constraint ('Any'). Replaces three copies of the
        # same if/else block.
        if value < 1:
            return str(int(round(value * 100, 0))) + '%'
        return 'Any'

    fprint = '\n\tAt {0} local time, sky conditions change to iq={1}, cc={2}, wv={3}.'
    # time.iso is 'YYYY-MM-DD HH:MM:SS...'; [11:16] is the HH:MM part.
    return [fprint.format(time.iso[11:16], _percent(iq), _percent(cc), _percent(wv))]
def json_mapper(json_: dict) -> dict:
    """
    Identity mapping: pass an endpoint's json response straight through.

    :param json_: The json obtained from the endpoint call
    :type json_: dict
    :return: The response in json format, unchanged
    :rtype: dict
    """
    return json_
def is_mbi_format_valid(mbi):
    """
    Validate MBI ID format.

    This includes the BFD's special case of an "S" in the 2nd position
    denoting a synthetic MBI value.

    Reference for MBI ID format:
    https://www.cms.gov/Medicare/New-Medicare-Card/Understanding-the-MBI-with-Format.pdf

    :return: (is_valid, message) tuple
    """
    # Character types
    CHAR_TYPE_C = "123456789"
    CHAR_TYPE_N = "0123456789"
    # Alphabetic minus the ambiguous letters: S, L, O, I, B, Z
    CHAR_TYPE_A = "ACDEFGHJKMNPQRTUVWXY"
    CHAR_TYPE_AN = CHAR_TYPE_A + CHAR_TYPE_N
    # Allowed characters for each of the 11 positions.
    VALID_VALUES_BY_POS = (CHAR_TYPE_C,
                           CHAR_TYPE_A + "S",
                           CHAR_TYPE_AN,
                           CHAR_TYPE_N,
                           CHAR_TYPE_A,
                           CHAR_TYPE_AN,
                           CHAR_TYPE_N,
                           CHAR_TYPE_A,
                           CHAR_TYPE_A,
                           CHAR_TYPE_N,
                           CHAR_TYPE_N)
    if mbi is None:
        return False, "Empty"
    if len(mbi) != 11:
        return False, "Invalid length = {}".format(len(mbi))
    for pos, allowed in enumerate(VALID_VALUES_BY_POS):
        if mbi[pos] not in allowed:
            return False, "Invalid char in pos = {}".format(pos)
    return True, "Valid"
def find_in_nested_dict(dictn, value, depth = 0):
    """ Return the (sub-)dict of a nested dict that contains ``value``.

    Arguments:
        dictn {dict} -- nested dictionary
        value {int} -- value to search
    Keyword Arguments:
        depth {int} -- current depth in the nested dictionary (default: {0})
    Raises:
        ValueError: if value is not present in dict
    Returns:
        [dict] -- dict containing the value
    """
    # The original iterated "for k, v in dictn" (missing .items(), so it
    # crashed unpacking keys) and recursed only into the first nested
    # value; search every entry instead.
    for v in dictn.values():
        if v == value:
            return dictn
        if hasattr(v, 'items'):  # indicates a nested dictionary
            try:
                return find_in_nested_dict(v, value, depth + 1)
            except ValueError:
                # Not in this subtree; keep scanning siblings.
                continue
    raise ValueError("Value not found in the nested dictionary")
def confirm_action(msg):
    """ Prompts user for confirmation of action.

    Re-prompts (recursively) until the answer is yes/y or no/n.

    :param msg: message describing the action to confirm
    :return: True for yes, False for no
    """
    print(" [*] {}".format(msg))
    prompt = input(" [*] Y/N? ")
    # NOTE(review): the comparison is case-sensitive -- "Y" or "Yes"
    # falls through to the retry branch; confirm whether that's intended.
    if prompt in ["yes", "y"]:
        return True
    elif prompt in ["no", "n"]:
        return False
    else:
        print(" [-] Please answer with y/n")
        return confirm_action(msg)
from typing import List
from typing import Tuple
def minimum_bounding_box(coords: List[Tuple[float, float, float]],
                         buffer: float = 10.) -> Tuple[Tuple, Tuple]:
    """Calculate the minimum bounding box for a list of coordinates

    Parameters
    ----------
    coords : List[Tuple[float, float, float]]
        a list of tuples corresponding to x-, y-, and z-coordinates
    buffer : float (Default = 10.)
        the amount of buffer to add to the minimum bounding box

    Returns
    -------
    center: Tuple[float, float, float]
        the x-, y-, and z-coordinates of the center of the minimum bounding box
    size: Tuple[float, float, float]
        the x-, y-, and z-radii of the minimum bounding box
    """
    axes = list(zip(*coords))
    # Midpoint of each axis, then half-extent plus the buffer.
    center = tuple((max(axis) + min(axis)) / 2 for axis in axes)
    size = tuple(max(axis) - mid + buffer for axis, mid in zip(axes, center))
    return center, size
def should_run_crawl(
    force_crawl: bool, skip_crawl: bool, searching_path: bool, crawled_recently: bool
) -> bool:
    """
    Decide whether to run the crawl.

    ``force_crawl`` always wins; otherwise ``skip_crawl`` vetoes the
    crawl; otherwise crawl when searching a path or when this path has
    not been crawled recently.

    :param force_crawl: Always crawl if True.
    :param skip_crawl: Never crawl if True, unless force_crawl is also True.
    :param searching_path: Crawl if we're searching a path.
    :param crawled_recently: Crawl if this is False (path not crawled recently).
    :return: True if we should crawl.
    """
    if force_crawl:
        return True
    if skip_crawl:
        return False
    return searching_path or not crawled_recently
import os
def _get_checked_path(path, mustExist=True, allowNone=True):
"""Convert path to absolute if not None."""
if path in (None, ""):
if allowNone:
return None
else:
raise ValueError("Invalid path %r" % path)
path = os.path.abspath(path)
if mustExist and not os.path.exists(path):
raise ValueError("Invalid path %r" % path)
return path | 5981d3a74b6aa0548f96286f417ee843b4226975 | 45,388 |
def gcd_float(numbers, tol=1e-8):
    """
    Returns the greatest common divisor for a sequence of numbers.
    Uses a numerical tolerance, so can be used on floats.

    Args:
        numbers: Sequence of numbers (at least one element).
        tol: Numerical tolerance; the Euclidean recursion stops once the
            remainder drops to this threshold or below.

    Returns:
        Greatest common divisor of the numbers. The result is a float when
        the inputs are floats and an int when they are ints (the previous
        docstring incorrectly promised an int in all cases).
    """
    def _pair_gcd_tol(a, b):
        """Euclidean GCD of a and b with a numerical stopping tolerance.

        Unless b == 0, the result has the same sign as b (so dividing b by
        it yields a positive value).
        """
        while b > tol:
            a, b = b, a % b
        return a

    # Fold pairwise over the sequence; start from the first element and skip
    # it in the loop (the old code redundantly computed gcd(n, n) first).
    result = numbers[0]
    for value in numbers[1:]:
        result = _pair_gcd_tol(result, value)
    return result
import requests
def get_description(url_data_definition):
    """Fetch the column descriptions of a data set given by its
    DataProperties URL.

    Args:
        url_data_definition (str): URL of the DataProperties data set.

    Return:
        dict: mapping {'column_name': 'description'}, with descriptions
        truncated to 1024 characters (BigQuery's limit).
    """
    # Request the data set in JSON format.
    json_url = "?".join((url_data_definition, "$format=json"))
    payload = requests.get(json_url).json()
    descriptions = {}
    for entry in payload["value"]:
        # Only entries with a non-empty 'Key' describe table columns.
        if entry["Key"] == "":
            continue
        # Shorten the description: BigQuery allows at most 1024 characters.
        if entry["Description"] is not None and len(entry["Description"]) > 1024:
            entry["Description"] = entry["Description"][:1021] + "..."
        descriptions[entry["Key"]] = entry["Description"]
    return descriptions
from typing import Tuple
def split_reg_path(reg_path: str) -> Tuple[str, str, str]:
    """Split a full registry path into hive, key, and key path.

    Examples
    ----------
    >>> split_reg_path("REGISTRY\\\\MACHINE\\\\SYSTEM\\\\ControlSet001\\\\Control\\\\ComputerName")
    ('REGISTRY', 'ComputerName', 'MACHINE\\\\SYSTEM\\\\ControlSet001\\\\Control')

    Parameters
    ----------
    reg_path : str
        The full registry key.

    Returns
    -------
    Tuple[str, str, str]
        Hive (first segment), registry key (last segment), and the
        registry key path (everything in between).
    """
    # Split once and slice, rather than re-splitting for each component.
    segments = reg_path.split("\\")
    hive = segments[0]
    reg_key = segments[-1]
    reg_key_path = "\\".join(segments[1:-1])
    return (hive, reg_key, reg_key_path)
def clean_string_input(value: str) -> str:
    """Normalise user input: trim surrounding whitespace and lowercase it.

    Parameters
    ----------
    value: str
        The raw user input string.

    Returns
    -------
    str
        The stripped, lowercased string.
    """
    trimmed = value.strip()
    return trimmed.lower()
import math
def get_color_table_size(num_colors):
    """Return the 3-bit color-table-size field for a GIF Logical Screen
    Descriptor.

    The field encodes ``nbits - 1`` as a three-character binary string,
    where the color table holds ``2**nbits`` entries; GIF requires at
    least 4 entries, hence the lower bound of 2 bits.
    """
    bits_required = math.ceil(math.log(num_colors, 2))
    nbits = bits_required if bits_required > 2 else 2
    return format(nbits - 1, '03b')
import dis
def _get_opcodes(codeobj):
"""_get_opcodes(codeobj) -> [opcodes]
Extract the actual opcodes as a list from a code object
>>> c = compile("[1 + 2, (1,2)]", "", "eval")
>>> _get_opcodes(c)
[100, 100, 23, 100, 100, 102, 103, 83]
"""
i = 0
opcodes = []
s = codeobj.co_code
while i < len(s):
code = ord(s[i])
opcodes.append(code)
if code >= dis.HAVE_ARGUMENT:
i += 3
else:
i += 1
return opcodes | c48b80a6d742ad46c2cb034c43b15f19af9c5d3b | 45,397 |
def toa_incoming_shortwave_flux(srad0, srad0u):
    """TOA incoming shortwave flux ``rsdt`` [W m-2].

    Derived from net top solar radiation (``srad0``) and top solar
    radiation upward (``srad0u``) by removing the upward component from
    the net flux.
    """
    rsdt = srad0 - srad0u
    return rsdt
import math
def judgeSquareSum(self, c):
    """Return True if c can be written as a**2 + b**2 for integers a, b >= 0.

    Two-pointer formulation: start with a = 0 and b = floor(sqrt(c)) and
    move the pointers toward each other depending on whether the current
    sum of squares under- or over-shoots c.

    :type c: int
    :rtype: bool
    """
    low, high = 0, int(math.sqrt(c))
    while low <= high:
        total = low * low + high * high
        if total == c:
            return True
        if total < c:
            low += 1
        else:
            high -= 1
    return False
def _normalise_options(options):
"""
Return a sequence of (value, label) pairs for all options where each option
can be a scalar value or a (value, label) tuple.
"""
out = []
if hasattr(options, '__call__'):
options = options()
for option in options:
if isinstance(option, tuple):
out.append( option )
else:
out.append( (option, str(option)) )
return out | 624c78695643aac0e7a26583e5196f6c28446ce4 | 45,400 |
def all_3_permutations(perm_list):
    """
    Enumerate every [i, j, k] triple with 0 <= i <= perm_list[0],
    0 <= j <= perm_list[1] and 0 <= k <= perm_list[2].

    Input:
        - perm_list : tuple or list of three non-negative ints (upper bounds).
    Output:
        list of 3-element lists, in lexicographic order.
    """
    triples = []
    for i in range(perm_list[0] + 1):
        for j in range(perm_list[1] + 1):
            for k in range(perm_list[2] + 1):
                triples.append([i, j, k])
    return triples
import hashlib
def tagged_hash(tag: str, data: bytes) -> bytes:
"""BIP-Schnorr tag-specific key derivation"""
hashtag = hashlib.sha256(tag.encode()).digest()
return hashlib.sha256(hashtag + hashtag + data).digest() | 2127a1b1fbc933d1925d0c9998c9600f205eaf68 | 45,402 |
import os
def tree(root_dir, dest):
    """
    Walk ``root_dir`` and build a distutils ``data_files``-style list:
    one ``(destination_dir, [source files])`` tuple per directory, with
    the destination rooted at ``dest``.
    """
    base = os.path.dirname(root_dir)
    entries = []
    for current_dir, _, filenames in os.walk(os.path.normpath(root_dir)):
        target_dir = dest + current_dir[len(base):]
        sources = [os.path.join(current_dir, name) for name in filenames]
        entries.append((target_dir, sources))
    return entries
def pop_left(dictionary):
    """
    Remove the first entry of *dictionary* (in insertion order) and return
    it as a single-entry dict.

    Input/Output:
        dictionary = dict to consume; its first entry is removed in place.
    Output:
        {first_key: first_value}, or None when the dict is empty.
    """
    if not dictionary:
        return None
    first_key = next(iter(dictionary))
    return {first_key: dictionary.pop(first_key)}
import json
def decode_response(res):
    """Parse a WebSuccess or WebError response into (status, message, data)."""
    payload = json.loads(res.data.decode('utf-8'))
    return payload['status'], payload['message'], payload['data']
import numpy
def get_j_measure(
        predictor_values_positive, predictor_values_negative,
        max_percentile_level, num_bins, min_examples_per_bin):
    """Computes J-measure for one predictor variable.
    P = number of positive examples, with predictand of 1
    N = number of negative examples, with predictand of 0
    :param predictor_values_positive: length-P numpy array of predictor values
        for positive examples.
    :param predictor_values_negative: length-N numpy array of predictor values
        for negative examples.
    :param max_percentile_level: Maximum percentile of predictor values to
        include in J-measure.  For example, if `max_percentile` is 99, values
        included will span the 1st to 99th percentiles.
    :param num_bins: Number of bins used to compute J-measure.
    :param min_examples_per_bin: Minimum number of examples per bin (in both the
        positive and negative distributions).  Will merge bins to meet this
        criterion.
    :return: j_measure: J-measure (scalar).
    """
    # --- Input validation: both samples must be finite 1-D arrays, the
    # percentile cap in [90, 100], and sane bin/count settings. ---
    assert not numpy.any(numpy.isnan(predictor_values_positive))
    assert len(predictor_values_positive.shape) == 1
    assert not numpy.any(numpy.isnan(predictor_values_negative))
    assert len(predictor_values_negative.shape) == 1
    assert max_percentile_level >= 90.
    assert max_percentile_level <= 100.
    num_bins = int(numpy.round(num_bins))
    assert num_bins >= 10
    min_examples_per_bin = int(numpy.round(min_examples_per_bin))
    assert min_examples_per_bin >= 10
    # Pool both samples so the two histograms share the same bin edges,
    # clipped to the [100 - p, p] percentile range to limit outlier impact.
    all_predictor_values = numpy.concatenate((
        predictor_values_positive, predictor_values_negative
    ))
    min_value_for_bins = numpy.percentile(
        all_predictor_values, 100 - max_percentile_level
    )
    max_value_for_bins = numpy.percentile(
        all_predictor_values, max_percentile_level
    )
    bin_cutoffs = numpy.linspace(
        min_value_for_bins, max_value_for_bins, num=num_bins + 1
    )
    # Assign every example to a bin (values outside the cutoff range land in
    # the clipped end bins / index -1 or num_bins via digitize - 1).
    bin_indices_positive = (
        numpy.digitize(predictor_values_positive, bin_cutoffs, right=False) - 1
    )
    bin_indices_negative = (
        numpy.digitize(predictor_values_negative, bin_cutoffs, right=False) - 1
    )
    # Walk the bins left to right, accumulating consecutive bins into one
    # "component" until it holds at least min_examples_per_bin examples of
    # BOTH classes.  The last bin always flushes its component, possibly
    # undersized (fixed after the loop).
    num_pos_examples_by_component = []
    num_neg_examples_by_component = []
    positive_fraction_by_component = []
    negative_fraction_by_component = []
    pos_example_indices_this_component = numpy.array([], dtype=int)
    neg_example_indices_this_component = numpy.array([], dtype=int)
    for k in range(num_bins):
        pos_example_indices_this_component = numpy.concatenate((
            pos_example_indices_this_component,
            numpy.where(bin_indices_positive == k)[0]
        ))
        neg_example_indices_this_component = numpy.concatenate((
            neg_example_indices_this_component,
            numpy.where(bin_indices_negative == k)[0]
        ))
        if (
                len(pos_example_indices_this_component) < min_examples_per_bin
                and k != num_bins - 1
        ):
            continue
        if (
                len(neg_example_indices_this_component) < min_examples_per_bin
                and k != num_bins - 1
        ):
            continue
        num_pos_examples_by_component.append(
            len(pos_example_indices_this_component)
        )
        num_neg_examples_by_component.append(
            len(neg_example_indices_this_component)
        )
        positive_fraction_by_component.append(
            float(len(pos_example_indices_this_component)) /
            len(predictor_values_positive)
        )
        negative_fraction_by_component.append(
            float(len(neg_example_indices_this_component)) /
            len(predictor_values_negative)
        )
        # Start accumulating the next component from scratch.
        pos_example_indices_this_component = numpy.array([], dtype=int)
        neg_example_indices_this_component = numpy.array([], dtype=int)
    num_pos_examples_by_component = numpy.array(
        num_pos_examples_by_component, dtype=int
    )
    num_neg_examples_by_component = numpy.array(
        num_neg_examples_by_component, dtype=int
    )
    positive_fraction_by_component = numpy.array(positive_fraction_by_component)
    negative_fraction_by_component = numpy.array(negative_fraction_by_component)
    # If the flushed last component is undersized for either class, fold it
    # into the previous component using count-weighted averages of the class
    # fractions, then drop it.
    if (
            num_pos_examples_by_component[-1] < min_examples_per_bin or
            num_neg_examples_by_component[-1] < min_examples_per_bin
    ):
        positive_fraction_by_component[-2] = numpy.average(
            positive_fraction_by_component[-2:],
            weights=num_pos_examples_by_component[-2:]
        )
        negative_fraction_by_component[-2] = numpy.average(
            negative_fraction_by_component[-2:],
            weights=num_neg_examples_by_component[-2:]
        )
        positive_fraction_by_component = positive_fraction_by_component[:-1]
        negative_fraction_by_component = negative_fraction_by_component[:-1]
    # J-measure: sum over components of (q - p) * log2(q / p), where p and q
    # are the per-component fractions of positive and negative examples.
    # NOTE(review): a component with zero examples of one class would yield
    # a division by zero / log of zero here — presumably prevented by the
    # min_examples_per_bin merging; confirm for extreme inputs.
    j_measure_components = (
        (negative_fraction_by_component - positive_fraction_by_component) *
        numpy.log2(
            negative_fraction_by_component / positive_fraction_by_component
        )
    )
    return numpy.sum(j_measure_components)
def softmax_cross_entropy_with_logits(logits, targets, batch_size):
    """Softmax cross-entropy loss (mean reduction) from raw logits.

    Args:
        * logits: (NxC) outputs of dense layer
        * targets: (NxC) one-hot encoded labels
        * batch_size: value of N, temporarily required because Plan cannot trace .shape
    """
    # Numerically stable log-softmax: shift by the global max before exp.
    shifted = logits - logits.max()
    log_softmax = shifted - shifted.exp().sum(dim=1, keepdim=True).log()
    # Negative log-likelihood, averaged over the batch.
    nll = -(targets * log_softmax).sum()
    return nll / batch_size
import argparse
def parser_args():
    """Build and parse the command-line configuration for BGCF.

    Returns an ``argparse.Namespace`` holding dataset/runtime options,
    optimizer hyper-parameters, and model/graph-sampling settings.
    """
    parser = argparse.ArgumentParser()
    # --- dataset & runtime ---
    parser.add_argument("-d", "--dataset", type=str, default="Beauty", help="choose which dataset")
    parser.add_argument("-dpath", "--datapath", type=str, default="./scripts/data_mr", help="minddata path")
    parser.add_argument("-de", "--device", type=str, default='0', help="device id")
    parser.add_argument('--seed', type=int, default=0, help="random seed")
    # NOTE(review): type=list makes argparse split a string argument into
    # individual characters; only the default value is usable as-is — confirm intended.
    parser.add_argument('--Ks', type=list, default=[5, 10, 20, 100], help="top K")
    parser.add_argument('--test_ratio', type=float, default=0.2, help="test ratio")
    parser.add_argument('-w', '--workers', type=int, default=8, help="number of process")
    parser.add_argument("-ckpt", "--ckptpath", type=str, default="./ckpts", help="checkpoint path")
    # --- optimizer hyper-parameters ---
    parser.add_argument("-eps", "--epsilon", type=float, default=1e-8, help="optimizer parameter")
    parser.add_argument("-lr", "--learning_rate", type=float, default=1e-3, help="learning rate")
    parser.add_argument("-l2", "--l2", type=float, default=0.03, help="l2 coefficient")
    parser.add_argument("-wd", "--weight_decay", type=float, default=0.01, help="weight decay")
    parser.add_argument("-act", "--activation", type=str, default='tanh', choices=['relu', 'tanh'],
                        help="activation function")
    # NOTE(review): same type=list caveat as --Ks above.
    parser.add_argument("-ndrop", "--neighbor_dropout", type=list, default=[0.0, 0.2, 0.3],
                        help="dropout ratio for different aggregation layer")
    parser.add_argument("-log", "--log_name", type=str, default='test', help="log name")
    # --- training & model/graph-sampling settings ---
    parser.add_argument("-e", "--num_epoch", type=int, default=600, help="epoch sizes for training")
    parser.add_argument('-input', '--input_dim', type=int, default=64, choices=[64, 128],
                        help="user and item embedding dimension")
    parser.add_argument("-b", "--batch_pairs", type=int, default=5000, help="batch size")
    parser.add_argument('--eval_interval', type=int, default=20, help="evaluation interval")
    parser.add_argument("-neg", "--num_neg", type=int, default=10, help="negative sampling rate ")
    parser.add_argument("-g1", "--raw_neighs", type=int, default=40, help="num of sampling neighbors in raw graph")
    parser.add_argument("-g2", "--gnew_neighs", type=int, default=20, help="num of sampling neighbors in sample graph")
    parser.add_argument("-emb", "--embedded_dimension", type=int, default=64, help="output embedding dim")
    parser.add_argument('--dist_reg', type=float, default=0.003, help="distance loss coefficient")
    parser.add_argument('-ng', '--num_graphs', type=int, default=5, help="num of sample graph")
    parser.add_argument('-geps', '--graph_epsilon', type=float, default=0.01, help="node copy parameter")
    return parser.parse_args()
def parse_time(s):
    """Parse timing information from GNU time's verbose output.

    :param s: raw ``bytes`` of GNU time's report (one "Label: value" line
        per metric).
    :return: dict with ``user_time``/``system_time``/``wall_clock`` in
        seconds, ``percent_of_cpu`` as an int, ``maximum_resident_set_size``
        in kbytes, and ``exit_status``; metrics absent from the input stay 0.

    Fixes over the previous version: dropped the no-op ``.format()`` call
    on the decoded string, replaced the obscure ``float()`` / ``int()``
    zero-initializers with literals, and chained the mutually exclusive
    prefix checks with ``elif``.
    """
    user_time = 0.0
    system_time = 0.0
    percent_of_cpu = 0
    wall_clock = 0.0
    maximum_resident_set_size = 0
    exit_status = 0
    for line in s.decode().split('\n'):
        line = line.strip()
        if line.startswith('User time (seconds):'):
            user_time = float(line.split(':')[1].strip())
        elif line.startswith('System time (seconds):'):
            system_time = float(line.split(':')[1].strip())
        elif line.startswith('Percent of CPU this job got:'):
            percent_of_cpu = int(line.split(':')[1].strip().rstrip('%'))
        elif line.startswith('Elapsed (wall clock) time (h:mm:ss or m:ss):'):
            # The value itself contains colons, so strip the label first.
            value = line.replace('Elapsed (wall clock) time (h:mm:ss or m:ss):', '').strip()
            parts = value.split(':')
            if len(parts) == 3:  # h:mm:ss
                hours, minutes, seconds = int(parts[0]), int(parts[1]), float(parts[2])
                wall_clock = hours * 3600 + minutes * 60 + seconds
            elif len(parts) == 2:  # m:ss
                minutes, seconds = int(parts[0]), float(parts[1])
                wall_clock = minutes * 60 + seconds
        elif line.startswith('Maximum resident set size (kbytes):'):
            maximum_resident_set_size = int(line.split(':')[1].strip())
        elif line.startswith('Exit status:'):
            exit_status = int(line.split(':')[1].strip())
    return {
        'user_time': user_time,
        'system_time': system_time,
        'percent_of_cpu': percent_of_cpu,
        'wall_clock': wall_clock,
        'maximum_resident_set_size': maximum_resident_set_size,
        'exit_status': exit_status,
    }
from typing import Tuple
def create_node(coordinates: Tuple[int, float, float]) -> dict:
    """Build a city node dict from an ``(index, x, y)`` triple.

    The stored index is zero-based (the input index is one-based) and
    every city starts with a capacity of 0.
    """
    index, x, y = coordinates[0], coordinates[1], coordinates[2]
    return {
        'index': int(index) - 1,
        'x': float(x),
        'y': float(y),
        'capacity': 0,  # initialized as 0
    }
def set_new_methods(**kwargs):
    """
    Semantics:
        Class decorator that attaches any number of new methods to a class.
    Args:
        **kwargs: method name given by key, method body by value.
    Returns:
        The decorated class (modified in place).
    """
    def decorate(cls):
        for name, method in kwargs.items():
            setattr(cls, name, method)
        return cls
    return decorate
def _find_family(same_name_df):
"""Return two dataframes of family / others from same_name_df. """
family_size = same_name_df['SibSp'] + same_name_df['Parch'] + 1
family = same_name_df.loc[family_size > 1]
not_family = same_name_df.loc[family_size <= 1]
return family, not_family | df6758ac761dcf744a5ece6f30f95e04901e3881 | 45,419 |
import os
def get_tdata_corr_txt_dir():
    """Return the directory containing the text correlation sample files."""
    module_dir = os.path.dirname(__file__)
    return os.path.join(module_dir, 'tdata_corr_txt')
def aggregate_weightedsample(w_sample):
    """Merge a length-grouped train/test sample into one flat split.

    Args:
        w_sample: dict mapping each grouping variable to a dict with
            'train' and 'test' sequence lists, as produced by
            :func:`weighted_sample`.

    Returns:
        dict: {0: {'train': [...], 'test': [...]}} with all groups merged
        in iteration order.
    """
    merged = {"train": [], "test": []}
    for group_split in w_sample.values():
        for split_name in group_split:
            merged[split_name] += group_split[split_name]
    return {0: merged}
def get_symbols_test(price_over=None, price_under=None):
    """Return a list of symbols within a defined price range.

    NOTE: this is a test stub — the price bounds are normalised but
    currently unused, and a fixed symbol list is returned.
    """
    price_over = 0 if price_over is None else price_over
    price_under = 20 if price_under is None else price_under
    return ['MSFT', 'AAPL']
def get_warning_messages():
    """Generate the login-form warning messages wrapped in styled HTML spans."""
    opening_tag = '<span style=" font-style:italic; color:#ffffff;">'
    closing_tag = '</span>'
    texts = {
        "empty_str": "",
        "registered": "Username is already registered",
        "login_required": "Login is required",
        "invalid_login": "Username doesn't exist",
        "login_out_of_range": "Username must be between 4 and 20 in length",
        "password_required": "Password is required",
        "invalid_password": "Password doesn't match",
        "password_out_of_range": "Password must be between 4 and 20 in length",
        "not_alphanumeric": "Login can only contain alphanumeric characters",
        "banned": "Account was banned",
    }
    return {name: opening_tag + text + closing_tag for name, text in texts.items()}
import warnings
def validate_how(how: str) -> str:
    """Validate the given ``how`` for a join and normalise it for Spark.

    'full' is accepted (with a compatibility warning) and pandas' 'outer'
    is translated to Spark's 'full'; any other unsupported value raises
    ValueError.
    """
    valid_values = ("inner", "left", "right", "full")
    if how == "full":
        warnings.warn(
            "Warning: While pandas-on-Spark will accept 'full', you should use 'outer' "
            "instead to be compatible with the pandas merge API",
            UserWarning,
        )
    elif how == "outer":
        # 'outer' in pandas equals 'full' in Spark
        how = "full"
    if how not in valid_values:
        raise ValueError(
            "The 'how' parameter has to be amongst the following values: ",
            "['inner', 'left', 'right', 'outer']",
        )
    return how
def calc_cell_temperature(absorbed_radiation_Wperm2, T_external_C, panel_properties_PV):
    """
    Calculate PV cell temperatures from the absorbed radiation.

    Assumes a linear temperature rise with radiation, scaled to the NOCT
    condition (20 degC ambient at 800 W/m2).

    :param absorbed_radiation_Wperm2: absorbed radiation on panel
    :type absorbed_radiation_Wperm2: np.array
    :param T_external_C: drybulb temperature from the weather file
    :type T_external_C: series
    :param panel_properties_PV: panel property from the supply system database
    :type panel_properties_PV: dataframe
    :return T_cell_C: cell temperature of PV panels
    :rtype T_cell_C: series
    """
    noct_C = panel_properties_PV['PV_noct']
    temperature_rise_C = absorbed_radiation_Wperm2 * (noct_C - 20) / 800
    return T_external_C + temperature_rise_C
import os
def get_requirements():
    """Read the requirement specifiers from requirements.txt next to this module."""
    module_dir = os.path.dirname(os.path.realpath(__file__))
    requirements_path = os.path.join(module_dir, "requirements.txt")
    with open(requirements_path, "r") as requirements_file:
        return requirements_file.read().splitlines()
def get_common_target(scalars):
    """
    Extract the common target from a list of scalars, verifying that all
    E-targets are equivalent.

    Args:
        scalars (:py:class:`.Scalars`):
    Return:
        dict: Common target; the default ``{"name": "f"}`` when no scalar
        targets "E".
    Raises:
        ValueError: if multiple E-targets disagree on 'order' or 'exponent'.
    """
    e_targets = [scalar.target_term for scalar in scalars
                 if scalar.target_term["name"] == "E"]
    if not e_targets:
        return dict(name="f")
    reference = e_targets[0]
    # With more than one E-target, every entry must match the first.
    for key in ["order", "exponent"]:
        if any(target[key] != reference[key] for target in e_targets[1:]):
            raise ValueError("mismatch in target terms!")
    return reference
import os
def parse_csv_data(csv_filename: str) -> list[str]:
    """Parse a CSV file into a list of row strings.

    NOTE: This is a purely demonstrative function made to fit the
    specification; actual data handling is done with JSON.

    Args:
        csv_filename: Name of the CSV file, resolved relative to this module.

    Returns:
        One string per row of the CSV file.
    """
    module_dir = os.path.abspath(os.path.dirname(__file__))
    csv_path = os.path.abspath(os.path.join(module_dir, csv_filename))
    with open(csv_path, 'r', encoding="utf-8") as csv_file:
        return csv_file.read().splitlines()
def model_key(row, headers, use_problem = True):
    """Generate a unique key that identifies a model result row.

    Concatenates the model-architecture column values (and, when
    ``use_problem`` is True, the problem-definition columns too),
    each followed by a '|||' separator.
    """
    model_fields = ["ico_encoder", "article_encoder", "attn", "cond_attn",
                    "tokenwise_attention", "pretrain_attention",
                    "tune_embeddings", "no_pretrained_word_embeddings"]
    problem_fields = ["article_sections", "data_config"]
    selected = model_fields + (problem_fields if use_problem else [])
    return "".join(row[headers[field]] + "|||" for field in selected)
def bytes2text(bs):
    """
    Convert bytes (or a list/tuple of bytes or str) to stripped text.

    :param bs: Bytes, or a list/tuple whose first element determines the
        join strategy (bytes are joined then decoded; strings are joined).
    :return: Converted text; '' for empty containers or unrecognised input.
    :raises TypeError: when a non-empty list/tuple holds neither bytes nor str.
    """
    # Exact-type check on purpose (mirrors the original contract): only
    # plain lists/tuples take the sequence path.
    if type(bs) in (list, tuple) and len(bs) > 0:
        head = bs[0]
        if isinstance(head, bytes):
            return b''.join(bs).decode(errors='ignore').strip()
        if isinstance(head, str):
            return ''.join(bs).strip()
        raise TypeError
    if isinstance(bs, bytes):
        return bs.decode(errors='ignore').strip()
    return ''
import random
def random_x():
    """Return a random x-coordinate in [0, 1200] for use in default field values."""
    return random.randrange(0, 1201)
import string
def ownBase32():
    """
    Return the custom base-32 alphabet as a mapping {index: character}.
    """
    alphabet = {index: letter for index, letter in enumerate(string.ascii_lowercase)}
    # Like in old typewriters, 'l' doubles as 1 and 'o' as 0; because the
    # name space is small, 'z' stands for 2, 'a' for 4 and 's' for 5 —
    # so the digits 3, 6, 7, 8, 9 and the space fill the remaining slots.
    for index, symbol in zip(range(26, 32), ["3", "6", "7", "8", "9", " "]):
        alphabet[index] = symbol
    return alphabet
import os
def repo_path(repo):
    """Return the local disk path to *repo* (drops the trailing '.git' component)."""
    trimmed = repo.path.rstrip(os.sep)
    return os.path.dirname(trimmed)
def authority(q, ordenado, top):
    """
    Authority-style effectiveness estimate over a ranked-position matrix.

    Input: ``ordenado`` — 2-D array where column ``i`` holds the ranking
    computed for image ``i``; ``q`` — the query image index; ``top`` —
    the top-k window size.
    Output: a score in [0, 1] — for each image ``i`` in q's top-k, count
    how many of ``i``'s own top-k also appear in q's top-k, normalised
    by top**2.
    """
    score = 0.0
    top_of_q = ordenado[0:top, q]
    for neighbor in top_of_q:
        for candidate in ordenado[0:top, neighbor]:
            if candidate in top_of_q:
                score += 1.0
    return score / (top * top)
import torch
def get_accuracy_with_logits(y_true: torch.Tensor, y_pred: torch.Tensor) -> float:
    """
    Calculate classification accuracy from logits.

    :param y_true: torch.Tensor of ints containing true labels. shape = (N,)
    :param y_pred: torch.Tensor of predicted logits. shape = (N, N_classes)
    :return: accuracy score as a Python float
    """
    predicted_labels = y_pred.argmax(dim=1)
    hits = (y_true == predicted_labels).float()
    return hits.mean().item()
def modified_binary_search(sorted_vertices, vertex):
    """ Modified binary search algorithm to increase performance when removing soundings during the
    label-based generalization.

    Locates the index of *vertex* inside *sorted_vertices* (assumed sorted
    by ``get_z()``): two binary searches narrow the range of entries whose
    z equals the vertex's z, then the exact entry is found by equality
    within that range.

    NOTE(review): when the vertex is not present in the equal-z range, the
    final loop completes without returning, so the function implicitly
    returns None — confirm callers handle that.
    """
    # 'right' is reused as the lower cursor of the first search; after the
    # loop it is one past the last entry with z <= vertex.z.
    right, left = 0, 0
    vertices_num = len(sorted_vertices)
    while right < vertices_num:
        i = (right + vertices_num) // 2
        if vertex.get_z() < sorted_vertices[i].get_z():
            vertices_num = i
        else:
            right = i + 1
    # Upper bound of the equal-z range (index of its last element).
    vertices_num = right - 1
    # Second search: find the first entry with z >= vertex.z ('left').
    while left < vertices_num:
        i = (left + vertices_num) // 2
        if vertex.get_z() > sorted_vertices[i].get_z():
            left = i + 1
        else:
            vertices_num = i
    if left == right-1:
        # Single candidate with this z value: no equality scan needed.
        return left
    else:
        # Several entries share the z value: linear scan for the exact one.
        for idx in range(left, right):
            if sorted_vertices[idx] == vertex:
                return idx
def is_non_empty_string(str_):
    """Return True when *str_* is not the empty string.

    The ``!=`` comparison already yields a bool, so the previous
    ``bool(...)`` wrapper was redundant.
    """
    return str_ != ''
def solve(s):
    """
    Repeatedly delete adjacent duplicate character pairs from *s*.

    Classic stack formulation: push each character unless it equals the
    current top of the stack, in which case pop (the pair cancels); a
    cancellation that exposes a new adjacent pair is handled automatically
    on the next character. This replaces the previous two-index walk with
    its fragile special-case patch-up for the trailing character.

    Returns the reduced string, or "Empty String" when nothing remains
    (per the problem requirements).
    """
    stack = []
    for ch in s:
        if stack and stack[-1] == ch:
            stack.pop()  # adjacent duplicate: the pair cancels out
        else:
            stack.append(ch)
    return "".join(stack) if stack else "Empty String"
def graph_input_anssel(si0, si1, sj0, sj1, se0, se1, y, f0=None, f1=None, s0=None, s1=None, kw=None, akw=None):
    """ Produce Keras task specification from vocab-vectorized sentences.

    The resulting 'gr' structure is our master dataset container, as well
    as something that Keras can recognize as Graph model input.

    * si0, si1: Words as indices in vocab; 0 == not in vocab
    * sj0, sj1: Words as indices in GloVe; 0 == not in glove
      (or in vocab too, which is preferred; never si0>0 and si1>0 at once)
    * se0, se1: Words as embeddings (based on sj; 0 for nonzero-si)
    * y: Labels
    * f0, f1: NLP flags (word class, overlaps, ...)
    * s0, s1: Words as strings
    * kw, akw: Scalars for auxiliary pair scoring (overlap scores in yodaqa dataset)

    To get unique word indices, sum si0+sj1.
    """
    gr = {'si0': si0, 'si1': si1, 'sj0': sj0, 'sj1': sj1, 'score': y}
    if se0 is not None:
        gr.update(se0=se0, se1=se1)
    if f0 is not None:
        gr.update(f0=f0, f1=f1)
    if s0 is not None:
        # Raw strings are useful for non-neural baselines.
        gr.update(s0=s0, s1=s1)
    if kw is not None:
        # yodaqa-specific keyword weight counters
        gr.update(kw=kw, akw=akw)
    return gr
def test_file(test_dir):
    """
    Return a factory that writes `content` to `filename` under `test_dir`
    and returns the resulting path.
    """
    def make_file(filename: str, content: str):
        path = test_dir / filename
        path.write_text(content, encoding="utf-8")
        return path
    return make_file
def lazy_binmap(f, xs):
    """
    Lazily map a binary function over each adjacent pair of a sequence:
    f(xs[0], xs[1]), f(xs[1], xs[2]), ...
    """
    adjacent_pairs = zip(xs, xs[1:])
    return (f(left, right) for left, right in adjacent_pairs)
import os
def GetPrebuiltsForPackages(package_root, package_list):
    """Create the list of archive names for the packages and validate they exist.

    Args:
        package_root (str): Path to 'packages' directory.
        package_list (list[str]): List of packages.

    Returns:
        List of validated '<package>.tbz2' targets.

    Raises:
        LookupError: when a package archive is missing on disk.
    """
    upload_targets = []
    for package in package_list:
        archive_path = os.path.join(package_root, package) + '.tbz2'
        if not os.path.exists(archive_path):
            raise LookupError('DevInstall archive %s does not exist' % archive_path)
        upload_targets.append(package + '.tbz2')
    return upload_targets
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.