content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
|---|---|---|
import copy
def override_repo_refs(repos, override_ref=None, overrides=None):
    """Apply ref overrides to the `repos` dictionary.

    Arguments:
        repos (dict): A dict mapping Repository objects to openedx.yaml data.
        override_ref (str): a ref to use in all repos.
        overrides (dict mapping repo names to refs): refs to use in specific repos.

    Returns:
        A new dict mapping Repository objects to openedx.yaml data, with refs overridden.
    """
    result = {repo: copy.deepcopy(data) for repo, data in repos.items()}
    overrides = overrides or {}
    if not (override_ref or overrides):
        return result
    for repo, data in result.items():
        # A per-repo override wins over the blanket override_ref.
        ref = overrides.get(repo.full_name, override_ref)
        if ref:
            data["openedx-release"]["ref"] = ref
    return result
|
26d264b16dd8ff826362387d08868f99e3c0ab5d
| 49,950
|
def dataset_get_projection_wkt(gdal_ds):
    """Return a GDAL dataset's projection as well-known text.

    Falls back to the GCP projection when the dataset has no projection
    of its own (empty string).
    """
    wkt = gdal_ds.GetProjectionRef()
    return wkt if wkt != '' else gdal_ds.GetGCPProjection()
|
12febdeafc819c0987c7dece168aac1fb89b8275
| 49,954
|
from typing import Optional
from typing import Tuple
from typing import Any
def get_columns_from_str(columns: Optional[str],
                         separator: str = ',') -> Tuple[Any, ...]:
    """Convert a separator-delimited column string to a tuple of names.

    Args:
        columns: Column names as a single delimited string, or None.
        separator: Character separating the column names.

    Returns:
        Tuple of column names; empty tuple when the string is None/empty.
    """
    return tuple(columns.split(separator)) if columns else ()
|
482bd797b426062fbc36bcabb4af7ca3cc72cfb8
| 49,955
|
def IsLatencyField(field):
    """Check whether a field name refers to latency.

    Args:
        field: string. The name of the field.

    Returns:
        True when the field name contains the keyword 'latency'.
    """
    return field.find('latency') != -1
|
bd5d330460c000fa3fe1045db5ff26cfe40abf3c
| 49,957
|
def card_str(c):
    """Return the two-character string for card *c*: rank (high nibble)
    followed by suit (low nibble)."""
    ranks = '__23456789TJQKA'
    suits = 'SDHC'
    return ranks[c >> 4] + suits[c & 15]
|
6e452325b323c9c053b19c30f47b7252f06f1ecd
| 49,960
|
def generate_delays(delay, max_delay, multiplier=2):
    """Generator/iterator of retry delay values.

    Each yielded value is the previous one scaled by ``multiplier`` and
    clamped into ``[0, max_delay]``.  Iteration never terminates, and no
    negative value is ever produced.  Validation runs eagerly, before
    the first value is generated.
    """
    if max_delay < 0:
        raise ValueError("Provided delay (max) must be greater"
                         " than or equal to zero")
    if delay < 0:
        raise ValueError("Provided delay must start off greater"
                         " than or equal to zero")
    if multiplier < 1.0:
        raise ValueError("Provided multiplier must be greater than"
                         " or equal to 1.0")

    def _delays():
        # Generation is deferred to this inner generator so the checks
        # above fire at call time, not at first iteration.
        current = delay
        while True:
            current = max(0, min(max_delay, current))
            yield current
            current *= multiplier
    return _delays()
|
b576362f6a0613e29c52ad10fc35e8b95635acc3
| 49,969
|
from typing import Any
import re
def regular_expression(check_value: Any, item: Any) -> bool:
    """Search ``str(item)`` with the regex ``check_value``.

    :param check_value: Regular expression.
    :param item: Item to search against (stringified first).
    :return: True when the pattern matches anywhere in the item.
    """
    match = re.search(check_value, str(item))
    return match is not None
|
e9ed664a0f4fcf3166a5ff9b854a33b0e7472ee5
| 49,974
|
def num_to_alpha(integer):
    """
    Transform an integer in [0, 51] to a letter [a-z] then [A-Z].

    Parameters
    ----------
    integer : int
        Integer to transform

    Returns
    -------
    a : str
        alpha-numeric representation of the integer

    Raises
    ------
    ValueError
        If ``integer`` is negative or >= 52.
    """
    # Renamed from `ascii`, which shadowed the builtin ascii().
    letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    # Reject negatives explicitly: the original indexed with them and
    # silently returned letters from the end (e.g. -1 -> 'Z').
    if 0 <= integer < 52:
        return letters[integer]
    raise ValueError('Too large index for einsum')
|
9ec27d1f7c28bb4bded81aa2a7171eca80d3efdc
| 49,981
|
def _make_linear_ramp(white):
""" generate a palette in a format acceptable for `putpalette`, which
expects [r,g,b,r,g,b,...]
"""
ramp = []
r, g, b = white
for i in range(255):
ramp.extend((r*i/255, g*i/255, b*i/255))
return ramp
|
5f75464c3773ca1ebb70331d2e943930e7d2152f
| 49,986
|
def sql_render(hpo_id, cdm_schema, results_schema, vocab_schema, sql_text):
    """
    Replace template parameters in an SQL string.

    :param hpo_id: will be the source name in Achilles report
    :param cdm_schema: schema of the cdm
    :param results_schema: schema of the results tables
    :param vocab_schema: schema of the vocabulary tables
    :param sql_text: SQL command text to render
    :return: command text with template parameters replaced
    """
    schema_map = {
        '@cdm_database_schema': cdm_schema,
        '@results_database_schema': results_schema,
        '@vocab_database_schema': vocab_schema,
    }
    rendered = sql_text
    for placeholder, schema in schema_map.items():
        if schema is None:
            # Drop the placeholder together with its trailing dot so
            # the resulting identifier is left unqualified.
            rendered = rendered.replace(placeholder + '.', '')
        else:
            rendered = rendered.replace(placeholder, schema)
    return rendered.replace('@source_name', hpo_id)
|
02848fed8426cb8fc20dcfaaf845e6c6f7de5708
| 49,987
|
def _as_range_str(iterable):
"""
Return a string representing the range as a string span.
"""
l = list(iterable)
if len(l) > 1:
return '{0}-{1}'.format(l[0], l[-1])
return '{0}'.format(l[0])
|
cab3c70297fc8f75ab6b5d7350462de9d1501a4d
| 49,990
|
def trapez(f, a, b, n):
    """
    Trapezoidal-rule approximation of the integral of ``f`` over [a, b].

    f -- function (integrand)
    a -- lower boundary
    b -- upper boundary
    n -- number of sections
    returns approximate integral using the trapezoidal method
    """
    h = (b - a) / float(n)
    # Sum the integrand at the n-1 interior grid points.
    interior = sum(f(a + k * h) for k in range(1, n))
    return h * (f(a) + f(b) + 2.0 * interior) / 2.0
|
79035891f41f01ec7244e218125cf372c9de50c0
| 49,993
|
import math
def image_entropy(im):
    """
    Shannon entropy (in bits) of an image's histogram.
    Used for "smart cropping".
    """
    hist = im.histogram()
    total = float(sum(hist))
    probabilities = (count / total for count in hist)
    return -sum(p * math.log(p, 2) for p in probabilities if p != 0)
|
356cd2b1eeb671c5e3e82ddd8a6e2e8c73f38758
| 49,996
|
def set_overlap(source_set, target_set):
    """Compute the overlap score between a source and a target set.

    The score is the size of the intersection divided by the size of
    the target set, so it always lies in [0, 1].
    """
    shared = target_set.intersection(source_set)
    score = len(shared) / float(len(target_set))
    assert 0. <= score <= 1.
    return score
|
1feb0f25d711f23c62594d18316f7c96750b42c0
| 49,999
|
def get_ranx0(rng):
    """
    Uniformly sample a starting point from the feasible space.

    Parameters
    ----------
    rng : prng.MRG32k3a object
        Generator supplying ``choice``.

    Returns
    -------
    x0 : tuple of int
        The randomly chosen point (9 coordinates in [0, 100)).
    """
    tau, q = 100, 9
    support = range(tau)
    return tuple(rng.choice(support) for _ in range(q))
|
578c0964877f4f1560af9cea3136b04f43b9fed4
| 50,001
|
def sec_to_time(sec):
    """ Returns the formatted time H:MM:SS.

    Accepts ints and floats; fractional seconds are truncated.  (The
    original ``:d`` format raised ValueError when given a float.)
    """
    sec = int(sec)
    mins, sec = divmod(sec, 60)
    hrs, mins = divmod(mins, 60)
    return f"{hrs:d}:{mins:02d}:{sec:02d}"
|
cffe3060a1a98f3dbba53da4666175994c606a31
| 50,002
|
def parse_dec_or_hex(string):
    """Parse *string* as hexadecimal when it carries an '0x' prefix,
    otherwise as decimal."""
    if string.startswith('0x'):
        return int(string, 16)
    return int(string, 10)
|
90c8ee433245d0b5c2cc5d41c0007a5dda8039a6
| 50,009
|
def make_arguments(action: str) -> dict:
    """
    Build the default argument dictionary for an action.

    Parameters
    ----------
    action : string
        The action to execute
    """
    keys = ("action", "file", "folder", "device",
            "all", "move_path", "from_device")
    values = (action, None, None, None, False, None, None)
    return dict(zip(keys, values))
|
2661ec75813af047ccf4f565153d03d0d1f53ab8
| 50,014
|
def calc_company_check_digit(number):
    """Calculate the check digit for the 10-digit ИНН for organisations."""
    weights = (2, 4, 10, 3, 5, 9, 4, 6, 8)
    # Weighted sum of the first nine digits, reduced mod 11 then mod 10.
    total = sum(w * int(d) for w, d in zip(weights, number[:9]))
    return str(total % 11 % 10)
|
33c1ca3209fbe819dcbbd80a7b33df1aa6051216
| 50,020
|
def is_string(in_str):
    # type: (...) -> bool
    """Test whether the value is a Unicode (text) string.

    Examples:
    >>> is_string('abc')
    True
    >>> is_string(u'北京')
    True
    >>> is_string(123)
    False
    >>> is_string(None)
    False
    >>> is_string(['a', 'b'])
    False
    >>> is_string(b'bytes are not text')
    False
    """
    # On Python 3, u''.__class__ (used by the original) is exactly str.
    return isinstance(in_str, str)
|
2b9bba08733119ecc4255c9a8f053d588e97e19d
| 50,021
|
def restoreMember(memberType, name, extra, params, body):
    """Re-creates an XBL member element from parts created by iterateMembers.

    Returns None for unrecognised member types.
    """
    if memberType == "method":
        paramText = "".join(
            """ <parameter name="%s"/>\n""" % param for param in params)
        return """<method name="%s">\n%s <body><![CDATA[%s]]>""" \
            % (name, paramText, body)
    if memberType == "handler":
        return """<handler %s>\n <![CDATA[%s]]>""" % (extra, body)
    if memberType == "property":
        return """<property name="%s">""" % (name)
    # The remaining member types only interpolate the body.
    body_only = {
        "constructor": """<constructor>\n <![CDATA[%s]]>""",
        "destructor": """<destructor>\n <![CDATA[%s]]>""",
        "getter": """<getter>\n <![CDATA[%s]]>""",
        "setter": """<setter>\n <![CDATA[%s]]>""",
    }
    if memberType in body_only:
        return body_only[memberType] % (body)
|
c9a5d816fcd11fd7a8a41723bfb51fd0143174b4
| 50,024
|
def _enable_custom_widget_manager(package_name: str) -> str:
"""Return additional finally block for packages that require a custom widget manager."""
if any([requires_custom_widget_manager in package_name for requires_custom_widget_manager in (
"itkwidgets", )]):
return "\n" + """finally:
import google.colab
google.colab.output.enable_custom_widget_manager()"""
else:
return ""
|
51bc09baf516cc6fd010d8ca3ea4f2a030dbe111
| 50,027
|
from pathlib import Path
def change_file_extensions_to_tif(each_file_dict: dict, file_extensions_to_protect_from_changing_to_tif: list) -> dict:
    """Change all file extensions to .tif except those defined as protected.

    Rewrites the extension found in the 'id', 'filePath', 'description'
    and 'title' fields (when present), mutating and returning the dict.
    """
    for key in ('id', 'filePath', 'description', 'title'):
        if key not in each_file_dict:
            continue
        value = each_file_dict[key]
        extension = Path(value).suffix
        if extension and '.' in extension and extension.lower() not in file_extensions_to_protect_from_changing_to_tif:
            each_file_dict[key] = value.replace(extension, '.tif')
    return each_file_dict
|
97fec7c642dfca6eff60f31efff5410bc41a4ee6
| 50,028
|
def make_change(amount):
    """
    Assumes ``amount`` is an integer > 0, in cents.
    Returns the smallest number of coins whose values add up to amount.
    * Possible coins are 100, 50, 25, 10, 5, 1 cents
    * A greedy algorithm from largest to smallest coin is optimal here.
    """
    counts = []
    for coin in (100, 50, 25, 10, 5, 1):
        if amount >= coin:
            n = amount // coin
            r = amount - coin * n
            print(f"valor ={amount} | notas ={n} | nota ={coin} | resto ={r}")
            amount = r
            counts.append(n)
    print(counts)
    return sum(counts)
|
789460ac42fc3ad005fc67e67325a07d4e539122
| 50,029
|
import hashlib
def md5_hash(file_name):
    """Compute and return the hex MD5 digest of a file.

    Reads in chunks (bounded memory for large files) and closes the
    handle deterministically — the original leaked the file object it
    opened inline and read the whole file at once.
    """
    digest = hashlib.md5()
    with open(file_name, 'rb') as fobj:
        for chunk in iter(lambda: fobj.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()
|
678ee1663128ff175b997ef144edd11718a82037
| 50,037
|
def bayesian_targeting_policy(tau_pred, contact_cost, offer_accept_prob, offer_cost, value=None):
    """
    Apply the Bayesian optimal decision framework to make a targeting decision.

    The decision to target is made when the expected profit increase from
    targeting is strictly larger than the expected cost of targeting.

    tau_pred : array-like
        Estimated treatment effect for each observation. Typically the effect
        on the expected profit. If tau_pred is the treatment effect on
        conversion, 'value' needs to be specified.
    contact_cost : float or array-like
        Static cost that realizes independent of outcome
    offer_accept_prob : float or array-like
        Probability that the offer is accepted
    offer_cost : float or array-like
        Cost that realizes when the offer is accepted
    value : float or array-like, default: None
        Value of the observations in cases where tau_pred is the
        change in acceptance probability (binary outcome ITE)
    """
    # `is not None` instead of truthiness: `if value:` raised
    # "truth value is ambiguous" for numpy arrays and silently
    # ignored a legitimate value of 0.
    if value is not None:
        tau_pred = tau_pred * value
    return (tau_pred > (offer_accept_prob * offer_cost - contact_cost)).astype('int')
|
eb22fafd73acd9bcb75114dd18391f865a32f112
| 50,038
|
def _to_encode(s, encoding='utf-8'):
"""
Convert string to unicode as default.
>>> _to_encode('中文') == u'\u4e2d\u6587'
True
"""
return s.decode(encoding)
|
ff61046c36a07d3bc880a4d80fd54d74d96b7817
| 50,039
|
def parse_authors(auth_str):
    """Return the list of author last names from a string of the form
    `last_name, first_initial and last_name, first_initial and ...`.

    :param auth_str: string containing all author names as exported by
        EndNoteX9 default BibTex export
    :type auth_str: str
    :return: list of author last names
    :rtype: list of str
    """
    return [author.split(',')[0] for author in auth_str.split(' and ')]
|
a56236c05b72930430e865e9846674b975e5ce29
| 50,040
|
def _extract_dir_data(path):
"""
Expects the directory to be named like: "tmp_<run>_with_<images in run>"
Args:
path: Path of the directory.
Returns:
The run index and image count per run.
"""
split = path.stem.split("_")
run = int(split[1])
count_per_run = int(split[3])
return run, count_per_run
|
748a6b8facd360cb7491d0d6d66c8621cbd93f71
| 50,045
|
def normalize(X, lb, ub):
    """ Normalize data between 0 and 1.

    # Arguments:
        X: Input data (scalar/vector/matrix)
        lb: Lower boundary (scalar/vector)
        ub: Upper boundary (scalar/vector)
    # Return:
        X normalized (scalar/vector/matrix)
    """
    span = ub - lb
    return (X - lb) / span
|
5baf9d1fdb0824418bc52ebec872c95befa5a72d
| 50,052
|
def compute_time_difference_between_tweets(tweet_times):
    """
    Compute the time intervals between successive tweet times.

    Each interval is (earlier - later) in epoch seconds, matching the
    original ordering; a leading 0 keeps the output the same length as
    the input.

    :param tweet_times: Times of successive tweets
    :return: Time intervals between consecutive tweets
    """
    intervals = [0]
    for current, following in zip(tweet_times, tweet_times[1:]):
        intervals.append(current.timestamp() - following.timestamp())
    return intervals
|
14cb59d575d08de3160aa644e7b4cd369cd132f3
| 50,065
|
import time
def ctime(val):
    """ Convert time in milliseconds since the epoch to a formatted string """
    seconds = int(val) // 1000
    return time.ctime(seconds)
|
0512a79ebaccc19422c797cb00e509494d0debf2
| 50,071
|
import asyncio
async def py_normal(title: str):
    """Normal exposed function.

    Flow: print "Enter" -> async sleep 10 sec -> print "Exit" -> return True.
    Because the sleep is awaited (not blocking), the event loop can keep
    responding to calls from other clients while one call is in flight.

    Parameters
    ----------
    title: str
        Characters to print out on the server

    Returns
    ----------
    True
    """
    print(f'Enter py_normal: {title}')
    await asyncio.sleep(10)
    print(f'Exit py_normal: {title}')
    return True
|
27c58d8906a344374f66ed7046385cf1c614063e
| 50,075
|
def to_rgba_bytes(v: int) -> bytes:
    """
    Pack an RGBA color int into 4 raw bytes, most significant channel first.
    """
    return bytes((v >> shift) & 0xFF for shift in (24, 16, 8, 0))
|
242dbe2505c83513a32d1de34be0d5dfe34eabfa
| 50,079
|
def prefixCombiner(prefix, itemlist, glue=''):
    """Returns a list of items where each element is prepended by the given
    prefix (joined with ``glue``)."""
    return [prefix + glue + item for item in itemlist]
|
be17cd03d246c03abadc932b296726e59c4ef667
| 50,082
|
def add_source_init(module):
    """Looks for a function named ``source_init`` in ``module``;
    if found, returns its result as a string, else the empty string.
    """
    _missing = object()
    source_init = getattr(module, "source_init", _missing)
    # Sentinel (not None) so an attribute explicitly set to None still
    # takes the call path, exactly like the original hasattr check.
    return "" if source_init is _missing else source_init()
|
deec406ec3ff1c91557514de6eebad0090977665
| 50,083
|
def build_list(comments, p_id):
    """Takes a query set of comments and a parent id and returns a list of
    comments in depth-first threaded order: each comment is immediately
    followed by its children, recursively (first toplevel comment, then its
    first child, that child's children, and so on).
    """
    comment_list = []
    for comment in comments.filter(nparent_id=p_id):
        comment_list.append(comment)
        # Recursing on a childless comment returns [], so the original's
        # separate "no children" branch (which also just appended) was
        # redundant and has been folded away.
        comment_list.extend(build_list(comments, comment.id))
    return comment_list
|
8c8e2cfc2168dc27a070cc6b2d3b8086ad9cd352
| 50,084
|
from typing import List
from pathlib import Path
def textfile_to_list(filename: str) -> List[str]:
    """Reads a text file and returns a list of its non-empty lines, stripped.

    Args:
        filename: name of the text file
    Returns:
        list of the non-empty lines.
    """
    with Path(filename).open() as fhandle:
        return [line.strip() for line in fhandle if line.strip()]
|
3d26740706cbb91c5d9c9c8b60b6224b187c7899
| 50,087
|
def has_tokens(node):
    """Has the node any tokens?"""
    # node.get_tokens() is a generator; any() stops at the first token
    # (token objects always evaluate to True).
    token_stream = node.get_tokens()
    return any(token_stream)
|
85abb999cf3641b36120f696e8517b7bf07ac52d
| 50,088
|
def get_bright_thresh(medianvals):
    """Get the brightness thresholds for SUSAN: 75% of each median value."""
    thresholds = []
    for val in medianvals:
        thresholds.append(0.75 * val)
    return thresholds
|
53251094d223cb64ce4a780ed2ff4f59cd6c4a73
| 50,090
|
def _create_dscfg_dict(cfg, dataset):
"""
creates a dataset configuration dictionary
Parameters
----------
cfg : dict
config dictionary
dataset : str
name of the dataset
Returns
-------
dscfg : dict
dataset config dictionary
"""
dscfg = cfg[dataset]
# Path related parameters
dscfg.update({'configpath': cfg['configpath']})
dscfg.update({'basepath': cfg['saveimgbasepath']})
dscfg.update({'path_convention': cfg['path_convention']})
dscfg.update({'procname': cfg['name']})
dscfg.update({'dsname': dataset})
dscfg.update({'solarfluxpath': cfg['solarfluxpath']})
dscfg.update({'colocgatespath': cfg['colocgatespath']})
dscfg.update({'excessgatespath': cfg['excessgatespath']})
dscfg.update({'dempath': cfg['dempath']})
dscfg.update({'cosmopath': cfg['cosmopath']})
dscfg.update({'CosmoRunFreq': cfg['CosmoRunFreq']})
dscfg.update({'CosmoForecasted': cfg['CosmoForecasted']})
dscfg.update({'metranet_read_lib': cfg['metranet_read_lib']})
dscfg.update({'lastStateFile': cfg['lastStateFile']})
dscfg.update({'timeinfo': None})
# Instrument parameters
dscfg.update({'RadarName': cfg['RadarName']})
dscfg.update({'ScanPeriod': cfg['ScanPeriod']})
dscfg.update({'lrxh': cfg['lrxh']})
dscfg.update({'lrxv': cfg['lrxv']})
dscfg.update({'ltxh': cfg['ltxh']})
dscfg.update({'ltxv': cfg['ltxv']})
dscfg.update({'lradomeh': cfg['lradomeh']})
dscfg.update({'lradomev': cfg['lradomev']})
# PAR and ASR variable
if 'par_azimuth_antenna' in cfg:
dscfg.update({'par_azimuth_antenna': cfg['par_azimuth_antenna']})
if 'par_elevation_antenna' in cfg:
dscfg.update({'par_elevation_antenna': cfg['par_elevation_antenna']})
if 'asr_highbeam_antenna' in cfg:
dscfg.update({'asr_highbeam_antenna': cfg['asr_highbeam_antenna']})
if 'asr_lowbeam_antenna' in cfg:
dscfg.update({'asr_lowbeam_antenna': cfg['asr_lowbeam_antenna']})
if 'target_radar_pos' in cfg:
dscfg.update({'target_radar_pos': cfg['target_radar_pos']})
# indicates the dataset has been initialized and aux data is available
dscfg.update({'initialized': False})
dscfg.update({'global_data': None})
# Convert the following strings to string arrays
strarr_list = ['datatype', 'FIELDS_TO_REMOVE']
for param in strarr_list:
if param in dscfg:
if isinstance(dscfg[param], str):
dscfg[param] = [dscfg[param]]
# variables to make the data set available in the next level
if 'MAKE_GLOBAL' not in dscfg:
dscfg.update({'MAKE_GLOBAL': 0})
if 'SUBSTITUTE_OBJECT' not in dscfg:
dscfg.update({'SUBSTITUTE_OBJECT': 0})
if 'FIELDS_TO_REMOVE' not in dscfg:
dscfg.update({'FIELDS_TO_REMOVE': None})
return dscfg
|
dccf2fb826a66a5704ff6fc092b3d9bd7e0982f8
| 50,098
|
import json
def get_set_fields_command(editor, *, field_overrides=None):
    """
    Build the JavaScript ``setFields(...)`` command that sets the note
    fields in the UI.

    Based on editor.loadNote, but only sets the fields rather than
    everything else.

    field_overrides can override the current value for a field, ignoring
    whatever value the note holds.  Needed because the note data may be
    stale compared to the UI, which has the most up-to-date field value.
    """
    overrides = field_overrides or {}
    escape = editor.mw.col.media.escape_media_filenames
    data = [
        (fld, escape(overrides.get(fld, val)))
        for fld, val in editor.note.items()
    ]
    return "setFields({});".format(
        json.dumps(data)
    )
|
c0e0fe0db499a0b0b048baba58b92f23ae09f1c1
| 50,103
|
def sieve_of_eratosthenes(n):
    """
    Find all prime numbers strictly below ``n`` with the classic sieve.

    :param n: upper limit; the returned primes are < n
    """
    is_prime = [True] * (n + 1)
    p = 2  # smallest prime
    while p * p <= n:
        if is_prime[p]:
            # strike out every multiple of p starting at 2p
            for multiple in range(p * 2, n + 1, p):
                is_prime[multiple] = False
        p += 1
    return [num for num in range(2, n) if is_prime[num]]
|
5f108c7264ac1cf7c32abe718b4bc148879c19cb
| 50,107
|
def get_realization_created_nodes(realization_created_nodes, node_id):
    """Helper returning the (node_id, realization) pairs recorded for
    ``node_id`` when the node was first created."""
    return [(org_id, realization)
            for org_id, realization in realization_created_nodes
            if org_id == node_id]
|
0b1204de94404ac4ccc779d8285168c332323023
| 50,108
|
def list_usages(client, resource_group_name, account_name):
    """
    List usages for an Azure Cognitive Services account.
    """
    usages = client.get_usages(resource_group_name, account_name)
    return usages.value
|
1ad73a4f1926895afc768f8f63341ff1a652ebf0
| 50,111
|
def query_filters(composed):
    """
    Given a composed query, return a flat list of all filters; for
    migration purposes, we only really need the first.
    """
    return [rfilter for group in composed for rfilter in group]
|
3f62b9a5f9745330832926bc1b72b3daf66239ef
| 50,115
|
def parse_float(float_str, default=0):
    """Parses float_str and returns the value if valid.

    Args:
        float_str: Value to parse as float.
        default: Value to return if float_str is not valid.

    Returns:
        Parsed float value if valid, or default.
    """
    try:
        return float(float_str)
    except (TypeError, ValueError):
        # TypeError covers non-string/non-numeric inputs such as None,
        # which the original ValueError-only handler let propagate.
        return default
|
156f1e12c3e74f3be0452c5f4122c142252d5026
| 50,118
|
def get_projectname() -> str:
    """Prompt the user on stdin with "Project name: " and return the
    entered project name verbatim."""
    project_name = input("Project name: ")
    return project_name
|
3f28d71ca78bd8e3ef39d11d4bd0c74d390e8f1f
| 50,122
|
def pixels_to_EMU(value):
    """Convert pixels to English Metric Units (1 pixel = 9525 EMUs)."""
    EMU_PER_PIXEL = 9525
    return int(value * EMU_PER_PIXEL)
|
c9be7deacae47819ab30d5589dbae555124d6409
| 50,124
|
def decimal_normalize(value):
    """
    Normalize a Decimal value, e.g. trimming trailing zeros (1.500 -> 1.5).
    """
    normalized = value.normalize()
    return normalized
|
e9737bcb3d0b09a247ec89c3db257ca62550d5c6
| 50,129
|
from typing import Dict
from typing import Any
from typing import List
def set_user_defined_floats(fha: Dict[str, Any], floats: List[float]) -> Dict[str, Any]:
    """Set the user-defined float values for the user-defined calculations.

    The first three keys of ``fha`` receive the corresponding entries of
    ``floats``; when ``floats`` is shorter, the remaining keys default
    to 0.0.

    :param fha: the functional hazard assessment dict.
    :param list floats: the list of float values.
    :return: fha; the functional hazard assessment dict with updated float
        values.
    :rtype: dict
    """
    for idx, key in enumerate(list(fha.keys())[:3]):
        try:
            fha[key] = float(floats[idx])
        except IndexError:
            # Fewer floats than keys: pad this key with 0.0.  (The original
            # also hit IndexError when fha itself had fewer than 3 keys and
            # then clobbered the *previous* key with 0.0.)
            fha[key] = 0.0
    return fha
|
9677337a413eb3f79b7745a0c2546d781c22ee43
| 50,132
|
def explode_tokens(tokenlist):
    """
    Turn a list of (token, text) tuples into another list where each string is
    exactly one character.

    :param tokenlist: List of (token, text) tuples.
    """
    return [(token, char) for token, text in tokenlist for char in text]
|
123d7788bfb8e47c1ae9618142d9785690f833fe
| 50,137
|
def _is_option_provided(config, option, empty_value_is_valid=False):
"""
Checks whether a parameter is provided in the config. If empty value is not valid then the option will be considered
as not provided if no value is provided for the option.
"""
if not config.has_configuration_option(option):
return False
elif (config.get_configuration_option(option) == '') and (empty_value_is_valid is False):
return False
return True
|
eacc6132470cc97fd167b6b97ebd022b1742e0f7
| 50,139
|
import re
def strip_var_name(var_name):
    """Strips variable name of sub-strings blocking variable name matching.

    Removes sub-strings that should be ignored when matching checkpointed
    variable names to variable names in the training graph, namely:
    - trailing colon + number, e.g. "W:0" --> "W"
    - partitioning info., e.g. "/a/part_12/b" --> "a/b".
    (Checkpointed variables have no partitioning info in their names,
    while model variables do.)

    Args:
        var_name: str, variable name.

    Returns:
        stripped variable name.
    """
    # First the ":<output index>" suffix, then any "/part_<n>" segments.
    for pattern in (r':\d+$', r'/part_\d+'):
        var_name = re.sub(pattern, '', var_name)
    return var_name
|
c7e9ee2a1eae8ff1c5e1bff45cd5aa2c8dcf6012
| 50,142
|
def extractdata(line):
    """Parse one comma-separated data line into (x, y, ref).

    8-field lines carry an explicit reference value in field 7;
    6-field lines get a default reference of 1.  Anything else is
    reported to stdout and yields None (implicit).
    """
    fields = line.split(',')
    if len(fields) == 8:
        return float(fields[3]), float(fields[5]), float(fields[7])
    if len(fields) == 6:
        return float(fields[3]), float(fields[5]), 1
    print("Houston, we have a problem, This line does not appear to be data!:")
    print(line)
|
c7cb553bedef333cf9950588757906c7f114d535
| 50,143
|
def extract_task_info(luigi_task):
    """Extract task name and generate a routine id from a luigi task, from
    the date and test fields.

    Args:
        luigi_task (luigi.Task): Task to extract test and date parameters from.
    Returns:
        {test, routine_id} (tuple): Test flag, and routine ID for this task.
    """
    if 'test' in luigi_task.__dict__:
        test = luigi_task.test
    else:
        test = not luigi_task.production
    routine_id = '{}-{}-{}'.format(
        type(luigi_task).__name__, luigi_task.date, test)
    return test, routine_id
|
134e740c7d228c5a078cfe6a5c7a493ad394c0e0
| 50,145
|
def split_evr(version):
    """
    Return a tuple of (epoch, version, release) from a version string.

    The epoch precedes '~' (defaulting to '0'); the release follows the
    last '-' (or is None when absent).
    """
    epoch = '0'
    if '~' in version:
        epoch, version = version.split('~', 1)
    release = None
    if '-' in version:
        version, release = version.rsplit('-', 1)
    return epoch, version, release
|
9e437c473b3fb0275f62b5fbf9dad64058d56b50
| 50,146
|
def get_set_of_list_and_keep_sequence(list):
    """ Return the unique items of the specified list, preserving their
    first-seen order.  (Parameter name kept for caller compatibility.)"""
    seen = set()
    unique = []
    for item in list:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
|
2730a866d76318f0a4cee1fec6c19c8d562bb2fb
| 50,147
|
import torch
from typing import Tuple
from typing import Optional
def split_features(x: torch.Tensor) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
    """Split a complete point cloud into xyz coordinates and features.

    The first 3 channels become (B, N, 3) xyz coordinates; any remaining
    channels stay as (B, C-3, N) features, or None when absent.
    """
    xyz = x[:, :3, :].transpose(1, 2).contiguous()
    if x.size(1) > 3:
        features = x[:, 3:, :].contiguous()
    else:
        features = None
    return xyz, features
|
2a60afd8fdb1c7b2be0fe1a7a6bac2fbe5ce5069
| 50,148
|
def unzip(zipped_list, n):
    """Return n sequences with the elems of zipped_list un-zipped.

    The general case could be solved with zip(*zipped_list), but this also:
    - un-zips an empty list into n *independent* empty lists
    - ensures all zipped items in zipped_list have length n, raising
      ValueError if not.
    """
    if not zipped_list:
        # Distinct list objects: the original `[[]] * n` aliased ONE list
        # n times, so mutating any "column" mutated them all.
        return tuple([] for _ in range(n))
    if not all(isinstance(item, tuple) and len(item) == n
               for item in zipped_list):
        raise ValueError
    return zip(*zipped_list)
|
73e9774ca196dd358d5f6a5fbd78ad60ef3ed1ff
| 50,150
|
def colWidth(collection, columnNum):
    """Compute the required width of a column in a collection of row-tuples:
    the longest cell plus a fixed padding."""
    MIN_PADDING = 5
    longest = max(len(row[columnNum]) for row in collection)
    return MIN_PADDING + longest
|
9a300106cf57fa6a78af37caa3f6b2a74c3e5b2c
| 50,155
|
import uuid
import hashlib
def generate_uuid(basedata=None):
    """Provides a random UUID with no input, or a UUID4-format MD5 checksum
    of any input data provided.

    :param str basedata: provided data to calculate a uuid
    """
    if basedata is None:
        return str(uuid.uuid4())
    if isinstance(basedata, str):
        digest = hashlib.md5(basedata.encode()).hexdigest()
        return str(uuid.UUID(digest))
    raise TypeError("The 'basedata' must be string or None")
|
60d4f696c796f8ffcc0353e4b71f0e108a13986d
| 50,157
|
import builtins
def max(*args, **kwargs):
    """
    Add support for 'default' kwarg.
    >>> max([], default='res')
    'res'
    >>> max(default='res')
    Traceback (most recent call last):
    ...
    TypeError: ...
    >>> max('a', 'b', default='other')
    'b'
    """
    _sentinel = object()
    fallback = kwargs.pop('default', _sentinel)
    try:
        return builtins.max(*args, **kwargs)
    except ValueError as exc:
        # Only swallow the empty-sequence error, and only when the
        # caller actually supplied a default.
        if fallback is not _sentinel and 'empty sequence' in str(exc):
            return fallback
        raise
|
d46610fd653c416f9084e2ae3f5f8cd02424cb9b
| 50,161
|
def unwrap_args(args_ns):
    """
    "Unwrap" the given `argparse` namespace into a dict, replacing any
    list value with its first element.

    :type args_ns: argparse.Namespace
    :rtype: dict
    """
    return {
        key: (value[0] if isinstance(value, list) else value)
        for key, value in vars(args_ns).items()
    }
|
017708d7d8695a5920586c416083e8b99e791a33
| 50,163
|
def is_start_state(state):
    """
    Check whether the given state is a start state: position 0, heading 'N'.
    """
    at_origin = state.g_pos.value == 0
    facing_north = state.theta.value == 'N'
    return at_origin and facing_north
|
56e7db462a3e971fd4f894f70d2efd66188f3405
| 50,165
|
def being_declared(string):
    """
    Helper used to see whether a function or subroutine is being defined,
    by checking the line for declaration-related keywords.

    :param string: the string being checked against the forbidden words
    :return: True when the line looks like a declaration, else False
    """
    keywords = ('write', 'function', 'subroutine',
                'character', 'if', 'result')
    return any(word in string for word in keywords)
|
a2346eebcd3832c5db640a05bc6435a99c9127ea
| 50,168
|
def _find_pool(ip_addr, ipv4_pools):
"""
Find the pool containing the given IP.
:param ip_addr: IP address to find.
:param ipv4_pools: iterable containing IPPools.
:return: The pool, or None if not found
"""
for pool in ipv4_pools:
if ip_addr in pool.cidr:
return pool
else:
return None
|
f12d4ee4d1f73ff054b0194d31744d31c1f58ad2
| 50,169
|
def VolumetricFlow(self):
    """Volumetric flow (m^3/hr).

    Unpacks (stream, mol, phase) from ``self.data`` and uses ``self.name``
    as the compound object, copying the stream's T and P plus the phase
    onto it before evaluating its molar volume ``Vm``.

    NOTE(review): mutates the compound's T, P and phase in place as a side
    effect; assumes ``mol`` is a 1-element sequence (only mol[0] is used)
    and that ``c.Vm * mol[0] * 1000.`` yields m^3/hr — confirm the unit
    conventions against the stream/compound classes.
    """
    stream, mol, phase = self.data
    if mol:
        c = self.name  # c: compound
        c.T = stream.T
        c.P = stream.P
        c.phase = phase
        return (c.Vm * mol[0] * 1000.)
    else:
        return 0.
|
e4a15301a99d43c346df9be465f8bc1a45abe3d7
| 50,171
|
def make_pairs(left_list, right_list, exclude_doubles=False):
    """Takes two lists of words and returns all pairs that can be formed
    between them.  With ``exclude_doubles`` pairs of identical words are
    dropped."""
    pairs = []
    for left_word in left_list:
        for right_word in right_list:
            if exclude_doubles and left_word == right_word:
                continue
            pairs.append([left_word, right_word])
    return pairs
|
19bc84b82d59512c75eb78042740f6ad2f094c05
| 50,175
|
def choices_to_dict(t):
    """
    Converts a ChoiceField two-tuple to a dict (for JSON).
    Assumes each entry's first element is unique once stringified.
    """
    return {str(entry[0]): entry[1] for entry in t}
|
38e77f3a8389d03943cf29d120818a234573afc5
| 50,178
|
def get_colab_github_url(relative_path: str, repository: str, branch: str) -> str:
    """Get the URL that a file will have on Google Colab when hosted on GitHub."""
    base = "https://colab.research.google.com/github"
    return "/".join([base, repository, "blob", branch, relative_path])
|
3a1bc73294e1e4f77a6631b71fdd0d05d1ba6400
| 50,180
|
def poissons_ratio(vp, vs):
    """
    Calculate Poisson's Ratio based on the definition given in the Specfem3D
    source code.

    :type vp: float or np.array
    :param vp: P-wave velocity
    :type vs: float or np.array
    :param vs: S-wave velocity
    :rtype: float or np.array
    :return: Poissons ratio
    """
    vp_sq = vp * vp
    vs_sq = vs * vs
    return 0.5 * (vp_sq - 2 * vs_sq) / (vp_sq - vs_sq)
|
1f90fe04bb326c1117a38fd46c48347c6d5577bc
| 50,187
|
import math
def order_of_magnitude(x):
    """Calculate the decimal order of magnitude.

    Parameters
    ----------
    x : float
        Number of which to calculate the decimal order of magnitude.

    Returns
    -------
    int
        Order of magnitude of `x`; 0 when `x` is zero (or otherwise falsy).
    """
    if not x:
        return 0
    return int(math.floor(math.log10(abs(x))))
|
53c0fcfbdb993e2a1c584d416e79c59112f0ceba
| 50,192
|
def bits_to_number(bits):
    """Convert a big-endian sequence of bits back to the positive number
    it represents (0 for an empty sequence)."""
    value = 0
    for bit in bits:
        value = value * 2 + bit
    return value
|
9280170a3bfbad88363cd886384a2253e83d5db9
| 50,193
|
from typing import Any
from pathlib import Path
import json
def read_json_file(path: str) -> Any:
    """Load the JSON content of a file.

    Args:
        path (str): path of the file to read.
    Returns:
        [Any]: the deserialized content of the file.
    Raises:
        RuntimeError: when ``path`` does not name an existing file.
    """
    file_path = Path(path)
    if file_path.is_file():
        with file_path.open('r') as handle:
            return json.load(handle)
    raise RuntimeError(f'Invalid file to read {path}')
|
cab88d63721faa9e94704abfeb885d7f6eceaa63
| 50,195
|
def smart_resize(image, max_size=2000):
    """
    Resize image so its larger dimension (width or height) becomes
    ``max_size``, adjusting the other dimension to keep the aspect ratio.

    :param image: Pillow.Image object
    :param max_size: maximum value of width or height in pixels.
    :rtype Pillow.Image:
    """
    if image.width >= image.height:
        # Landscape (or square): pin the width.
        width = max_size
        height = int(width / image.width * image.height)
    else:
        # Portrait: pin the height.
        height = max_size
        width = int(height / image.height * image.width)
    return image.resize((width, height))
|
eb9f8e6a0407eacecce17b12b850544c42009cf3
| 50,196
|
from typing import FrozenSet
from typing import List
def get_weights(solution: FrozenSet[int], weights: List[int]):
    """
    Calculates and returns the total weight of a given knapsack solution.

    Items in ``solution`` are 1-based indices into ``weights``.

    :param solution: Knapsack solution consisting of packed items
    :param weights: weight of each item (0-based list, indexed by item - 1);
        the original docstring incorrectly called this "profit"
    :return: Total weight of given knapsack solution
    """
    return sum(weights[item - 1] for item in solution)
|
fcd76946a4dd269324bf4e438b83d3f52afb3582
| 50,197
|
def est_centroid_spectrum(pow_spec, freq):
    """Estimate the centroid of the power spectrum (power-weighted mean
    frequency)."""
    weighted_sum = (pow_spec * freq).sum()
    return weighted_sum / pow_spec.sum()
|
f92ea45c7835031d03fdc84292c1e4f35a27ccec
| 50,199
|
def read_samples(fn):
    """Read samples from the header of a GFF file.

    Args:
        *fn(str)*: GFF file to read.
    Returns:
        *(list)*: list of sample names from the ``## COLDATA`` header line.
    Raises:
        ValueError: when the file has no ``## COLDATA`` header.
    """
    with open(fn) as handle:
        for line in handle:
            if not line.startswith("## COLDATA"):
                continue
            # Header looks like "## COLDATA: name1,name2,..."
            return line.strip().split(": ")[1].strip().split(",")
    raise ValueError("%s doesn't contain COLDATA header." % fn)
|
e48972fecdbf63abc1ba9398fa95126739dcc324
| 50,202
|
def unpack_int(buffer, ptr, length):
    """ Unpack a little-endian unsigned int of ``length`` bytes starting at
    ``ptr`` and return (value, advanced pointer). """
    end = ptr + length
    value = int.from_bytes(buffer[ptr:end], 'little', signed=False)
    return value, end
|
18afc5db9e212b982bea2839ea7bb3600ce27649
| 50,203
|
def make_print_msg_specific(
        integration_rate, starting_msg, err_type):
    """
    Function is used to make the print message more specific
    by replacing 'generic' phrases with phrases that more
    completely explain the data quality issue at play.

    The incoming message uses the generic phrase 'of data' as a
    placeholder, with a trailing '^' marking the first (most
    informative) occurrence; every '^' marker is stripped before the
    message is returned.

    :param
    integration_rate (bool): determines if the data quality
        metric to be printed is an 'integration rate' rather
        than a problem with data quality. This warrants a
        change in how the message will be printed.

    starting_msg (str): the message to build off that
        will ultimately be displayed

    err_type (str): indicates the type of error metric that is
        being reported. Used to change what is printed so it is
        more appropriate.

    :return:
    starting_msg (str): the message with the data quality issue
        that now has a more specific indicator for the
        problem at hand
    """
    # Each branch rewrites the '^'-marked first occurrence verbosely
    # (both the ')^' and '),^' variants) and then shortens any remaining
    # unmarked occurrences so the message does not get overly long.
    if integration_rate:
        # only one issue; make first informative
        starting_msg = starting_msg.replace(
            'of data)^', 'of expected concepts are not '
            'successfully integrated)')
        # series of issues; make first informative
        starting_msg = starting_msg.replace(
            'of data),^', 'of expected concepts are not '
            'successfully integrated),')
        # do not make non-first messages overly long
        starting_msg = starting_msg.replace(
            'of data', ' of concepts not integrated')
    elif err_type in ['concept']:
        starting_msg = starting_msg.replace(
            'of data)^', 'of concept_ids are not '
            'properly mapped)')
        starting_msg = starting_msg.replace(
            'of data),^', 'of concept_ids are not '
            'properly mapped),')
        starting_msg = starting_msg.replace(
            'of data', 'of concept_ids')
    elif err_type in ['drug_routes']:
        starting_msg = starting_msg.replace(
            'of data)^', 'of route_concept_ids '
            'are not properly populated)'
        )
        starting_msg = starting_msg.replace(
            'of data),^', 'of route_concept_ids '
            'are not properly populated),'
        )
        starting_msg = starting_msg.replace(
            'of data', 'of drugs'
        )
    elif err_type in ['end_before_begin']:
        starting_msg = starting_msg.replace(
            'of data)^', 'of end dates precede '
            'start dates')
        starting_msg = starting_msg.replace(
            'of data),^', 'of end dates precede '
            'start dates')
    elif err_type in ['drug_success']:
        starting_msg = starting_msg.replace(
            'of data)^', 'of drug ingredients '
            'are properly populated)'
        )
        starting_msg = starting_msg.replace(
            'of data),^', 'of drug ingredients '
            'are properly populated),'
        )
        starting_msg = starting_msg.replace(
            'of data', 'of drugs'
        )
    elif err_type in ['sites_measurement']:
        starting_msg = starting_msg.replace(
            'of data)^', 'of measurement concepts '
            'are properly populated)'
        )
        starting_msg = starting_msg.replace(
            'of data),^', 'of measurement concepts '
            'are properly populated),'
        )
        starting_msg = starting_msg.replace(
            'of data', 'of measurements'
        )
    # get rid of lingering underscores ('^' first-occurrence markers)
    starting_msg = starting_msg.replace('^', '')
    return starting_msg
|
7d9945cb0efb7d6d7bd541beff10af98d51f4314
| 50,205
|
def count(pipeObj):
    """Count number of passes (pass-managers excluded) in pipeline object."""
    total = 0
    for entry in pipeObj:
        # entry[0] truthy marks a nested pass-manager: recurse into its
        # contents; otherwise the entry is a single pass.
        total += count(entry[1]) if entry[0] else 1
    return total
|
fcb28dcf4e8cb50d57988c1aab852ae9e633d9a9
| 50,207
|
import requests
def _download_index(category, index_url):
    """
    Download the index.

    :param category: suffixed category, e.g. 'filters', 'templates'
    :param index_url: url to the index. Default: 'https://raw.githubusercontent.com/pandoc-extras/packages/master/<category>.yaml'
    :return: the content of the index, which is in YAML
    :raises IOError: when the HTTP response status is not 200
    """
    if index_url is None:
        index_url = 'https://raw.githubusercontent.com/pandoc-extras/packages/master/{}.yaml'
    response = requests.get(index_url.format(category))
    if response.status_code != 200:
        raise IOError("Cannot download index, error {}".format(response.status_code))
    return response.text
|
190a4b39f962cae43d281fa91d1614cbebaa681a
| 50,209
|
import math
def round_mult(val, multiple, direction='round'):
    """Rounds :val: to the nearest :multiple:. The argument :direction: should
    be either 'round', 'up', or 'down' (anything else raises KeyError)."""
    rounders = {'round': round, 'up': math.ceil, 'down': math.floor}
    rounder = rounders[direction]
    return rounder(val / multiple) * multiple
|
53ba98f1c8a4c623c8831e831b21ff689483f58a
| 50,215
|
def extractKmers(X, k=12):
    """Extract the distinct k-mers of length ``k`` found in the sequences
    of X, returned as a list (order unspecified).

    NOTE(review): the sliding-window bound uses len(X[0]), so all
    sequences are assumed to share the first sequence's length — confirm.
    """
    seq_len = len(X[0])
    kmers = {seq[start:start + k]
             for seq in X
             for start in range(seq_len - k + 1)}
    return list(kmers)
|
368f6853109e511e80e85b22e063566baef481ba
| 50,218
|
def get_main_entity_from_question(question_object):
    """
    Retrieve the main Freebase entity linked in the url field.

    Returns () when the question has no url; otherwise a pair of
    (title-cased entity tokens, 'URL').

    :param question_object: A question encoded as a Json object
    :return: A list of answers as strings
    >>> get_main_entity_from_question({"url": "http://www.freebase.com/view/en/natalie_portman", "targetValue": "(list (description \\"Padm\u00e9 Amidala\\"))", "utterance": "what character did natalie portman play in star wars?"})
    (['Natalie', 'Portman'], 'URL')
    >>> get_main_entity_from_question({"url": "http://www.freebase.com/view/en/j_j_thomson"})
    (['J', 'J', 'Thomson'], 'URL')
    >>> get_main_entity_from_question({"targetValue": "(list (description Abduction) (description Eclipse) (description \\"Valentine's Day\\") (description \\"New Moon\\"))"})
    ()
    >>> get_main_entity_from_question({"url": "http://www.freebase.com/view/en/j_j_thomson"})
    (['J', 'J', 'Thomson'], 'URL')
    """
    url = question_object.get('url')
    if not url:
        return ()
    prefix = "http://www.freebase.com/view/en/"
    if prefix in url:
        # Freebase URL: the entity id is the underscore-separated suffix.
        tokens = url.replace(prefix, "").split("_")
    else:
        # Not a Freebase URL: treat the field as whitespace-separated words.
        tokens = url.split()
    return [token.title() for token in tokens], 'URL'
|
23b449fa5c1f370248dd1b28d751a4a4f6553fac
| 50,223
|
def get_band_index(band_name):
    """Get the write-index value for a Sentinel-2 image band.

    For bands 1 through 8 the band number is returned; band 8A maps
    to 9; bands above 8A are shifted up by one.

    Args:
        band_name (str): the name of the band, e.g. "nir - 8A"

    Return:
        int
    """
    _, num = band_name.split(' - ')
    if num.lower() == '8a':
        return 9
    band = int(num)
    return band + 1 if band > 8 else band
|
16197e5303d259b3502cf20255cd68c514215e3c
| 50,224
|
import math
def area_triangulo(s1: float, s2: float, s3: float) -> float:
    """ Area of a triangle from its three side lengths (Heron's formula).

    Parameters:
        s1 (float): length of one side of the triangle
        s2 (float): length of one side of the triangle
        s3 (float): length of one side of the triangle

    Returns:
        float: the triangle's area rounded to one decimal place.
    """
    semi = (s1 + s2 + s3) / 2
    area = math.sqrt(semi * (semi - s1) * (semi - s2) * (semi - s3))
    return round(area, 1)
|
432108ca3ecb238e40c82159aa8b47390b6a85a3
| 50,225
|
def getInterestedRange(message_context):
    """Return a (start, end) pair of character indices for the match in a
    MessageContext; the whole line when there is no match."""
    match = message_context.match
    if match:
        return (match.start(), match.end())
    # No match: the entire line is of interest.
    return (0, len(message_context.line))
|
f173a09a7281bb79f20e7658932d1c7f4e5ddd43
| 50,232
|
def fix_image_ch(img):
    """Move a leading channel axis (size <= 3) to the last position;
    return the image unchanged when the channel is already last."""
    channels_first = img.shape[0] <= 3
    return img.transpose(1, 2, 0) if channels_first else img
|
6e426d1334c79602c308ffc671eacd9302350cd3
| 50,233
|
from typing import Optional
def hex_to_bytearray(hex_str: str) -> Optional[bytearray]:
    """Convert a hexstring (starting with 0x) to a bytearray; None passes
    through unchanged."""
    if hex_str is None:
        return None
    return bytearray.fromhex(hex_str[2:])
|
a8a5bcbe0eb7cc009ffe07bdbe719b473d831862
| 50,234
|
def reindent(s, numSpaces=4, no_empty_lines=False):
    """ Return string s reindented by `numSpaces` spaces.

    Args:
        s (string): string to reindent
        numSpaces (int): number of spaces to shift to the right
        no_empty_lines (bool): if True leave empty lines unindented
            (the original docstring said "remove" but they are kept)

    Returns:
        reindented string
    """
    pad = numSpaces * " "
    shifted = []
    for line in s.splitlines():
        if no_empty_lines and not line:
            shifted.append(line)
        else:
            shifted.append(pad + line)
    return "\n".join(shifted)
|
f9a20f1cc51df3108551050d0afe76ff8fa17e1b
| 50,239
|
import typing
def splitPath(s3Path: str) -> typing.Tuple[str, str]:
    """Split a full S3 path into its bucket and key components.

    Args:
        s3Path: full path of the form ``s3://bucket/key``.

    Returns:
        Tuple of (bucket name, key). The key is '' for a bucket-only
        path such as ``s3://bucket`` (the original implementation
        crashed with an unpacking ValueError in that case).

    Raises:
        ValueError: if the path does not start with ``s3://``.
    """
    if not s3Path.startswith('s3://'):
        raise ValueError('s3Path must begin with "s3://"')
    # partition never fails on a missing '/', unlike split-and-unpack.
    bucketName, _, key = s3Path[5:].partition('/')
    return bucketName, key
|
9d5f84bb7f267f39463d636ed942229e873bfbe8
| 50,244
|
def read_dictionary(filepath):
    """ Reads the word list provided in the challenge and returns it in the
    form of a list of words (one word per line, newlines stripped).
    """
    with open(filepath, 'r') as handle:
        return handle.read().splitlines()
|
152a5972f45228cde7dafab32394018928aa640a
| 50,249
|
def import_data(file):
    """
    Import whitespace-delimited data from a file into a list of rows.

    Each line becomes one list of string tokens (change the argument to
    ``split`` below for a different delimiter such as ",").

    Fixes over the original: the file handle is now closed (``with``),
    and a dead no-op loop plus commented-out debug code were removed.

    :param file: path of the file to read
    :return: list of rows, each a list of string tokens
    """
    with open(str(file), 'r') as handle:
        return [line.split() for line in handle]  # change delimiter here if needed
|
a5c8414d7ffad018e07bd45d46c3b7d0822e822d
| 50,262
|
def firstOccurenceInStr(aList, aString):
    """ Return the first element in aList that is contained in the
    string aString, or None when no element is contained.
    """
    # Bug fix: in the original the `else: return None` was attached to the
    # `if` inside the loop, so the function returned after inspecting only
    # the FIRST element of aList. The None return now happens only after
    # the whole list has been scanned.
    for elem in aList:
        if elem in aString:
            return elem
    return None
|
6f188333111b7efd835519c0f36279aff56632bc
| 50,265
|
def selection_sort(array):
    """ Selection sort implementation (in-place, ascending).

    Arguments:
        - array : (int[]) array of int to sort

    Returns:
        - array : (int[]) the same list, sorted
    """
    for last in range(len(array) - 1, 0, -1):
        # Find the largest element among array[0..last] ...
        largest = 0
        for i in range(1, last + 1):
            if array[i] > array[largest]:
                largest = i
        # ... and move it into its final slot.
        array[largest], array[last] = array[last], array[largest]
    return array
|
02efcbf26e01c36177f05143c332de85a1823ac5
| 50,275
|
from typing import List
from typing import Optional
def _get_array_num_elements(array_dims: List[int],
index_depth: Optional[int] = None):
"""Returns the number of elements in a (nested) array.
Returns the number of elements in a (nested) array with dimensions
'array_dims'. If the array is indexed 'index_depth' times. If 'index_depth'
is not specified, the maximum number of possible indices is assumed.
Args:
array_dims: Array dimensions.
index_depth: Depth of index.
Returns:
The number of elements in a (nested) array with dimensions 'array_dims'.
"""
if index_depth is None:
index_depth = len(array_dims) - 1
elem_count = 1
for idx in range(len(array_dims) - index_depth, len(array_dims)):
elem_count = elem_count * array_dims[idx]
return elem_count
|
f363598442145c0c31c03e5c6918e0db6dee1d19
| 50,278
|
def neighbours(guest, plan):
    """Return guest's adjacent neighbours in plan.

    Guests at one end of the plan are considered to be sitting next to
    the guest at the opposite end (the table wraps around).
    """
    idx = plan.index(guest)
    # Negative index handles the left wrap; modulo handles the right wrap.
    return (plan[idx - 1], plan[(idx + 1) % len(plan)])
|
b77f5ce28e794ab4e10bd1b3bb776fb65f4bd4ac
| 50,282
|
from typing import Any
import importlib
def dynamic_import_class(module_name: str, class_name: str) -> Any:
    """
    Dynamically imports a class from a given module.

    Failures are reported via print and result in ``None`` being
    returned instead of an exception propagating to the caller.

    Args:
        module_name (str): the module to dynamically load
        class_name (str): the class to dynamically load

    Returns:
        Any: the class from the module specified, or None on failure
    """
    module = None
    try:
        module = importlib.import_module(module_name)
    except ImportError:
        print("module not found: " + module_name)
    # getattr on a None module raises AttributeError, which is also
    # swallowed here — preserving the original best-effort contract.
    try:
        return getattr(module, class_name)
    except Exception as err:
        print(err)
        return None
|
5c2983655d509154c1d13f2980e54e0d73db0124
| 50,289
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.