content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def make_pkg(pkgname, context):
    """Create a new extension package from the bundled templates.

    :param pkgname: Name of the package to create.
    :param context: Mapping with keys that match the placeholders in the
        templates.
    :return: ``True`` on success, or a ``(False, error_message)`` tuple
        when copying the template tree or rendering a template failed.
    :rtype: Bool or Tuple
    """
    # Copy the whole template tree first; any copy error aborts early.
    try:
        shutil.copytree(TEMPLATE_DIRNAME, pkgname)
    except (OSError, IOError, shutil.Error) as exc:
        return False, exc.strerror
    # Render each template file in place with the supplied context.
    for template_file in TEMPLATE_FILES:
        try:
            write_template(pkgname, template_file, context)
        except (OSError, IOError) as exc:
            return False, exc.strerror
    return True
def _save_custom_objects(path, custom_objects):
    """
    Persist Keras ``custom_objects`` with cloudpickle so a model can be
    easily loaded later.

    :param path: An absolute path that points to the data directory within
        /path/to/model.
    :param custom_objects: Keras ``custom_objects`` is a dictionary mapping
        names (strings) to custom classes or functions to be considered
        during deserialization. MLflow saves these custom layers using
        CloudPickle and restores them automatically when the model is
        loaded with :py:func:`mlflow.keras.load_model` and
        :py:func:`mlflow.pyfunc.load_model`.
    """
    # Imported lazily so cloudpickle is only required when actually saving.
    import cloudpickle

    target = os.path.join(path, _CUSTOM_OBJECTS_SAVE_PATH)
    with open(target, "wb") as handle:
        cloudpickle.dump(custom_objects, handle)
def view_explorer_node(node_hash: str):
    """Build an induction query around the given node and redirect to it."""
    node = manager.get_node_by_hash_or_404(node_hash)
    return redirect_to_view_explorer_query(manager.build_query_from_node(node))
def shells_main():
    """The function pointed to by `bscan-shells` in console_scripts."""
    exit_code = cli_shells_main()
    sys.exit(exit_code)
def encode_into_any_base(number, base, encoded_num):
    """Encode number into any base 2-36. Can be fractional or whole.

    Parameters:
    number: float -- integer representation of number (in base 10)
    base: int -- base to convert to
    encoded_num: str -- representation (so far) of number in base

    Return: str -- string representation of number in the new base
    """
    # Whole numbers are delegated straight to the whole-number encoder.
    if number % 1 == 0:
        return encode_whole_number(number, base)
    # Fractional input with a non-zero integer portion: encode the integer
    # portion first, then recurse on the remaining fraction.
    if not str(number)[0] == '0':
        int_part = math.floor(number)
        return encode_into_any_base(
            number - int_part,
            base,
            encoded_num + encode_whole_number(int_part, base),
        )
    # Pure fraction: emit a leading '0' (only if nothing was encoded yet),
    # the radix point, and then the encoded fractional digits.
    prefix = '0' if encoded_num == '' else ''
    return encoded_num + prefix + '.' + encode_fractional_number(number, base)
def handle_forbidden(error: Forbidden) -> Response:
    """Render the base 403 error page."""
    status = HTTPStatus.FORBIDDEN
    return respond(error.description, status=status)
async def test_form_import_invalid_auth(hass):
    """Test we handle invalid auth on import."""
    ping_patch = patch(
        "homeassistant.components.kodi.config_flow.Kodi.ping",
        side_effect=InvalidAuthError,
    )
    connection_patch = patch(
        "homeassistant.components.kodi.config_flow.get_kodi_connection",
        return_value=MockConnection(),
    )
    with ping_patch, connection_patch:
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_IMPORT},
            data=TEST_IMPORT,
        )
    assert result["type"] == "abort"
    assert result["reason"] == "invalid_auth"
def getval(l, b, map='sfd', size=None, order=1):
    """Return SFD at the Galactic coordinates l, b.

    Example usage:
    h, w = 1000, 4000
    b, l = numpy.mgrid[0:h,0:w]
    l = 180.-(l+0.5) / float(w) * 360.
    b = 90. - (b+0.5) / float(h) * 180.
    ebv = dust.getval(l, b)
    imshow(ebv, aspect='auto', norm=matplotlib.colors.LogNorm())

    :param l: Galactic longitude(s) in degrees (scalar or array).
    :param b: Galactic latitude(s) in degrees; must match the shape of ``l``.
    :param map: Which map to read; 'sfd' is an alias for 'dust'.
    :param size: Map resolution suffix; defaults to the largest known size.
    :param order: Interpolation order passed to ``map_coordinates``.
    :return: Array of map values with the same shape as ``l``/``b``.
    :raises Exception: if the map files cannot be found under $DUST_DIR.
    :raises ValueError: if ``l`` and ``b`` have different shapes.
    """
    l = numpy.atleast_1d(l)
    b = numpy.atleast_1d(b)
    if map == 'sfd':
        map = 'dust'
    if map in ['dust', 'd100', 'i100', 'i60', 'mask', 'temp', 'xmap']:
        fname = 'SFD_' + map
    else:
        fname = map
    maxsize = {'d100': 1024, 'dust': 4096, 'i100': 4096, 'i60': 4096,
               'mask': 4096}
    if size is None and map in maxsize:
        size = maxsize[map]
    if size is not None:
        fname = fname + '_%d' % size
    # Map files live below $DUST_DIR/maps.
    fname = 'maps/' + fname
    fname = os.path.join(os.environ['DUST_DIR'], fname)
    if not os.access(fname + '_ngp.fits', os.F_OK):
        raise Exception('Map file %s not found' % (fname + '_ngp.fits'))
    if l.shape != b.shape:
        raise ValueError('l.shape must equal b.shape')
    out = numpy.zeros_like(l, dtype='f4')
    # Northern and southern Galactic hemispheres are stored in separate files.
    for pole in ['ngp', 'sgp']:
        m = (b >= 0) if pole == 'ngp' else b < 0
        if numpy.any(m):
            hdulist = pyfits.open(fname + '_%s.fits' % pole)
            try:
                w = wcs.WCS(hdulist[0].header)
                x, y = w.wcs_world2pix(l[m], b[m], 0)
                out[m] = map_coordinates(hdulist[0].data, [y, x],
                                         order=order, mode='nearest')
            finally:
                # BUG FIX: the original leaked the FITS handle and contained a
                # Python-2 `print fname` statement (a SyntaxError on Python 3);
                # the debug print has been removed and the file is now closed.
                hdulist.close()
    return out
def format_timedelta(tdelta):
    """Return the timedelta as a 'HH:mm:ss' string."""
    remaining = int(tdelta.total_seconds())
    hours, remaining = divmod(remaining, 3600)
    minutes, seconds = divmod(remaining, 60)
    return "{0:02d}:{1:02d}:{2:02d}".format(hours, minutes, seconds)
def test_add_with_set_input_extend():
    """Input is a set, not a string key with an extend."""
    context = Context(
        {'arbset': {1, 2},
         'add': {'set': PyString('arbset'), 'addMe': 3}})
    add.run_step(context)
    # Second pass: extend the set with multiple values via unpack.
    context['add']['addMe'] = [4, 5]
    context['add']['unpack'] = True
    add.run_step(context)
    assert context['arbset'] == {1, 2, 3, 4, 5}
    assert len(context) == 2
def video_data_to_df(videos_entries, save_csv):
    """
    Create a dataframe from the video data stored as tuples.

    :param videos_entries: (list) list of tuples containing topics, subtopics,
        videos and durations
    :param save_csv: (boolean) condition to specify if the df is saved locally
        as a csv file
    :return dfx: (dataframe) df with all data arranged in dataframe
    """
    ## Tuples -> dataframe, with the module-level column names applied.
    dfx = pd.DataFrame(videos_entries, columns=videos_df_colnames)
    ## Round the length values (mins) and derive the hours column from them.
    dfx["video_length_[mins]"] = dfx["video_length_[mins]"].round(2)
    dfx["video_length_[hrs]"] = (dfx["video_length_[mins]"] / 60).round(2)
    ## Sort and restart the index.
    dfx = dfx.sort_values(by=["topic", "subtopic", "video"]).reset_index(drop=True)
    ## Save a local copy of the df if requested.
    if save_csv:
        dfx.to_csv(usmle_videos_csv_copy_path + usmle_2020_videos_df_filename)
    return dfx
def create_mysql_entitySet(username, databaseName):
    """Create a new entity set (table) in the databaseName.

    Expects a JSON request body with ``entitySetName`` and ``attributes``
    (a mapping of column name -> {DataType, NN, AI, PK} flags). At least one
    attribute should be flagged as primary key.

    :return: JSON payload with a success flag and a human-readable message.
    """
    password = get_password(username)
    entitySetName = request.json['entitySetName']
    attributes = request.json['attributes']
    addToSchema(request.get_json(), "mysql")
    # NOTE(review): table/column names are interpolated directly into the DDL.
    # MySQL cannot parameterize identifiers, so these values must be validated
    # or quoted upstream to avoid SQL injection.
    column_defs = []
    pks = []
    for attribute, spec in attributes.items():
        column = " " + attribute + " " + spec['DataType']
        if spec['NN'] == 1:
            column += " NOT NULL"
        if spec['AI'] == 1:
            column += " AUTO_INCREMENT"
        if spec['PK'] == 1:
            pks.append(attribute)
        column_defs.append(column)
    # Join-based assembly replaces the original manual concatenation (which
    # also crashed with IndexError when no PK was flagged; an invalid PK
    # clause now surfaces as a MySQL error returned in the JSON response).
    sql = "CREATE TABLE " + username + "_" + databaseName + "." + entitySetName + " ("
    sql += ",".join(column_defs)
    sql += ",PRIMARY KEY (" + ",".join(pks) + "));"
    try:
        cnx = connectSQLServerDB(username, password, username + "_" + databaseName)
        mycursor = cnx.cursor()
        mycursor.execute(sql)
        cnx.close()
        return jsonify(success=1, message="Entity Set '" + entitySetName + "' created successfully")
    except mysql.connector.Error as err:
        return jsonify(success=0, error_code=err.errno, message=err.msg)
def test_symmetry_with_laue_group_override(dials_data, tmpdir):
    """Simple test to check that dials.symmetry, with overridden laue group, completes"""
    data_dir = dials_data("l_cysteine_dials_output")
    command = ["dials.symmetry", "laue_group=P121", "change_of_basis_op=-b,-a,-c"]
    for index in (20, 25):
        command.append(data_dir / ("%d_integrated_experiments.json" % index))
        command.append(data_dir / ("%d_integrated.pickle" % index))
    result = procrunner.run(command, working_directory=tmpdir)
    assert not result.returncode and not result.stderr
    assert tmpdir.join("symmetrized.refl").check()
    assert tmpdir.join("symmetrized.expt").check()
    expts = load.experiment_list(
        tmpdir.join("symmetrized.expt").strpath, check_format=False
    )
    assert str(expts[0].crystal.get_space_group().info()) == "P 1 21 1"
    # Verify that the unit cell has been reindexed correctly
    assert expts[0].crystal.get_unit_cell().parameters() == pytest.approx(
        (8.21578444269, 5.4815363434, 12.1457047712, 90.0, 90.0, 90.0)
    )
def m6(X, Y, Xp, Yp, alpha=1.0, prev='ident', post='ident', **kwargs):
    """Computes a matrix with the values of applying the kernel
    :math:`m_6` between each pair of elements in :math:`X` and :math:`Y`.

    Args:
        X: Numpy matrix.
        Y: Numpy matrix.
        Xp: Numpy matrix with the probabilities of each category in *X*.
        Yp: Numpy matrix with the probabilities of each category in *Y*.
        alpha (float): Argument for the inverting function *h*.
        prev (string): Function to transform the data before composing.
            Values: ``'ident'``, ``'f1'`` or a function.
        post (string): Function to transform the data after composing.
            Values: ``'ident'``, ``'f1'``, ``'f2'`` or a function.
        kwargs (dict): Arguments required by *prev* or *post*.

    Return:
        Numpy matrix of size :math:`m_X \\times m_Y`.

    Since the code is vectorised any function passed in *prev* or *post*
    must work on numpy arrays.
    """
    # Inverting function applied to both probability matrices.
    h = lambda x: (1.0 - x ** alpha) ** (1.0 / alpha)
    prevf = get_vector_function(prev, kwargs)
    postf = get_vector_function(post, kwargs)
    xm, xn = X.shape
    ym, yn = Y.shape
    Xp = h(Xp)
    Yp = h(Yp)
    G = np.zeros((xm, ym))
    for i in range(xm):
        # Broadcast row i of X (and its probabilities) against every row of Y.
        I = np.tile(X[i], (ym, 1))
        Ip = np.tile(Xp[i], (ym, 1))
        EQ = I == Y
        NE = I != Y
        a = 2.0 * np.sum(prevf(Ip * EQ), axis=1)
        b = np.sum(prevf(Ip * NE), axis=1)
        c = np.sum(prevf(Yp * NE), axis=1)
        dx = np.sum(prevf(1.0 - Ip * NE), axis=1)
        # BUG FIX: ``dy`` was previously computed from ``Ip`` — an exact
        # duplicate of ``dx``. By symmetry with the ``b``/``c`` pair it must
        # use the Y-side probabilities ``Yp``.
        dy = np.sum(prevf(1.0 - Yp * NE), axis=1)
        d = dx + dy
        apd = a + d
        G[i, :] = apd / (apd + 2.0 * (b + c))
    return postf(G)
def serve_communications_and_statuses(erpnext_support_user, erpnext_support_issues, bench_site):
    """
    Return a JSON dict of support issue communications and statuses.

    response = {
        "issue_name_1": {
            "communications": [],
            "status": "status",
            "last_sync_on": "last_sync_on"
        },
        "issue_name_2": {
            "communications": [],
            "status": "status",
            "last_sync_on": "last_sync_on"
        }
    }
    """
    authenticate_erpnext_support_user(erpnext_support_user)
    sync_time = get_datetime_str(now_datetime())
    res = {}
    time.sleep(5)
    for support_issue in json.loads(erpnext_support_issues):
        frappe_issue_id = support_issue.get("frappe_issue_id")
        if not frappe_issue_id:
            continue
        # Sync Communications for Issue: all e-mails sent since the last sync.
        communications = call(
            frappe.get_all,
            doctype="Communication",
            filters=[
                ["reference_doctype", "=", "Issue"],
                ["reference_name", "=", frappe_issue_id],
                ["communication_medium", "=", "Email"],
                ["sent_or_received", "=", "Sent"],
                ["creation", ">", get_datetime(support_issue.get("last_sync_on"))],
            ],
            fields=["name", "subject", "content", "recipients", "has_attachment", "creation"],
            order_by="creation ASC",
        )
        # Sync Attachments for Communications
        communications = get_attachments(communications)
        # Sync Status for Issue; anything that is neither Open nor Closed is
        # reported back as Open.
        frappe_issue = frappe.get_doc("Issue", frappe_issue_id)
        status = frappe_issue.get("status")
        if status not in ["Open", "Closed"]:
            status = "Open"
        res[support_issue.get("name")] = {
            "communications": communications,
            "status": status,
            "priority": frappe_issue.get("priority"),
            "resolution_by": get_datetime_str(frappe_issue.resolution_by) if frappe_issue.resolution_by else None,
            "last_sync_on": sync_time,
            "release": frappe_issue.get("release"),
        }
    return json.dumps(res)
def answer_view(answerid):
    """Route to view a specific answer."""
    message = "Your updated answer: {} ".format(user_answers[answerid])
    return jsonify({"answer": message})
def jwk_factory(acct_priv_key_path: str) -> _JWKBase:
    """Build the matching JWK object for a PEM-encoded account private key.

    Supports RSA keys and SECP256R1 (P-256) elliptic-curve keys.
    """
    with open(acct_priv_key_path, 'rb') as key_file:
        acct_priv = serialization.load_pem_private_key(
            data=key_file.read(),
            password=None,
            backend=default_backend()
        )
    if isinstance(acct_priv, rsa.RSAPrivateKey):
        public_numbers = acct_priv.public_key().public_numbers()
        return JWKRSA(
            priv_key=acct_priv,
            n=public_numbers.n,
            e=public_numbers.e
        )
    if isinstance(acct_priv, ec.EllipticCurvePrivateKey):
        if not isinstance(acct_priv.curve, ec.SECP256R1):
            raise NotImplementedError(
                f'ecdsa curve {acct_priv.curve} not implemented'
            )
        return JWKES256(acct_priv)
    raise TypeError(f'key type {type(acct_priv)} not supported')
def execute(
    scan_definition: str | Path,
    df: DataFrame,
    *,
    soda_server_client: SodaServerClient | None = None,
) -> ScanResult:
    """
    Execute a scan on a data frame.

    Parameters
    ----------
    scan_definition : Union[str, Path]
        The path to a scan file or the content of a scan file.
    df: DataFrame
        The data frame to be scanned.
    soda_server_client : Optional[SodaServerClient] (default : None)
        A soda server client.

    Returns
    -------
    out : ScanResult
        The scan results.
    """
    yml = create_scan_yml(scan_definition)
    # Register the frame under the table name the scan definition expects.
    df.createOrReplaceTempView(yml.table_name)
    scan = create_scan(yml, soda_server_client=soda_server_client)
    scan.execute()
    return scan.scan_result
def select_best_model(scouseobject):
    """
    Selects the best model out of those fitted - that with the smallest aic
    value

    Parameters
    ----------
    scouseobject : Instance of the scousepy class
    """
    for key in scouseobject.indiv_dict.keys():
        spectrum = scouseobject.indiv_dict[key]
        models = spectrum.models
        ncomps = [mod.ncomps for mod in models]
        # A "dud" is a zero-component (no-fit) model; pull it out of the
        # candidate list so it cannot win the AIC comparison below.
        findduds = (np.asarray(ncomps) == 0.0)
        if np.any(np.asarray(findduds)):
            idx = np.squeeze(np.where(findduds == True))
            dud = models[idx]
            models.remove(dud)
        else:
            dud=None
        if np.size(models) != 0:
            # Best model = smallest AIC among the remaining candidates; remove
            # it from the alternatives list.
            aic = [mod.aic for mod in models]
            idx = np.squeeze(np.where(aic == np.min(aic)))
            model = models[idx]
            models.remove(model)
        else:
            # Only the dud was available: fall back to it as the "best" model
            # and clear the dud flag so it is not re-appended below.
            model = dud
            dud = None
        if dud is None:
            add_bf_model(scouseobject.indiv_dict[key], model)
            update_model_list(scouseobject.indiv_dict[key], models)
        else:
            # Put the dud back among the alternatives before storing results.
            models.append(dud)
            add_bf_model(scouseobject.indiv_dict[key], model)
            update_model_list(scouseobject.indiv_dict[key], models)
def jaccard_similarity(emb1: np.ndarray, emb2: np.ndarray) -> float:
    """Compute the Jaccard coefficient between two feature vectors.

    :param emb1: shape = [feature,]
    :param emb2: shape = [feature,]
    :return: the Jaccard coefficient
    """
    # Positions where at least one of the two vectors is non-zero.
    nonzero_either = np.bitwise_or(emb1 != 0, emb2 != 0)
    numerator = np.double(np.bitwise_and(emb1 != emb2, nonzero_either).sum())
    denominator = np.double(nonzero_either.sum())
    return numerator / denominator
def check_job_order_correct(filename):
    """Check that the job lines logged in *filename* obey the precedences:

    1 -> 2 -> 3 ->
           -> 4 ->
    5 ->          6

    Each line is expected to end with ``.<digit>`` identifying its job.

    :raises ValueError: if a line does not carry a job index, or if some
        job's last line appears at or after a successor job's first line.
    """
    # Each [a, b] pair means: every line of job a must precede job b's lines.
    precedence_rules = [[1, 2],
                        [2, 3],
                        [1, 4],
                        [5, 6],
                        [3, 6],
                        [4, 6]]
    index_re = re.compile(r'.*\.([0-9])["\]\n]*$')
    job_indices = defaultdict(list)
    with open(filename) as ii:
        for linenum, l in enumerate(ii):
            m = index_re.search(l)
            if not m:
                # BUG FIX: raising a plain string is a TypeError on Python 3.
                raise ValueError("Non-matching line in [%s]" % filename)
            job_indices[int(m.group(1))].append(linenum)
    for job_index in job_indices:
        job_indices[job_index].sort()
    for before, after in precedence_rules:
        if job_indices[before][-1] >= job_indices[after][0]:
            # BUG FIX: the original raised an unformatted string with dangling
            # %-placeholders (also a TypeError); now a real ValueError.
            raise ValueError(
                "Precedence violated for job %d [line %d] and job %d "
                "[line %d] of [%s]"
                % (before, job_indices[before][-1],
                   after, job_indices[after][0], filename))
def test_list_nmtoken_max_length_nistxml_sv_iv_list_nmtoken_max_length_1_2(mode, save_output, output_format):
    """
    Type list/NMTOKEN is restricted by facet maxLength with value 5.
    """
    base = "nistData/list/NMTOKEN/Schema+Instance/"
    assert_bindings(
        schema=base + "NISTSchema-SV-IV-list-NMTOKEN-maxLength-1.xsd",
        instance=base + "NISTXML-SV-IV-list-NMTOKEN-maxLength-1-2.xml",
        class_name="NistschemaSvIvListNmtokenMaxLength1",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def _share_secret_int_indices(s_i: int, n: int, t: int) -> Tuple[Dict[int, int], List[PointG1]]:
    """Compute n shares of a given secret such that at least t + 1 shares are
    required for recovery of the secret. Additionally returns the commitments
    to the coefficients of the polynomial used to verify the validity of the
    shares.

    Assumes nodes use the indices [1, 2, ..., n].
    See the share_secret function for a generalized variant with arbitrary
    indices.
    """
    # Polynomial coefficients c_i0, c_i1, ..., c_it; the constant term is the
    # secret itself.
    coefficients = [s_i] + [random_scalar() for _ in range(t)]

    def evaluate(x: int) -> int:
        """Evaluate the secret polynomial at x modulo the curve order."""
        total = 0
        for power, coefficient in enumerate(coefficients):
            total += coefficient * pow(x, power, CURVE_ORDER)
        return total % CURVE_ORDER

    shares = {index: evaluate(index) for index in range(1, n + 1)}
    commitments = [multiply(G1, coefficient) for coefficient in coefficients]
    return shares, commitments
def auto_type(key, redis=None, default=None, o=True):
    """Return a datatype wrapper instance for *key*.

    If the key exists, its Redis type (with JSON-ish string payloads refined
    to 'dict-string'/'list-string') selects the wrapper class from TYPE_MAP.
    Otherwise *default* names the wrapper to use; None is returned when no
    default is given.

    :raises ValueError: if *default* is not a valid redis type name.
    """
    if redis is None:
        redis = config.redis
    key = compress_key(key)
    if redis.exists(key):
        datatype = redis.type(key)
        if datatype == 'string':
            # Peek at the decoded payload: dicts/lists stored as strings get
            # dedicated wrapper types.
            test_string = RedisString(key, redis=redis).data
            if isinstance(test_string, dict):
                datatype = 'dict-string'
            elif isinstance(test_string, list):
                datatype = 'list-string'
            # BUG FIX: the original tested `basestring` (Python 2 only ->
            # NameError on Python 3) and had dead int/float branches that
            # reassigned 'string' to itself; plain strings and numbers simply
            # keep datatype 'string'.
        return TYPE_MAP.get(datatype)(key, redis=redis, o=o)
    if default:
        try:
            # BUG FIX: use indexing so an unknown default actually raises
            # KeyError; `.get()` returned None and produced an uncaught
            # TypeError instead of the intended ValueError.
            return TYPE_MAP[default](key, redis=redis, o=o)
        except KeyError:
            raise ValueError('Provide a valid default redis type.')
    return None
def get_user_data_dir(app_name=DEFAULT_APP_NAME, auto_create=True) -> Path:
    """
    Get the platform-specific user data folder for *app_name*.
    """
    return _get_user_dir(
        app_name=app_name,
        xdg_env_var='XDG_DATA_HOME',
        win_env_var='APPDATA',
        fallback='~/.local/share',
        win_fallback='~\\AppData\\Roaming',
        macos_fallback='~/Library',
        auto_create=auto_create,
    )
def unlink(annotation):
    """
    Unlinks *annotation* from its neighbors in a doubly-linked list.

    :param annotation: The annotation to unlink.
    :type annotation: :class:`~gatenlphiltlab.Annotation`
    """
    previous_node = annotation.previous
    next_node = annotation.next
    if previous_node and next_node:
        # Interior node: bridge the two neighbors around it.
        previous_node.next = next_node
        next_node.previous = previous_node
    elif previous_node:
        # Right edge: the left neighbor now terminates the list.
        previous_node.next = None
    elif next_node:
        # Left edge: the right neighbor now starts the list.
        next_node.previous = None
def parse_evidence(
    fixed_labels=None,
    evidence_files=None,
    molecules=None,
    evidence_score_field=None,
    return_raw_csv_data=False,
    unimod_file_list=None,
):
    """
    Reads in the evidence file and returns the final formatted fixed labels,
    the evidence lookup, which is passed to the isotopologue library and the
    final formatted molecules (fixed labels are stripped form the molecules).

    Note:
        Output .csv files from `Ursgal`_ (`Documentation`_) can directly be
        used. Also `mzTab`_ files can be used as input.

    .. _Ursgal:
        https://github.com/ursgal/ursgal

    .. _Documentation:
        http://ursgal.readthedocs.io/en/latest/

    .. _mzTab:
        http://www.psidev.info/mztab

    Args:
        fixed_labels (dict): dict with fixed labels, example format is shown
            below.
        evidence_files (list): list of evidence file paths.
        molecules (list): list of additional molecules
        evidence_score_field (str): specify fieldname which holds the search
            engine score (Default is "PEP")
        return_raw_csv_data (bool): if True, additionally return the buffered
            raw rows read from each evidence file.
        unimod_file_list (list): optional unimod XML files for the mapper.

    Example fixed label format::

        {
            'C' : [
                {
                    'element': {
                        'O': 1,
                        'H': 3,
                        '14N': 1,
                        'C': 2
                    },
                    'evidence_mod_name': 'Carbamidomethyl'
                },
            ]
        }

    Returns:
        tuple: final formatted fixed label dict, evidence lookup, list of
        molecules (plus the raw csv data when ``return_raw_csv_data`` is True)
    """
    if molecules is None:
        molecules = []
    if evidence_score_field is None:
        evidence_score_field = "PEP"  # default
    unimod_parser = pyqms.UnimodMapper(xml_file_list=unimod_file_list)
    fixed_mod_lookup = {}
    amino_acid_2_fixed_mod_name = ddict(list)
    formatted_fixed_labels = None
    evidence_lookup = None
    molecule_set = set()
    all_fixed_mod_names = set()
    # Stage 1: translate the user-supplied fixed-label spec into hill-notation
    # formulas, remembering each modification's composition by name and by the
    # amino acid it attaches to.
    if fixed_labels is not None and len(fixed_labels.keys()) != 0:
        formatted_fixed_labels = {}
        for aa, fixed_mod_info_dict_list in fixed_labels.items():
            for fixed_mod_info_dict in fixed_mod_info_dict_list:
                if isinstance(fixed_mod_info_dict["element_composition"], dict):
                    tmp_cc_factory = pyqms.ChemicalComposition()
                    tmp_cc_factory.add_chemical_formula(
                        fixed_mod_info_dict["element_composition"]
                    )
                else:
                    tmp_cc_factory = fixed_mod_info_dict["element_composition"]
                # print(type(tmp_cc_factory))
                # print(fixed_mod_info_dict)
                if aa not in formatted_fixed_labels.keys():
                    formatted_fixed_labels[aa] = []
                formatted_fixed_labels[aa].append(tmp_cc_factory.hill_notation_unimod())
                # save it under name and amino acid!
                fixed_mod_lookup[fixed_mod_info_dict["evidence_mod_name"]] = dc(
                    tmp_cc_factory
                )
                amino_acid_2_fixed_mod_name[aa].append(
                    fixed_mod_info_dict["evidence_mod_name"]
                )
                all_fixed_mod_names.add(fixed_mod_info_dict["evidence_mod_name"])
                tmp_cc_factory.clear()
    cc_factory = pyqms.ChemicalComposition()
    # this is the lookup for the lib with the evidences
    # tmp_evidences = ddict(list)
    tmp_evidences = {}
    csv_raw_data_to_return = {}
    # tmp_charges_of_evidences = set()
    # Stage 2: read each evidence file (csv or mzTab), collecting per-molecule
    # evidence dicts (RT, score, trivial names).
    for evidence_file in evidence_files:
        input_is_csv = False
        evidence_lookup = {}
        with codecs.open(
            evidence_file, mode="r", encoding="utf-8"
        ) as openend_evidence_file:
            # first buffer the file here depending on mztab andf csv input
            if evidence_file.upper().endswith("CSV"):
                dict_reader = csv.DictReader(openend_evidence_file)
                modification_fieldname = "Modifications"
                rt_fieldname = "Retention Time (s)"
                seq_fieldname = "Sequence"
                input_is_csv = True
            elif evidence_file.upper().endswith("MZTAB"):
                # mzTab: only the PSM rows (and the PSH header row) matter.
                dict_reader = csv.DictReader(
                    [row for row in openend_evidence_file if row[:3] in ["PSM", "PSH"]],
                    delimiter="\t",
                )
                modification_fieldname = "modifications"
                rt_fieldname = "retention_time"
                seq_fieldname = "sequence"
            else:
                print(
                    "The format {0} is not recognized by the pyQms adaptor function".format(
                        os.path.splitext(evidence_file)[1]
                    )
                )
            input_buffer = []
            for line_dict in dict_reader:
                input_buffer.append(line_dict)
        csv_raw_data_to_return[evidence_file] = input_buffer
        for line_dict in input_buffer:
            modifications = line_dict.get(modification_fieldname, "")
            if modifications == "":
                molecule = line_dict[seq_fieldname]
            else:
                if input_is_csv:
                    formatted_mods = line_dict[modification_fieldname]
                else:
                    # mzTab encodes mods as e.g. "2-UNIMOD:4,3-UNIMOD:4";
                    # convert to "Name:pos" entries joined by ';'.
                    formatted_mods = []
                    # 2-UNIMOD:4,3-UNIMOD:4
                    for pos_and_unimod_id in line_dict[
                        modification_fieldname
                    ].split(","):
                        pos, unimod_id = pos_and_unimod_id.split("-")
                        unimod_name = unimod_parser.id2first_name(unimod_id.split(":")[1])
                        formatted_mods.append("{0}:{1}".format(unimod_name, pos))
                    formatted_mods = ";".join(formatted_mods)
                molecule = "{0}#{1}".format(
                    line_dict[seq_fieldname], formatted_mods
                )
            dict_2_append = {}
            rt = line_dict.get(rt_fieldname, "")
            # seconds is the standard also for mzTab
            if rt != "":
                dict_2_append["RT"] = float(rt) / 60.0  # always in min
            score = line_dict.get(evidence_score_field, "")
            if score != "":
                dict_2_append["score"] = float(score)
                dict_2_append["score_field"] = evidence_score_field
            else:
                dict_2_append["score"] = "None"
                dict_2_append["score_field"] = "None"
            if molecule not in tmp_evidences.keys():
                tmp_evidences[molecule] = {"evidences": [], "trivial_names": set()}
            for trivial_name_key in [
                "proteinacc_start_stop_pre_post_;",  # old ursgal style
                "trivial_name",  # self defined name
                "Protein ID",  # new ursgal style
                "accession",  # mzTab style
            ]:
                additional_name = line_dict.get(trivial_name_key, "")
                if additional_name != "":
                    # use set to remove double values
                    tmp_evidences[molecule]["trivial_names"].add(additional_name)
                    if 'trivial_name' not in dict_2_append.keys():
                        dict_2_append['trivial_name'] = additional_name
                    else:
                        dict_2_append['trivial_name'] += ';{0}'.format(additional_name)
            tmp_evidences[molecule]["evidences"].append(dict_2_append)
    # Stage 3: for every molecule (explicit + evidence-derived), strip fixed
    # labels from the modification string, fold their compositions into the
    # molecule's formula, and index the evidences by that complete formula.
    mod_pattern = re.compile(r""":(?P<pos>[0-9]*$)""")
    all_molecules = list(molecules)
    if len(tmp_evidences.keys()) > 0:
        all_molecules += list(tmp_evidences.keys())
    for molecule_and_mods in sorted(all_molecules):
        # try to convert trivial name set to list for conveniences
        try:
            tmp_evidences[molecule_and_mods]["trivial_names"] = sorted(
                list(set(tmp_evidences[molecule_and_mods]["trivial_names"]))
            )
        except:
            pass
        # print(molecule_and_mods)
        if "#" in molecule_and_mods:
            molecule, modifications = molecule_and_mods.split("#")
        else:
            molecule = molecule_and_mods
            modifications = None
        fixed_label_mod_addon_names = []
        if modifications is not None:
            mods_to_delete = []
            mod_list = modifications.split(";")
            for pos_in_mod_list, mod_and_pos in enumerate(mod_list):
                # OLD STYLE, no ':' in mod allowed!
                # mod, pos = mod_and_pos.split(':')
                # NEW STYLE, SILAC does not crash...
                for match in mod_pattern.finditer(mod_and_pos):
                    pos = int(match.group("pos"))
                    mod = mod_and_pos[: match.start()]
                    break
                modded_aa = molecule[int(pos) - 1]
                # Fixed-label mods are removed from the mod string and added
                # back to the formula below via their composition.
                if (
                    formatted_fixed_labels is not None
                    and modded_aa in formatted_fixed_labels.keys()
                    and mod in all_fixed_mod_names
                ):
                    fixed_label_mod_addon_names.append(mod)
                    mods_to_delete.append(pos_in_mod_list)
            for modpos_2_remove in sorted(mods_to_delete, reverse=True):
                mod_list.pop(modpos_2_remove)
            if len(mod_list) > 0:
                molecule = "{0}#{1}".format(molecule, ";".join(mod_list))
            else:
                # nosetest does not line else and pass
                # molecule = molecule
                pass
        else:
            # fail check if fixed mod is not in the modifications!
            # add all fixed modification!
            if formatted_fixed_labels is not None:
                for aa in molecule:
                    if aa in formatted_fixed_labels.keys():
                        for mod_name in amino_acid_2_fixed_mod_name[aa]:
                            fixed_label_mod_addon_names.append(mod_name)
        # print(molecule)
        if molecule.startswith("+"):
            cc_factory.add_chemical_formula(molecule)
        elif "#" in molecule:
            try:
                sequence, modifications = molecule.split("#")
            except ValueError:
                raise ValueError(f"Invalid Sequence too many # ({molecule})")
            cc_factory.use(sequence=sequence, modifications=modifications)
        else:
            cc_factory.use(sequence=molecule)
        if len(fixed_label_mod_addon_names) != 0:
            for fixed_mod_name in fixed_label_mod_addon_names:
                cc_factory.add_chemical_formula(fixed_mod_lookup[fixed_mod_name])
        complete_formula = cc_factory.hill_notation_unimod()
        molecule_set.add(molecule)
        if molecule_and_mods in tmp_evidences.keys():
            if complete_formula not in evidence_lookup.keys():
                evidence_lookup[complete_formula] = {}
            evidence_lookup[complete_formula][molecule_and_mods] = tmp_evidences[
                molecule_and_mods
            ]
        cc_factory.clear()
    molecule_list = list(molecule_set)
    if return_raw_csv_data:
        return (
            formatted_fixed_labels,
            evidence_lookup,
            molecule_list,
            csv_raw_data_to_return,
        )
    else:
        return formatted_fixed_labels, evidence_lookup, molecule_list
def large_plants(plants):
    """
    This function compares the values assigned to the key <"Max height"> to the average among all
    the values assigned to <"Max height"> using the <avg_height()> function.

    Parameters:
    plants (list): list of dictionaries representing plants and their characteristics.

    Returns:
    list: list of dictionaries that have a value assigned to <"Max height"> larger than the average
    of all values assigned to <"Max height">.
    """
    # Implements the previously-unimplemented stub per its docstring.
    # NOTE(review): assumes avg_height(plants) returns the mean of the
    # "Max height" values -- confirm against its definition.
    average = avg_height(plants)
    return [plant for plant in plants if plant["Max height"] > average]
def calculateDominantFrequency(signal, fs, fMin = 0, fMax = None, applyWindow = True,
        fftZeroPaddingFactor = 1
    ):
    """
    calculates the dominant frequency of the given signal
    @param signal input signal
    @param fs sampling frequency
    @param fMin the minimum frequency [Hz] that should be considered
    @param fMax the maximum frequency [Hz] that should be considered. If None
        (default), the full spectrum is searched.
    @param applyWindow if True, we'll apply a HANN window before
        calculating the FFT
    @param fftZeroPaddingFactor if greater than one, we'll append the
        appropriate number of zeros to the signal before calculating the FFT
    @return the dominant frequency [Hz], or numpy.nan if none was found
    """
    n = len(signal)
    # Work on a copy so windowing/zero-padding never mutates the caller's data.
    signalTmp = copy.deepcopy(signal)
    if applyWindow:
        fftWindow = createLookupTable(len(signalTmp), LOOKUP_TABLE_HANN)
        signalTmp *= fftWindow
    if fftZeroPaddingFactor > 1:
        m = int(round(n * fftZeroPaddingFactor))
        signalTmp = numpy.append(signalTmp, numpy.zeros(m - n))
    spectrumX, spectrumY = calculateFFT(signalTmp, fs, len(signalTmp),
        applyWindow = False, convertToDb = True,
        spectrumType = AMPLITUDE_SPECTRUM)
    binWidth = spectrumX[1] - spectrumX[0]
    # Convert the frequency limits to spectrum bin indices.
    idx1 = 0
    if fMin > 0:
        idx1 = int(round(fMin / float(binWidth)))
    idx2 = -1
    # BUG FIX: the original compared ``fMax > 0`` unconditionally, which
    # raises a TypeError on Python 3 when fMax is None (its documented
    # default).
    if fMax is not None and fMax > 0:
        idx2 = int(round(fMax / float(binWidth)))
    domFreq = numpy.nan
    try:
        domFreq, dummy = generalUtility.findArrayMaximum(spectrumY, idx1, idx2,
            doInterpolate = True)
        domFreq *= binWidth
    except Exception:
        # No maximum found in the requested range: fall through with nan.
        pass
    return domFreq
def check_title(file_path):
    """
    Return 'has title' if a title is found within the first 300 characters,
    'no title' if it is not, and None when the file is not a UTF-8 text file.

    file_path is the full path with file name and extension.
    """
    if not tool.is_utf8_text_file(file_path):
        return None
    with open(file_path, 'r') as f:
        head = f.read()[:300]
    if tool.has_title(head):
        return 'has title'
    return 'no title'
def permute_adjacency_twin(t1,t2) -> Tuple[torch.Tensor,torch.Tensor]:
    """
    Apply one and the same random node permutation to two adjacency matrices.
    Equivalent to a renaming of the nodes. Supposes shape (n, n).
    """
    size = t1.shape[0]
    perm = torch.randperm(size)
    permuted_first = t1[perm, :][:, perm]
    permuted_second = t2[perm, :][:, perm]
    return permuted_first, permuted_second
def rad_extract(eventfiles,center,radius_function,return_cols=None,cuts=None,apply_GTI=True,theta_cut=66.4,zenith_cut=105,return_indices=False):
    """ Extract events with a radial cut.
    Return specified columns and perform additional boolean cuts.

    Return is in form of a dictionary whose keys are column names
    (and 'DIFFERENCES') and values are numpy arrays with the column
    values.  These will have been concatenated if there are multiple
    FT1 files.

    ========= =======================================================
    Argument  Description
    ========= =======================================================
    eventfiles -- a list of FT1 filenames
    center    -- a SkyDir giving the center of the radial cut
    radius_function -- can be either a float specifying a cookier cutter
              radial cut, or a function taking as arguments the energy
              and event_class and speciying the radius in degrees, e.g.

              def radius(energy,event_class):
                  return numpy.where(event_class,2,1)*(energy/1000)**-0.75
    ========= =======================================================
    Keyword   Description
    ========= =======================================================
    return_cols ['PULSE_PHASE'] (as default) - a list of FT1 column
              names to return
    cuts      None - an optional list of boolean cuts to apply,
              e.g., ['ENERGY > 100']
              NB -- cuts not yet implemented!!
    apply_GTI [True] accept or reject an event based on GTI if True;
              else ignore GTI
    return_indices [False] if True, return an array giving the index in the
              original file of each event; obviously only useful in the
              case of a single event file
    ========= =======================================================
    """
    # BUG FIX: the default was previously the mutable literal
    # ['PULSE_PHASE'], which this function mutates below (appending
    # 'EVENT_INDICES'), so state leaked across calls.  The default is now
    # None and any caller-supplied list is copied before mutation.
    if return_cols is None:
        return_cols = ['PULSE_PHASE']
    else:
        return_cols = list(return_cols)
    # Allow a scalar radius: wrap it in a constant function.
    if not hasattr(radius_function,'__call__'):
        simple_scalar = True
        rval = radius_function
        radius_function = lambda e,event_class: rval
    else:
        simple_scalar = False
    eventfiles = __FITS_parse__(eventfiles)
    from collections import defaultdict,deque
    coldict = defaultdict(deque)
    cols = {}
    cut_cols = ['ZENITH_ANGLE','THETA','TIME']
    keys = list(set(['RA','DEC','ENERGY','CONVERSION_TYPE']+cut_cols+return_cols))
    accepted = 0
    total = 0
    for eventfile in eventfiles:
        # Read the column data for this FT1 file.
        nrows = pyfits.getheader(eventfile,'EVENTS')['NAXIS2']
        for key in keys:
            cols[key] = np.empty(nrows,dtype=float)
            PythonUtilities.get_float_col(cols[key],eventfile,'EVENTS',key)
        rad = radius_function(cols['ENERGY'],cols['CONVERSION_TYPE'])
        # Coarse trapezoidal pre-selection plus zenith/incidence cuts.
        tmask = trap_mask(cols['RA'],cols['DEC'],center,rad)
        tmask &= (cols['ZENITH_ANGLE'] < zenith_cut) & (cols['THETA'] < theta_cut)
        if apply_GTI:
            tmask &= get_gti_mask(eventfile,cols['TIME'])
            print ('GTI will remove %d of %d photons.'%((~tmask).sum(),len(tmask)))
        # Exact radial cut on the pre-selected events.
        if simple_scalar:
            rmask,diffs = rad_mask(cols['RA'][tmask],cols['DEC'][tmask],center,rad)
        else:
            rmask,diffs = rad_mask(cols['RA'][tmask],cols['DEC'][tmask],center,rad[tmask])
        for key in keys:
            coldict[key].append(cols[key][tmask][rmask])
        if return_indices:
            if 'EVENT_INDICES' not in return_cols:
                return_cols.append('EVENT_INDICES')
            coldict['EVENT_INDICES'].append(np.arange(len(tmask))[tmask][rmask])
        coldict['DIFFERENCES'].append(diffs)
        accepted += tmask.sum()
        total += len(tmask)
    # Concatenate per-file results; drop cut-only columns unless requested.
    for key in coldict.keys():
        if (key in cut_cols) and not (key in return_cols):
            cols.pop(key)
            continue
        cols[key] = np.concatenate([x for x in coldict[key]])
        if key in INT_TYPES: cols[key] = cols[key].astype(int)
    print ('Cuts removed %d of %d photons.'%(total-accepted,total))
    return cols
def save_collate_preds(collate_preds, collate_true):
    """Write per-slide predictions to ``collate_preds.csv`` (eval mode).

    ``collate_preds`` maps slide id -> predicted label and ``collate_true``
    maps slide id -> ground-truth label. Prints whether the two mappings
    have the same number of entries, then writes one CSV row per slide.
    """
    print(len(collate_preds) == len(collate_true))
    print('Writing collate predictions to collate_preds.csv')
    header = 'slide_ids, gt, pred, \n'
    rows = ['%s,%d,%d,\n' % (slide_id, collate_true[slide_id], label)
            for slide_id, label in collate_preds.items()]
    with open('collate_preds.csv', 'w') as out:
        out.write(header)
        out.writelines(rows)
def _create_pdf(image_dir: str, output_dir: str, pdf_name: str) -> None:
    """Merge every downloaded ``.jpg`` in *image_dir* into a single PDF.

    Images are added in sorted filename order (which keeps pages in
    page-number order), each filling a full US-letter page; every source
    image is deleted right after it is placed in the document.

    Args:
        image_dir (str): Directory containing the image files
        output_dir (str): Directory where the output PDF should be saved
        pdf_name (str): What to name the PDF

    Returns:
        None
    """
    pattern: str = os.path.join(image_dir, '*.jpg')
    # Sorting the glob results keeps the pages in the correct order.
    page_images: list = sorted(glob(pattern))
    destination: str = '{}/{}'.format(output_dir, pdf_name)
    document: FPDF = FPDF(unit='in', format='letter')
    for page_image in page_images:
        document.add_page()
        document.image(page_image, 0, 0, 8.5, 11)
        os.remove(page_image)
    document.output(destination, "F")
def goggles():
    """Draw the goggles: two lenses (silver outer ring plus white inner
    glass) and the black strap stubs on each side of the head.
    """
    def _add_filled_oval(size, x, y, color):
        # All lens parts share the same create/fill/add pattern.
        oval = GOval(size, size, x=x, y=y)
        oval.filled = True
        oval.fill_color = color
        window.add(oval)

    def _add_filled_rect(width, height, x, y, color):
        # Filled rectangle helper for the strap pieces.
        rect = GRect(width, height, x=x, y=y)
        rect.filled = True
        rect.fill_color = color
        window.add(rect)

    # Outer (silver) rings of the right and left lenses.
    _add_filled_oval(100, 415, 180, 'silver')
    _add_filled_oval(100, 315, 180, 'silver')
    # Inner (white) glass of the right and left lenses.
    _add_filled_oval(60, 435, 200, 'white')
    _add_filled_oval(60, 335, 200, 'white')
    # Strap stubs ("bondage") on either side of the head.
    _add_filled_rect(25, 20, 515, 220, 'black')
    _add_filled_rect(25, 20, 290, 220, 'black')
def strtobool(value):
    """Cast a string to a bool.

    Mirrors the historical ``distutils.util.strtobool`` contract (distutils
    is deprecated per PEP 632 and removed in Python 3.12), implemented
    inline so no distutils import is needed. Truthy strings are
    'y', 'yes', 't', 'true', 'on', '1'; falsy strings are
    'n', 'no', 'f', 'false', 'off', '0' (case-insensitive, surrounding
    whitespace ignored).

    Args:
        value: ``None``, a ``bool``, or a value to interpret as a string.

    Returns:
        ``None`` if *value* is ``None``; *value* unchanged if it is already
        a bool; otherwise the boolean meaning of the string (``True``/
        ``False``, which compare equal to distutils' 1/0).

    Raises:
        ValueError: If the string is not a recognized truth value.
    """
    if value is None:
        return None
    if isinstance(value, bool):
        return value
    normalized = str(value).strip().lower()
    if normalized in ('y', 'yes', 't', 'true', 'on', '1'):
        return True
    if normalized in ('n', 'no', 'f', 'false', 'off', '0'):
        return False
    raise ValueError('invalid truth value %r' % (value,))
def init_lstm(input_lstm):
    """Initialize an ``nn.LSTM`` module in place.

    For every layer, the input-to-hidden and hidden-to-hidden weights are
    drawn from U(-b, b) with b = sqrt(6 / (fan_out/4 + fan_in)) — a
    Glorot-style bound accounting for the 4 stacked LSTM gates. When the
    LSTM has biases, each bias vector is zeroed except the forget-gate
    slice ``[hidden_size:2*hidden_size]``, which is set to 1.

    Args:
        input_lstm: a ``torch.nn.LSTM`` module; modified in place.
    """
    for layer in range(input_lstm.num_layers):
        # getattr instead of eval: same attribute lookup, no string-exec hazard.
        for prefix in ('weight_ih_l', 'weight_hh_l'):
            weight = getattr(input_lstm, prefix + str(layer))
            bound = np.sqrt(6.0 / (weight.size(0) / 4 + weight.size(1)))
            # nn.init.uniform is a deprecated alias; uniform_ is the
            # supported in-place initializer.
            nn.init.uniform_(weight, -bound, bound)
    if input_lstm.bias:
        for layer in range(input_lstm.num_layers):
            for prefix in ('bias_ih_l', 'bias_hh_l'):
                bias = getattr(input_lstm, prefix + str(layer))
                bias.data.zero_()
                # Forget-gate bias slice (PyTorch gate order: i, f, g, o).
                bias.data[input_lstm.hidden_size: 2 * input_lstm.hidden_size] = 1
def list_to_exp(str_list, term_padding_exp=r'\b', compile=True):
    r"""
    Build a regular expression (compiled by default) that matches any of the
    strings in ``str_list``, with each alternative group wrapped in
    ``term_padding_exp`` (the default ``r'\b'`` forces whole-word matches).

    The strings are ordered longest-first before joining so that no
    substring alternative can shadow a longer one.
    """
    lengths = list(map(len, str_list))
    ordered = util_ulist.sort_as(str_list, lengths, reverse=True)
    pattern = ''.join([term_padding_exp, '(', '|'.join(ordered), ')', term_padding_exp])
    return re.compile(pattern) if compile else pattern
def prepare_ddp_loader(loader: DataLoader, num_processes: int, process_index: int) -> DataLoader:
    """
    Transfers loader to distributed mode. Experimental feature.

    Rebuilds *loader* so that each distributed process only consumes its own
    shard of every batch (via ``BatchSamplerShard``); all remaining
    DataLoader settings are carried over from the original loader.

    Args:
        loader: pytorch dataloder
        num_processes (:obj:`int`, `optional`, defaults to 1):
            The number of processes running concurrently.
        process_index (:obj:`int`, `optional`, defaults to 0):
            The index of the current process.
    Returns:
        DataLoader: pytorch dataloder with distributed batch sampler.
    """
    ddp_dataset = loader.dataset
    # Iterable dataset doesn't like batch_sampler, but DataLoader creates a default one for it
    if isinstance(ddp_dataset, IterableDataset):
        ddp_batch_sampler = None
    else:
        # Wrap the existing batch sampler so each process sees only its shard.
        ddp_batch_sampler = BatchSamplerShard(
            loader.batch_sampler,
            num_processes=num_processes,
            process_index=process_index,
        )
    # We ignore all of those since they are all dealt with by our new_batch_sampler
    ignore_kwargs = [
        "batch_size",
        "shuffle",
        "sampler",
        "batch_sampler",
        "drop_last",
        "generator",
    ]
    # Carry over every remaining DataLoader kwarg from the old loader,
    # falling back to the defaults table when the attribute is absent.
    kwargs = {
        k: getattr(loader, k, _PYTORCH_DATALOADER_KWARGS[k])
        for k in _PYTORCH_DATALOADER_KWARGS
        if k not in ignore_kwargs
    }
    # Need to provide batch_size as batch_sampler is None for Iterable dataset
    if ddp_batch_sampler is None:
        kwargs["drop_last"] = loader.drop_last
        kwargs["batch_size"] = loader.batch_size
    loader = DataLoader(dataset=ddp_dataset, batch_sampler=ddp_batch_sampler, **kwargs)
    return loader
def __validate_exchange(value: str) -> str:
    """
    Validate that *value* is one of the supported exchange names.

    :param value: Exchange name.
    :return: The value unchanged when valid; otherwise an error is logged
        and ``None`` is returned implicitly.
    """
    if value in EXCHANGE_VALUES:
        return value
    logging.error(
        f"Invalid exchange value: {value}. Valid options: {EXCHANGE_VALUES}"
    )
def main(content, title="", classes=[]):
    """Render *content* as a 'Material for MkDocs' admonition.

    The markdown body is converted to HTML and wrapped in an admonition
    ``<div>``; *classes* selects the admonition type/style and *title* the
    heading.
    """
    body = markdown.markdown(content)
    css = " ".join(classes)
    parts = [
        '<div class="admonition {0}">\n'.format(css),
        ' <p class="admonition-title">{0}</p>\n'.format(title),
        ' <p>{0}</p>\n'.format(body),
        '</div>',
    ]
    return "".join(parts)
def get_bprop_matrix_set_diag(self):
    """Generate bprop for MatrixSetDiag"""
    get_dtype = P.DType()
    def bprop(x, y, z, out, dout):
        # Forward op writes diagonal `y` into matrix `x`. The backprop
        # therefore routes the incoming gradient as:
        #   dx: dout with its diagonal zeroed (those entries came from y)
        #   dy: the diagonal of dout
        #   dz: zeros (assist/helper input gets no gradient)
        input_shape = F.shape(x)
        batch_shape = input_shape[:-2]
        matrix_shape = input_shape[-2:]
        # Diagonal length is min(rows, cols), kept per batch element.
        diag_shape = batch_shape + (_get_min(matrix_shape),)
        grad_shape = F.shape(dout)
        grad_dtype = get_dtype(dout)
        assist = _get_matrix_diag_part_assist(grad_shape, grad_dtype)
        # Zero out the diagonal of dout to form the gradient w.r.t. x.
        dx = inner.MatrixSetDiag()(dout, P.Zeros()(diag_shape, grad_dtype), assist)
        # Extract the diagonal of dout as the gradient w.r.t. y.
        dy = inner.MatrixDiagPart()(dout, assist)
        dz = zeros_like(z)
        return dx, dy, dz
    return bprop
def fake_sph_grid_ds(hsml_factor=1.0):
    """Returns an in-memory SPH dataset useful for testing

    This dataset should have 27 particles with the particles arranged uniformly
    on a 3D grid. The bottom left corner is (0.5,0.5,0.5) and the top right
    corner is (2.5,2.5,2.5). All particles will have non-overlapping smoothing
    regions with a radius of 0.05, masses of 1, and densities of 1, and zero
    velocity.
    """
    from yt import load_particles

    npart = 27
    # Cell-centred 3x3x3 lattice: coordinates 0.5, 1.5, 2.5 on each axis,
    # with x varying slowest and z fastest (same ordering as a triple loop).
    coords = np.mgrid[0:3, 0:3, 0:3].reshape(3, npart) + 0.5
    x, y, z = (coords[0].astype(float), coords[1].astype(float),
               coords[2].astype(float))
    data = {
        "particle_position_x": (x, "cm"),
        "particle_position_y": (y, "cm"),
        "particle_position_z": (z, "cm"),
        "particle_mass": (np.ones(npart), "g"),
        "particle_velocity_x": (np.zeros(npart), "cm/s"),
        "particle_velocity_y": (np.zeros(npart), "cm/s"),
        "particle_velocity_z": (np.zeros(npart), "cm/s"),
        "smoothing_length": (0.05 * np.ones(npart) * hsml_factor, "cm"),
        "density": (np.ones(npart), "g/cm**3"),
        "temperature": (np.ones(npart), "K"),
    }
    bbox = np.array([[0, 3], [0, 3], [0, 3]])
    return load_particles(data=data, length_unit=1.0, bbox=bbox)
def jobdict(program, jobname):
    """Usage: jobname

    Print the jobdict for the named job: the job name first, then one
    tab-indented "key<TAB>value" line per jobdict entry.
    """
    print(jobname)
    jd = program.disco.jobpack(jobname).jobdict
    for key in jd:
        print("\t{0}\t{1}".format(key, jd[key]))
def test_custom_exception() -> None:
    """A user-defined exception exposes its constructor args via ``args``."""

    class CustomError(Exception):
        """A custom exception."""

    expected = ("expr", "msg")
    try:
        raise CustomError(*expected)
    except CustomError as err:
        assert err.args == expected
def ffplay_video(source: str, x: int = None, y: int = None, video_size: str = None,
                 pixel_format: str = None, fs: bool = False, an: bool = False,
                 vn: bool = False, sn: bool = False, f: str = None, s: str = None,
                 sync: str = None, ss: float = None, t: float = None, vf: str = None,
                 af: str = None, seek_interval: int = None, window_title=None,
                 show_mode=None, loop: int = None):
    """Play *source* with ffplay, forwarding every option to ``run_ffplay``.

    The parameters mirror ffplay command-line flags: ``x``/``y`` window
    size, ``video_size``/``pixel_format`` for raw input, ``fs`` fullscreen,
    ``an``/``vn``/``sn`` disable audio/video/subtitles, ``f`` input format,
    ``sync`` master clock source, ``ss``/``t`` start offset and duration,
    ``vf``/``af`` video/audio filters, ``loop`` repeat count. Note that
    ``show_mode`` is forwarded as the ``showmode`` keyword.

    Examples:
        ffplay -f rawvideo -pixel_format yuv420p -s 480*480 texture.yuv
        ffplay -f rawvideo -pixel_format rgb24 -s 480*480 texture.rgb
        ffplay video.mp4 -sync audio
        ffplay video.mp4 -sync video
        ffplay video.mp4 -sync ext
    """
    run_ffplay(source, x=x, y=y, video_size=video_size, pixel_format=pixel_format,
               fs=fs, an=an, vn=vn, sn=sn, f=f, s=s, sync=sync, ss=ss, t=t, vf=vf,
               af=af, seek_interval=seek_interval, window_title=window_title,
               showmode=show_mode, loop=loop)
async def _async_get_image_sessions(device: Device) -> dict[str, ImageSession]:
    """Fetch the device's image event sessions, keyed by event token."""
    media_manager = device.event_media_manager
    sessions = await media_manager.async_image_sessions()
    by_token: dict[str, ImageSession] = {}
    for session in sessions:
        by_token[session.event_token] = session
    return by_token
def make_element_weight_parser(weight_column):
    """Build a CSV-row parser returning ``(name, weight)`` tuples.

    Parameterizing on the weight column index lets the same factory serve
    analysis result files that store the weight in different columns.
    """
    def parse_element_weight(csv_row):
        # Column 0 is always the element name; the weight column varies.
        return csv_row[0], float(csv_row[weight_column])
    return parse_element_weight
async def aio_aws_client(
    service_name: str, *args, **kwargs
) -> aiobotocore.client.AioBaseClient:
    """
    Async helper yielding an aiobotocore client for *service_name*.

    Thin wrapper over ``aiobotocore.session.get_session().create_client()``;
    all extra positional and keyword arguments — including
    ``config: aiobotocore.config.AioConfig`` (default from
    :py:func:`aio_aws_default_config`) — are forwarded to
    ``session.create_client(**kwargs)``.

    .. code-block::

        s3_endpoint = "http://localhost:5555"
        client_config = botocore.client.Config(
            read_timeout=120,
            max_pool_connections=50,
        )
        async with aio_aws_client(
            "s3", endpoint_url=s3_endpoint, config=client_config
        ) as client:
            assert "aiobotocore.client.S3" in str(client)
            assert isinstance(client, aiobotocore.client.AioBaseClient)

    :param service_name: an AWS service for a client, like "s3", try
        :py:meth:`session.get_available_services()`
    :yield: aiobotocore.client.AioBaseClient

    .. seealso::
        - https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
    """
    aws_session = aio_aws_session()
    async with aws_session.create_client(service_name, *args, **kwargs) as aws_client:
        yield aws_client
def test_multiple_words(sro, syllabics):
    """
    Round-trip transcoding of multi-word inputs: these fixtures convert
    cleanly in both directions, so each transcoder must reproduce the
    other representation exactly.
    """
    assert syllabics == sro2syllabics(sro)
    assert sro == syllabics2sro(syllabics)
def jitterer(out, z):
    """Jitter neuron x-positions for scatter-plotting layer activations.

    Args:
        out: sequence of layer activation matrices; ``out[z]`` is taken to
            have shape (n_data, n_neurons) — it is transposed before use.
        z: index of the layer to process.

    Returns:
        np.ndarray of shape (n_neurons, n_data): row ``i`` holds the value
        ``i + 1`` plus independent uniform noise in (-0.25, 0.25), giving
        each neuron a jittered x position for every data point.
    """
    n_neurons, n_data = out[z].T.shape
    base = np.arange(1, n_neurons + 1, dtype=float)[:, None]
    # One vectorized draw replaces the original per-element Python double
    # loop (same distribution, O(1) RNG calls instead of n_neurons*n_data).
    return base + np.random.uniform(-0.25, 0.25, size=(n_neurons, n_data))
def deconstruct_full_path(filename: str) -> Tuple[str, str]:
    """
    Split *filename* into its parent folder and base name.

    Parameters
    ----------
    filename : str
        The path (with filename) that will be deconstructed.

    Returns
    -------
    Tuple[str, str]
        ``(parent_folder, file_name)``; the parent is anchored at the
        POSIX root ("/").
    """
    anchored = PurePosixPath("/").joinpath(filename)
    parent_folder = str(anchored.parent)
    return parent_folder, anchored.name
def bbx_to_world(cords, vehicle):
    """
    Convert bounding box coordinate at vehicle reference to world reference.

    Parameters
    ----------
    cords : np.ndarray
        Bounding box coordinates with 8 vertices, shape (8, 4)
        (homogeneous rows, relative to the bounding-box center).

    vehicle : opencda object
        Opencda ObstacleVehicle.

    Returns
    -------
    bb_world_cords : np.ndarray
        Bounding box coordinates under world reference, shape (4, 8)
        (one homogeneous column per vertex).
    """
    # The bounding-box center expressed as a transform in the vehicle frame.
    bb_transform = Transform(vehicle.bounding_box.location)
    # bounding box to vehicle transformation matrix
    bb_vehicle_matrix = x_to_world_transformation(bb_transform)
    # vehicle to world transformation matrix
    vehicle_world_matrix = x_to_world_transformation(vehicle.get_transform())
    # bounding box to world transformation matrix (composition of the two)
    bb_world_matrix = np.dot(vehicle_world_matrix, bb_vehicle_matrix)
    # 8 vertices are relative to bbx center, thus multiply with bbx_2_world to
    # get the world coords.
    bb_world_cords = np.dot(bb_world_matrix, np.transpose(cords))
    return bb_world_cords
def load_data():
    """Load the IMDB sentiment dataset (top 10k words) and pad sequences.

    Returns:
        ``(x_train_padded, y_train, x_test_padded, y_test)`` where the
        review sequences are truncated/padded to length 100.
    """
    (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=10000)
    return (
        sequence.pad_sequences(x_train, maxlen=100),
        y_train,
        sequence.pad_sequences(x_test, maxlen=100),
        y_test,
    )
def poor_joke_validator(joke):
    """Respond with 'HAHA' (wish not granted) or else 'Granted'.

    Not implemented yet; currently a no-op placeholder.
    """
    pass
def rotate_to_calibrated_axis(
    data: np.ndarray, ref_val_0: complex, ref_val_1: complex
) -> np.ndarray:
    """
    Rotates, normalizes and offsets complex valued data based on calibration points.

    The resulting transformation maps ``ref_val_0`` to 0 and ``ref_val_1``
    to 1 in the complex plane.

    Parameters
    ----------
    data
        An array of complex valued data points.
    ref_val_0
        The reference value corresponding to the 0 state.
    ref_val_1
        The reference value corresponding to the 1 state.

    Returns
    -------
    :
        Calibrated array of complex data points.
    """
    rotation_angle = np.angle(ref_val_1 - ref_val_0)
    scale = np.abs(ref_val_1 - ref_val_0)
    rotated_offset = ref_val_0 * np.exp(-1j * rotation_angle) / scale
    return data * np.exp(-1j * rotation_angle) / scale - rotated_offset
def filter_graph_data(df: pd.DataFrame, x_col: str, x_range: Optional[Tuple[int, int]], file_cols: List[str],
                      file_tuple: FileTuple) -> Optional[pd.DataFrame]:
    """
    Select the subset of *df* relevant for one graph.

    :param df: The dataframe to filter
    :param x_col: Name of the column that has the data for the x-axis, only used if x_range is given
    :param x_range: (min, max) tuple for filtering the values for the x-axis, or None for no filter
    :param file_cols: Column names that define values for which separate graphs are generated
    :param file_tuple: The set of values for the file_cols that are used in this graph
    :return: The filtered dataframe, or None if no row matches
    """
    mask = True
    if x_range is not None:
        lo, hi = x_range
        mask = (df[x_col] >= lo) & (df[x_col] < hi)
    for column, expected in zip(file_cols, file_tuple):
        mask &= df[column] == expected
    filtered = df.loc[mask]
    if filtered.empty:
        return None
    return filtered
def test_srx_to_srp(sraweb_connection):
    """srx_to_srp maps the experiment accession to its study accession."""
    mapping = sraweb_connection.srx_to_srp("SRX663254")
    assert ["SRP044932"] == list(mapping["study_accession"])
def test_list_unsigned_short_max_length_3_nistxml_sv_iv_list_unsigned_short_max_length_4_1(mode, save_output, output_format):
    """
    Type list/unsignedShort is restricted by facet maxLength with value 8.
    """
    base = "nistData/list/unsignedShort/Schema+Instance/"
    assert_bindings(
        schema=base + "NISTSchema-SV-IV-list-unsignedShort-maxLength-4.xsd",
        instance=base + "NISTXML-SV-IV-list-unsignedShort-maxLength-4-1.xml",
        class_name="NistschemaSvIvListUnsignedShortMaxLength4",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def get_question_summary_from_model(question_summary_model):
    """Build a QuestionSummary domain object from its storage model.

    Args:
        question_summary_model: QuestionSummaryModel. The storage-layer
            model to convert.

    Returns:
        QuestionSummary. The domain object mirroring the model's fields.
    """
    model = question_summary_model
    return question_domain.QuestionSummary(
        model.id,
        model.question_content,
        model.misconception_ids,
        model.interaction_id,
        model.question_model_created_on,
        model.question_model_last_updated
    )
async def test_full_recurring_schedule(event_loop: AbstractEventLoop) -> None:
    """Test full recurring SwitcherV2Schedule object."""
    schedule = SwitcherV2Schedule(
        event_loop, 0, [unhexlify(DUMMY_FULL_RECCURING_SCHEDULE_DATA)]
    )
    await wait([schedule.init_future])
    # Identity and state flags.
    assert DUMMY_FULL_RECCURING_SCHEDULE_ID == schedule.schedule_id
    assert schedule.enabled
    assert schedule.recurring
    # Recurrence details parsed from the raw schedule payload.
    await assert_lists_equal(schedule.days, DUMMY_FULL_RECCURING_DAYS_LIST)
    assert DUMMY_FULL_RECCURING_START_TIME == schedule.start_time
    assert DUMMY_FULL_RECCURING_END_TIME == schedule.end_time
    assert DUMMY_FULL_RECCURING_DURATION == schedule.duration
    assert DUMMY_FULL_RECCURING_SCHEDULE_DATA_BYTES == schedule.schedule_data
def processLine(line):
    """Process a single line of input, returning a single line of output as a string.

    Input on stdin is
        <input path>\t<output fmt>\t<aligner>\t<fiducials>\t<output parameters>
    where:
        - <input path> is a local path of the input image to align (not a url),
        - <output fmt> is a format string which will generate the output path,
        - <aligner> is the name of the aligner to use,
        - <fiducials> is a list of 'key@value' pairs, joined using '::'.
          These are used for determining feature locations, which the aligners
          are defined relative to. Extra fiducials (not needed by the given
          aligner) are ignored; a missing fiducial yields an error string.
        - <output parameters> is an optional list of 'key@value' pairs, joined
          using '::'. Supported keys:
            crop: 'x0,y0,x1,y1' rect from which to extract features
                  [default: no crop]
            width/height: size to scale the cropped input to (aspect ratio
                  NOT preserved) [default: original resolution]
            drawfids: how to draw fiducials on output — none (default),
                  circle, or rectangle
            drawfidsline/drawfidsfill: outline/fill color [default: green]
            drawfidsr: circle radius [default: 3]
            outfmt: python format string printed on stdout, given a dict with
                  basename, inpath, outpath, outpathfmt, aligner, fiducials,
                  outparams [default: '%(inpath)s\t%(outpath)s']
            errfmt: format string printed on error, with the same dict plus
                  errortype and errormsg [default: 'error']
    """
    #TODO test out various outfmt options
    #TODO how to specify if we want to write EXIF or not?
    from collections import defaultdict
    fmtdict = defaultdict(str)
    DEFAULT_OUTPARAMS = defaultdict(str)
    DEFAULT_OUTPARAMS['outfmt'] = DEFAULT_OUTPUT_FMT
    DEFAULT_OUTPARAMS['errfmt'] = DEFAULT_ERROR_FMT
    DEFAULT_OUTPARAMS['drawfids'] = 'none'
    DEFAULT_OUTPARAMS['drawfidsline'] = 'green'
    DEFAULT_OUTPARAMS['drawfidsfill'] = 'green'
    DEFAULT_OUTPARAMS['drawfidsr'] = 3
    # Initialize outparams before the try block so the error path below can
    # always format an error string, even when parsing fails on an early
    # els.pop() (previously outparams could be unbound in the except clause).
    outparams = dict(**DEFAULT_OUTPARAMS)
    # parse elements
    els = line.split('\t')
    try:
        # input and output
        fmtdict['inpath'] = inpath = els.pop(0)
        fmtdict['basename'] = basename = os.path.basename(inpath)
        fmtdict['outpathfmt'] = outpathfmt = els.pop(0)
        # aligner
        fmtdict['aligner'] = aligner = els.pop(0)
        # fiducials
        fmtdict['fiducials'] = fiducials = els.pop(0)
        fiducials = parseFiducials(fiducials)
        # output params are optional, so their absence is not an error
        if els:
            fmtdict['outparams'] = els.pop(0)
            outparams.update(str2kvdict(fmtdict['outparams'], sep='@', dlm='::'))
        # at this stage we have everything we need;
        # make sure the file exists, then open and align it
        if not os.path.exists(inpath):
            raise IOError('Image does not exist')
        im = Image.open(inpath)
        a = Aligner(name=aligner)
        aligned, params = a.align(im, fiducials=fiducials, outparams=outparams)
        fmtdict.update(params)
        # un-escape tab/newline sequences in the output format string
        outparams['outfmt'] = outparams['outfmt'].replace(r'\t', '\t').replace(r'\n', '\n')
        # save the output image
        fmtdict['outpath'] = outpath = outpathfmt % fmtdict
        fmtdict['outpathfmt'] = fmtdict['outpathfmt'].replace(r'\t', '\t').replace(r'\n', '\n')
        saveImage(aligned, outpath, params)
        # generate the output string
        return outparams['outfmt'] % fmtdict
    except Exception as e:
        # NOTE: a leftover debug `raise` used to sit here, which made this
        # whole error-reporting path unreachable; removed so callers get the
        # formatted error string they were promised.
        fmtdict['errortype'] = type(e).__name__
        try:
            fmtdict['errormsg'] = str(e)
        except Exception:
            pass
        return outparams['errfmt'] % fmtdict
def main() -> None:
    """Extends index.yaml file.

    Appends to INDEX_YAML_PATH any datastore index present in the emulator's
    WEB-INF/index.yaml that is not already listed (comparison ignores the
    order of an index's properties), then rewrites the file with a blank
    line between index entries.
    """
    with open(INDEX_YAML_PATH, 'r', encoding='utf-8') as f:
        index_yaml_dict = yaml.safe_load(f)
    with open(WEB_INF_INDEX_YAML_PATH, 'r', encoding='utf-8') as f:
        web_inf_index_yaml_dict = yaml.safe_load(f)
    # Nothing to merge when the emulator cache declares no indexes.
    if web_inf_index_yaml_dict['indexes'] is None:
        return
    # There is a possibility that an index in index.yaml may exist in
    # ../cloud_datastore_emulator_cache/WEB-INF/index.yaml with different
    # order of properties. We don't need to append those indexes. So we will
    # compare sorted dictionaries. Deepcopy is used here to avoid changing the
    # order of index_yaml_dict after sorting temp_index_yaml_dict.
    temp_index_yaml_dict = copy.deepcopy(index_yaml_dict)
    for kind in temp_index_yaml_dict['indexes']:
        kind['properties'] = sorted(
            kind['properties'], key=lambda x: x['name']
        )
    new_kinds = []
    for kind in web_inf_index_yaml_dict['indexes']:
        # Deepcopy is used here to avoid changing the order of kind in
        # temp_index_yaml_dict after sorting temp_web_inf_kind.
        temp_web_inf_kind = copy.deepcopy(kind)
        temp_web_inf_kind['properties'] = sorted(
            temp_web_inf_kind['properties'], key=lambda x: x['name'])
        # Both sides are property-sorted, so this membership test is
        # insensitive to property ordering differences.
        if temp_web_inf_kind not in temp_index_yaml_dict['indexes']:
            new_kinds.append(kind)
    if len(new_kinds) == 0:
        return
    index_yaml_dict['indexes'] += new_kinds
    # The yaml dump function doesn't add new lines between kinds
    # automatically. So we add new lines manually using replace
    # function.
    new_index_yaml_dict = yaml.safe_dump(
        index_yaml_dict, default_flow_style=False, sort_keys=False
    )
    index_yaml = new_index_yaml_dict.replace('- kind', '\n- kind')
    with open(INDEX_YAML_PATH, 'w', encoding='utf-8') as f:
        f.write(index_yaml)
def refraction(alt_degrees, temperature_C, pressure_mbar):
    """Given an observed altitude, return how much the image is refracted.

    Zero refraction is returned both for objects very near the zenith,
    as well as for objects more than one degree below the horizon.
    """
    # Refraction at standard conditions, from the observed altitude.
    standard = 0.016667 / tan((alt_degrees + 7.31 / (alt_degrees + 4.4)) * DEG2RAD)
    # Scale for the actual air pressure and temperature.
    amount = standard * (0.28 * pressure_mbar / (temperature_C + 273.0))
    in_range = (-1.0 <= alt_degrees) & (alt_degrees <= 89.9)
    return where(in_range, amount, 0.0)
def reduce_mem_usage(df):
    """
    Downcast every column of *df* to the smallest dtype that holds its
    values, logging memory usage before and after. Object columns become
    categoricals; integer columns are narrowed to the first fitting int
    type; float columns are narrowed to float16/float32 or widened to
    float64 otherwise.
    """
    start_mem = df.memory_usage().sum() / 1024**2
    logger.info('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
    int_candidates = (np.int8, np.int16, np.int32, np.int64)
    float_candidates = (np.float16, np.float32)
    for col in df.columns:
        if df[col].dtype == object:
            df[col] = df[col].astype('category')
            continue
        c_min = df[col].min()
        c_max = df[col].max()
        if str(df[col].dtype)[:3] == 'int':
            # First integer type whose open range contains all values.
            for candidate in int_candidates:
                info = np.iinfo(candidate)
                if c_min > info.min and c_max < info.max:
                    df[col] = df[col].astype(candidate)
                    break
        else:
            for candidate in float_candidates:
                info = np.finfo(candidate)
                if c_min > info.min and c_max < info.max:
                    df[col] = df[col].astype(candidate)
                    break
            else:
                df[col] = df[col].astype(np.float64)
    end_mem = df.memory_usage().sum() / 1024**2
    logger.info('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
    logger.info('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
    return df
def retry_pattern():
    """Retry pattern decorator used when connecting to snowflake

    Returns a ``backoff.on_exception`` decorator that retries on
    ``snowflake.connector.errors.OperationalError`` with exponential
    backoff (factor 2), at most 5 attempts, logging each retry through
    ``log_backoff_attempt``.
    """
    return backoff.on_exception(backoff.expo,
                                snowflake.connector.errors.OperationalError,
                                max_tries=5,
                                on_backoff=log_backoff_attempt,
                                factor=2)
def get_vrf_route_targets(
    device, address_family, rt_type, vrf=None, route_distinguisher=None
):
    """ Get route target value from a device

        Args:
            device ('obj'): device to run the command on
            address_family ('str'): address family value
            rt_type ('str'): route target type
                ex.) rt_type = 'import' OR
                     rt_type = 'export' OR
                     rt_type = 'both'
            vrf('str'): vrf name (defaults to 'default' when omitted)
            route_distinguisher ('str'): route distinguisher value; when
                omitted it is read from the parsed 'show vrf detail' output
        Returns:
            Route target value
            None (when parsing fails or no matching route target exists)
        Raises:
            None
    """
    log.info(
        "Getting route target of type {rt_type} for device {dev_name}".format(
            rt_type=rt_type, dev_name=device.name
        )
    )
    cli_command = ["show vrf detail {vrf}", "show vrf detail"]
    if vrf:
        cmd = cli_command[0].format(vrf=vrf)
    else:
        cmd = cli_command[1]
    try:
        raw_out = device.execute(cmd)
        out = device.parse(cmd, output=raw_out)
    except SchemaEmptyParserError:
        # Parser produced no structured data for this device/command.
        return None
    if not vrf:
        vrf = "default"
    try:
        # Fall back to the RD from the parsed output when not supplied.
        if not route_distinguisher:
            route_distinguisher = out[vrf]["route_distinguisher"]
        # Without multicast in the raw output, only the first token of the
        # address family (e.g. 'ipv4') is used as the lookup key.
        if "multicast" not in raw_out:
            address_family = address_family.split()[0]
        route_targets = out[vrf]["address_family"][address_family][
            "route_targets"
        ][route_distinguisher]
        # 'both' matches either requested direction.
        if (
            route_targets["rt_type"] == rt_type
            or route_targets["rt_type"] == "both"
        ):
            return route_targets["route_target"]
    except KeyError as e:
        # Expected structure missing from parser output.
        return None
    return None
async def create(ctx, name, *, message):
    """Creates a B* tag with your code"""
    # NOTE: failure handling around createTag was left disabled upstream
    # (a try/except reporting "Tag creation failed" was commented out).
    createTag(ctx.author, name, message)
    await ctx.send(f"Tag `{name}` created!")
def test_order_asc():
    """order works with asc"""
    expr = Order(Column("foo", str).asc())
    assert "| order by\n\tfoo asc" == str(expr)
def privGetElevationLocs(locs, dataProvider, dataProviderArgs):
    """
    Dispatch an elevation lookup for *locs* to the named data provider.

    Args:
        locs: locations to annotate with altitude.
        dataProvider: provider key (lower-cased when it is a string);
            resolved through ``elevDataProviderDictionary``.
        dataProviderArgs: dict of provider-specific options ('APIkey' for
            the ors-online and elevapi providers).

    Returns:
        The locations with altitude added, or ``None`` for an unsupported
        provider. NOTE: mapquest, pgRouting and OSRM are not supported.
    """
    try:
        dataProvider = dataProvider.lower()
    except AttributeError:
        # Non-string providers (e.g. None) are passed through unchanged;
        # previously a bare except hid any other failure here too.
        pass
    provider = elevDataProviderDictionary[dataProvider]
    if provider == 'ors-online':
        return orsGetElevation(locs, dataProviderArgs['APIkey'])
    elif provider == 'usgs':
        return usgsGetElevation(locs)
    elif provider == 'elevapi':
        return elevapiGetElevation(locs, dataProviderArgs['APIkey'])
    # FIXME -- unsupported providers fall through and return None.
    return None
def _format_field(value, parts, conv, spec, want_bytes=False):
    """Format a replacement field.

    Python 2 string-formatting helper: walks the field's attribute/index
    path, applies the !r/!s conversion, then formats with *spec*.
    """
    # Walk the lookup path of the replacement field (e.g. "0.name[2]"):
    # each entry is (is_subscript, part, _).
    for k, part, _ in parts:
        if k:
            # Bracketed lookup: numeric string -> sequence index, else key.
            if part.isdigit():
                value = value[int(part)]
            else:
                value = value[part]
        else:
            value = getattr(value, part)
    if conv:
        # Apply !r or !s conversion via %-formatting.
        value = ((conv == 'r') and '%r' or '%s') % (value,)
    if hasattr(value, '__format__'):
        value = value.__format__(spec)
    elif hasattr(value, 'strftime') and spec:
        # Datetime-like objects lacking __format__: fall back to strftime.
        value = value.strftime(str(spec))
    else:
        value = _strformat(value, spec)
    if want_bytes and isinstance(value, unicode):
        # Python 2: caller wants a byte string; encode the unicode result.
        return str(value)
    return value
def set_powerups(*args, **kwargs): # real signature unknown
    """ Sets a player's powerups.

    Auto-generated stub: the real signature is unknown (implemented
    natively elsewhere); this body is a no-op placeholder.
    """
    pass
def mixed_string_list_one_valid():
    """Return the module's fixture list of mixed strings (one valid entry)."""
    return _MIXED_STRING_LISTS_ONE_VALID_
def generate_project(project):
    """for generating a format project

    Validates the project name (must start with a capital letter, then only
    letters, digits or underscores), refuses to overwrite an existing
    directory, and copies the project template into the current working
    directory.

    NOTE: Python 2 module (print statements); `copy` here is the local
    template-copying helper module, not the stdlib `copy`.
    """
    directory = os.getcwd()
    template_dir = TEMPLATES_PATH
    # error if
    if not re.search(r'^[A-Z]\w*$', project):
        print 'Error: Project names must begin with a capital letter. \
        \nand contain only letters, numbers and underscores.'
        sys.exit(1)
    elif exists(project):
        print "Error: project %r already exists" % project
        sys.exit(1)
    copy.copy_helper(project, directory, template_dir)
def test_retry_connect():
    """Test retry_connect."""
    # TODO: empty placeholder test — no assertions implemented yet.
def deployments_update(deployment_name, new_name, default_version, yaml_file, quiet):
    """
    Update a deployment.

    If you only want to update the name of the deployment or the default deployment version,
    use the options `<new_name>` and `<default_version>`.
    If you want to update the deployment input/output fields, description or labels, please use a yaml file to define
    the new deployment.

    \b
    For example:
    ```
    deployment_description: Deployment created via command line.
    deployment_labels:
      my-key-1: my-label-1
      my-key-2: my-label-2
    input_fields:
      - name: param1
        data_type: int
      - name: param2
        data_type: string
    output_fields:
      - name: param1
        data_type: int
      - name: param2
        data_type: string
    ```
    """
    project_name = get_current_project(error=True)
    yaml_content = read_yaml(yaml_file)
    deployment = api.DeploymentUpdate(name=new_name, default_version=default_version)
    if 'deployment_description' in yaml_content:
        deployment.description = yaml_content['deployment_description']
    if 'deployment_labels' in yaml_content:
        deployment.labels = yaml_content['deployment_labels']
    if 'input_fields' in yaml_content and isinstance(yaml_content['input_fields'], list):
        deployment.input_fields = [
            api.DeploymentInputFieldCreate(name=item['name'], data_type=item['data_type'])
            for item in yaml_content['input_fields']
        ]
    if 'output_fields' in yaml_content and isinstance(yaml_content['output_fields'], list):
        # Bug fix: output fields were previously built with
        # DeploymentInputFieldCreate (copy-paste); they must use the
        # output-field model from the UbiOps client.
        deployment.output_fields = [
            api.DeploymentOutputFieldCreate(name=item['name'], data_type=item['data_type'])
            for item in yaml_content['output_fields']
        ]
    client = init_client()
    client.deployments_update(project_name=project_name, deployment_name=deployment_name, data=deployment)
    client.api_client.close()
    if not quiet:
        click.echo("Deployment was successfully updated")
def log_mlflow(gv, args):
    """Log data into MLFlow.

    Logs the parsed CLI arguments as MLflow params, then pipes the
    "train_loss", "test_loss" and "correct" series kept from *gv* into
    ``mlflow.log_metrics``.
    # NOTE(review): `>>` is assumed to be gv's pipe/stream operator that
    # feeds the kept values to the callable — confirm against the gv docs.
    """
    import mlflow
    mlflow.log_params(vars(args))
    gv.keep("train_loss", "test_loss", "correct") >> mlflow.log_metrics
def make_parser():
    """Create the argument parser, derived from the general scripts parser."""
    # Help text for the positional file-list argument supplied by get_parser.
    file_list_help = ('A file containing a list of files/file paths to be '
                      'read. These should be nxml or txt files.')
    output_help = ('Results will be pickled in files '
                   '<output_name>_stmts.pkl and <output_name>_readings.pkl.')

    parser = get_parser(__doc__, file_list_help)
    parser.add_argument(dest='output_name', help=output_help)
    return parser
def script_names():
    """Returns the sequence of example script names."""
    # Each stem from _stem_names() becomes a ".py" filename string.
    return [str(pathlib.Path(stem).with_suffix('.py')) for stem in _stem_names()]
def service_event_log(
        simulated_log: alarms.Manager, active_log_events: mcu_pb.ActiveLogEvents,
        simulated_log_receiver: lists.ReceiveSynchronizer[mcu_pb.LogEvent]
) -> None:
    """Output outstanding events.

    Pulls any newly generated events from the simulated alarms manager,
    forwards them to the synchronized receiver, and refreshes the
    active-events id when the manager reports one.
    """
    result = simulated_log.output()
    if result is None:
        # Nothing new to report this cycle.
        return

    # Forward the freshly generated log events to the receive synchronizer.
    simulated_log_receiver.input(result.next_log_events)
    # Only overwrite the active-events id when the manager actually
    # reported an ActiveLogEvents payload.
    if result.active_log_events is not None:
        active_log_events.id = result.active_log_events.id
def test_convert_output_type_nornir(runner):
    """
    Test that the motherstarter convert outputs nornir
    files to the correct location.

    Args:
        runner: The runner which simulates command-line
        inputs, replicating an end user.

    Returns:
        N/A

    Raises:
        N/A
    """
    # Assign output_type to a variable
    ot = "nornir"
    # Execute command and assign to variable
    result = runner.invoke(ms.convert, ["-o", ot])
    # Assign expected strings to variables, for further validation.
    # FIX: the variable names were swapped relative to the file each message
    # refers to (the "inv" variable held the groups.yaml message and vice
    # versa); they now match their contents.
    expected_output_type = f"DEBUG - Output type is: {ot}"
    expected_output_groups_file = (
        "INFO - File output location: motherstarter/outputs/nr/inventory/groups.yaml"
    )
    expected_output_hosts_file = (
        "INFO - File output location: motherstarter/outputs/nr/inventory/hosts.yaml"
    )
    # Perform assertion tests to ensure variables are in the expected outputs
    assert result.exit_code == 0
    assert expected_output_type in result.output
    assert expected_output_groups_file in result.output
    assert expected_output_hosts_file in result.output
def lovasz_grad(gt_sorted):
    """
    Compute the gradient of the Lovasz extension w.r.t. the sorted errors.
    See Alg. 1 in the paper.
    """
    num_pixels = len(gt_sorted)
    num_fg = gt_sorted.sum()
    # Cumulative foreground / background counts along the sorted order.
    fg_cum = gt_sorted.float().cumsum(0)
    bg_cum = (1 - gt_sorted).float().cumsum(0)
    # IoU at each prefix: 1 - intersection / union.
    jaccard = 1. - (num_fg - fg_cum) / (num_fg + bg_cum)
    # Differences of consecutive values give per-position gradients;
    # a single pixel keeps the raw value (cover 1-pixel case).
    if num_pixels > 1:
        jaccard[1:num_pixels] = jaccard[1:num_pixels] - jaccard[0:-1]
    return jaccard
def expand_image(_img, block, stride, deform=True):
    """
    Split (or resize) a 3D image into fixed-size blocks.

    Args:
        _img: numpy array, assumed (z, y, x) — TODO confirm axis order
        block: size of the blocks required
        stride: step size between consecutive blocks (ignored when deform=True)
        deform: if True, resize the whole image into a single block instead
            of tiling it with a sliding window
    Returns: array (or single-element list when deform=True) of blocks
    """
    block = tuple(block)
    if deform:
        # Resize the whole volume into exactly one block: first each z-slice
        # in-plane, then along the z axis.
        _img = _img.astype('float32')
        ims_Z = np.zeros([_img.shape[0], block[1], block[0]])
        f_img = np.zeros([block[2], block[1], block[0]])
        for z in range(0, _img.shape[0]):
            ims_Z[z, :, :] = cv2.resize(_img[z, :, :], (block[0], block[1]))
        for x in range(0, ims_Z.shape[2]):
            f_img[:, :, x] = cv2.resize(ims_Z[:, :, x], (block[1], block[2]))
        f_img = [f_img]
    else:
        # Pad the whole image so every dimension is at least one block wide.
        to_pad = [max(dim, want) for dim, want in zip(_img.shape, block)]
        if to_pad != list(_img.shape):
            print(f"Entire image must be padded: {_img.shape}, must be padded")
            _img = pad_nd_image(_img, new_shape=to_pad)
        a_img = view_as_windows(_img, block, step=stride)
        f_img = a_img.reshape(-1, *a_img.shape[-3:])
        # Make sure every block matches the requested size.
        # BUG FIX: the original loop rebound the loop variable, so padded
        # blocks were computed and then silently discarded; collect the
        # (possibly padded) blocks and rebuild the array instead.
        checked = []
        for s in f_img:
            if s.shape != block:
                print(f"Shape: {s.shape}, must be padded to match: {block}")
                s = pad_nd_image(s, new_shape=block)
                assert s.shape == block, "Padding failed"
            checked.append(s)
        f_img = np.asarray(checked)
    return f_img
def hubstatus_cli(hub, eater_name, color):
    """
    A dumpling eater.
    Connects to nd-hub (the dumpling hub) and continually prints summary status
    information from any SystemStatusChef dumplings. This is a system
    monitoring dumpling eater which can be used to keep an eye on nd-hub.
    """
    # The module-level dumpling handlers read PRINT_COLOR, so stash the CLI
    # flag in the global before the eater starts.
    global PRINT_COLOR
    PRINT_COLOR = color

    if PRINT_COLOR:
        colorama.init()  # Needed for Windows console.

    # Subscribe only to SystemStatusChef dumplings; the on_* callbacks are
    # module-level handlers defined elsewhere in this file.
    eater = netdumplings.DumplingEater(
        name=eater_name,
        hub=hub,
        chef_filter=['SystemStatusChef'],
        on_connect=on_connect,
        on_dumpling=on_dumpling,
        on_connection_lost=on_connection_lost,
    )
    # Blocks here, running the eater's event loop until interrupted.
    eater.run()
def test_convert_fueltypes_sectors_ktoe_gwh():
    """Check ktoe -> GWh conversion of nested fuel data (factor 11.63)."""
    ktoe_to_gwh = 11.6300000
    in_value = {'enduse': {'sector': np.array([10.0, 20.0])}}
    expected = {'enduse': {'sector': np.array([10.0 * ktoe_to_gwh, 20.0 * ktoe_to_gwh])}}

    # call function
    out_value = conversions.convert_fueltypes_sectors_ktoe_gwh(in_value)

    np.testing.assert_array_almost_equal(out_value['enduse']['sector'][0], expected['enduse']['sector'][0])
    np.testing.assert_array_almost_equal(out_value['enduse']['sector'][1], expected['enduse']['sector'][1])
def disabled(reason='No reason given'):
    """Decorator that disables a command."""
    # pylint:disable=missing-docstring,unused-argument
    def decorate(func):
        # Replace the command with a stub that always raises, while keeping a
        # handle to the original implementation on the wrapper.
        @wraps(func)
        def blocked(*args, **kwargs):
            raise DisabledCommandException('This command is disabled: %s' % reason)
        blocked.tag = Tag.disabled
        blocked.original_func = func
        return blocked
    return decorate
def get_tensor_model_parallel_group():
    """Get the tensor model parallel group the caller rank belongs to.

    Raises AssertionError if model-parallel state has not been initialized
    (i.e. the module-level group is still None).
    """
    assert _TENSOR_MODEL_PARALLEL_GROUP is not None, \
        'intra_layer_model parallel group is not initialized'
    return _TENSOR_MODEL_PARALLEL_GROUP
async def test_form_errors_get_info(hass, error):
    """Test we handle errors."""
    # `error` is a parametrized (exception, expected_error_key) pair.
    exc, base_error = error
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )

    # Make the device-info probe raise the parametrized exception.
    with patch(
        "aioshelly.get_info", side_effect=exc,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"], {"host": "1.1.1.1"},
        )

    # The flow should re-show the form with the matching base error.
    assert result2["type"] == "form"
    assert result2["errors"] == {"base": base_error}
def normalize(data):
    """Normalizes the values of incoming data.

    Args:
        data (dict): Dictionary of response data

    Returns:
        dict: New dictionary with lower-cased keys and cleaned values
            ("name" title-cased, "age" an int or None, "gender" and
            "favorite_colors" lower-cased, spaces stripped from "email",
            "favorite_colors" and "finished", and "finished" a bool).
    """
    normalized_data = {}
    for key in data:
        value = str(data[key])
        key = key.lower()

        # Strip all fields and reduce multiple spaces to a single whitespace
        value = value.strip()
        value = re.sub(r"\s+", " ", value)

        if key == "name":
            value = string.capwords(value)
        elif key == "age":
            if value is not None and len(value) > 0:
                value = int(value)
            else:
                value = None
        elif key in ("gender", "favorite_colors"):
            value = value.lower()

        if key in ("email", "favorite_colors", "finished"):
            value = value.replace(" ", "")

        if key == "finished":
            # BUG FIX: bool(value.capitalize()) was True for ANY non-empty
            # string (including "false"); compare against "True" so that
            # "true"/"TRUE" map to True and everything else to False.
            value = value.capitalize() == "True"

        normalized_data[key] = value
    return normalized_data
def five_fold(data_set):
    """Partition samples into the 5 train/test splits of 5-fold CV.

    Args:
        data_set (List of Sample objects): The Samples to be partitioned

    Returns:
        list: five (train, test) pairs, where each test slice is one fifth
        of the data and the matching train list is everything else. Any
        remainder beyond 5 equal chunks never appears in a test slice.
    """
    chunk = len(data_set) // 5
    folds = []
    for k in range(5):  # folds 0-4
        start = k * chunk
        test_part = data_set[start:start + chunk]
        train_part = data_set[:start] + data_set[start + chunk:]
        folds.append((train_part, test_part))
    return folds
def test(n=10):
    """ Demo multiplication of two square matrices of given dimension 'n' """
    print('Making two random square matrices of dimension', n, '...')
    lhs = MatrixFactory.makeRandom(n, n)
    rhs = MatrixFactory.makeRandom(n, n)
    # Show both operands, then their product.
    print(lhs)
    print(rhs)
    print(lhs * rhs)
def train(p_number, params, shared_net, optimizer, frames, episodes, steps=20):
    """Train function used for Multiprocessing.

    A3C worker loop: each worker owns its own environment and a local copy
    of the shared network, collects short rollouts, computes the
    actor-critic loss with GAE, and pushes gradients into the shared net.
    Runs forever (the termination check at the bottom is commented out).
    """
    ################### train function ######################
    # Seed each worker differently so their environments diverge.
    torch.manual_seed(params.seed + p_number)
    env = create_atari(params)
    env.unwrapped.seed(params.seed + p_number)
    # create a duplicate network
    net = copy.deepcopy(shared_net)
    device = net.device
    agent = A3CGruAgent(net, env, frames, episodes)
    while True:
        # Sync local weights with the shared (global) network before each rollout.
        net.load_state_dict(shared_net.state_dict())
        # Collect up to `steps` transitions, stopping early at episode end.
        for step in range(steps):
            agent.step()
            if agent.done:break
        # Bootstrap value R: zero at terminal states, otherwise the critic's
        # estimate for the current state.
        R = torch.zeros(1,1).to(device)
        if not agent.done:
            _, value , _ = agent.model(agent.state, None)
            R = value.data
        agent.values.append(R)
        policy_loss = 0
        value_loss = 0
        gae = torch.zeros(1, 1).to(device)
        # Walk the rollout backwards, accumulating discounted returns and GAE.
        for i in reversed(range(len(agent.rewards))):
            R = R * params.gamma + agent.rewards[i]
            advantage = R - agent.values[i]
            value_loss = value_loss + 0.5 * advantage.pow(2)
            # Generalized Advantage Estimation
            delta_t = agent.rewards[i] + params.gamma * \
                agent.values[i + 1].data - agent.values[i].data
            gae = gae * params.gamma * params.tau + delta_t
            # Entropy bonus (coefficient 0.01) encourages exploration.
            policy_loss = policy_loss - agent.log_probs[i] * \
                gae - 0.01 * agent.entropies[i]
        agent.model.zero_grad()
        # Combined actor-critic loss; gradients are copied into the shared
        # network before the shared optimizer steps.
        (policy_loss + 0.5 * value_loss).backward()
        share_grads(agent.model, shared_net)
        optimizer.step()
        agent.clear()
        if agent.done:
            agent.reset()
        # if episodes.value >= 1:
        #     break
async def listen_and_arbitrate(isTest, backend):
    """Listens for bounties & vote reveals to establish ground truth.

    Stakes the arbiter, then processes websocket events forever: block
    events drive the scheduler's deferred work, bounty events are handed
    off to handle_bounty(). Exits fatally when the address, bounty
    windows, or stake cannot be established.
    """
    if not check_address(address):
        # Always exit. Unusable with a bad address
        fatal_error(True, "Invalid address %s" % address, 7)
    scheduler = SchedulerQueue()
    scanner = backend.Scanner()
    headers = {'Authorization': api_key} if api_key else {}
    async with aiohttp.ClientSession(headers=headers) as session:
        # Get base_nonce and bounty registry parameters
        await get_base_nonce(session)
        voting_window = await get_vote_window(session)
        reveal_window = await get_reveal_window(session)
        # BUG FIX: the original tested `not get_reveal_window` (the function
        # object, which is always truthy) instead of the fetched value.
        if not voting_window or not reveal_window:
            # Cannot vote/settle without this info
            fatal_error(True, "Failed to get bounty windows.", 14)
        if not await post_stake(session):
            # Always exit, because it is unusable without staking
            fatal_error(True, "Failed to Stake Arbiter.", 9)
        async with websockets.connect(ws_url, extra_headers=headers) as ws:
            while True:
                message = json.loads(await ws.recv())
                if message["event"] == "block":
                    number = message["data"]["number"]
                    if number % 100 == 0:
                        logging.info('Block %s', number)
                    # Run any settlements/reveals scheduled for this block.
                    asyncio.get_event_loop().create_task(scheduler.execute_scheduled(number))
                elif message["event"] == "bounty":
                    bounty = message["data"]
                    asyncio.get_event_loop().create_task(handle_bounty(isTest, session, scheduler, reveal_window, voting_window, scanner, bounty))
def _query_trembl(accessions: List[str], format: str) -> str:
    """Searches TrEMBL server for UniProt entries based on accession.

    The server to use is set as an environment variable 'TREMBL_SERVER'.
    Normally this would be the internal TrEMBL server which contains the most
    up-to-date version of the database.

    Args:
        accessions: list of UniProt accessions to be passed as query
            parameter.
        format: format of matched UniProt entries (txt, fasta, xml, list are
            valid formats).

    Returns:
        str: UniProt entries in flat file format.
    """
    base = os.environ["TREMBL_SERVER"]
    # Build an OR-joined id query, e.g. "id:P12345 OR id:Q67890".
    joined = " OR id:".join(accessions)
    params = {"query": f"id:{joined}", "format": format}

    response = requests.get(f"{base}/uniprot/?", params=params)
    response.raise_for_status()
    return response.text
def simple_broken_app_client() -> FlaskClient:
    """Create client for demo Flask app that fails internally.

    Wires the '/fail' route to failing_func so tests can exercise the
    app's error handling.
    """
    # NOTE(review): annotated as FlaskClient but this is a generator
    # (yield-style pytest fixture) — Iterator[FlaskClient] would be more
    # accurate; confirm against the fixture decorator.
    handle_spec = HandleSpec(
        failing_func,
        '/fail'
    )
    app = create_app([handle_spec])
    client = app.test_client()
    # Yield so control returns here for teardown after the test completes.
    yield client
def get_english_info(content_section):
    """
    The english source section can have multiple publishers and volume counts. The criteria is that
    the publisher with the largest volume count is most likely the one we want so sort the lines in
    the section and grab data from the first line.

    Returns:
        tuple: (eng_status, eng_volumes) — both None when the section has no
        usable entry.
    """
    english_section = [m.strip("\n") for m in content_section[24] if type(m) is bs4.element.NavigableString and m != "\n"]
    english_section.sort()
    eng_status, eng_volumes = None, None
    try:
        eng_volumes = int(re.search(r'\d+', english_section[0]).group())
        status_line = english_section[0]
        # BUG FIX: the original used ("A" or "B") in status_line, which
        # evaluates to "A" in status_line and never tests the second
        # spelling; check each spelling explicitly instead.
        if "Complete" in status_line or "Completed" in status_line:
            eng_status = "Complete"
        elif "Ongoing" in status_line:
            eng_status = "Ongoing"
        elif "Cancelled" in status_line or "Canceled" in status_line:
            eng_status = "Cancelled"
        elif "Hiatus" in status_line:
            eng_status = "Hiatus"
        elif "Dropped" in status_line:
            eng_status = "Dropped"
        else:
            eng_status = "Unknown"
    except AttributeError:
        # re.search returned None: no digits on the first line.
        print(f"\t---> Attribute error: No english volumes")
    except IndexError:
        # The filtered section was empty.
        print("\t---> Index Error: No english volumes")
    return eng_status, eng_volumes
def test_format_error_message():
    """
    Test ``format_error_message``.
    """
    # An empty error list yields an empty message.
    assert format_error_message([]) == ""

    sample_response = {
        "version": "0.6",
        "reqId": "0",
        "status": "error",
        "errors": [
            {
                "reason": "invalid_query",
                "message": "INVALID_QUERY",
                "detailed_message": "Invalid query: NO_COLUMN: C",
            },
        ],
    }
    # The detailed message of the error is surfaced verbatim.
    assert format_error_message(sample_response["errors"]) == "Invalid query: NO_COLUMN: C"
def load_factual_vec(fname, vocab, k):
    """
    Loads word vectors from FACTBANK compiled word embeddings in the
    word2vec binary format, keeping only words present in `vocab`.

    Args:
        fname: path to the binary embeddings file.
        vocab: container of words to keep (membership-tested).
        k: unused; kept for interface compatibility (the dimension is read
            from the file header).

    Returns:
        dict: word -> numpy float32 vector of length `layer1_size`.
    """
    word_vecs = {}
    with open(fname, "rb") as f:
        header = f.readline()
        vocab_size, layer1_size = map(int, header.split())
        binary_len = numpy.dtype('float32').itemsize * layer1_size
        for _ in range(vocab_size):
            # Read the word one byte at a time up to the separating space;
            # skip stray newlines between entries.
            chars = []
            while True:
                ch = f.read(1)
                if ch == b' ':
                    break
                if ch != b'\n':
                    chars.append(ch)
            # latin-1 is lossless byte-wise, matching the original
            # Python-2 byte-string lookup semantics for ASCII vocab.
            word = b''.join(chars).decode('latin-1')
            if word in vocab:
                # frombuffer replaces the deprecated fromstring; .copy()
                # keeps the array writable like fromstring's result.
                word_vecs[word] = numpy.frombuffer(f.read(binary_len), dtype='float32').copy()
            else:
                f.read(binary_len)
    return word_vecs
def vaccine(date):
    """
    Auxiliary function.
    Download data about vaccination in Cantabria from the Ministry of Health, Consumer Affairs and Social Welfare.
    https://www.mscbs.gob.es

    Args:
        date(str): Date in format %Y%m%d

    Returns: single-row DataFrame with Cantabria's vaccination data for the
        given day, indexed by the date in %Y/%m/%d format; implicitly None
        when the download or parsing fails (best-effort: the error is only
        printed).
    """
    try:
        prefix_url = 'https://www.mscbs.gob.es/profesionales/saludPublica/ccayes/alertasActual/nCov/documentos/' \
                     'Informe_Comunicacion_'
        suffix_url = '.ods'
        nfile = f'{prefix_url}{date}{suffix_url}'
        # The ministry publishes OpenDocument spreadsheets, hence engine='odf'.
        file_vaccine = pd.read_excel(nfile, engine='odf')
        file_vaccine.set_index('Unnamed: 0', inplace=True)
        # Select Cantabria's row and transpose it back into a one-row frame.
        vcant = file_vaccine.loc['Cantabria']
        vcant = pd.DataFrame(vcant).T
        vcant.index = [datetime.datetime.strptime(date, "%Y%m%d").strftime("%Y/%m/%d")]
        return vcant
    except Exception as e:
        # Best-effort: report and fall through, returning None.
        date = datetime.datetime.strptime(date, "%Y%m%d").strftime("%Y/%m/%d")
        print(f"Error downloading vaccination data for {date}")
        # print(e)
def cache(f):
    """A decorator to cache results for a given function call.

    Note: The caching is only done on the first argument, usually "self";
    later calls with the same first argument return the first result even if
    the remaining arguments differ. The first argument must be hashable, and
    cached keys are kept alive for the lifetime of the decorated function.
    """
    from functools import wraps

    ret = {}

    @wraps(f)  # FIX: preserve the wrapped function's name/docstring
    def _Wrapper(*args, **kwargs):
        self = args[0]
        if self not in ret:
            ret[self] = f(*args, **kwargs)
        return ret[self]
    return _Wrapper
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.