content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
from typing import Any
def suggested_params(**kwargs: Any) -> transaction.SuggestedParams:
    """Fetch suggested transaction parameters from the algod client.

    Any keyword arguments are applied as attribute overrides on the
    returned parameters object before it is handed back to the caller.
    """
    suggested = _algod_client().suggested_params()
    for attr_name, attr_value in kwargs.items():
        setattr(suggested, attr_name, attr_value)
    return suggested
def norm_rl(df):
    """Normalize read-length dependent feature columns.

    Each listed feature column is divided by the maximum value found in
    the ``MO_r`` column, and the (mutated) DataFrame is returned.
    """
    length_features = [
        "US_r", "US_a", "DS_a", "DS_r",
        "UXO_r", "UXO_a", "DXO_r", "DXO_a",
        "UMO_r", "UMO_a", "DMO_r", "DMO_a",
        "MO_r", "MO_a", "XO_r", "XO_a",
    ]
    max_read_length = df["MO_r"].max()
    df[length_features] = df[length_features] / max_read_length
    return df
import json
def sendRequest(self, channel, params=None):
    """Send a channel-subscription request over the websocket.

    Returns True when the message was sent, False when the connection
    was closed (in which case a reconnect is triggered).
    """
    # Build the request payload.
    request = {'event': 'addChannel', 'channel': channel}
    # When parameters are supplied, attach the api_key and signature.
    if params is not None:
        params['api_key'] = apiKey
        params['sign'] = buildMySign(params, secreteKey)
        request['parameters'] = params
    # Serialize to JSON before sending.
    payload = json.dumps(request)
    # Reconnect if the send raises because the connection was closed.
    try:
        self.ws.send(payload)
        return True
    except websocket.WebSocketConnectionClosedException:
        self.reconnect()
        return False
def altCase(text: str):
    """Return *text* with alternating character casing.

    Characters at even indices are lower-cased and characters at odd
    indices are upper-cased.
    """
    out = []
    for position, char in enumerate(text):
        out.append(char.lower() if position % 2 == 0 else char.upper())
    return "".join(out)
def makeObjectArray(elem, graph, num, tag=sobject_array):
    """Wrap ``elem`` in a new object-array element of ``num`` objects.

    The new array replaces ``elem`` under its parent and adopts ``elem``
    as its first child.  Passing a different ``tag`` creates a delay
    element instead of an object array.
    """
    parent = elem.getparent()
    array_elem = etree.Element(etree.QName(parent, tag))
    new_ref = getNewRef()
    array_elem.set(sreference, new_ref)
    # Register the new element in the cross-reference table.
    Xref.update(new_ref, array_elem, parent.get(sreference),
                makeNewName(tag, array_elem), graph, tag=tag)
    array_elem.set(scount, str(num))
    # Splice the array into the tree in place of the original element.
    parent.append(array_elem)
    parent.remove(elem)
    array_elem.append(elem)
    # Re-parent the original element's cross-reference entry.
    Xref.update(elem.get(sreference), tagref=new_ref)
    return changeDelayOrArrayCount(array_elem, graph)
def convert_escaped_utf8_literal(
        text: str
) -> str:
    """Convert escaped UTF-8 hexadecimal byte literals into characters.

    A string that may contain escaped UTF-8 literal hexadecimal bytes
    (e.g. ``'test\\\\xc2\\\\xa9'``) is decoded into a string with the
    proper characters (e.g. ``'test©'``).  This is intended for cases
    when the value of an environment variable contains escaped UTF-8
    literal bytes that need to be converted to proper characters.

    Args:
        text (:obj:`str`): The string that may have escaped UTF8
            hexadecimal.

    Raises:
        UnicodeDecodeError: if any of the escaped hexadecimal characters
            are not proper UTF8 bytes.

    :rtype:
        :obj:`str`
    """
    # Local import avoids a circular dependency at module load time.
    from flutils.codecs import register_codecs  # pylint:disable=C0415
    register_codecs()
    return text.encode('utf-8').decode('raw_utf8_escape')
def convert_mg_l_to_mymol_kg(o2, rho_0=1025):
    """Convert oxygen concentrations from mg/l to micromol/kg.

    Args:
        o2: Oxygen concentration in mg/l.  Must support elementwise
            arithmetic and carry an ``attrs`` dict (e.g. an xarray
            DataArray or pandas Series).
        rho_0: Reference sea-water density in kg/m^3.

    Returns:
        Oxygen concentration in micromol/kg, with the ``units`` attribute
        set accordingly.
    """
    # mg/l -> mol/l via the molar mass of O2 (32 g/mol = 32000 mg/mol),
    # then -> per-kg via the reference density, then mol -> micromol.
    converted = o2 * 1 / 32000 * rho_0 / 1000 * 1e6
    # Raw string: '\m' is an invalid escape sequence in a normal literal.
    converted.attrs["units"] = r"$\mu mol/kg$"
    return converted
import xml
from typing import List
def parse_defines(root: xml.etree.ElementTree.Element,
                  component_id: str) -> List[str]:
    """Collect the pre-processor definitions declared for one component.

    Schema:
        <defines>
            <define name="EXAMPLE" value="1"/>
            <define name="OTHER"/>
        </defines>

    Args:
        root: root of element tree.
        component_id: id of component to return.

    Returns:
        list of str NAME=VALUE or NAME for the component.
    """
    define_xpath = (
        f'./components/component[@id="{component_id}"]/defines/define'
    )
    return [_parse_define(define) for define in root.findall(define_xpath)]
def make_f_beta(beta):
    """Create an f-beta scoring function.

    Parameters
    ----------
    beta : float
        The beta to use, where a beta of 1 yields the f1-score
        (F-measure).

    Returns
    -------
    function
        A function computing the f_beta score.
    """
    beta_squared = beta ** 2
    scale = 1 + beta_squared

    def f(global_, local_, node):
        """Compute the f-measure for one node.

        Parameters
        ----------
        global_ : np.array
            All of the scores for a given query.
        local_ : np.array
            The scores for the query at the current node.
        node : skbio.TreeNode
            The current node being evaluated; must expose ``ntips``.
        """
        precision = len(global_) / len(local_)
        recall = len(local_) / node.ntips
        return scale * (precision * recall) / (
            (beta_squared * precision) + recall)

    return f
def _autohint_code(f, script):
    """Resolve the ttfautohint code for a font/script pair.

    Returns 'not-hinted' when the script is not hinted; otherwise the
    ttfautohint code, which may be None when ttfautohint does not
    support the script (LGC and MONO both yield None).
    """
    if script == 'no-script':
        return script
    if not script:
        # Derive the script from the font's primary script key.
        font_script = _get_font_info(f).script
        script = noto_fonts.script_key_to_primary_script(font_script)
    return noto_data.HINTED_SCRIPTS.get(script, 'not-hinted')
def infostring(message=""):
    """Build a timestamped info log-string.

    Typically used at the end of tasks.

    Args:
        message (str): A custom message to add.

    Returns:
        str: Single-line info entry terminated by a newline.
    """
    # BUG FIX: str methods return new objects; the original discarded the
    # result, so trailing whitespace/newlines were never collapsed.
    message = message.rstrip().replace("\n", " ")
    return tstamp() + "\t## INFO ## " + message + "\n"
def handler(event, _):
    """
    Lambda handler for Cognito pre-sign-up triggers.

    Forwards sign-up events to EventBridge and never auto-confirms users.
    """
    # Input event:
    # https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html#cognito-user-pools-lambda-trigger-event-parameter-shared
    logger.debug({
        "message": "Input event",
        "event": event
    })
    # Never confirm users automatically.
    event["response"] = {
        "autoConfirmUser": False,
        "autoVerifyPhone": False,
        "autoVerifyEmail": False
    }
    # Only care about the sign-up trigger sources; anything else
    # (e.g. 'PostConfirmation_ConfirmForgotPassword') is ignored.
    if event["triggerSource"] not in ("PreSignUp_SignUp",
                                      "PreSignUp_AdminCreateUser"):
        logger.warning({
            "message": "invalid triggerSource",
            "triggerSource": event["triggerSource"]
        })
        return event
    # Transform the request and forward it to EventBridge.
    send_event(process_request(event))
    # Cognito requires the (mutated) event to be returned.
    return event
def geo_point_n(arg, n):
    """Return the Nth point in a single linestring in the geometry.

    Negative values are counted backwards from the end of the
    LineString, so that -1 is the last point. Returns NULL if there is
    no linestring in the geometry.

    Parameters
    ----------
    arg : geometry
    n : integer

    Returns
    -------
    PointN : geometry scalar
    """
    return ops.GeoPointN(arg, n).to_expr()
def replace_pasture_scrubland_with_shrubland(df, start_col, end_col):
    """Merge pasture and scrubland state transitions into 'shrubland'.

    1. Remove transitions *between* scrubland and pasture (either way).
    2. Check there are no duplicate transitions which would result from
       merging identical conditions leading from/to pasture and
       scrubland.
    3. Rename all instances of 'scrubland' or 'pasture' to 'shrubland'.
    4. Check for duplicates again.
    """
    df = remove_transitions_bw_pasture_and_scrubland(df, start_col, end_col)
    dup_start = duplicates_start_with_pasture_or_scrubland(
        df, start_col, end_col)
    assert len(dup_start.index) == 0, "No duplicates expected."
    dup_end = duplicates_end_with_pasture_or_scrubland(
        df, start_col, end_col)
    assert len(dup_end.index) == 0, "No duplicates expected."
    for column in (start_col, end_col):
        for old_lct in (MLct.SCRUBLAND.alias, MLct.PASTURE.alias):
            df.loc[:, column] = df[column].replace(
                old_lct, AsLct.SHRUBLAND.alias)
    condition_cols = ["succession", "aspect", "pine", "oak", "deciduous",
                      "water", start_col, end_col]
    assert len(df[df.duplicated(condition_cols)].index) == 0, "There should be "\
        + "no duplicated rows."
    return df
from typing import List
def get_storage_backend_descriptions() -> List[dict]:
    """Collect the metadata of every supported storage backend.

    Returns:
        A list with one metadata dict per supported backend.
    """
    return [
        get_storage_backend(backend).metadata
        for backend in SUPPORTED_STORAGE_BACKENDS
    ]
from typing import Tuple
def get_business_with_most_location() -> Tuple:
    """
    Fetches LA API and returns the business with most locations
    from first page.

    :return Tuple: business name and number of locations
    :raises ServiceUnavailable: when the API does not answer with 200
    """
    response = _fetch_businesses_from_la_api()
    if response.status_code != 200:
        raise ServiceUnavailable()
    # Count how many rows (locations) each business name appears in.
    location_counts = dict()
    for active_business in response.json():
        name = active_business["business_name"]
        location_counts[name] = location_counts.get(name, 0) + 1
    top_name, top_count = _get_max_business_occurrence(location_counts)
    return top_name, top_count
from typing import AbstractSet
def degrees_to_polynomial(degrees: AbstractSet[int]) -> Poly:
    """
    Build the polynomial whose terms are exactly the given degrees,
    each with coefficient 1 (and all other terms zero), e.g.:
    {0, 2, 5} -> x**5 + x**2 + 1
    """
    coefficients = {degree: 1 for degree in degrees}
    return Poly.from_dict(coefficients, x)
def validate_set_member_filter(filter_vals, vals_type, valid_vals=None):
    """
    Validate filter values that must be of a certain type or
    found among a set of known values.

    Args:
        filter_vals (obj or Set[obj]): Value or values to filter records by.
        vals_type (type or Tuple[type]): Type(s) of which all ``filter_vals``
            must be instances.
        valid_vals (Set[obj]): Set of valid values in which all ``filter_vals``
            must be found.

    Return:
        Set[obj]: Validated and standardized filter values.

    Raises:
        TypeError
        ValueError
    """
    filter_vals = to_collection(filter_vals, vals_type, set)
    if valid_vals is not None:
        invalid_vals = filter_vals.difference(valid_vals)
        if invalid_vals:
            raise ValueError(
                "not all values in filter are valid: {}".format(invalid_vals)
            )
    return filter_vals
def update_many(token, checkids, fields, customerid=None):
    """Update field(s) in multiple existing NodePing checks.

    Applies the same ``fields`` update to every check listed in
    ``checkids``.

    :type token: string
    :param token: Your NodePing API token
    :type checkids: dict
    :param checkids: CheckIDs mapped to their check type
    :type fields: dict
    :param fields: Fields in each check that will be updated
    :type customerid: string
    :param customerid: subaccount ID
    :rtype: list
    :return: Responses from the NodePing queries, one per check
    """
    results = []
    for check_id, check_type in checkids.items():
        url = _utils.create_url(
            token, "{0}/{1}".format(API_URL, check_id), customerid)
        # Copy so the caller's dict is not mutated by the type field.
        payload = fields.copy()
        payload["type"] = check_type.upper()
        results.append(_query_nodeping_api.put(url, payload))
    return results
def main(start, end, csv_name, verbose):
    """Run script conditioned on user-input."""
    print("Collecting Pomological Watercolors {s} throught {e}".format(
        s=start, e=end))
    return get_pomological_data(
        start=start, end=end, csv_name=csv_name, verbose=verbose)
import hashlib
def _sub_fetch_file(url, md5sum=None):
    """
    Sub-routine of _fetch_file: download ``url`` and optionally verify
    its md5 checksum.

    :param url: URL to download.
    :param md5sum: expected md5 hex digest; None or '' skips the check.
    :raises: :exc:`DownloadFailed` on network failure or checksum
        mismatch.
    :returns: the downloaded contents (bytes).
    """
    # BUG FIX: urlopen().read() yields bytes, so the default must be
    # bytes as well, not ''.
    contents = b''
    try:
        fh = urlopen(url)
        contents = fh.read()
        # A falsy md5sum (None or '') disables verification, matching
        # the original `md5sum is not None` + truthiness double check.
        if md5sum:
            filehash = hashlib.md5(contents).hexdigest()
            if filehash != md5sum:
                raise DownloadFailed(
                    "md5sum didn't match for %s. Expected %s got %s"
                    % (url, md5sum, filehash))
    except URLError as ex:
        raise DownloadFailed(str(ex))
    return contents
def get_path_to_config(config_name: str) -> str:
    """Build the path of the named config inside the run-configs dir."""
    base_dir = get_run_configs_dir()
    return join(base_dir, config_name)
import io
def get_orig_rawimage(raw_file, debug=False):
    """
    Read a raw, original LRIS data frame.
    Ported from LOWREDUX long_oscan.pro lris_oscan()
    Parameters
    ----------
    raw_file : :obj:`str`
        Filename
    debug : :obj:`bool`, optional
        Run in debug mode (doesn't do anything)
    Returns
    -------
    raw_img : `numpy.ndarray`_
        Raw image for this detector.
    hdu : `astropy.io.fits.HDUList`_
        Opened fits file
    exptime : :obj:`float`
        Exposure time read from the file header
    rawdatasec_img : `numpy.ndarray`_
        Data (Science) section of the detector as provided by setting the
        (1-indexed) number of the amplifier used to read each detector pixel.
        Pixels unassociated with any amplifier are set to 0.
    oscansec_img : `numpy.ndarray`_
        Overscan section of the detector as provided by setting the
        (1-indexed) number of the amplifier used to read each detector pixel.
        Pixels unassociated with any amplifier are set to 0.
    """
    # Open
    hdul = io.fits_open(raw_file)
    head0 = hdul[0].header
    # TODO -- Check date here and error/warn if not after the upgrade
    image = hdul[0].data.astype(float)
    # Get post, pre-pix values
    # NOTE(review): POSTPIX/PREPIX appear to be per-amplifier overscan and
    # prescan widths in unbinned pixels -- confirm against LRIS headers.
    postpix = head0['POSTPIX']
    prepix = head0['PREPIX']
    # Columns skipped when assigning overscan, presumably to avoid edge
    # effects near the data/overscan boundary -- TODO confirm.
    post_buffer1 = 4
    post_buffer2 = 8
    namps = head0['NUMAMPS']
    # get the x and y binning factors...
    binning = head0['BINNING']
    xbin, ybin = [int(ibin) for ibin in binning.split(',')]
    # Amplifier maps: 0 means "no amplifier"; amps are numbered from 1.
    rawdatasec_img = np.zeros_like(image, dtype=int)
    oscansec_img = np.zeros_like(image, dtype=int)
    # First data column of each amp: all prescan regions come first, then
    # one 1024-wide (unbinned) data section per amplifier.
    datacol = namps * (prepix // xbin) + np.arange(namps) * 1024 // xbin
    # First overscan column, after the last data section plus a buffer.
    postcol = datacol[namps - 1] + (1024 + post_buffer1) // xbin
    for iamp in range(namps): #= 0, namps - 1L
        # Overscan columns for this amp (skipping the trailing buffer).
        biascols = np.arange((postpix - post_buffer2) // xbin) + (
            iamp * postpix) // xbin + postcol
        oscansec_img[:, biascols] = iamp+1
        # Science columns for this amp, offset past all prescan regions.
        imagecols = np.arange(1024 // xbin) + iamp * 1024 // xbin
        rawdatasec_img[:,imagecols + namps*(prepix // xbin)] = iamp+1
    return image, hdul, float(head0['ELAPTIME']), \
        rawdatasec_img, oscansec_img | 724fe9058a7430db565014922f3fd65a7756b743 | 25,022 |
def scan_db_and_save_table_info(data_source_id, db_connection, schema, table):
    """Scan the database for table info and persist it as metadata.

    The fresh record is committed first; only then is any previously
    stored table info for the same table deleted.
    """
    table_info = get_table_info(
        {}, schema, table, from_db_conn=True, db_conn=db_connection
    )
    stale_info = fetch_table_info(data_source_id, schema, table, as_obj=True)
    DataSourceMetadata(
        data_source_id=data_source_id,
        metadata_type="table_info",
        metadata_param=get_metadata_param_str([schema, table]),
        metadata_info=table_info,
    ).save(commit=True)
    if stale_info:
        stale_info.delete(commit=True)
    return table_info
def _get_non_heavy_neighbor_residues(df0, df1, cutoff):
"""Get neighboring residues for non-heavy atom-based distance."""
non_heavy0 = df0[df0['element'] != 'H']
non_heavy1 = df1[df1['element'] != 'H']
dist = spa.distance.cdist(non_heavy0[['x', 'y', 'z']], non_heavy1[['x', 'y', 'z']])
pairs = np.array(np.where(dist < cutoff)).T
if len(pairs) == 0:
return [], []
# Use the found pairs to find unique pairings of residues.
res0 = non_heavy0.iloc[pairs[:, 0]][['pdb_name', 'model', 'chain', 'residue']]
res1 = non_heavy1.iloc[pairs[:, 1]][['pdb_name', 'model', 'chain', 'residue']]
res0 = res0.reset_index(drop=True)
res1 = res1.reset_index(drop=True)
# We concatenate so that we can find unique _pairs_.
res = pd.concat((res0, res1), axis=1)
res = res.drop_duplicates()
# # Split back out now that we have found duplicates.
res0 = res.iloc[:, range(4)]
res1 = res.iloc[:, range(4, 8)]
res0 = res0.reset_index(drop=True)
res1 = res1.reset_index(drop=True)
return res0, res1 | b27a341cb1e5e5dd74c881036d7002a107270cd5 | 25,024 |
def j0(ctx, x):
    """Computes the Bessel function `J_0(x)`. See :func:`besselj`."""
    order = 0
    return ctx.besselj(order, x)
def mysql2df(host, user, password, db_name, tb_name):
    """
    Return mysql table data as pandas DataFrame.

    :param host: host name
    :param user: user name
    :param password: password
    :param db_name: name of the pydb from where data will be exported
    :param tb_name: name of the table from where data will be exported
    :return: pandas DataFrame with the table contents, or None when the
        connection/query fails (the error is printed, not raised).
    """
    # Create a connection object
    # dialect+driver://username:password@host:port/pydb
    connect_string = "mysql+pymysql://{}:{}@{}/{}".format(user, password, host, db_name)
    # NOTE(review): the ``encoding`` argument is deprecated/removed in
    # newer SQLAlchemy releases -- confirm the pinned version supports it.
    engine = db.create_engine(connect_string, encoding='latin1', echo=True, pool_pre_ping=True)
    connection = engine.connect()
    # NOTE(review): the session is created and closed but never used for
    # any query below -- consider removing it.
    session = sessionmaker(bind=engine)()
    metadata = db.MetaData()
    try:
        # print the table column names
        tb = db.Table(tb_name, metadata, autoload=True, autoload_with=engine)
        print(tb.columns.keys())
        # Retrieve table data: 'SELECT * FROM table'
        sql_query = 'SELECT * FROM {}'.format(tb_name)
        df = pd.read_sql(sql_query, connection)
        return df
    except Exception as e:
        print('Error: {}'.format(str(e)))
    finally:
        # Always release pooled connections and the session.
        engine.dispose()
        session.close() | a4ea75b9fa13e6cb48650e69f5d8216f24fdaf07 | 25,026 |
def is_int(number):
    """Check whether a value can be cast to an ``int``.

    @param number: The value to check.
    @return: True when ``int(number)`` succeeds, False otherwise.
    """
    # Catch only conversion failures; a bare ``except`` would also
    # swallow KeyboardInterrupt/SystemExit.
    try:
        int(number)
    except (TypeError, ValueError):
        return False
    return True
import pathlib
def get_versions(api_type=DEFAULT_TYPE):
    """Search for API object module files of api_type.

    Args:
        api_type (:obj:`str`, optional):
            Type of object module to load, must be one of
            :data:`API_TYPES`.  Defaults to: :data:`DEFAULT_TYPE`.

    Raises:
        :exc:`exceptions.NoVersionFoundError`:
            If no API module files matching :data:`PATTERN` are found.

    Returns:
        :obj:`list` of :obj:`dict`
    """
    module_dir = pathlib.Path(__file__).absolute().parent
    glob_pattern = PATTERN.format(api_type=api_type)
    module_files = list(module_dir.glob(glob_pattern))
    if not module_files:
        error = "Unable to find any object modules matching pattern {r!r} in {p!r}"
        error = error.format(p=format(module_dir), r=glob_pattern)
        raise exceptions.NoVersionFoundError(error)
    versions = []
    for module_file in module_files:
        stem = module_file.stem
        # File names look like "<type>_<major>_<minor>...".
        name_parts = stem.split("_")
        version_type = name_parts.pop(0)
        ver_parts = utils.versions.split_ver(name_parts)
        versions.append(
            {
                "ver_str": utils.versions.join_ver(ver_parts),
                "ver_parts": ver_parts,
                "api_type": version_type,
                "module_file": stem,
                "module_path": module_file,
            }
        )
    # Newest version first.
    return sorted(versions, key=lambda v: v["ver_parts"], reverse=True)
def model_flux(parameters_dict, xfibre, yfibre, wavelength, model_name):
    """Return n_fibre X n_wavelength array of model flux values."""
    return moffat_flux(
        parameters_dict_to_array(parameters_dict, wavelength, model_name),
        xfibre, yfibre)
def new_rnn_layer(cfg, num_layer):
    """Creates new RNN layer for each parameter depending on whether it is bidirectional LSTM or not.
    Uses the fast LSTM implementation backed by CuDNN if a GPU is available.
    Note: The normal LSTMs utilize sigmoid recurrent activations so as to retain compatibility CuDNNLSTM:
    see the following github issue for more details: https://github.com/keras-team/keras/issues/8860
    :param cfg: configuration of CharGen instance
    :param num_layer: ordinal number of the rnn layer being built
    :return: 3D tensor if return sequence is True
    """
    # NOTE(review): _get_available_gpus() is a private Keras backend API --
    # confirm it exists in the pinned Keras/TensorFlow version.
    gpu_no = len(K.tensorflow_backend._get_available_gpus())
    if gpu_no > 0:
        print('GPU is available...')
        # CuDNN-backed LSTM: faster, but fixed to CuDNN-compatible
        # activations (tanh / sigmoid).
        if cfg['bidirectional']:
            return Bidirectional(CuDNNLSTM(cfg['rnn_size'],
                                           return_sequences=True),
                                 name='rnn_{}'.format(num_layer))
        return CuDNNLSTM(cfg['rnn_size'],
                         return_sequences=True,
                         name='rnn_{}'.format(num_layer))
    else:
        print('No GPU available...')
        # Plain LSTM with sigmoid recurrent activation to keep trained
        # weights interchangeable with the CuDNN variant (see docstring).
        if cfg['bidirectional']:
            return Bidirectional(LSTM(cfg['rnn_size'],
                                      return_sequences=True,
                                      recurrent_activation='sigmoid'),
                                 name='rnn_{}'.format(num_layer))
        return LSTM(cfg['rnn_size'],
                    return_sequences=True,
                    recurrent_activation='sigmoid',
                    name='rnn_{}'.format(num_layer)) | 341ca96549f40c8607e44b9ef353313107a8fb0a | 25,030 |
def firfreqz(h, omegas):
    """Evaluate frequency response of an FIR filter at discrete frequencies.

    Parameters
    ----------
    h : array_like
        FIR filter coefficient array for numerator polynomial.
        e.g. H(z) = 1 + a*z^-1 + b*z^-2
             h = [1, a, b]
    omegas : numpy.ndarray
        Angular frequencies (radians/sample) at which to evaluate.

    Returns
    -------
    numpy.ndarray
        Complex frequency response H(e^{j*omega}), same shape as
        ``omegas``.
    """
    taps = np.asarray(h, dtype='complex128')
    omegas = np.asarray(omegas)
    indices = np.arange(len(taps))
    # Vectorized sum_k h[k] * exp(-1j*omega*k): broadcasting over the
    # trailing tap axis replaces the per-coefficient Python loop.
    phases = np.exp(-1j * omegas[..., np.newaxis] * indices)
    return phases @ taps
import pickle
def write_np2pickle(output_fp: str, array, timestamps: list) -> bool:
    """Save a Heimann HTPA sequence and its timestamps to a pickle file.

    Parameters
    ----------
    output_fp : str
        Filepath to destination file, including the file name.
    array : np.array
        Temperatue distribution sequence, shaped [frames, height, width].
    timestamps : list
        List of timestamps of corresponding array frames.

    Returns
    -------
    bool
        True once the file has been written.
    """
    ensure_parent_exists(output_fp)
    with open(output_fp, "wb") as pickle_file:
        pickle.dump((array, timestamps), pickle_file)
    return True
from re import S
import logging
import webbrowser
def run(
    client_id_: str, client_secret_: str, server_class=HTTPServer, handler_class=S, port=8080
) -> str:
    """
    Generates a Mapillary OAuth url and prints to screen as well as opens it automatically in a browser. Declares some
    global variables to pull data from the HTTP server through the GET endpoint.
    """
    # NOTE(review): handler_class defaults to ``S``, which at file level is
    # imported from ``re`` -- this looks like it should be an HTTP request
    # handler class instead; confirm the intended default.
    # These global variables are defined so that we can pass data to / get data from the GET endpoint
    global client_id
    global client_secret
    global access_token
    client_id = client_id_
    client_secret = client_secret_
    server_address = ("localhost", port)
    httpd = server_class(server_address, handler_class)
    logging.info("Starting httpd and opening Mapillary to authenticate...")
    try:
        # Print the OAuth link to console and also tries to open it directly in the browser
        auth_url = AUTH_URL.format(client_id)
        logging.info(
            "Please authenticate (if browser didn't automatically open): {}".format(auth_url)
        )
        webbrowser.open_new_tab(auth_url)
        # Blocks until the handler shuts the server down (or Ctrl-C).
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    httpd.server_close()
    logging.info("Stopping httpd...")
    return access_token | 152e4c0ae5c20b8e39693478ff5d06c1cc5fa8a5 | 25,033 |
def sort_by_rank_change(val):
    """
    Sort key: magnitude of a node's rank change.

    :param val: node (mapping with a "rank_change" entry)
    :return: absolute rank-change value as a float
    """
    rank_change = float(val["rank_change"])
    return abs(rank_change)
def ping():
    """Health-check endpoint: always responds 200."""
    return flask.Response(response='\n', status=200,
                          mimetype='application/json')
import json
import logging
def lambda_handler(event=None, context=None):
    """Entry point for lambda: dispatch the event and return the response.

    Exceptions are logged at debug level and re-raised so the invocation
    is marked as failed.
    """
    print(f"EVENT: {json.dumps(event)}")
    # Dead code removed: the original ended with `finally: pass`, which
    # does nothing.
    try:
        return actions(event)
    except Exception as e:
        logging.debug(f"Exception: {e}")
        raise
def add_gtid_ranges_to_executed_set(existing_set, *new_ranges):
    """Merge GTID ranges into an executed-set representation.

    ``existing_set`` is a dict like ``{"uuid1": [[1, 4], [7, 12]], "uuid2":
    [[1, 100]]}`` (as returned by e.g. parse_gtid_range_string); each entry
    of ``new_ranges`` is a list like ``[{"server_uuid": "uuid", "start": 1,
    "end": 3}, ...]``.  Returns a new dict containing a minimal
    representation covering both the old and the new ranges.
    """
    combined = [
        {"end": end, "server_uuid": server_uuid, "start": start}
        for server_uuid, ranges in existing_set.items()
        for start, end in ranges
    ]
    for range_list in new_ranges:
        combined.extend(range_list)
    return partition_sort_and_combine_gtid_ranges(combined)
def fetch_rgb(img):
    """Display *img* and collect RGB values of clicked pixels.

    Left-clicking prints the pixel's RGB values to the terminal and
    records them; the window closes on any key press.

    :param img: input image (BGR channel order, as loaded by cv2)
    :type img: cv2 image
    :return: the rgb list, one [red, green, blue] entry per click
    :rtype: list
    """
    rgb_list = []

    def click_event(event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDOWN:
            # OpenCV stores channels as BGR, hence the index order.
            red = img[y, x, 2]
            green = img[y, x, 1]
            blue = img[y, x, 0]
            print(red, green, blue)  # prints to command line
            rgb_list.append([red, green, blue])
            cv2.imshow('original', img)

    cv2.imshow('original', img)
    cv2.setMouseCallback("original", click_event)
    cv2.waitKey(0)
    # BUG FIX: the original referenced cv2.destroyAllWindows without
    # calling it, so the window was never destroyed.
    cv2.destroyAllWindows()
    return rgb_list
import functools
def _get_date_filter_consumer(field):
    """date.{lt, lte, gt, gte}=<ISO DATE>"""
    date_filter = make_date_filter(
        functools.partial(django_date_filter, field_name=field))

    def _date_consumer(key, value):
        # Keys not of the form "<field>.<qualifier>" are ignored.
        if '.' not in key or key.split(".")[0] != field:
            return {}
        _, qualifier = key.split(".", maxsplit=1)
        try:
            return date_filter(qualifier, value)
        except ValueError as e:
            raise InvalidFilterError(str(e))

    return _date_consumer
def data_path(fname):
    """
    Resolve *fname* relative to this module's directory, so relative
    filenames to data files can be used from all modules.

    model.json -> .../src/data/model.json
    """
    module_dir = dirname(realpath(__file__))
    return join(module_dir, fname)
def _parse_special_functions(sym: sp.Expr, toplevel: bool = True) -> sp.Expr:
    """
    Recursively checks the symbolic expression for functions which have be
    to parsed in a special way, such as piecewise functions
    :param sym:
        symbolic expressions
    :param toplevel:
        as this is called recursively, are we in the top level expression?
    """
    # Recurse into arguments first; nested 'piecewise' args of a
    # 'piecewise' parent are left untouched so they can be denested in a
    # single pass below.
    args = tuple(arg if arg.__class__.__name__ == 'piecewise'
                 and sym.__class__.__name__ == 'piecewise'
                 else _parse_special_functions(arg, False)
                 for arg in sym.args)
    # Map SBML function-node names to their sympy equivalents.
    fun_mappings = {
        'times': sp.Mul,
        'xor': sp.Xor,
        'abs': sp.Abs,
        'min': sp.Min,
        'max': sp.Max,
        'ceil': sp.functions.ceiling,
        'floor': sp.functions.floor,
        'factorial': sp.functions.factorial,
        'arcsin': sp.functions.asin,
        'arccos': sp.functions.acos,
        'arctan': sp.functions.atan,
        'arccot': sp.functions.acot,
        'arcsec': sp.functions.asec,
        'arccsc': sp.functions.acsc,
        'arcsinh': sp.functions.asinh,
        'arccosh': sp.functions.acosh,
        'arctanh': sp.functions.atanh,
        'arccoth': sp.functions.acoth,
        'arcsech': sp.functions.asech,
        'arccsch': sp.functions.acsch,
    }
    if sym.__class__.__name__ in fun_mappings:
        return fun_mappings[sym.__class__.__name__](*args)
    elif sym.__class__.__name__ == 'piecewise' \
            or isinstance(sym, sp.Piecewise):
        if isinstance(sym, sp.Piecewise):
            # this is sympy piecewise, can't be nested
            denested_args = args
        else:
            # this is sbml piecewise, can be nested
            denested_args = _denest_piecewise(args)
        return _parse_piecewise_to_heaviside(denested_args)
    # An empty 'plus' (sum with no summands) evaluates to zero.
    if sym.__class__.__name__ == 'plus' and not sym.args:
        return sp.Float(0.0)
    if isinstance(sym, (sp.Function, sp.Mul, sp.Add, sp.Pow)):
        # HACK: mutate the (normally immutable) expression's argument
        # tuple in place with the parsed arguments instead of rebuilding.
        sym._args = args
    elif toplevel and isinstance(sym, BooleanAtom):
        # Replace boolean constants by numbers so they can be differentiated
        # must not replace in Piecewise function. Therefore, we only replace
        # it the complete expression consists only of a Boolean value.
        sym = sp.Float(int(bool(sym)))
    return sym | b560521ceee7cb4db16b808e44b1e538e236c00e | 25,041 |
from sys import path
import glob
def load_oxfordiiitpets(breed=True) -> core.SceneCollection:
    """Load the Oxford-IIIT pets dataset. It is not divided into
    train, validation, and test because it appeared some files were missing
    from the trainval and test set documents
    (e.g., english_cocker_spaniel_164).
    Args:
        breed: Whether to use the breeds as the class labels. If False, the
            class labels are limited to dog or cat.
    Returns:
        A scene collection containing the dataset
    """
    # Download and extract the image archive; the check function verifies
    # the expected number of extracted JPEGs.
    image_dir = utils.get_file(
        origin="http://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz",
        file_hash="67195c5e1c01f1ab5f9b6a5d22b8c27a580d896ece458917e61d459337fa318d",
        cache_subdir=path.join("datasets", "oxfordiiitpets"),
        hash_algorithm="sha256",
        extract=True,
        archive_format="tar",
        extract_check_fn=lambda directory: len(
            glob(path.join(directory, "images", "*.jpg"))
        )
        == 7390,
    )
    # Download and extract the VOC-style XML annotations.
    annotations_dir = utils.get_file(
        origin="http://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz",
        file_hash="52425fb6de5c424942b7626b428656fcbd798db970a937df61750c0f1d358e91",
        cache_subdir=path.join("datasets", "oxfordiiitpets"),
        hash_algorithm="sha256",
        extract=True,
        archive_format="tar",
        extract_check_fn=lambda directory: len(
            glob(path.join(directory, "annotations", "xmls", "*.xml"))
        )
        == 3686,
    )
    filepaths = glob(path.join(annotations_dir, "annotations", "xmls", "*.xml"))
    image_dir = path.join(image_dir, "images")
    # Build the base collection with the two coarse labels first.
    collection = load_voc(
        filepaths=filepaths,
        annotation_config=core.AnnotationConfiguration(["dog", "cat"]),
        image_dir=image_dir,
    )
    if not breed:
        return collection
    assert all(
        len(s.annotations) in [1, 2] for s in collection.scenes
    ), "An error occurred handling pets dataset"
    # Breed names are encoded in the filename as "<breed>_<index>.xml".
    labels = [
        "_".join(path.splitext(path.split(f)[1])[0].split("_")[:-1]) for f in filepaths
    ]
    annotation_config = core.AnnotationConfiguration(sorted(set(labels)))
    # Re-label every scene's annotations with its breed category.
    return core.SceneCollection(
        scenes=[
            scene.assign(
                annotations=[
                    a.assign(category=annotation_config[label])
                    for a in scene.annotations
                ],
                annotation_config=annotation_config,
            )
            for scene, label in zip(collection.scenes, labels)
        ],
        annotation_config=annotation_config,
    ) | a53e242f04df7a04455dc693111d3eed5820b15d | 25,042 |
import re
def _do_process_purpose(action):
    """ Does all the 'hard work' in processing the purpose. Returns a single
        line of the form
        symbol, ex_date(yyyy-mm-dd), purpose(d/b/s), ratio(for b/s), value(for d),
    """
    symbol = action.sym.upper()
    purpose = action.purpose.lower()
    ex_date = action.ex_date
    fv = float(action.fv)
    actions = []
    # Dividends: amounts may be absolute ("rs 5", "5/-") or a percentage
    # of face value ("50%"), in which case they are scaled by fv/100.
    if purpose.find('div') >= 0:
        #r = re.compile(r'(?:.*?)(?P<Div>(?:div.*?)?((?:(rs.*?)|(\s+))\d+\.?\d*((?:\/-)|%)?))')
        #r = re.compile(r'(?:.*?)(?P<Div>(?:div.*?)((?:(rs\.*?)|(\s+))\d+\.?\d*(?:\/-)?)|(\d+%))')
        #r = re.compile(r'(?:.*?)(?P<div>(?:(?:div.*?)(\\d+%)|(?:div.*?(rs\\.?)?)\\s*(\\d+\\.?\\d*)))')
        for x in _div_regex.finditer(purpose):
            for _, v in x.groupdict().items():
                # Strip any currency prefix (e.g. "rs", "rs.") first.
                v = re.sub(_rsr_regex, '', v)
                for y in _num_per_r.finditer(v):
                    z = y.group()
                    if z.find('%') > 0:
                        div = float(z.replace('%', '')) * (fv/100)
                    else:
                        div = float(z)
                    actions.append(CorpAction(symbol, ex_date, 'D', 1.0, div))
    # Bonus issue "n:d": n new shares per d held, so the post-bonus
    # holding ratio is n / (n + d).
    if purpose.find('bon') >= 0:
        y = _bonus_regex.search(purpose)
        if y:
            n, d = float(y.group(1)), float(y.group(2))
            ratio = n / (n+d)
            action = CorpAction(symbol, ex_date, 'B', ratio, 0.0)
            actions.append(action)
            # NOTE(review): this logs the CorpAction *class*, not the
            # instance -- presumably str(action) was intended.
            module_logger.debug("CorpAction: %s", str(CorpAction))
    # Stock split "d:n": face value goes from d to n, ratio n / d.
    if purpose.find('spl') >= 0:
        y = _split_regex.search(purpose)
        if y:
            d, n = float(y.group(1)), float(y.group(2))
            ratio = n / d
            action = CorpAction(symbol, ex_date, 'S', ratio, 0.0)
            actions.append(action)
            # NOTE(review): same class-vs-instance logging issue as above.
            module_logger.debug("CorpAction: %s", str(CorpAction))
    return actions | 60d704e22a754f28639a81b4a49b9aba858cb50f | 25,043 |
def get_airflow_config(version, timestamp, major, minor, patch, date, rc):
    """Return a dict of the configuration for the Pipeline."""
    config = dict(AIRFLOW_CONFIG)
    # Use the explicit version when given, otherwise assemble it from parts.
    if version is None:
        config['VERSION'] = config['VERSION'].format(
            major=major, minor=minor, patch=patch, date=date, rc=rc)
    else:
        config['VERSION'] = version
    config['MFEST_COMMIT'] = config['MFEST_COMMIT'].format(timestamp=timestamp)
    # This works because str.format ignores keyword args whose
    # placeholders aren't present in the template.
    for key, template in config.items():
        if key not in ('VERSION', 'MFEST_COMMIT'):
            config[key] = template.format(VERSION=config['VERSION'])
    return config
def find_duplicates(treeroot, tbl=None):
    """Return ``{key: files}`` for keys mapped to more than one file under *treeroot*.

    NOTE(review): relies on the Python 2-only ``os.path.walk`` and on the
    external ``file_walker`` callback to populate *tbl* — confirm both are
    available in this runtime.
    """
    if tbl is None:
        tbl = {}
    os.path.walk(treeroot, file_walker, tbl)
    # Keep only the keys that collide (i.e. actual duplicates).
    return {key: files for key, files in tbl.items() if len(files) > 1}
def attach_capping(mol1, mol2):
    """Connect every N-terminal of ``mol1`` to the chosen capping ``mol2``.

    Arguments:
        mol1 {rdKit mol object} -- core molecule whose N-terminals get capped
        mol2 {rdKit mol object} -- capping molecule; assumed to carry exactly
            one C-terminal tagged with atom-map number 1 -- TODO confirm
    Returns:
        rdKit mol object -- mol1 with one copy of mol2 amide-bonded to each
        of its N-terminals
    """
    count = 0
    # Tag every N-terminal in mol1; atom-map number 2 marks reactive nitrogens.
    for atom in mol1.GetAtoms():
        atom.SetProp('Cterm', 'False')
        if atom.GetSmarts() == '[N:2]' or atom.GetSmarts() == '[NH2:2]' or atom.GetSmarts() == '[NH:2]':
            count += 1
            atom.SetProp('Nterm', 'True')
        else:
            atom.SetProp('Nterm', 'False')
    # Tag the C-terminal in mol2 (expected to be unique).
    for atom in mol2.GetAtoms():
        atom.SetProp('Nterm', 'False')
        if atom.GetSmarts() == '[C:1]' or atom.GetSmarts() == '[CH:1]':
            atom.SetProp('Cterm', 'True')
        else:
            atom.SetProp('Cterm', 'False')
    # Attach one copy of mol2 to each N-terminal found above.
    for i in range(count):
        combo = rdmolops.CombineMols(mol1, mol2)
        Nterm = []
        Cterm = []
        # Collect the indices of the atoms that still have to react.
        for atom in combo.GetAtoms():
            if atom.GetProp('Nterm') == 'True':
                Nterm.append(atom.GetIdx())
            if atom.GetProp('Cterm') == 'True':
                Cterm.append(atom.GetIdx())
        # Create the amide bond between the first free N- and C-terminal pair.
        edcombo = rdchem.EditableMol(combo)
        edcombo.AddBond(Nterm[0], Cterm[0], order=Chem.rdchem.BondType.SINGLE)
        clippedMol = edcombo.GetMol()
        # Clear tags and map numbers on the atoms that just reacted so they
        # are not matched again in the next iteration.
        clippedMol.GetAtomWithIdx(Nterm[0]).SetProp('Nterm', 'False')
        clippedMol.GetAtomWithIdx(Cterm[0]).SetProp('Cterm', 'False')
        clippedMol.GetAtomWithIdx(Nterm[0]).SetAtomMapNum(0)
        clippedMol.GetAtomWithIdx(Cterm[0]).SetAtomMapNum(0)
        # Update the 'core' molecule for the next attachment.
        mol1 = clippedMol
    return mol1
def gather_point(input, index):
    """
    **Gather Point Layer**

    Gathers entries of a 3-D point tensor by index and concatenates them:
    ``Out = X[Index]``.

    Args:
        input (Variable): Source tensor of shape [B, N, 3] with rank >= 1.
        index (Variable): Index tensor of shape [B, M].

    Returns:
        output (Variable): Gathered tensor of shape [B, M].

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[None, 5, 3], dtype='float32')
            index = fluid.data(name='index', shape=[None, 1], dtype='int32')
            output = fluid.layers.gather_point(x, index)
    """
    # NOTE: LayerHelper consumes locals(), so this call must stay first,
    # before any extra local variables are introduced.
    helper = LayerHelper('gather_point', **locals())
    result = helper.create_variable_for_type_inference(helper.input_dtype())
    helper.append_op(
        type="gather_point",
        inputs={"X": input,
                "Index": index},
        outputs={"Output": result})
    return result
def _apply_size_dependent_ordering(input_feature, feature_level, block_level,
                                   expansion_size, use_explicit_padding,
                                   use_native_resize_op):
  """Applies Size-Dependent-Ordering when resizing feature maps.

  Chooses the cheaper order of {1x1 conv, resample}: 1x1 before upsampling
  (smaller map), downsampling before 1x1 (fewer positions).
  See https://arxiv.org/abs/1912.01106

  Args:
    input_feature: input feature map to be resized.
    feature_level: the level of the input feature.
    block_level: the desired output level for the block.
    expansion_size: the expansion size for the block.
    use_explicit_padding: Whether to use explicit padding.
    use_native_resize_op: Whether to use native resize op.

  Returns:
    A transformed feature at the desired resolution and expansion size.
  """
  padding = 'VALID' if use_explicit_padding else 'SAME'
  if feature_level >= block_level:  # Perform 1x1 then upsampling.
    node = slim.conv2d(
        input_feature,
        expansion_size, [1, 1],
        activation_fn=None,
        normalizer_fn=slim.batch_norm,
        padding=padding,
        scope='Conv1x1')
    if feature_level == block_level:
      return node
    # Each level difference doubles the spatial resolution.
    scale = 2**(feature_level - block_level)
    if use_native_resize_op:
      input_shape = shape_utils.combined_static_and_dynamic_shape(node)
      node = tf.image.resize_nearest_neighbor(
          node, [input_shape[1] * scale, input_shape[2] * scale])
    else:
      node = ops.nearest_neighbor_upsampling(node, scale=scale)
  else:  # Perform downsampling then 1x1.
    stride = 2**(block_level - feature_level)
    node = slim.max_pool2d(
        _maybe_pad(input_feature, use_explicit_padding), [3, 3],
        stride=[stride, stride],
        padding=padding,
        scope='Downsample')
    node = slim.conv2d(
        node,
        expansion_size, [1, 1],
        activation_fn=None,
        normalizer_fn=slim.batch_norm,
        padding=padding,
        scope='Conv1x1')
  return node
def distance(s1, s2):
    """Return the Levenshtein (edit) distance between strings s1 and s2.

    Classic two-row dynamic-programming formulation:
    O(len(s1) * len(s2)) time, O(min(len(s1), len(s2))) extra memory.
    """
    if len(s1) < len(s2):
        # Keep the shorter string as s2 so the DP rows stay as small as possible.
        return distance(s2, s1)
    # len(s1) >= len(s2)
    if len(s2) == 0:
        return len(s1)
    # BUG FIX: the original used the Python 2-only ``xrange`` (NameError on
    # Python 3); a list built from range() works everywhere.
    previous_row = list(range(len(s2) + 1))
    for i, c1 in enumerate(s1):
        current_row = [i + 1]
        for j, c2 in enumerate(s2):
            # previous_row and current_row are one character longer than s2,
            # hence j+1 for the insertion cost.
            insertions = previous_row[j + 1] + 1
            deletions = current_row[j] + 1
            substitutions = previous_row[j] + (c1 != c2)
            current_row.append(min(insertions, deletions, substitutions))
        previous_row = current_row
    return previous_row[-1]
def check_diversity(group, L):
    """Return True when *group* holds at least ``L`` distinct sensitive values
    (i.e. the group satisfies l-diversity)."""
    seen = set()
    for idx in group:
        seen.add(list_to_str(gl_data[idx][-1], cmp))
        # Early exit: l-diversity is already satisfied.
        if len(seen) >= L:
            return True
    return False
import urllib
def getEntries(person):
    """ Fetch a Advogato member's diary and return a dictionary in the form
        { date : entry, ... }

    NOTE(review): Python 2-only code -- ``urllib.urlopen``/``urllib.quote``
    and the padding ``map(None, ...)`` idiom (zip that pads the shorter
    sequence with None) were removed in Python 3.  ``DiaryParser`` is defined
    elsewhere in this project.
    """
    parser = DiaryParser()
    f = urllib.urlopen("http://www.advogato.org/person/%s/diary.xml" % urllib.quote(person))
    # Stream the document into the parser in 8 KiB chunks.
    s = f.read(8192)
    while s:
        parser.feed(s)
        s = f.read(8192)
    parser.close()
    result = {}
    # Pair dates with entries; map(None, ...) pads with None (Py2 behaviour).
    for d, e in map(None, parser.dates, parser.entries):
        result[d] = e
    return result
def sum_values(p, K):
    """
    sum the values in ``p``

    Folds every value of the mapping *p* together with ``dup_add`` over the
    domain *K* and returns the reversed result.

    NOTE(review): relies on the external helpers ``itervalues`` and
    ``dup_add`` -- presumably SymPy's dense-polynomial addition, where values
    are coefficient lists; verify against their definitions.
    """
    nv = []
    for v in itervalues(p):
        nv = dup_add(nv, v, K)
    # Result is reversed in place before returning (coefficient order flip).
    nv.reverse()
    return nv
def define_permit_price_targeting_constraints(m):
    """Add constraints linearising |permit price - target| via two dummy variables."""
    target = m.P_POLICY_PERMIT_PRICE_TARGET
    price = m.V_DUAL_PERMIT_MARKET
    # X_1 bounds the positive part of (target - price) and X_2 the negative
    # part; minimising their sum therefore minimises the absolute difference.
    m.C_PERMIT_PRICE_TARGET_CONSTRAINT_1 = pyo.Constraint(
        expr=m.V_DUMMY_PERMIT_PRICE_TARGET_X_1 >= target - price)
    m.C_PERMIT_PRICE_TARGET_CONSTRAINT_2 = pyo.Constraint(
        expr=m.V_DUMMY_PERMIT_PRICE_TARGET_X_2 >= price - target)
    return m
def predict4():
    """Use Xception to label image

    Loads a hard-coded demo image, runs it through the global ``model``
    (assumed to be Xception -- it expects 299x299 RGB input), and returns the
    cleaned-up top-1 class name.
    """
    path = 'static/Images/boxer.jpeg'
    img = image.load_img(path,target_size=(299,299))
    x = image.img_to_array(img)
    # Add the batch dimension -> (1, 299, 299, 3).
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    pclass = decode_predictions(preds, top=5)
    # Top-1 class name, e.g. 'boxer'.
    result = str(pclass[0][0][1])
    # Replace separator/punctuation characters with spaces before title-casing.
    bad_chars=[';',':','_','!','*']
    for i in bad_chars:
        result = result.replace(i, ' ')
    result = result.title()
    print(result)
    return result
def augment_data(image, label, seg_label, perform_random_flip_and_rotate,
                 num_channels, has_seg_labels):
    """
    Image augmentation for training. Applies the following operations:
    - Horizontally flip the image with probability 0.5
    - Vertically flip the image with probability 0.5
    - Apply random rotation (a multiple of 90 degrees)
    """
    if perform_random_flip_and_rotate:
        if has_seg_labels:
            # Stack the segmentation mask as an extra channel so it receives
            # exactly the same spatial transforms as the image.
            image = tf.concat([image, tf.expand_dims(seg_label, -1)], 2)
        image = tf.image.random_flip_left_right(image)
        image = tf.image.random_flip_up_down(image)
        # Pick one of the four right-angle rotations at random.
        rotate_angle = tf.random_shuffle([0.0, 90.0, 180.0, 270.0])[0]
        image = tf.contrib.image.rotate(
            image, rotate_angle * np.pi / 180.0, interpolation='BILINEAR')
        if has_seg_labels:
            # Split the transformed mask back off the last channel.
            seg_label = image[:, :, -1]
        image = image[:,:,:num_channels]
    return image, label, seg_label
def protobuf_open_channel(channel_name, media_type):
    """Build and serialize an OpenChannelRequest for *channel_name* carrying *media_type*."""
    request = pb.OpenChannelRequest()
    request.channel_name = channel_name
    request.content_type = media_type
    return request.SerializeToString()
def action(update, context):
    """A fun command to send bot actions (typing, record audio, upload photo, etc). Action appears at top of main chat.
    Done using the /action command. Restricted to admins."""
    bot = context.bot
    sender = update.message.from_user
    # Only admins may trigger this command.
    if not _admin(sender.id):
        return _for_admin_only_message(bot, sender.id, sender.name)
    available_actions = ['RECORD_AUDIO', 'RECORD_VIDEO_NOTE', 'TYPING', 'UPLOAD_AUDIO',
                         'UPLOAD_DOCUMENT', 'UPLOAD_PHOTO', 'UPLOAD_VIDEO', 'UPLOAD_VIDEO_NOTE']
    bot.send_chat_action(chat_id=config["GROUPS"]["crab_wiv_a_plan"],
                         action=choice(available_actions))
import os
def findPartsLists(path):
    """gets a list of files/folders present in a path

    Scans the top level of *path* for ``.xlsx`` workbooks whose first sheet
    name contains "parts" and returns ``[(full_path, name), ...]`` sorted in
    reverse lexicographic order.

    NOTE(review): ``fle[:-4]`` strips only ``xlsx`` and leaves a trailing dot
    in the returned name -- confirm whether ``fle[:-5]`` was intended.
    """
    walkr = os.walk(path)
    dirlist = [a for a in walkr]
    #print dirlist
    expts = []
    # dirlist[0][2] is the list of files directly inside *path* (no recursion).
    for fle in dirlist[0][2]:
        #print fle
        if(fle[-4:]=='xlsx'):
            try:
                # Second arg None loads all sheets lazily via the ExcelFile API.
                xl_file = pd.read_excel(os.path.join(path,fle),None)
                dfs = {sheet_name: xl_file.parse(sheet_name)
                       for sheet_name in xl_file.sheet_names}
                #print(dfs.keys()
                # Only keep workbooks whose first sheet name mentions "parts".
                if("parts" in list(dfs.keys())[0]):
                    expts+=[(os.path.join(path,fle),fle[:-4])]
            except IOError:
                pass
    return sorted(expts)[::-1]
def borehole_vec(x, theta):
    """Given x and theta, return vector of values.

    Row-wise borehole response: theta columns are (Hu, Ld_Kw, Treff,
    powparam); the last column of x is ignored and the rest splits into
    (rw, Hl).
    """
    hu, ld_kw, treff, powparam = np.split(theta, theta.shape[1], axis=1)
    rw, hl = np.split(x[:, :-1], 2, axis=1)
    # Numerator: driving head difference; denominator: flow resistance terms.
    flow = 2.0 * np.pi * (hu - hl)
    resistance = 2.0 * ld_kw / rw ** 2 + treff
    return ((flow / resistance) * np.exp(powparam * rw)).reshape(-1)
import requests
import json
def read_datastore(resource_id):
    """
    Retrieves data when the resource is part of the CKAN DataStore.

    Issues one probe request to learn the record count, then a second
    request for the full record set.

    Parameters
    ----------
    resource_id: str
        Id for resource

    Returns
    ----------
    pd.DataFrame:
        Data records in table format
    """
    probe = requests.get(
        DATASTORE_SEARCH_URL, params={"resource_id": resource_id, "limit": 1}
    )
    total = json.loads(probe.content)["result"]["total"]
    full = requests.get(
        DATASTORE_SEARCH_URL, params={"resource_id": resource_id, "limit": total}
    )
    full.encoding = "utf-8"
    records = json.loads(full.content)["result"]["records"]
    return pd.DataFrame.from_records(records).fillna("")
import hashlib
import binascii
def new_server_session(keys, pin):
    """Create SRP server session.

    Derives the (username, verifier, salt) triplet from the PIN, then opens
    a server-side SRP session bound to the device auth key.
    """
    setup_context = SRPContext(
        "Pair-Setup",
        str(pin),
        prime=constants.PRIME_3072,
        generator=constants.PRIME_3072_GEN,
        hash_func=hashlib.sha512,
        bits_salt=128,
        bits_random=512,
    )
    username, verifier, salt = setup_context.get_user_data_triplet()
    server_context = SRPContext(
        username,
        prime=constants.PRIME_3072,
        generator=constants.PRIME_3072_GEN,
        hash_func=hashlib.sha512,
        bits_salt=128,
        bits_random=512,
    )
    session = SRPServerSession(
        server_context, verifier, binascii.hexlify(keys.auth).decode()
    )
    return session, salt
def lenzi(df):
    """Return True when the pandas object *df* has zero rows."""
    return not len(df.index)
def m_step(counts, item_classes, psuedo_count):
    """
    Get estimates for the prior class probabilities (p_j) and the error
    rates (pi_jkl) using MLE with current estimates of true item classes
    See equations 2.3 and 2.4 in Dawid-Skene (1979)
    Input:
      counts: Array of how many times each rating was given by each rater
        for each item
      item_classes: Matrix of current assignments of items to classes
      psuedo_count: A psuedo count used to smooth the error rates. For each
        rater k
      and for each class i and class j, we pretend rater k has rated
      psuedo_count examples with class i when class j was the true class.
    Returns:
      p_j: class marginals [classes]
      pi_kjl: error rates - the probability of rater k giving
        response l for an item in class j [observers, classes, classes]
    """
    [nItems, nRaters, nClasses] = np.shape(counts)
    # compute class marginals
    class_marginals = np.sum(item_classes, axis=0) / float(nItems)
    # compute error rates for each rater, each predicted class
    # and each true class.  counts.T has shape [nClasses, nRaters, nItems];
    # matmul batches over the leading axis, yielding
    # [nClasses, nRaters, nClasses].
    error_rates = np.matmul(counts.T, item_classes) + psuedo_count
    # reorder axes so its of size [nRaters x nClasses x nClasses]
    error_rates = np.einsum('abc->bca', error_rates)
    # divide each row by the sum of the error rates over all observation classes
    sum_over_responses = np.sum(error_rates, axis=2)[:, :, None]
    # for cases where an annotator has never used a label, set their sum over
    # responses for that label to 1 to avoid nan when we divide. The result will
    # be error_rate[k, i, j] is 0 if annotator k never used label i.
    sum_over_responses[sum_over_responses == 0] = 1
    error_rates = np.divide(error_rates, sum_over_responses)
    return (class_marginals, error_rates)
from typing import Callable
def make_parser(fn: Callable[[], Parser]) -> Parser:
    """
    Make typed parser (required for mypy).
    """
    parser = generate(fn)
    return parser
def mfcc_htk(y, sr, hop_length=2**10, window_length=22050, nmfcc=13, n_mels=26, fmax=8000, lifterexp=22):
    """
    Get MFCCs 'the HTK way' with the help of Essentia
    https://github.com/MTG/essentia/blob/master/src/examples/tutorial/example_mfcc_the_htk_way.py
    Using all of the default parameters from there except the hop length (which shouldn't matter), and a much longer window length (which has been found to work better for covers)
    Parameters
    ----------
    y: ndarray(N)
        Audio samples to analyse
    sr: int
        Sample rate (unused here; kept for interface compatibility)
    hop_length: int
        Hop length between frames
    window_length: int
        Length of the window to use for the STFT
    nmfcc: int
        Number of MFCC coefficients to compute
    n_mels: int
        Number of frequency bands to use
    fmax: int
        Maximum frequency
    lifterexp: int
        Cepstral liftering coefficient (HTK default CEPLIFTER = 22)
    Returns
    -------
    ndarray(nmfcc, nframes)
        An array of all of the MFCC frames
    """
    # Round the FFT size up to the next power of two.
    fftlen = int(2**(np.ceil(np.log(window_length)/np.log(2))))
    spectrumSize = fftlen//2+1
    zeroPadding = fftlen - window_length
    w = estd.Windowing(type = 'hamming', # corresponds to htk default USEHAMMING = T
                       size = window_length,
                       zeroPadding = zeroPadding,
                       normalized = False,
                       zeroPhase = False)
    spectrum = estd.Spectrum(size=fftlen)
    # BUG FIX: renamed from `mfcc_htk`, which shadowed this function's name.
    mfcc_extractor = estd.MFCC(inputSize = spectrumSize,
                               type = 'magnitude', # htk uses mel filterbank magniude
                               warpingFormula = 'htkMel', # htk's mel warping formula
                               weighting = 'linear', # computation of filter weights done in Hz domain
                               highFrequencyBound = fmax, # 8000 is htk default
                               lowFrequencyBound = 0, # corresponds to htk default
                               numberBands = n_mels, # corresponds to htk default NUMCHANS = 26
                               numberCoefficients = nmfcc,
                               normalize = 'unit_max', # htk filter normaliation to have constant height = 1
                               dctType = 3, # htk uses DCT type III
                               logType = 'log',
                               liftering = lifterexp) # corresponds to htk default CEPLIFTER = 22
    mfccs = []
    # startFromZero = True, validFrameThresholdRatio = 1 : the way htk computes windows
    # BUG FIX: the original iterated `audio.y`, but no `audio` exists in this
    # scope (NameError); the samples are the `y` parameter.
    for frame in estd.FrameGenerator(y, frameSize = window_length, hopSize = hop_length , startFromZero = True, validFrameThresholdRatio = 1):
        spect = spectrum(w(frame))
        mel_bands, mfcc_coeffs = mfcc_extractor(spect)
        mfccs.append(mfcc_coeffs)
    return np.array(mfccs, dtype=np.float32).T
def num_to_int(num):
    """
    Checks that a numerical value (e.g. returned by robot) is an integer and
    not a float.

    Parameters
    ----------
    num : number to check

    Returns
    -------
    integer : num cast to an integer

    Raises
    ------
    ValueError : if num is not an integer
    """
    remainder = num % 1
    if remainder == 0:
        return int(num)
    raise ValueError('Expecting integer. Got: "{0}" ({1})'
                     .format(num, type(num)))
def shuffle_blocks(wmx_orig, pop_size=800):
    """
    Shuffles pop_size*pop_size blocks within the matrix
    :param wmx_orig: original weight matrix (nPCs x nPCs, with nPCs a module-level global)
    :param pop_size: size of the blocks kept together
    :return: wmx_modified: modified weight matrix
    """
    assert nPCs % pop_size == 0
    np.random.seed(12345)
    # BUG FIX: `/` yields a float in Python 3 and breaks range() below; use
    # integer division (divisibility is asserted above).
    n_pops = nPCs // pop_size
    # get blocks
    blocks = {}
    for i in range(n_pops):
        for j in range(n_pops):
            blocks[i, j] = wmx_orig[i*pop_size:(i+1)*pop_size, j*pop_size:(j+1)*pop_size]
    # generate shuffling idx (kept as linspace + shuffle to preserve the
    # original RNG stream and therefore the exact shuffle produced by seed 12345)
    x = np.linspace(0, n_pops-1, n_pops)
    y = np.linspace(0, n_pops-1, n_pops)
    np.random.shuffle(x)
    np.random.shuffle(y)
    # create block shuffled weight matrix (float keys hash equal to int keys,
    # so the dict lookup below is safe)
    wmx_modified = np.zeros((nPCs, nPCs))
    for i, id_i in enumerate(x):
        for j, id_j in enumerate(y):
            wmx_modified[i*pop_size:(i+1)*pop_size, j*pop_size:(j+1)*pop_size] = blocks[id_i, id_j]
    return wmx_modified
def change():
    """
    Change language
    """
    requested_lang = request.args.get("lang", None)
    # Pass the current user's id when a user is logged in.
    user_id = g.my['_id'] if hasattr(g, 'my') and g.my else None
    data = core.languages.change(lang=requested_lang, my_id=user_id)
    return jsonify(data)
import shutil
def make_pkg(pkgname, context):
    """Create a new extension package.

    :param pkgname: Name of the package to create.
    :param context: Mapping with keys that match the placeholders in the
        templates.
    :return: True if package creation succeeded or a tuple with False and an
        error message in case the creation failed.
    :rtype: Bool or Tuple
    """
    try:
        shutil.copytree(TEMPLATE_DIRNAME, pkgname)
    except (OSError, IOError, shutil.Error) as exc:
        return False, exc.strerror
    for template_file in TEMPLATE_FILES:
        try:
            write_template(pkgname, template_file, context)
        except (OSError, IOError) as exc:
            return False, exc.strerror
    return True
def view_explorer_node(node_hash: str):
    """Build and send an induction query around the given node."""
    target_node = manager.get_node_by_hash_or_404(node_hash)
    induction_query = manager.build_query_from_node(target_node)
    return redirect_to_view_explorer_query(induction_query)
import math
def encode_into_any_base(number, base, encoded_num):
    """Encode number into any base 2-36. Can be fractional or whole.

    Parameters:
        number: float -- integer representation of number (in base 10)
        base: int -- base to convert to
        encoded_num: str -- representation (so far) of number in base

    Return: str -- string representation of number in the new base

    NOTE(review): delegates to the external helpers ``encode_whole_number``
    and ``encode_fractional_number``; appears to assume *number* is
    non-negative -- confirm with callers.
    """
    # enocding numbers if it's not fractional
    if number % 1 == 0:
        return encode_whole_number(number, base)
    # encoding numbers that are fractional
    else:
        # first encoding the part that comes before the radix point
        # NOTE(review): using str(number)[0] != '0' to detect a non-zero
        # integer part is fragile -- e.g. str(1e-05) starts with '1' --
        # confirm inputs can never be rendered in scientific notation.
        if not str(number)[0] == '0':
            int_part = math.floor(number)
            encoded_num += encode_whole_number(int_part, base)
            # now cut off the integer from number, so it's just a fraction
            number = number - int_part
            # then encoding the decimal part of the number (recursive call
            # falls into the else-branch below because number now starts '0.')
            return encode_into_any_base(number, base, encoded_num)
        else:
            # add the radix point to the answer
            if encoded_num == '':
                encoded_num += '0'
            encoded_num += '.'
            # convert the fractional part (of the overall number being encoded)
            encoded_num += encode_fractional_number(number, base)
            return encoded_num
def handle_forbidden(error: Forbidden) -> Response:
    """Render the base 403 (Forbidden) error page from the raised description."""
    body = error.description
    return respond(body, status=HTTPStatus.FORBIDDEN)
def getval(l, b, map='sfd', size=None, order=1):
    """Return SFD at the Galactic coordinates l, b.

    Looks up the requested dust map (an NGP/SGP FITS pair under
    ``$DUST_DIR/maps``) and interpolates it at the given coordinates.

    Args:
        l, b: Galactic longitude/latitude in degrees; scalars or arrays of
            matching shape.
        map: map name; 'sfd' is an alias for 'dust'.
        size: resolution suffix for the file name; defaults to the largest
            known size for the built-in maps.
        order: interpolation spline order passed to map_coordinates.

    Returns:
        float32 array of interpolated map values, shaped like *l*.

    Example usage:
        h, w = 1000, 4000
        b, l = numpy.mgrid[0:h,0:w]
        l = 180.-(l+0.5) / float(w) * 360.
        b = 90. - (b+0.5) / float(h) * 180.
        ebv = dust.getval(l, b)
        imshow(ebv, aspect='auto', norm=matplotlib.colors.LogNorm())
    """
    l = numpy.atleast_1d(l)
    b = numpy.atleast_1d(b)
    if map == 'sfd':
        map = 'dust'
    if map in ['dust', 'd100', 'i100', 'i60', 'mask', 'temp', 'xmap']:
        fname = 'SFD_'+map
    else:
        fname = map
    maxsize = { 'd100':1024, 'dust':4096, 'i100':4096, 'i60':4096,
                'mask':4096 }
    if size is None and map in maxsize:
        size = maxsize[map]
    if size is not None:
        fname = fname + '_%d' % size
    fname = 'maps/' + fname
    fname = os.path.join(os.environ['DUST_DIR'], fname)
    if not os.access(fname+'_ngp.fits', os.F_OK):
        raise Exception('Map file %s not found' % (fname+'_ngp.fits'))
    if l.shape != b.shape:
        raise ValueError('l.shape must equal b.shape')
    out = numpy.zeros_like(l, dtype='f4')
    # Each hemisphere is stored in its own FITS file with its own projection.
    for pole in ['ngp', 'sgp']:
        m = (b >= 0) if pole == 'ngp' else b < 0
        if numpy.any(m):
            hdulist = pyfits.open(fname+'_%s.fits' % pole)
            w = wcs.WCS(hdulist[0].header)
            x, y = w.wcs_world2pix(l[m], b[m], 0)
            out[m] = map_coordinates(hdulist[0].data, [y, x], order=order, mode='nearest')
    # BUG FIX: removed the Python 2-only debug statement ``print fname``,
    # which is a SyntaxError under Python 3.
    return out
def format_timedelta(tdelta):
    """Return the timedelta as a 'HH:mm:ss' string (hours may exceed 24)."""
    seconds_total = int(tdelta.total_seconds())
    minutes_total, seconds = divmod(seconds_total, 60)
    hours, minutes = divmod(minutes_total, 60)
    return f"{hours:02d}:{minutes:02d}:{seconds:02d}"
def video_data_to_df(videos_entries, save_csv):
    """
    Creating a dataframe from the video data stored as tuples
    :param videos_entries: (list) list of tuples containing topics, subtopics, videos and durations
    :param save_csv: (boolean) condition to specify if the df is saved locally as a csv file
    :return: (dataframe) df with all data arranged in dataframe
    """
    videos_df = pd.DataFrame(videos_entries)
    # Column names come from the module-level videos_df_colnames list.
    videos_df.columns = videos_df_colnames
    # Round the minutes column and derive the hours column from it.
    videos_df["video_length_[mins]"] = round(videos_df["video_length_[mins]"], 2)
    videos_df["video_length_[hrs]"] = round(videos_df["video_length_[mins]"]/60, 2)
    videos_df.sort_values(by=["topic", "subtopic", "video"], inplace=True)
    videos_df.reset_index(inplace=True, drop=True)
    if save_csv:
        # Persist a local copy next to the other USMLE artefacts.
        videos_df.to_csv(usmle_videos_csv_copy_path + usmle_2020_videos_df_filename)
    return videos_df
def create_mysql_entitySet(username, databaseName):
    """ Create a new entity set in the databaseName

    Reads ``entitySetName`` and ``attributes`` from the request JSON, records
    them in the schema store, and issues a CREATE TABLE against the user's
    MySQL database.

    SECURITY(review): entitySetName, attribute names and their DataType
    strings come straight from request.json and are interpolated into the DDL
    string below -- SQL injection is possible.  Identifiers cannot be bound
    as statement parameters; validate/whitelist them before use.
    """
    password = get_password(username)
    entitySetName = request.json['entitySetName']
    attributes = request.json['attributes']
    addToSchema(request.get_json(),"mysql")
    pks = []
    sql = "CREATE TABLE " + username + "_" + databaseName + "." + entitySetName + " ("
    for attribute in attributes:
        print(attribute, attributes[attribute])
        print(attributes[attribute]['DataType'])
        sql += " " + attribute + " " + attributes[attribute]['DataType']
        if attributes[attribute]['NN'] == 1:
            sql += " NOT NULL"
        if attributes[attribute]['AI'] == 1:
            sql += " AUTO_INCREMENT"
        if attributes[attribute]['PK'] == 1:
            pks.append(attribute)
        sql += ","
    # First primary-key column, then any additional ones (raises IndexError
    # if the request declared no primary key).
    sql += "PRIMARY KEY (" + pks[0]
    for i in range(1,len(pks)):
        sql += "," + pks[i]
    sql += "));"
    try:
        cnx = connectSQLServerDB(username, password, username + "_" + databaseName)
        mycursor = cnx.cursor()
        mycursor.execute(sql)
        cnx.close()
        return jsonify(success=1, message="Entity Set '" + entitySetName + "' created successfully")
    except mysql.connector.Error as err:
        return jsonify(success=0, error_code=err.errno, message=err.msg)
def m6(X, Y, Xp, Yp, alpha=1.0, prev='ident', post='ident', **kwargs):
    """Computes a matrix with the values of applying the kernel
    :math:`m_6` between each pair of elements in :math:`X` and :math:`Y`.

    Args:
        X: Numpy matrix.
        Y: Numpy matrix.
        Xp: Numpy matrix with the probabilities of each category in *X*.
        Yp: Numpy matrix with the probabilities of each category in *Y*.
        alpha (float): Argument for the inverting function *h*.
        prev (string): Function to transform the data before composing.
            Values: ``'ident'``, ``'f1'`` or a function.
        post (string): Function to transform the data after composing.
            Values: ``'ident'``, ``'f1'``, ``'f2'`` or a function.
        kwargs (dict): Arguments required by *prev* or *post*.

    Return:
        Numpy matrix of size :math:`m_X \\times m_Y`.

    Since the code is vectorised any function passed in *prev* or *post*
    must work on numpy arrays.
    """
    # Inverting function applied to the category probabilities.
    h = lambda x: (1.0 - x ** alpha) ** (1.0 / alpha)
    prevf = get_vector_function(prev, kwargs)
    postf = get_vector_function(post, kwargs)
    xm, xn = X.shape
    ym, yn = Y.shape
    Xp = h(Xp)
    Yp = h(Yp)
    G = np.zeros((xm, ym))
    for i in range(xm):
        # Broadcast row i of X (and its probabilities) against every row of Y.
        I = np.tile(X[i], (ym, 1))
        Ip = np.tile(Xp[i], (ym, 1))
        EQ = I == Y
        NE = I != Y
        a = 2.0 * np.sum(prevf(Ip * EQ), axis=1)
        b = np.sum(prevf(Ip * NE), axis=1)
        c = np.sum(prevf(Yp * NE), axis=1)
        dx = np.sum(prevf(1.0 - Ip * NE), axis=1)
        # BUG FIX: dy previously recomputed dx with Ip; the Y-side term must
        # use Yp, mirroring the b (X-side) / c (Y-side) pair above.
        dy = np.sum(prevf(1.0 - Yp * NE), axis=1)
        d = dx + dy
        apd = a + d
        G[i, :] = apd / (apd + 2.0 * (b + c))
    return postf(G)
import time
import json
from unittest.mock import call
def serve_communications_and_statuses(erpnext_support_user, erpnext_support_issues, bench_site):
    """
    returns a dict of support issue communications and statuses
    response = {
        "issue_name_1": {
            "communications": [],
            "status": "status",
            "last_sync_on": "last_sync_on"
        },
        "issue_name_2": {
            "communications": [],
            "status": "status",
            "last_sync_on": "last_sync_on"
        }
    }
    """
    authenticate_erpnext_support_user(erpnext_support_user)
    sync_time = get_datetime_str(now_datetime())
    res = {}
    # NOTE(review): presumably a grace period so in-flight writes land before
    # the sync query runs -- confirm this delay is still required.
    time.sleep(5)
    for erpnext_support_issue in json.loads(erpnext_support_issues):
        if not erpnext_support_issue.get("frappe_issue_id"):
            continue
        # Sync Communications for Issue
        fields = ["name", "subject", "content", "recipients", "has_attachment", "creation"]
        filters = [
            ["reference_doctype", "=", "Issue"],
            ["reference_name", "=", erpnext_support_issue.get("frappe_issue_id")],
            ["communication_medium", "=", "Email"],
            ["sent_or_received", "=", "Sent"],
            ["creation", ">", get_datetime(erpnext_support_issue.get("last_sync_on"))]
        ]
        # BUG FIX: the original wrapped this in unittest.mock.call(), which
        # only builds a mock call record and never executes the query; invoke
        # frappe.get_all directly.
        communications = frappe.get_all(doctype="Communication", filters=filters, fields=fields, order_by="creation ASC")
        # Sync Attachments for Communications
        communications = get_attachments(communications)
        # Sync Status for Issue
        frappe_issue = frappe.get_doc("Issue", erpnext_support_issue.get("frappe_issue_id"))
        res[erpnext_support_issue.get("name")] = {
            "communications": communications,
            "status": "Open" if frappe_issue.get("status") not in ["Open", "Closed"] else frappe_issue.get("status"),
            "priority": frappe_issue.get("priority"),
            "resolution_by": get_datetime_str(frappe_issue.resolution_by) if frappe_issue.resolution_by else None,
            "last_sync_on": sync_time,
            "release": frappe_issue.get("release")
        }
    return json.dumps(res)
def answer_view(answerid):
    """route to view a specific answer"""
    message = "Your updated answer: {} ".format(user_answers[answerid])
    return jsonify({"answer": message})
def jwk_factory(acct_priv_key_path: str) -> _JWKBase:
    """Build the JWK wrapper matching the private key stored in the given PEM file.

    RSA keys map to JWKRSA; ES256 (SECP256R1) keys map to JWKES256; anything
    else raises.
    """
    with open(acct_priv_key_path, 'rb') as f:
        pem_bytes = f.read()
    acct_priv = serialization.load_pem_private_key(
        data=pem_bytes,
        password=None,
        backend=default_backend()
    )
    if isinstance(acct_priv, rsa.RSAPrivateKey):
        pub_numbers = acct_priv.public_key().public_numbers()
        return JWKRSA(
            priv_key=acct_priv,
            n=pub_numbers.n,
            e=pub_numbers.e
        )
    if isinstance(acct_priv, ec.EllipticCurvePrivateKey):
        if not isinstance(acct_priv.curve, ec.SECP256R1):
            raise NotImplementedError(
                f'ecdsa curve {acct_priv.curve} not implemented'
            )
        return JWKES256(acct_priv)
    raise TypeError(f'key type {type(acct_priv)} not supported')
from pathlib import Path
def execute(
    scan_definition: str | Path,
    df: DataFrame,
    *,
    soda_server_client: SodaServerClient | None = None,
) -> ScanResult:
    """
    Execute a scan on a data frame.

    The scan definition may be a path to a scan file or the scan file's
    content; the data frame is registered as a temp view under the scan's
    table name before the scan runs.

    Parameters
    ----------
    scan_definition : Union[str, Path]
        The path to a scan file or the content of a scan file.
    df: DataFrame
        The data frame to be scanned.
    soda_server_client : Optional[SodaServerClient] (default : None)
        A soda server client.

    Returns
    -------
    out : ScanResult
        The scan results.
    """
    yml = create_scan_yml(scan_definition)
    df.createOrReplaceTempView(yml.table_name)
    prepared_scan = create_scan(yml, soda_server_client=soda_server_client)
    prepared_scan.execute()
    return prepared_scan.scan_result
def jaccard_similarity(emb1: np.ndarray, emb2: np.ndarray) -> float:
    """Jaccard-style coefficient between two feature vectors.

    Counts positions where the vectors differ (and at least one entry is
    non-zero) over positions where at least one entry is non-zero.

    :param emb1: shape = [feature,]
    :param emb2: shape = [feature,]
    :return: the coefficient as a float
    """
    differs = emb1 != emb2
    either_nonzero = (emb1 != 0) | (emb2 != 0)
    numerator = np.double((differs & either_nonzero).sum())
    denominator = np.double(either_nonzero.sum())
    return numerator / denominator
from typing import Tuple
from typing import Dict
from typing import List
def _share_secret_int_indices(s_i: int, n: int, t: int) -> Tuple[Dict[int, int], List[PointG1]]:
    """ Computes n shares of a given secret such that at least t + 1 shares are required for recovery
        of the secret. Additionally returns the commitments to the coefficients of the polynomial
        used to verify the validity of the shares.
        Assumes nodes use the indices [1, 2, ..., n].
        See share_secret function for a generalized variant with arbitrary indices.
    """
    # Polynomial coefficients c_i0, c_i1, ..., c_it; the constant term is the secret.
    poly = [s_i] + [random_scalar() for _ in range(t)]

    def evaluate(x: int) -> int:
        """Evaluate the secret polynomial at x modulo the curve order."""
        acc = 0
        for power, coef in enumerate(poly):
            acc = (acc + coef * pow(x, power, CURVE_ORDER)) % CURVE_ORDER
        return acc

    shares = {idx: evaluate(idx) for idx in range(1, n + 1)}
    commitments = [multiply(G1, coef) for coef in poly]
    return shares, commitments
def auto_type(key, redis=None, default=None, o=True):
    """Returns datatype instance wrapping the value stored at *key*.

    Plain Redis strings are sniffed further: a string holding a dict or list
    payload is mapped to the 'dict-string' / 'list-string' wrapper types.
    When the key does not exist, *default* (a type name) is tried; otherwise
    None is returned.
    """
    if redis is None:
        redis = config.redis
    key = compress_key(key)
    if redis.exists(key):
        datatype = redis.type(key)
        if datatype == 'string':
            test_string = RedisString(key, redis=redis).data
            if isinstance(test_string, dict):
                datatype = 'dict-string'
            elif isinstance(test_string, list):
                datatype = 'list-string'
            # BUG FIX: `basestring` is Python 2-only (NameError on Python 3);
            # use `str`.  str/int/float payloads all keep the plain 'string'
            # type, matching the original branches.
            elif isinstance(test_string, (str, int, float)):
                datatype = 'string'
        return TYPE_MAP.get(datatype)(key, redis=redis, o=o)
    else:
        if default:
            try:
                return TYPE_MAP.get(default)(key, redis=redis, o=o)
            # NOTE(review): TYPE_MAP.get() on a plain dict returns None for a
            # missing key, which would raise TypeError (not KeyError) when
            # called -- confirm TYPE_MAP is a mapping that raises KeyError.
            except KeyError:
                raise ValueError('Provide a valid default redis type.')
        return None
from pathlib import Path
def get_user_data_dir(app_name=DEFAULT_APP_NAME, auto_create=True) -> Path:
    """
    Get platform specific data folder

    Resolves XDG_DATA_HOME on Linux, APPDATA on Windows, and falls back to
    the conventional per-platform defaults otherwise.
    """
    return _get_user_dir(
        app_name=app_name,
        xdg_env_var='XDG_DATA_HOME',
        win_env_var='APPDATA',
        fallback='~/.local/share',
        win_fallback='~\\AppData\\Roaming',
        macos_fallback='~/Library',
        auto_create=auto_create,
    )
import codecs
import csv
import os
import re
def parse_evidence(
    fixed_labels=None,
    evidence_files=None,
    molecules=None,
    evidence_score_field=None,
    return_raw_csv_data=False,
    unimod_file_list=None,
):
    """
    Reads in the evidence file and returns the final formatted fixed labels,
    the evidence lookup, which is passed to the isotopologue library and the
    final formatted molecules (fixed labels are stripped form the molecules).
    Note:
        Output .csv files from `Ursgal`_ (`Documentation`_) can directly be
        used. Also `mzTab`_ files can be used as input.
        .. _Ursgal:
            https://github.com/ursgal/ursgal
        .. _Documentation:
            http://ursgal.readthedocs.io/en/latest/
        .. _mzTab:
            http://www.psidev.info/mztab
    Args:
        fixed_labels (dict): dict with fixed labels, example format is shown
            below.
        evidence_files (list): list of evidence file paths.
        molecules (list): list of additional molecules
        evidence_score_field (str): specify fieldname which holds the search
            engine score (Default is "PEP")
        return_raw_csv_data (bool): additionally return the buffered input
            rows per evidence file.
        unimod_file_list (list): optional unimod XML files for the
            :class:`pyqms.UnimodMapper`.
    Example fixed label format (the code reads the key
    ``'element_composition'``, which may be a dict or a
    ChemicalComposition)::
        {
            'C' : [
                {
                    'element_composition': {
                        'O': 1,
                        'H': 3,
                        '14N': 1,
                        'C': 2
                    },
                    'evidence_mod_name': 'Carbamidomethyl'
                },
            ]
        }
    Returns:
        tuple: final formatted fixed label dict, evidence lookup, list of molecules
    """
    if molecules is None:
        molecules = []
    if evidence_score_field is None:
        evidence_score_field = "PEP"  # default
    unimod_parser = pyqms.UnimodMapper(xml_file_list=unimod_file_list)
    fixed_mod_lookup = {}
    amino_acid_2_fixed_mod_name = ddict(list)
    formatted_fixed_labels = None
    evidence_lookup = None
    molecule_set = set()
    all_fixed_mod_names = set()
    # Pass 1: normalize the fixed-label definitions into hill-notation
    # formulas keyed by amino acid, and remember each mod's composition by
    # its evidence name so it can be re-added after being stripped later.
    if fixed_labels is not None and len(fixed_labels.keys()) != 0:
        formatted_fixed_labels = {}
        for aa, fixed_mod_info_dict_list in fixed_labels.items():
            for fixed_mod_info_dict in fixed_mod_info_dict_list:
                if isinstance(fixed_mod_info_dict["element_composition"], dict):
                    tmp_cc_factory = pyqms.ChemicalComposition()
                    tmp_cc_factory.add_chemical_formula(
                        fixed_mod_info_dict["element_composition"]
                    )
                else:
                    # already a ChemicalComposition-like object
                    tmp_cc_factory = fixed_mod_info_dict["element_composition"]
                # print(type(tmp_cc_factory))
                # print(fixed_mod_info_dict)
                if aa not in formatted_fixed_labels.keys():
                    formatted_fixed_labels[aa] = []
                formatted_fixed_labels[aa].append(tmp_cc_factory.hill_notation_unimod())
                # save it under name and amino acid!
                # deepcopy: tmp_cc_factory is cleared below and may be reused
                fixed_mod_lookup[fixed_mod_info_dict["evidence_mod_name"]] = dc(
                    tmp_cc_factory
                )
                amino_acid_2_fixed_mod_name[aa].append(
                    fixed_mod_info_dict["evidence_mod_name"]
                )
                all_fixed_mod_names.add(fixed_mod_info_dict["evidence_mod_name"])
                tmp_cc_factory.clear()
    cc_factory = pyqms.ChemicalComposition()
    # this is the lookup for the lib with the evidences
    # tmp_evidences = ddict(list)
    tmp_evidences = {}
    csv_raw_data_to_return = {}
    # tmp_charges_of_evidences = set()
    # Pass 2: read every evidence file (csv or mzTab) and collect, per
    # molecule string, the RT/score evidences and trivial names.
    for evidence_file in evidence_files:
        input_is_csv = False
        # NOTE(review): evidence_lookup is re-initialized for every file here
        # but only populated after this loop, so earlier iterations' dicts
        # are discarded — confirm this reset-per-file is intentional.
        evidence_lookup = {}
        with codecs.open(
            evidence_file, mode="r", encoding="utf-8"
        ) as openend_evidence_file:
            # first buffer the file here depending on mztab andf csv input
            if evidence_file.upper().endswith("CSV"):
                dict_reader = csv.DictReader(openend_evidence_file)
                modification_fieldname = "Modifications"
                rt_fieldname = "Retention Time (s)"
                seq_fieldname = "Sequence"
                input_is_csv = True
            elif evidence_file.upper().endswith("MZTAB"):
                # keep only PSM rows (and the PSH header row) of the mzTab
                dict_reader = csv.DictReader(
                    [row for row in openend_evidence_file if row[:3] in ["PSM", "PSH"]],
                    delimiter="\t",
                )
                modification_fieldname = "modifications"
                rt_fieldname = "retention_time"
                seq_fieldname = "sequence"
            else:
                print(
                    "The format {0} is not recognized by the pyQms adaptor function".format(
                        os.path.splitext(evidence_file)[1]
                    )
                )
            input_buffer = []
            for line_dict in dict_reader:
                input_buffer.append(line_dict)
            csv_raw_data_to_return[evidence_file] = input_buffer
            for line_dict in input_buffer:
                modifications = line_dict.get(modification_fieldname, "")
                if modifications == "":
                    molecule = line_dict[seq_fieldname]
                else:
                    if input_is_csv:
                        # csv input already carries 'Name:pos;...' style mods
                        formatted_mods = line_dict[modification_fieldname]
                    else:
                        # mzTab style, e.g. "2-UNIMOD:4,3-UNIMOD:4" — convert
                        # each entry to "Name:pos"
                        formatted_mods = []
                        # 2-UNIMOD:4,3-UNIMOD:4
                        for pos_and_unimod_id in line_dict[
                            modification_fieldname
                        ].split(","):
                            pos, unimod_id = pos_and_unimod_id.split("-")
                            unimod_name = unimod_parser.id2first_name(unimod_id.split(":")[1])
                            formatted_mods.append("{0}:{1}".format(unimod_name, pos))
                        formatted_mods = ";".join(formatted_mods)
                    molecule = "{0}#{1}".format(
                        line_dict[seq_fieldname], formatted_mods
                    )
                dict_2_append = {}
                rt = line_dict.get(rt_fieldname, "")
                # seconds is the standard also for mzTab
                if rt != "":
                    dict_2_append["RT"] = float(rt) / 60.0  # always in min
                score = line_dict.get(evidence_score_field, "")
                if score != "":
                    dict_2_append["score"] = float(score)
                    dict_2_append["score_field"] = evidence_score_field
                else:
                    # string sentinel "None", not the None object
                    dict_2_append["score"] = "None"
                    dict_2_append["score_field"] = "None"
                if molecule not in tmp_evidences.keys():
                    tmp_evidences[molecule] = {"evidences": [], "trivial_names": set()}
                for trivial_name_key in [
                    "proteinacc_start_stop_pre_post_;",  # old ursgal style
                    "trivial_name",  # self defined name
                    "Protein ID",  # new ursgal style
                    "accession",  # mzTab style
                ]:
                    additional_name = line_dict.get(trivial_name_key, "")
                    if additional_name != "":
                        # use set to remove double values
                        tmp_evidences[molecule]["trivial_names"].add(additional_name)
                        if 'trivial_name' not in dict_2_append.keys():
                            dict_2_append['trivial_name'] = additional_name
                        else:
                            dict_2_append['trivial_name'] += ';{0}'.format(additional_name)
                tmp_evidences[molecule]["evidences"].append(dict_2_append)
    # matches the trailing ":<pos>" of a modification entry
    mod_pattern = re.compile(r""":(?P<pos>[0-9]*$)""")
    all_molecules = list(molecules)
    if len(tmp_evidences.keys()) > 0:
        all_molecules += list(tmp_evidences.keys())
    # Pass 3: strip fixed-label mods from each molecule string, rebuild the
    # full chemical formula (re-adding the fixed mods), and key the
    # collected evidences by that formula.
    for molecule_and_mods in sorted(all_molecules):
        # try to convert trivial name set to list for conveniences
        try:
            tmp_evidences[molecule_and_mods]["trivial_names"] = sorted(
                list(set(tmp_evidences[molecule_and_mods]["trivial_names"]))
            )
        except:
            # molecules passed in explicitly have no tmp_evidences entry
            pass
        # print(molecule_and_mods)
        if "#" in molecule_and_mods:
            molecule, modifications = molecule_and_mods.split("#")
        else:
            molecule = molecule_and_mods
            modifications = None
        fixed_label_mod_addon_names = []
        if modifications is not None:
            mods_to_delete = []
            mod_list = modifications.split(";")
            for pos_in_mod_list, mod_and_pos in enumerate(mod_list):
                # OLD STYLE, no ':' in mod allowed!
                # mod, pos = mod_and_pos.split(':')
                # NEW STYLE, SILAC does not crash...
                for match in mod_pattern.finditer(mod_and_pos):
                    pos = int(match.group("pos"))
                    mod = mod_and_pos[: match.start()]
                    break
                modded_aa = molecule[int(pos) - 1]
                if (
                    formatted_fixed_labels is not None
                    and modded_aa in formatted_fixed_labels.keys()
                    and mod in all_fixed_mod_names
                ):
                    # fixed label: remember it, strip it from the mod string
                    fixed_label_mod_addon_names.append(mod)
                    mods_to_delete.append(pos_in_mod_list)
            # pop from the back so earlier indices stay valid
            for modpos_2_remove in sorted(mods_to_delete, reverse=True):
                mod_list.pop(modpos_2_remove)
            if len(mod_list) > 0:
                molecule = "{0}#{1}".format(molecule, ";".join(mod_list))
            else:
                # nosetest does not line else and pass
                # molecule = molecule
                pass
        else:
            # fail check if fixed mod is not in the modifications!
            # add all fixed modification!
            if formatted_fixed_labels is not None:
                for aa in molecule:
                    if aa in formatted_fixed_labels.keys():
                        for mod_name in amino_acid_2_fixed_mod_name[aa]:
                            fixed_label_mod_addon_names.append(mod_name)
        # print(molecule)
        if molecule.startswith("+"):
            # bare chemical formula (e.g. "+H2O"), not a peptide sequence
            cc_factory.add_chemical_formula(molecule)
        elif "#" in molecule:
            try:
                sequence, modifications = molecule.split("#")
            except ValueError:
                raise ValueError(f"Invalid Sequence too many # ({molecule})")
            cc_factory.use(sequence=sequence, modifications=modifications)
        else:
            cc_factory.use(sequence=molecule)
        if len(fixed_label_mod_addon_names) != 0:
            # re-add the stripped fixed-label compositions to the formula
            for fixed_mod_name in fixed_label_mod_addon_names:
                cc_factory.add_chemical_formula(fixed_mod_lookup[fixed_mod_name])
        complete_formula = cc_factory.hill_notation_unimod()
        molecule_set.add(molecule)
        if molecule_and_mods in tmp_evidences.keys():
            if complete_formula not in evidence_lookup.keys():
                evidence_lookup[complete_formula] = {}
            evidence_lookup[complete_formula][molecule_and_mods] = tmp_evidences[
                molecule_and_mods
            ]
        cc_factory.clear()
    molecule_list = list(molecule_set)
    if return_raw_csv_data:
        return (
            formatted_fixed_labels,
            evidence_lookup,
            molecule_list,
            csv_raw_data_to_return,
        )
    else:
        return formatted_fixed_labels, evidence_lookup, molecule_list
import copy
import numpy
def calculateDominantFrequency(signal, fs, fMin = 0, fMax = None, applyWindow = True,
        fftZeroPaddingFactor = 1
    ):
    """
    calculates the dominant frequency of the given signal
    @param signal input signal
    @param fs sampling frequency [Hz]
    @param fMin the minimum frequency [Hz] that should be considered
    @param fMax the maximum frequency [Hz] that should be considered. If None
        (default), the search runs to the end of the spectrum.
    @param applyWindow if True, we'll apply a HANN window before
        calculating the FFT
    @param fftZeroPaddingFactor if greater than one, we'll append the
        appropriate number of zeros to the signal before calculating the FFT
    @return the dominant frequency [Hz], or numpy.nan when no maximum
        could be located
    """
    n = len(signal)
    # work on a copy so the caller's signal is never modified in place
    # (the window is applied with *=)
    signalTmp = copy.deepcopy(signal)
    if applyWindow:
        fftWindow = createLookupTable(len(signalTmp), LOOKUP_TABLE_HANN)
        signalTmp *= fftWindow
    if fftZeroPaddingFactor > 1:
        m = int(round(n * fftZeroPaddingFactor))
        signalTmp = numpy.append(signalTmp, numpy.zeros(m - n))
    # window already applied above, so applyWindow=False here
    spectrumX, spectrumY = calculateFFT(signalTmp, fs, len(signalTmp),
        applyWindow = False, convertToDb = True,
        spectrumType = AMPLITUDE_SPECTRUM)
    binWidth = spectrumX[1] - spectrumX[0]
    # convert the frequency limits to spectrum bin indices
    idx1 = 0
    if fMin > 0:
        idx1 = int(round(fMin / float(binWidth)))
    # idx2 == -1 means "search to the end of the spectrum". Guard against
    # fMax being None (the documented default): 'None > 0' raises a
    # TypeError on Python 3.
    idx2 = -1
    if fMax is not None and fMax > 0:
        idx2 = int(round(fMax / float(binWidth)))
    domFreq = numpy.nan
    try:
        domFreq, dummy = generalUtility.findArrayMaximum(spectrumY, idx1, idx2, doInterpolate = True)
        # findArrayMaximum returns a (fractional) bin index; scale to Hz
        domFreq *= binWidth
    except Exception:
        # best-effort: deliberately fall back to NaN when no maximum exists
        pass
    return domFreq
def check_title(file_path):
    """Report whether the text file at *file_path* starts with a title.

    *file_path* is the full path including file name and extension.

    Returns 'has title' when a title is detected in the first 300
    characters, 'no title' when not, and None when the file is not a
    UTF-8 text file.
    """
    if not tool.is_utf8_text_file(file_path):
        return None
    with open(file_path, 'r') as handle:
        # only the leading 300 characters are relevant for title detection
        head = handle.read()[:300]
    return 'has title' if tool.has_title(head) else 'no title'
from typing import Tuple
import torch
def permute_adjacency_twin(t1,t2) -> Tuple[torch.Tensor,torch.Tensor]:
    """Relabel the nodes of two (n, n) adjacency matrices consistently.

    Draws one random permutation and applies it to the rows and columns of
    both matrices, which is equivalent to renaming the graph's nodes the
    same way in each.
    """
    num_nodes = t1.shape[0]
    order = torch.randperm(num_nodes)
    # index rows first, then columns, with the same permutation
    permuted_first = t1[order][:, order]
    permuted_second = t2[order][:, order]
    return permuted_first, permuted_second
from collections import defaultdict,deque
def rad_extract(eventfiles,center,radius_function,return_cols=['PULSE_PHASE'],cuts=None,apply_GTI=True,theta_cut=66.4,zenith_cut=105,return_indices=False):
    """ Extract events with a radial cut.
        Return specified columns and perform additional boolean cuts.
        Return is in form of a dictionary whose keys are column names
        (and 'DIFFERENCES') and values are numpy arrays with the column
        values.  These will have been concatenated if there are multiple
        FT1 files.
        ========= =======================================================
        Argument  Description
        ========= =======================================================
        eventfiles -- a list of FT1 filenames
        center     -- a SkyDir giving the center of the radial cut
        radius_function -- can be either a float specifying a cookie cutter
                      radial cut, or a function taking as arguments the energy
                      and event_class and specifying the radius in degrees, e.g.
                      def radius(energy,event_class):
                          return numpy.where(event_class,2,1)*(energy/1000)**-0.75
        ========= =======================================================
        Keyword   Description
        ========= =======================================================
        return_cols ['RA','DEC','ENERGY','EVENT_CLASS','PULSE_PHASE'] -
                    a list of FT1 column names to return
        cuts        None - an optional list of boolean cuts to apply,
                    e.g., ['ENERGY > 100']
                    NB -- cuts not yet implemented!!
        no_cuts     [False] do not apply default zenith and incidence angle cuts
        apply_GTI   [True] accept or reject an event based on GTI if True;
                    else ignore GTI
        return_indices [False] if True, return an array giving the index in the
                    original file of each event; obviously only useful in the
                    case of a single event file
        ========= =======================================================
    """
    # Work on a private copy: the code below may append 'EVENT_INDICES' to
    # return_cols, which previously mutated the (mutable) default argument
    # and any list passed in by the caller.
    return_cols = list(return_cols)
    if not hasattr(radius_function,'__call__'):
        # plain float -> constant ("cookie cutter") radius
        simple_scalar = True
        rval = radius_function
        radius_function = lambda e,event_class: rval
    else:
        simple_scalar = False
    eventfiles = __FITS_parse__(eventfiles)
    coldict = defaultdict(deque)
    cols = {}
    # columns needed for the default cuts; dropped later unless requested
    cut_cols = ['ZENITH_ANGLE','THETA','TIME']
    keys = list(set(['RA','DEC','ENERGY','CONVERSION_TYPE']+cut_cols+return_cols))
    accepted = 0
    total = 0
    for eventfile in eventfiles:
        #e = pf.open(eventfile,memmap=1)
        #nrows = e[1].data.shape[0]
        #e.close()
        nrows = pyfits.getheader(eventfile,'EVENTS')['NAXIS2']
        for key in keys:
            cols[key] = np.empty(nrows,dtype=float)
            PythonUtilities.get_float_col(cols[key],eventfile,'EVENTS',key)
        rad = radius_function(cols['ENERGY'],cols['CONVERSION_TYPE'])
        # cheap trapezoidal pre-selection, then zenith/incidence cuts
        tmask = trap_mask(cols['RA'],cols['DEC'],center,rad)
        tmask &= (cols['ZENITH_ANGLE'] < zenith_cut) & (cols['THETA'] < theta_cut)
        if apply_GTI:
            tmask &= get_gti_mask(eventfile,cols['TIME'])
            print ('GTI will remove %d of %d photons.'%((~tmask).sum(),len(tmask)))
        # exact radial cut on the pre-selected events
        if simple_scalar:
            rmask,diffs = rad_mask(cols['RA'][tmask],cols['DEC'][tmask],center,rad)
        else:
            rmask,diffs = rad_mask(cols['RA'][tmask],cols['DEC'][tmask],center,rad[tmask])
        for key in keys:
            coldict[key].append(cols[key][tmask][rmask])
        if return_indices:
            if 'EVENT_INDICES' not in return_cols:
                return_cols.append('EVENT_INDICES')
            coldict['EVENT_INDICES'].append(np.arange(len(tmask))[tmask][rmask])
        coldict['DIFFERENCES'].append(diffs)
        accepted += tmask.sum()
        total += len(tmask)
    # concatenate the per-file deques; drop cut-only columns unless requested
    for key in coldict.keys():
        if (key in cut_cols) and not (key in return_cols):
            cols.pop(key)
            continue
        cols[key] = np.concatenate([x for x in coldict[key]])
        if key in INT_TYPES: cols[key] = cols[key].astype(int)
    print ('Cuts removed %d of %d photons.'%(total-accepted,total))
    return cols
import distutils
def strtobool(value):
    """Cast a string to a bool-ish value.

    Mirrors the retired ``distutils.util.strtobool`` contract (distutils
    was removed in Python 3.12, and ``import distutils`` alone never
    guaranteed the ``util`` submodule was loaded):

    * ``None`` passes through as ``None``.
    * A ``bool`` passes through unchanged.
    * Truthy strings (``y/yes/t/true/on/1``, case-insensitive) return 1.
    * Falsy strings (``n/no/f/false/off/0``) return 0.

    Raises:
        ValueError: For any other value.
    """
    if value is None:
        return None
    if type(value) is bool:
        return value
    truthy = {"y", "yes", "t", "true", "on", "1"}
    falsy = {"n", "no", "f", "false", "off", "0"}
    normalized = str(value).strip().lower()
    if normalized in truthy:
        return 1
    if normalized in falsy:
        return 0
    raise ValueError("invalid truth value %r" % (value,))
import re
def list_to_exp(str_list, term_padding_exp=r'\b', compile=True):
    r"""Build a regular expression matching any string of *str_list*.

    The alternation is wrapped on both sides with *term_padding_exp*
    (default r'\b', forcing full-word matches) and the candidates are
    ordered longest-first so that no substring shadows a superstring.
    Returns a compiled pattern when *compile* is truthy, else the raw
    expression string.

    NOTE(review): entries are not re.escape()'d, so regex metacharacters
    in them are interpreted — confirm callers rely on that.
    """
    ordered = util_ulist.sort_as(str_list, [len(s) for s in str_list], reverse=True)
    pattern = term_padding_exp + '(' + '|'.join(ordered) + ')' + term_padding_exp
    return re.compile(pattern) if compile else pattern
def prepare_ddp_loader(loader: DataLoader, num_processes: int, process_index: int) -> DataLoader:
    """
    Transfers loader to distributed mode. Experimental feature.
    Args:
        loader: pytorch dataloder
        num_processes (:obj:`int`, `optional`, defaults to 1):
            The number of processes running concurrently.
        process_index (:obj:`int`, `optional`, defaults to 0):
            The index of the current process.
    Returns:
        DataLoader: pytorch dataloder with distributed batch sampler.
    """
    dataset = loader.dataset
    # Iterable datasets cannot take a batch_sampler (DataLoader would have
    # built a default one), so only map-style datasets get sharded.
    if isinstance(dataset, IterableDataset):
        sharded_batch_sampler = None
    else:
        sharded_batch_sampler = BatchSamplerShard(
            loader.batch_sampler,
            num_processes=num_processes,
            process_index=process_index,
        )
    # All of these are superseded by the sharded batch sampler above.
    superseded = (
        "batch_size",
        "shuffle",
        "sampler",
        "batch_sampler",
        "drop_last",
        "generator",
    )
    loader_kwargs = {}
    for name in _PYTORCH_DATALOADER_KWARGS:
        if name in superseded:
            continue
        loader_kwargs[name] = getattr(loader, name, _PYTORCH_DATALOADER_KWARGS[name])
    if sharded_batch_sampler is None:
        # With no batch_sampler, batch_size/drop_last must be given explicitly.
        loader_kwargs["drop_last"] = loader.drop_last
        loader_kwargs["batch_size"] = loader.batch_size
    return DataLoader(dataset=dataset, batch_sampler=sharded_batch_sampler, **loader_kwargs)
import logging
def __validate_exchange(value: str) -> str:
    """
    Check to see if passed string is in the list of possible Exchanges.
    :param value: Exchange name.
    :return: Passed value when valid; otherwise logs an error and
        returns None.
    """
    valid_values = EXCHANGE_VALUES
    if value in valid_values:
        return value
    # Lazy %-style arguments: the message is only formatted if the record
    # is actually emitted (f-strings always pay the formatting cost).
    logging.error(
        "Invalid exchange value: %s. Valid options: %s", value, valid_values
    )
    return None
def main(content, title="", classes=()):
    """Generate a 'Material for MkDocs' admonition.

    Args:
        content: Markdown source for the admonition body; rendered to HTML.
        title: Text for the admonition title line.
        classes: Iterable of CSS class names appended to the 'admonition'
            div (default is an empty tuple — an immutable default avoids
            the shared-mutable-default pitfall of the former ``[]``).

    Returns:
        The admonition as an HTML string.
    """
    md = markdown.markdown(content)
    return '<div class="admonition {0}">\n'.format(" ".join(classes)) + \
           '  <p class="admonition-title">{0}</p>\n'.format(title) + \
           '  <p>{0}</p>\n'.format(md) + \
           '</div>'
def get_bprop_matrix_set_diag(self):
    """Generate bprop for MatrixSetDiag"""
    get_dtype = P.DType()
    def bprop(x, y, z, out, dout):
        # dx: the incoming gradient with its diagonal zeroed (those entries
        # were overwritten by y in the forward pass, so x gets no gradient
        # there). dy: the diagonal of the incoming gradient. dz: zeros.
        input_shape = F.shape(x)
        batch_shape = input_shape[:-2]
        matrix_shape = input_shape[-2:]
        # the diagonal length is the smaller of the last two dimensions
        diag_shape = batch_shape + (_get_min(matrix_shape),)
        grad_shape = F.shape(dout)
        grad_dtype = get_dtype(dout)
        # assist tensor required by the inner MatrixSetDiag/MatrixDiagPart ops
        assist = _get_matrix_diag_part_assist(grad_shape, grad_dtype)
        dx = inner.MatrixSetDiag()(dout, P.Zeros()(diag_shape, grad_dtype), assist)
        dy = inner.MatrixDiagPart()(dout, assist)
        dz = zeros_like(z)
        return dx, dy, dz
    return bprop
from yt import load_particles
def fake_sph_grid_ds(hsml_factor=1.0):
    """Return an in-memory SPH dataset useful for testing.

    The dataset holds 27 particles placed uniformly on a 3x3x3 grid, with
    the bottom-left corner at (0.5, 0.5, 0.5) and the top-right corner at
    (2.5, 2.5, 2.5). All particles have unit mass, density and temperature,
    zero velocity, and non-overlapping smoothing regions of radius
    0.05 * hsml_factor.
    """
    npart = 27
    # Cell indices shifted to cell centers; axis 0 of mgrid varies slowest
    # when raveled (C order), matching a nested i/j/k loop with k innermost.
    coords = np.mgrid[0:3, 0:3, 0:3].astype("float64") + 0.5
    xs = coords[0].ravel()
    ys = coords[1].ravel()
    zs = coords[2].ravel()
    data = {
        "particle_position_x": (xs, "cm"),
        "particle_position_y": (ys, "cm"),
        "particle_position_z": (zs, "cm"),
        "particle_mass": (np.ones(npart), "g"),
        "particle_velocity_x": (np.zeros(npart), "cm/s"),
        "particle_velocity_y": (np.zeros(npart), "cm/s"),
        "particle_velocity_z": (np.zeros(npart), "cm/s"),
        "smoothing_length": (0.05 * np.ones(npart) * hsml_factor, "cm"),
        "density": (np.ones(npart), "g/cm**3"),
        "temperature": (np.ones(npart), "K"),
    }
    bbox = np.array([[0, 3], [0, 3], [0, 3]])
    return load_particles(data=data, length_unit=1.0, bbox=bbox)
async def _async_get_image_sessions(device: Device) -> dict[str, ImageSession]:
    """Fetch the device's image event sessions, keyed by event token."""
    sessions: dict[str, ImageSession] = {}
    for session in await device.event_media_manager.async_image_sessions():
        sessions[session.event_token] = session
    return sessions
def make_element_weight_parser(weight_column):
    """Build a CSV-row parser that extracts an (element name, weight) pair.

    Parameterizing on *weight_column* lets the same parser shape be reused
    for data generated by different analysis result types. The returned
    callable reads the name from column 0 and the weight (cast to float)
    from the configured column.
    """
    def parse_element_weight(csv_row):
        # column 0: element name; weight_column: numeric weight as text
        return csv_row[0], float(csv_row[weight_column])
    return parse_element_weight
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.