content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
from datetime import datetime, timedelta
def _read_housekeeping(fname):
    """Read a housekeeping file (csv format) into a POPSHouseKeeping instance.

    :param fname: path to the csv file; its ``Time_s`` column holds seconds
        since 1904-01-01 — presumably the LabVIEW epoch, TODO confirm.
    :return: POPSHouseKeeping wrapping the parsed frame (indexed by UTC time),
        or False when pandas cannot parse the file.
    """
    print('reading %s' % fname)
    try:
        # NOTE(review): error_bad_lines is deprecated in modern pandas
        # (replaced by on_bad_lines='skip'); kept for the pinned version.
        df = pd.read_csv(fname, error_bad_lines=False)
    except ValueError:
        return False
    # Bug fix: this module does `from datetime import datetime`, so the
    # original `datetime.datetime.strptime` / `datetime.timedelta` raised
    # AttributeError at runtime.
    # Offset between the Unix epoch (1970) and the instrument epoch (1904).
    dt = datetime.strptime('19700101', "%Y%m%d") - datetime.strptime('19040101', "%Y%m%d")
    dts = dt.total_seconds()
    # todo: (low) what is that delta t for, looks fishi (Hagen)
    dtsPlus = timedelta(hours=0).total_seconds()
    df.index = pd.Series(pd.to_datetime(df.Time_s - dts - dtsPlus, unit='s'), name='Time_UTC')
    return POPSHouseKeeping(df)
import math
def get_line_equation(segment_point0, segment_point1):
    """
    Return the normalized coefficients (A, B, C) of the line
    Ax + By + C = 0 through two points.

    :param segment_point0: Point with ``px``/``py`` attributes
    :param segment_point1: Point with ``px``/``py`` attributes
    :return: tuple (A, B, C), scaled so that A**2 + B**2 == 1
    """
    p0x, p0y = segment_point0.px, segment_point0.py
    p1x, p1y = segment_point1.px, segment_point1.py
    # Coefficients of the (unnormalized) line through the two points.
    coef_a = p1y - p0y
    coef_b = p0x - p1x
    coef_c = p1x * p0y - p1y * p0x
    # Normalize so the (A, B) normal vector has unit length.
    norm = math.sqrt(coef_a * coef_a + coef_b * coef_b)
    return coef_a / norm, coef_b / norm, coef_c / norm
def keep_lesser_x0_y0_zbt0_pair_in_dict(p, p1, p2):
    """Update p's constants with x0, y0, zbt0 from whichever of p1/p2 has
    the smaller x0, i.e. the left-most end of the combined plot.
    :param p: plot to combine p1 and p2 into
    :param p1: 1st plot to combine
    :param p2: 2nd plot to combine
    :return: p, after its const_dict has been updated
    """
    const_dict = p[3]
    cd1, cd2 = p1[3], p2[3]
    if 'x0' in cd1 and 'x0' in cd2:
        # Take constants from the plot whose x0 is left-most.
        source = cd2 if cd2['x0'] < cd1['x0'] else cd1
        const_dict['x0'] = source['x0']
        const_dict['y0'] = source.get('y0')
        const_dict['zbt0'] = source.get('zbt0')
    return p[0:3] + (const_dict,)
from typing import List
from typing import Dict
from typing import Optional
def build_csv_from_cellset_dict(
        row_dimensions: List[str],
        column_dimensions: List[str],
        raw_cellset_as_dict: Dict,
        top: Optional[int] = None,
        line_separator: str = "\r\n",
        value_separator: str = ",",
        include_attributes: bool = False) -> str:
    """ Transform a raw cellset dict into CSV text (header line plus one
    record per cell: row members, column members, then the cell value).

    :param column_dimensions: dimension names spread across the columns
    :param row_dimensions: dimension names spread across the rows
    :param raw_cellset_as_dict: cellset as returned by the REST API
    :param top: Maximum Number of cells
    :param line_separator: separator between CSV records
    :param value_separator: separator between CSV fields
    :param include_attributes: include attribute columns
    :return: CSV text; "" for an empty cellset
    """
    cells = raw_cellset_as_dict['Cells']
    # empty cellsets produce "" in order to be compliant with previous implementation that used `/Content` API endpoint
    if len(cells) == 0:
        return ""
    lines = list()
    column_axis, row_axis, _ = extract_axes_from_cellset(raw_cellset_as_dict=raw_cellset_as_dict)
    # Header record comes first.
    headers = _build_headers_for_csv(row_axis, column_axis, row_dimensions, column_dimensions, include_attributes)
    lines.append(value_separator.join(headers))
    for ordinal, cell in enumerate(cells[:top or len(cells)]):
        # if skip is used in execution we must use the original ordinal from the cell, if not we can simply enumerate
        ordinal = cell.get("Ordinal", ordinal)
        line = []
        if column_axis and row_axis:
            # Map the flat cell ordinal back onto (row, column) tuple indices.
            index_rows = ordinal // column_axis['Cardinality'] % row_axis['Cardinality']
            index_columns = ordinal % column_axis['Cardinality']
            line_items = _build_csv_line_items_from_axis_tuple(
                members=row_axis['Tuples'][index_rows]['Members'],
                include_attributes=include_attributes)
            line.extend(line_items)
            line_items = _build_csv_line_items_from_axis_tuple(
                members=column_axis['Tuples'][index_columns]['Members'],
                include_attributes=include_attributes)
            line.extend(line_items)
        elif column_axis:
            # Single-axis cellset: the ordinal maps directly onto the column axis.
            index_rows = ordinal % column_axis['Cardinality']
            line_items = _build_csv_line_items_from_axis_tuple(
                members=column_axis['Tuples'][index_rows]['Members'],
                include_attributes=include_attributes)
            line.extend(line_items)
        # Empty/None cell values serialize as "".
        line.append(str(cell["Value"] or ""))
        lines.append(value_separator.join(line))
    return line_separator.join(lines)
def get_stages_from_api(**kwargs):
    """
    API method, called by the appConfig.instantiate method.

    Retrieves 'stages' via utils.request and returns the parsed response.
    """
    response = utils.request(utils.RETRIEVE, 'stages', kwargs)
    return utils.parse(response)
import numpy
import numba
def fill_str_array(data, size, push_back=True):
"""
Fill StringArrayType array with given values to reach the size
"""
string_array_size = len(data)
nan_array_size = size - string_array_size
num_chars = sdc.str_arr_ext.num_total_chars(data)
result_data = sdc.str_arr_ext.pre_alloc_string_array(size, num_chars)
# Keep NaN values of initial array
arr_is_na_mask = numpy.array([sdc.hiframes.api.isna(data, i) for i in range(string_array_size)])
data_str_list = sdc.str_arr_ext.to_string_list(data)
nan_list = [''] * nan_array_size
result_list = data_str_list + nan_list if push_back else nan_list + data_str_list
cp_str_list_to_array(result_data, result_list)
# Batch=64 iteration to avoid threads competition
batch_size = 64
if push_back:
for i in numba.prange(size//batch_size + 1):
for j in range(i*batch_size, min((i+1)*batch_size, size)):
if j < string_array_size:
if arr_is_na_mask[j]:
str_arr_set_na(result_data, j)
else:
str_arr_set_na(result_data, j)
else:
for i in numba.prange(size//batch_size + 1):
for j in range(i*batch_size, min((i+1)*batch_size, size)):
if j < nan_array_size:
str_arr_set_na(result_data, j)
else:
str_arr_j = j - nan_array_size
if arr_is_na_mask[str_arr_j]:
str_arr_set_na(result_data, j)
return result_data | 5dc586a7334bdae73145574fa9afb2f939f1808e | 31,305 |
def _visible_fields(user_profile, user, configuration=None):
    """
    Return the list of profile fields visible under the user's preferences.
    :param user_profile: User profile object
    :param user: User object
    :param configuration: A visibility configuration dictionary.
    :return: whitelist List of fields to be shown
    """
    configuration = configuration or settings.ACCOUNT_VISIBILITY_CONFIGURATION
    profile_visibility = get_profile_visibility(user_profile, user, configuration)
    if profile_visibility == ALL_USERS_VISIBILITY:
        return configuration.get('bulk_shareable_fields')
    if profile_visibility == CUSTOM_VISIBILITY:
        return _visible_fields_from_custom_preferences(user, configuration)
    # Any other visibility level falls back to the public whitelist.
    return configuration.get('public_fields')
def open_dataframe():
    """
    Open the achievements dataframe if it exists, or offer to create a new one.

    :return: pandas DataFrame, or None if the file is missing and the user
        declines to create it.
    """
    print("Checking for presence of data file......")
    try:
        datafile = './data/data.csv'
        dataframe = pd.read_csv(datafile)
        print("File found.... loading into dataframe")
        return dataframe
    except IOError:
        if input("File not found: Create new file? (Y/N)").lower() == 'y':
            # Seed the new file with a single placeholder record.
            initial = [Achievement('category field', 'team involved',
                                   'description of achievement', 'context of the report')]
            dataframe = pd.DataFrame([vars(t) for t in initial])
            dataframe.to_csv('./data/data.csv', index=False)
            return dataframe
        # Bug fix: the original fell through to `return dataframe` with the
        # name undefined (NameError); declining now returns None explicitly.
        return None
import logging
def parse_manifest_v3(manif, manifest_info, opts):
    """
    Parse a IIIF Presentation API v3 manifest for annotations.

    In ``opts['mode'] == 'read'`` the manifest's canvas ids and annotation
    pages are collected into a fresh manifest_info dict; in ``'insert'``
    mode, annotation lists from ``manifest_info`` are written into the
    manifest, either inline or as referenced AnnotationPages depending on
    ``opts['reference_mode']``.

    :param manif: decoded manifest JSON (dict)
    :param manifest_info: dict with an 'annotations' entry; in insert mode
        it also supplies the target manifest 'id'
    :param opts: options dict; at least 'mode', plus 'reference_mode' when
        inserting
    :return: manifest_info (rebuilt in read mode, updated in insert mode)
    :raises ValueError: when the manifest or one of its canvases is malformed
    """
    annotation_info = manifest_info['annotations']
    canvas_ids = set()
    annolist_idx = 0
    # Validate required top-level manifest fields.
    if manif.get('type', None) != 'Manifest':
        raise ValueError("Manifest not of type Manifest")
    manif_id = manif.get('id', None)
    logging.debug(f"manifest id: {manif_id}")
    if not manif_id:
        raise ValueError("Manifest has no id")
    manif_label = manif.get('label', None)
    logging.debug(f"manifest label: {manif_label}")
    if not manif_label:
        raise ValueError("Manifest has no label")
    manif_items = manif.get('items', None)
    if manif_items is None or not isinstance(manif_items, list):
        raise ValueError("Manifest has no items")
    logging.debug(f"manifest has {len(manif_items)} items")
    # Walk every canvas, validating it before processing.
    for canvas in manif_items:
        canvas_id = canvas.get('id', None)
        logging.debug(f"canvas id: {canvas_id}")
        if not canvas_id:
            raise ValueError("Canvas has no id")
        if canvas.get('type', None) != 'Canvas':
            raise ValueError(f"Canvas {canvas_id} not of type Canvas")
        canvas_label = canvas.get('label', None)
        logging.debug(f"canvas label: {canvas_label}")
        if not canvas_label:
            raise ValueError(f"Canvas {canvas_id} has no label")
        canvas_items = canvas.get('items', None)
        if canvas_items is None or not isinstance(canvas_items, list):
            raise ValueError(f"Canvas {canvas_id} has no items")
        canvas_ids.add(canvas_id)
        canvas_annos = canvas.get('annotations', None)
        if opts['mode'] == 'read':
            #
            # read mode: record canvas id and annotations
            #
            canvas_ids.add(canvas_id)
            if canvas_annos is None or not isinstance(canvas_annos, list):
                # no annotationpages
                continue
            for annolist in canvas_annos:
                parse_annotationlist_v3(annolist, annotation_info)
        elif opts['mode'] == 'insert':
            #
            # insert mode
            #
            # Only canvases that have annotations targeting them are touched.
            annotations = annotation_info['by_target'].get(canvas_id, None)
            if annotations is None:
                continue
            annolist_idx += 1
            annolist_id, annolist_fn = create_annotationlist_id(manifest_info, canvas_id, annolist_idx, opts)
            if opts['reference_mode'] == 'inline':
                # Embed the whole AnnotationPage in the canvas.
                annolist = create_annotationlist_v3(manifest_info, annolist_id, annotations, add_context=False)
                canvas['annotations'] = [annolist]
            else:
                # Write the AnnotationPage to its own file and reference it.
                annolist = create_annotationlist_v3(manifest_info, annolist_id, annotations, add_context=True)
                save_json(annolist, annolist_fn, opts)
                canvas['annotations'] = [{
                    'id': annolist_id,
                    'type': 'AnnotationPage'
                }]
    if opts['mode'] == 'read':
        # Read mode returns a freshly assembled summary structure.
        manifest_info = {
            'manifest_version': 3,
            'id': manif_id,
            'label': manif_label,
            'canvas_ids': canvas_ids,
            'annotations': annotation_info,
            'manifest': manif
        }
    elif opts['mode'] == 'insert':
        # Insert mode rewrites the manifest id and keeps the modified manifest.
        manif['id'] = manifest_info['id']
        manifest_info['manifest'] = manif
    return manifest_info
import fnmatch
def zipglob(sfiles, namelist, path):
    """Return the subset of namelist matched by any sfile's filename pattern.

    Side effect: each sfile gets a ``zfiles`` attribute holding its own
    matches within the zip's namelist.
    """
    matched = []
    for sfile in sfiles:
        # Filter the zip's namelist by this sfile's filename pattern.
        sfile.zfiles = fnmatch.filter(namelist, join(path, sfile.filename))
        matched.extend(sfile.zfiles)
    return matched
def to_string(result: ValidationResult, name_col_width: int) -> str:
    """Format a validation result for printing."""
    name = state_name(result.state)
    if result.failed:
        # Collapse multi-line error details onto one comma-separated line.
        details = ", ".join(result.error_details.strip().split("\n"))
        return f"❌ {name} {details}"
    reward = result.state.reward
    if reward is None:
        return f"✅ {name}"
    # Pad the name so reward columns line up across results.
    return f"✅ {name:<{name_col_width}} {reward:9.4f}"
def two_sum_v1(array, target):
    """
    For each element, find the complementary value and check whether it
    occurs in the list.

    :param array: list of numbers to search
    :param target: desired sum of two elements
    :return: [i, j] for the first pair found with array[i] + array[j] ==
        target, or None when no such pair exists.

    Complexity: O(n²) -- `in` and `list.index` are each O(n) per element.
    https://stackoverflow.com/questions/13884177/complexity-of-in-operator-in-python
    """
    for indice, value in enumerate(array):
        second_value = target - value
        if second_value in array:
            # NOTE(review): when target == 2 * value this can pair an
            # element with itself (same index twice) -- confirm intent.
            return [indice, array.index(second_value)]
    # Bug fix: the original had `else: return None` inside the loop, so it
    # gave up after checking only the first element. Exhaust the list first.
    return None
def worker_numric_avg(fleet, value, env="mpi"):
    """Average ``value`` across all workers: the cross-worker sum from
    ``worker_numric_sum`` divided by the fleet's worker count."""
    total = worker_numric_sum(fleet, value, env)
    return total / fleet.worker_num()
from typing import List
from typing import Tuple
from typing import Dict
def build_graph(
    nodes: List[Tuple[str, Dict]], edges: List[Tuple[str, str, Dict]]
) -> nx.DiGraph:
    """Builds the graph using networkx
    Arguments
    ---------
    nodes : list
        A list of node tuples
    edges : list
        A list of edge tuples
    Returns
    -------
    networkx.DiGraph
        A directed graph representing the reference energy system
    """
    digraph = nx.DiGraph()
    digraph.add_nodes_from(nodes)
    digraph.add_edges_from(edges)
    return digraph
def table_exists_sql(any_schema=False):
    """SQL to check for existence of a table. Note that for temp tables, any_schema should be set to True."""
    # Restrict to the current schema unless the caller asked for any schema.
    schema_filter_sql = sql.SQL('') if any_schema else sql.SQL('AND schemaname = current_schema()')
    return sql.SQL("""SELECT EXISTS (SELECT 1
FROM pg_tables
WHERE tablename = %s
{schema_filter_sql})""").format(schema_filter_sql=schema_filter_sql)
def _process_json(data):
    """
    Build and return a list of GradPetition objects from decoded JSON items.
    """
    petitions = []
    for item in data:
        petition = GradPetition()
        petition.description = item.get('description')
        petition.submit_date = datetime_from_string(item.get('submitDate'))
        decision_date = item.get('decisionDate')
        # A petition may still be pending, in which case there is no decision date.
        petition.decision_date = (
            datetime_from_string(decision_date) if decision_date is not None else None)
        dept = item.get('deptRecommend')
        if dept is not None and len(dept) > 0:
            petition.dept_recommend = dept.lower()
        gs_decision = item.get('gradSchoolDecision')
        if gs_decision is not None and len(gs_decision) > 0:
            petition.gradschool_decision = gs_decision.lower()
        petitions.append(petition)
    return petitions
def PaddingMask(pad=0):
    """Returns a layer that maps integer sequences to padding masks.
    The layer expects as input a batch of integer sequences. The layer output is
    an N-D array that marks for each sequence position whether the integer (e.g.,
    a token ID) in that position represents padding -- value ``pad`` -- versus
    text/content -- all other values. The padding mask shape is
    (batch_size, 1, 1, encoder_sequence_length), such that axis 1 will broadcast
    to cover any number of attention heads and axis 2 will broadcast to cover
    decoder sequence positions.
    Args:
      pad: Integer that represents padding rather than a token/content ID.
    """
    def padding_mask(x):
        if len(x.shape) != 2:
            raise ValueError(
                f'Input to PaddingMask must be a 2-D array with shape '
                f'(batch_size, sequence_length); instead got shape {x.shape}.')
        batch_size, sequence_length = x.shape
        # True wherever the position holds real content, False where it is pad.
        content_positions = (x != pad)
        return content_positions.reshape((batch_size, 1, 1, sequence_length))
    return Fn(f'PaddingMask({pad})', padding_mask)
import yaml
def python_packages():
    """
    Read tests/input.yml and return its list of python-related packages.
    """
    with open(r"tests/input.yml") as file:
        config = yaml.load(file, Loader=yaml.FullLoader)
    return config["python_packages"]
import asyncio
def create_tcp_visonic_connection(address, port, protocol=VisonicProtocol, command_queue=None, event_callback=None, disconnect_callback=None, loop=None, excludes=None):
    """Create Visonic manager class, returns tcp transport coroutine."""
    # Bind configuration onto the protocol factory; fall back to the default
    # event loop when none was supplied.
    protocol_factory = partial(
        protocol,
        loop=loop if loop else asyncio.get_event_loop(),
        event_callback=event_callback,
        disconnect_callback=disconnect_callback,
        excludes=excludes,
        command_queue=command_queue,
        # ignore=ignore if ignore else [],
    )
    return loop.create_connection(protocol_factory, address, port)
def metric_wind_dict_to_beaufort(d):
    """
    Converts all the wind values in a dict from meters/sec
    to the corresponding Beaufort scale level (which is not an exact number but rather
    represents a range of wind speeds - see: https://en.wikipedia.org/wiki/Beaufort_scale).
    Conversion table: https://www.windfinder.com/wind/windspeed.htm
    :param d: the dictionary containing metric values
    :type d: dict
    :returns: a dict with the same keys as the input dict and values converted
        to Beaufort level
    """
    # Inclusive upper bound (m/s) of each Beaufort level 0..11; any speed
    # above the last bound is level 12 (hurricane force).
    upper_bounds = (0.2, 1.5, 3.3, 5.4, 7.9, 10.7,
                    13.8, 17.1, 20.7, 24.4, 28.4, 32.6)
    result = {}
    for key, value in d.items():
        if key == 'deg':
            # Wind direction is not a speed: copy it through unchanged.
            result[key] = value
            continue
        for level, bound in enumerate(upper_bounds):
            if value <= bound:
                result[key] = level
                break
        else:
            result[key] = 12
    return result
def add_favorite_clubs():
    """
    POST endpoint that adds favorite club(s) for student user. Ordering is preserved
    based on *when* they favorited.

    Reads the club link names from the validated request JSON, keeps only
    confirmed officer clubs, and returns the user's updated favorites list.
    """
    user = get_current_user()
    json = g.clean_json
    # Only confirmed clubs whose link name was named in the request qualify.
    new_fav_clubs_query = NewOfficerUser.objects \
        .filter(confirmed=True) \
        .filter(club__link_name__in=json['clubs']) \
        .only('club.link_name')
    potential_clubs = [club['club']['link_name'] for club in query_to_objects(new_fav_clubs_query)]
    # Append only clubs not already favorited, preserving request order.
    for club in potential_clubs:
        if club not in user.favorited_clubs:
            user.favorited_clubs += [club]
    user.save()
    return jsonify(_fetch_user_profile(user)['favorited_clubs'])
def death_rate_60():
    """
    Real Name: b'death rate 60'
    Original Eqn: b'Critical Cases 60*fraction of death 60/duration of treatment 60'
    Units: b'person/Day'
    Limits: (None, None)
    Type: component
    b''
    """
    # Deaths per day: critical cases weighted by the death fraction,
    # spread over the treatment duration.
    expected_deaths = critical_cases_60() * fraction_of_death_60()
    return expected_deaths / duration_of_treatment_60()
from datetime import datetime
def cast_vote(uid, target_type, pcid, value):
    """ Casts a vote in a post.
    `uid` is the id of the user casting the vote
    `target_type` is either `post` or `comment`
    `pcid` is either the pid or cid of the post/comment
    `value` is either `up` or `down`

    Returns a JSON response with the new score and whether a previous vote
    was undone, or a (JSON error, HTTP status) pair on failure.
    """
    # XXX: This function returns api3 objects
    try:
        user = User.get(User.uid == uid)
    except User.DoesNotExist:
        return jsonify(msg=_("Unknown error. User disappeared")), 403
    # Normalize the requested vote direction into +1 / -1.
    if value == "up" or value is True:
        voteValue = 1
    elif value == "down" or value is False:
        voteValue = -1
        # Downvoting requires a non-negative "given" balance.
        if user.given < 0:
            return jsonify(msg=_('Score balance is negative')), 403
    else:
        return jsonify(msg=_("Invalid vote value")), 400
    # Resolve the vote target and look up any prior vote by this user on it.
    if target_type == "post":
        target_model = SubPost
        try:
            target = SubPost.select(SubPost.uid, SubPost.score, SubPost.upvotes, SubPost.downvotes,
                                    SubPost.pid.alias('id'), SubPost.posted)
            target = target.where((SubPost.pid == pcid) & (SubPost.deleted == 0)).get()
        except SubPost.DoesNotExist:
            return jsonify(msg=_('Post does not exist')), 404
        if target.deleted:
            return jsonify(msg=_("You can't vote on deleted posts")), 400
        try:
            qvote = SubPostVote.select().where(SubPostVote.pid == pcid).where(SubPostVote.uid == uid).get()
        except SubPostVote.DoesNotExist:
            qvote = False
    elif target_type == "comment":
        target_model = SubPostComment
        try:
            target = SubPostComment.select(SubPostComment.uid, SubPost.sid, SubPostComment.pid, SubPostComment.status,
                                           SubPostComment.score,
                                           SubPostComment.upvotes, SubPostComment.downvotes,
                                           SubPostComment.cid.alias('id'), SubPostComment.time.alias('posted'))
            target = target.join(SubPost).where(SubPostComment.cid == pcid).where(SubPostComment.status.is_null(True))
            target = target.objects().get()
        except SubPostComment.DoesNotExist:
            return jsonify(msg=_('Comment does not exist')), 404
        if target.uid_id == user.uid:
            return jsonify(msg=_("You can't vote on your own comments")), 400
        if target.status:
            return jsonify(msg=_("You can't vote on deleted comments")), 400
        try:
            qvote = SubPostCommentVote.select().where(SubPostCommentVote.cid == pcid).where(
                SubPostCommentVote.uid == uid).get()
        except SubPostCommentVote.DoesNotExist:
            qvote = False
    else:
        return jsonify(msg=_("Invalid target")), 400
    # Reject votes from users banned on the target's sub.
    try:
        SubMetadata.get((SubMetadata.sid == target.sid) & (SubMetadata.key == "ban") & (SubMetadata.value == user.uid))
        return jsonify(msg=_('You are banned on this sub.')), 403
    except SubMetadata.DoesNotExist:
        pass
    # Voting closes 60 days after posting.
    if (datetime.utcnow() - target.posted.replace(tzinfo=None)) > timedelta(days=60):
        return jsonify(msg=_("Post is archived")), 400
    positive = True if voteValue == 1 else False
    undone = False
    if qvote:
        # The user already voted on this target.
        if bool(qvote.positive) == (True if voteValue == 1 else False):
            # Same direction again: undo the previous vote.
            qvote.delete_instance()
            if positive:
                upd_q = target_model.update(score=target_model.score - voteValue, upvotes=target_model.upvotes - 1)
            else:
                upd_q = target_model.update(score=target_model.score - voteValue, downvotes=target_model.downvotes - 1)
            new_score = -voteValue
            undone = True
            User.update(score=User.score - voteValue).where(User.uid == target.uid).execute()
            User.update(given=User.given - voteValue).where(User.uid == uid).execute()
        else:
            # Opposite direction: flip the stored vote (a 2-point swing).
            qvote.positive = positive
            qvote.save()
            if positive:
                upd_q = target_model.update(score=target_model.score + (voteValue * 2),
                                            upvotes=target_model.upvotes + 1, downvotes=target_model.downvotes - 1)
            else:
                upd_q = target_model.update(score=target_model.score + (voteValue * 2),
                                            upvotes=target_model.upvotes - 1, downvotes=target_model.downvotes + 1)
            new_score = (voteValue * 2)
            User.update(score=User.score + (voteValue * 2)).where(User.uid == target.uid).execute()
            User.update(given=User.given + voteValue).where(User.uid == uid).execute()
    else:  # First vote cast on post
        now = datetime.utcnow()
        if target_type == "post":
            sp_vote = SubPostVote.create(pid=pcid, uid=uid, positive=positive, datetime=now)
        else:
            sp_vote = SubPostCommentVote.create(cid=pcid, uid=uid, positive=positive, datetime=now)
        sp_vote.save()
        if positive:
            upd_q = target_model.update(score=target_model.score + voteValue, upvotes=target_model.upvotes + 1)
        else:
            upd_q = target_model.update(score=target_model.score + voteValue, downvotes=target_model.downvotes + 1)
        new_score = voteValue
        User.update(score=User.score + voteValue).where(User.uid == target.uid).execute()
        User.update(given=User.given + voteValue).where(User.uid == uid).execute()
    # Apply the deferred score update and notify live clients via socket.io.
    if target_type == "post":
        upd_q.where(SubPost.pid == target.id).execute()
        socketio.emit('threadscore', {'pid': target.id, 'score': target.score + new_score},
                      namespace='/snt', room=target.id)
        socketio.emit('yourvote',
                      {'pid': target.id, 'status': voteValue if not undone else 0, 'score': target.score + new_score},
                      namespace='/snt',
                      room='user' + uid)
    else:
        upd_q.where(SubPostComment.cid == target.id).execute()
        socketio.emit('uscore', {'score': target.uid.score + new_score},
                      namespace='/snt', room="user" + target.uid_id)
    return jsonify(score=target.score + new_score, rm=undone)
def convert_file(ifn: str, ofn: str, opts: Namespace) -> bool:
    """
    Convert ifn to ofn
    :param ifn: Name of file to convert
    :param ofn: Target file to convert to
    :param opts: Parameters
    :return: True if conversion is successful
    """
    # Skip files that were already converted in this run.
    if ifn in opts.converted_files:
        return True
    converted = to_r4(opts.in_json, opts.fhirserver, opts.addcontext)
    with open(ofn, "w") as outf:
        outf.write(as_json(converted))
    opts.converted_files.append(ifn)
    return True
def target_validation(target_name, action):
    """
    Given a Target name and an action, determine if the target_name is a valid
    target in target.json and if the target supports the action.
    Parameters
    ----------
    target_name : str
        Name of the Target.
    action : str
        Type of action the API is looking to perform on the Target
    Returns
    -------
    True if the validation passes.
    Raises a custom ValidationException error if validation fails.
    """
    targets = read_file('presqt/specs/targets.json', True)
    for target in targets:
        if target['name'] != target_name:
            continue
        if target["supported_actions"][action] is False:
            raise PresQTValidationError(
                "PresQT Error: '{}' does not support the action '{}'.".format(target_name, action),
                status.HTTP_400_BAD_REQUEST)
        return True, target['infinite_depth']
    # No entry matched the requested target name.
    raise PresQTValidationError(
        "PresQT Error: '{}' is not a valid Target name.".format(target_name), status.HTTP_404_NOT_FOUND)
import hashlib
def _writechecksummanifest(fn: str, prefixlen: int, bac: dict) -> tuple[str, int, int]:
"""Write an AIP "checksum manifest".
This writes an AIP "checksum manifest" to the given ``fn`` PDS filename, stripping ``prefixlen``
characters off paths, and using information from the ``bac``. Return a triple of the MD5
of the manifest, its size in bytes, and a count of the number of entries in it.
"""
hashish, size, count = hashlib.new("md5"), 0, 0
with open(fn, "wb") as o:
for files in bac.values():
for f in files:
entry = f"{f.md5}\t{f.url[prefixlen:]}\r\n".encode("utf-8")
o.write(entry)
hashish.update(entry)
size += len(entry)
count += 1
if count % _progresslogging == 0:
_logger.debug("⏲ Wrote %d entries into the checksum manifest %s", count, fn)
_logger.info("📄 Wrote AIP checksum manifest %s with %d entries", fn, count)
return hashish.hexdigest(), size, count | ac5b960e0afae10a15ffccdb5fbc3e6743ad085f | 31,325 |
def bartletts_formula(acf_array, n):
    """
    Standard error of an autocorrelation function via Bartlett's formula.
    Read more at: https://en.wikipedia.org/wiki/Correlogram
    :param acf_array: (array) Containing autocorrelation factors
    :param n: (int) Length of original time series sequence.
    """
    # Lag 0 is the series' autocorrelation with itself, so it is skipped.
    se = np.zeros(len(acf_array) - 1)
    se[0] = 1 / np.sqrt(n)
    # SE for lags > 1 accumulates the squared ACF values of the earlier lags.
    se[1:] = np.sqrt((1 + 2 * np.cumsum(acf_array[1:-1] ** 2)) / n)
    return se
def AddEnum(idx, name, flag):
    """
    Add a new enum type
    @param idx: serial number of the new enum.
             If another enum with the same serial number
             exists, then all enums with serial
             numbers >= the specified idx get their
             serial numbers incremented (in other words,
             the new enum is put in the middle of the list of enums).
             If idx >= GetEnumQty() or idx == -1
             then the new enum is created at the end of
             the list of enums.
    @param name: name of the enum.
    @param flag: flags for representation of numeric constants
                 in the definition of enum.
    @return: id of new enum or BADADDR
    """
    if idx < 0:
        # Interpret a negative serial as its unsigned equivalent.
        idx &= SIZE_MAX
    return idaapi.add_enum(idx, name, flag)
from typing import Optional
from typing import Tuple
from typing import Union
def get_turbine_shadow_polygons(blade_length: float,
                                blade_angle: Optional[float],
                                azi_ang: float,
                                elv_ang: float,
                                wind_dir,
                                tower_shadow: bool = True
                                ) -> Tuple[Union[None, Polygon, MultiPolygon], float]:
    """
    Calculates the (x, y) coordinates of a wind turbine's shadow, which depends on the sun azimuth and elevation.
    The dimensions of the tower and blades are in fixed ratios to the blade_length. The blade angle is the degrees from
    z-axis, whereas the wind direction is where the turbine is pointing towards (if None, north is assumed).
    In spherical coordinates, blade angle is phi and wind direction is theta, with 0 at north, moving clockwise.
    The output shadow polygon is relative to the turbine located at (0, 0).
    :param blade_length: meters, radius in spherical coords
    :param blade_angle: degrees from z-axis, or None to use ellipse as swept area
    :param azi_ang: azimuth degrees, clockwise from north as 0
    :param elv_ang: elevation degrees, from x-y plane as 0
    :param wind_dir: degrees from north, clockwise, determines which direction rotor is facing
    :param tower_shadow: if false, do not include the tower's shadow
    :return: (shadow polygon, shadow angle from north) if shadow exists, otherwise (None, None)
    """
    # "Shadow analysis of wind turbines for dual use of land for combined wind and solar photovoltaic power generation":
    # the average tower_height=2.5R; average tower_width=R/16; average blade_width=R/16
    blade_width = blade_length / 16
    tower_height = 2.5 * blade_length
    tower_width = blade_width
    # get shadow info
    sun_elv_rad = np.radians(elv_ang)
    tan_elv_inv = np.tan(sun_elv_rad) ** -1
    # Shadow points away from the sun: opposite the azimuth.
    shadow_ang = azi_ang - 180.0
    if not wind_dir:
        wind_dir = 0
    # Sun at or below the horizon casts no usable shadow.
    if elv_ang <= 0.0:
        shadow_ang = np.nan
    if shadow_ang < 0.0:
        shadow_ang += 360.0
    shadow_tower_length = tower_height * tan_elv_inv
    if shadow_tower_length <= 0.0:
        shadow_tower_length = np.nan
    theta = np.radians(shadow_ang)
    if np.isnan(shadow_tower_length) or np.isnan(theta):
        return None, None
    # Shadow extents of the blade tips above/below the hub.
    shadow_length_blade_top = (tower_height + blade_length) * tan_elv_inv
    shadow_length_blade_bottom = (tower_height - blade_length) * tan_elv_inv
    shadow_height_blade = shadow_length_blade_top - shadow_length_blade_bottom
    shadow_width_blade = blade_length * abs(np.cos(np.radians(shadow_ang - wind_dir)))
    # calculate the tower shadow position
    tower_dx = tower_width / 2.0
    tower_dy = shadow_tower_length
    theta_left = np.radians(shadow_ang - 90)
    theta_right = np.radians(shadow_ang + 90)
    cos_theta = np.cos(theta)
    sin_theta = np.sin(theta)
    # Four corners of the tower shadow rectangle (base at the turbine, top at
    # the end of the tower's shadow).
    base_left_x, base_left_y = tower_dx * np.sin(theta_left), tower_dx * np.cos(theta_left)
    base_rght_x, base_rght_y = tower_dx * np.sin(theta_right), tower_dx * np.cos(theta_right)
    top_rght_x, top_rght_y = tower_dy * sin_theta + base_rght_x, tower_dy * cos_theta + base_rght_y
    top_left_x, top_left_y = tower_dy * sin_theta + base_left_x, tower_dy * cos_theta + base_left_y
    if tower_shadow:
        turbine_shadow = Polygon(((base_left_x, base_left_y),
                                  (base_rght_x, base_rght_y),
                                  (top_rght_x, top_rght_y),
                                  (top_left_x, top_left_y)))
    else:
        turbine_shadow = Polygon()
    # calculate the blade shadows of swept area using parametric eq of general ellipse
    radius_x = shadow_width_blade
    radius_y = shadow_height_blade / 2
    center_x = tower_dy * sin_theta
    center_y = tower_dy * cos_theta
    rot_ang = 360 - shadow_ang + 90
    rotation_theta = np.radians(rot_ang)
    if blade_angle is None:
        # No explicit blade position: approximate the swept disc by an
        # ellipse sampled at 50 points.
        degs = np.linspace(0, 2 * np.pi, 50)
        x, y = blade_pos_of_rotated_ellipse(radius_y, radius_x, rotation_theta, degs, center_x, center_y)
        turbine_shadow = cascaded_union([turbine_shadow, Polygon(zip(x, y))])
    else:
        # Three blades, 120 degrees apart, each cast as a thin quadrilateral
        # from the hub shadow to the blade-tip shadow.
        turbine_blade_angles = (blade_angle, blade_angle + 120, blade_angle - 120)
        for blade_angle in turbine_blade_angles:
            blade_theta = np.radians(blade_angle - 90)
            x, y = blade_pos_of_rotated_ellipse(radius_y, radius_x, rotation_theta, blade_theta, center_x, center_y)
            blade_1_dr = np.radians(blade_angle + 90)
            blade_2_dr = np.radians(blade_angle - 90)
            blade_tip_left_x, blade_tip_left_y = tower_dx * np.cos(blade_1_dr) + center_x, \
                                                 tower_dx * np.sin(blade_1_dr) + center_y
            blade_tip_rght_x, blade_tip_rght_y = tower_dx * np.cos(blade_2_dr) + center_x, \
                                                 tower_dx * np.sin(blade_2_dr) + center_y
            blade_base_rght_x, blade_base_rght_y = tower_dx * np.cos(blade_2_dr) + x, \
                                                   tower_dx * np.sin(blade_2_dr) + y
            blade_base_left_x, blade_base_left_y = tower_dx * np.cos(blade_1_dr) + x, \
                                                   tower_dx * np.sin(blade_1_dr) + y
            turbine_shadow = cascaded_union([turbine_shadow, Polygon(((blade_tip_left_x, blade_tip_left_y),
                                                                      (blade_tip_rght_x, blade_tip_rght_y),
                                                                      (blade_base_rght_x, blade_base_rght_y),
                                                                      (blade_base_left_x, blade_base_left_y)))])
    return turbine_shadow, shadow_ang
def str_igrep(S, strs):
    """Return the indices of the strings in *strs* that contain the
    substring *S*.

    :param S: substring to search for
    :param strs: iterable of strings
    :return: list of indices i such that S occurs in strs[i]
    """
    # `S in s` is the idiomatic (and faster) form of `s.find(S) >= 0`;
    # dropped the commented-out Python 2 (xrange) variant.
    return [i for i, s in enumerate(strs) if S in s]
def random_laplace(shape, loc=0.0, scale=1.0, dtype=tf.float32, seed=None):
    """
    Sample from the Laplace distribution (not included in core TensorFlow)
    as the difference of two exponential draws.
    """
    exp_loc = random_exponential(shape, loc, dtype=dtype, seed=seed)
    exp_scale = random_exponential(shape, scale, dtype=dtype, seed=seed)
    return exp_loc - exp_scale
import math
def resample_image(img_in, width_in, height_in, width_out, interpolation_method="bilinear"):
    """
    Resample (i.e., interpolate) a row-major flat image to a new width,
    preserving the aspect ratio.

    :param img_in: flat (row-major) pixel list, height_in * width_in long
    :param width_in: input width in pixels
    :param height_in: input height in pixels
    :param width_out: desired output width in pixels
    :param interpolation_method: "nearest_neighbor" or "bilinear"
    :return: (resampled flat image, new height)
    :raises ValueError: if interpolation_method is not recognized
    """
    img_out = []
    scale = float(width_out) / float(width_in)
    scale_inv = 1.0 / scale
    # Output height follows from the width scale so the aspect ratio is kept.
    height_out = int(height_in * scale)
    if interpolation_method == "nearest_neighbor":
        for ro in range(0, height_out):  # fixed: xrange is Python 2 only
            for co in range(0, width_out):
                ri = int(round(float(ro) * scale_inv))
                ci = int(round(float(co) * scale_inv))
                px_nn = img_in[ri * width_in + ci]
                img_out.append(px_nn)
    elif interpolation_method == "bilinear":
        for ro in range(0, height_out):
            for co in range(0, width_out):
                # Fractional source row and the two integer rows bracketing it.
                ri_flt = float(ro) * scale_inv
                ri_flr = int(math.floor(ri_flt))
                ri_cln = int(math.ceil(ri_flt))
                if ri_cln == ri_flr:
                    # Exactly on a row: force a distinct second row so the
                    # interpolation weights below stay well-defined.
                    ri_cln += 1
                ci_flt = float(co) * scale_inv
                ci_flr = int(math.floor(ci_flt))
                ci_cln = int(math.ceil(ci_flt))
                if ci_cln == ci_flr:
                    ci_cln += 1
                # Interpolate horizontally on the top and bottom rows, then
                # vertically between the two intermediate results.
                top = float(img_in[ri_flr * width_in + ci_flr]) * (ci_cln - ci_flt) \
                    + float(img_in[ri_flr * width_in + ci_cln]) * (ci_flt - ci_flr)
                bot = float(img_in[ri_cln * width_in + ci_flr]) * (ci_cln - ci_flt) \
                    + float(img_in[ri_cln * width_in + ci_cln]) * (ci_flt - ci_flr)
                center = top * (ri_cln - ri_flt) + bot * (ri_flt - ri_flr)
                px_bl = int(round(center))
                img_out.append(px_bl)
    else:
        # Fixed: original message had a typo and a .format() call with no
        # placeholder, so the offending method name was never shown.
        raise ValueError("Invalid interpolation method: {}".format(interpolation_method))
    return img_out, height_out
def identify_missing(df=None, na_values=('n/a', 'na', '--', '?')):
    """Detect missing values.

    Identify common missing-value markers such as 'n/a', 'na', '--'
    and '?' as missing.  Users can customize the markers via ``na_values``.

    Parameters
    ----------
    df : DataFrame
        Raw data formatted in DataFrame.
    na_values : iterable of str, optional
        Values to treat as missing in addition to NaN.

    Returns
    -------
    flag : bool
        True if any missing value is detected, False otherwise.
    """
    # Tuple default instead of the original mutable list default (mutable
    # defaults are shared across calls).
    for value in na_values:
        df = df.replace(value, np.nan)
    # bool() so callers get a plain Python bool rather than numpy.bool_.
    return bool(df.isnull().values.any())
def categorize():
    """Categorization API endpoint backed by the Perceptron() model.

    Expects a JSON body with a "products" list of objects; each object may
    carry "query", "title" and/or "concatenated_tags".  Returns the predicted
    category name per product, or a 400 error payload on malformed input.
    """
    body = request.json
    # Validate the request envelope.
    if "products" not in body:
        return {"error": "json field 'products' does not exist"}, 400
    products = body["products"]
    if not isinstance(products, list):
        return {"error": "'products' must be a list of objects"}, 400
    # Extract product fields and format them following the rules established
    # in the training pipeline.  Not every JSON object carries all keys, so
    # absent keys fall back to the empty string.
    inputs = []
    for product in products:
        # Validate each individual product entry.
        if not isinstance(product, dict):
            return {"error": "'products' must be a list of objects"}, 400
        # A product is unusable only when all three keys are absent (an
        # empty-string value still counts as present, matching the original
        # KeyError-based bookkeeping).
        if all(key not in product
               for key in ("query", "title", "concatenated_tags")):
            return {
                "error": "product json queries must contain at least one of the " +
                "following elements: 'query', 'concatenated_tags' or 'title'"
            }, 400
        query = product.get("query", "")
        title = product.get("title", "")
        concatenated_tags = product.get("concatenated_tags", "")
        # The machine-learning input has the format
        # query + ' ' + title + ' ' + concatenated_tags
        inputs.append(query + ' ' + title + ' ' + concatenated_tags)
    integer_categories = model.predict(inputs)  # numpy array of class ids
    # Map integer classes back to category names using numpy's fast
    # vectorized iteration, then to a plain list for JSON serialization.
    str_categories = vectorized_converter(integer_categories)
    return {"categories": str_categories.tolist()}
def _TryJobSvnRepo(builder_type):
    """Returns an SVN repo to use for try jobs based on the builder type."""
    # Map each known builder type to its repository URL.
    repo_by_builder = {
        fetch_build.PERF_BUILDER: PERF_SVN_REPO_URL,
        fetch_build.FULL_BUILDER: FULL_SVN_REPO_URL,
        fetch_build.ANDROID_CHROME_PERF_BUILDER: ANDROID_CHROME_SVN_REPO_URL,
    }
    if builder_type not in repo_by_builder:
        raise NotImplementedError('Unknown builder type "%s".' % builder_type)
    return repo_by_builder[builder_type]
def find_wr5bis_common2(i, n, norm, solution_init, common2b_init):
    """
    Locate the weight w at which the scalar product of the solution equals
    the scalar product of a guess sharing 2 consecutive bits.

    Solves fct_common2b(w) = fct_solution(w) for w in [w0_3, w0_4] with
    0 <= w0_3 < w0_4 < 1, where
        fct_solution(w) = (3*norm - E) w + E
        fct_common2b(w) = (norm - P) w + P

    Parameters:
    i -- integer
    n -- integer
    norm -- integer
    solution_init -- list of Decimal
    common2b_init -- list of Decimal

    Return:
    w5 -- Decimal, or None when the denominator vanishes
    """
    a0, a1, a2, a3, a4 = solution_init[:5]
    E = a0 + a1 + a2 + a3 - a4
    # Guess coefficients: the first keeps its sign, the next four wrap
    # around modulo n and contribute their absolute values.
    P = common2b_init[i % n]
    for offset in range(1, 5):
        P += abs(common2b_init[(i + offset) % n])
    denominator = P - E + Decimal(2) * norm
    if denominator != 0:
        return (P - E) / denominator
    return None
import yaml
import re
def yml_remove_releaseNote_record(file_path, current_server_version):
    """
    locate and remove release notes from a yaml file.
    :param file_path: path of the file
    :param current_server_version: current server GA version
    :return: True if file was changed, otherwise False.
    """
    with open(file_path, 'r') as f:
        # Keep both the raw text (for the surgical regex edit below) and the
        # parsed YAML (for reading individual fields).
        yml_text = f.read()
        f.seek(0)
        yml_data = yaml.safe_load(f)
    # 'fromversion' / 'fromVersion' key naming differs between content files.
    v = yml_data.get('fromversion') or yml_data.get('fromVersion')
    if v and server_version_compare(current_server_version, str(v)) < 0:
        # Content targets a future server version: keep its notes for later.
        print('keeping release notes for ({})\nto be published on {} version release'.format(
            file_path,
            current_server_version
        ))
        return False
    rn = yml_data.get('releaseNotes')
    if rn:
        # Remove only the releaseNotes line.  The escaped note text has each
        # escaped space widened to \s+ so the pattern still matches if the
        # YAML value was rewrapped across whitespace.
        yml_text = re.sub(r'\n?releaseNotes: [\'"]?{}[\'"]?'.format(re.escape(rn).replace(r'\ ', r'\s+')), '', yml_text)
        with open(file_path, 'w') as f:
            f.write(yml_text)
        return True
    return False
from typing import Union
def calculate_z_score(
    data: Union[MultimodalData, UnimodalData, anndata.AnnData],
    n_bins: int = 50,
) -> np.array:
    """Calculate the standardized z scores of the count matrix.
    Parameters
    -----------
    data: ``MultimodalData``, ``UnimodalData``, or ``anndata.AnnData`` object.
        Single cell expression data.
    n_bins: ``int``, optional, default: ``50``
        Number of bins on expression levels for grouping genes.
    Returns
    -------
    numpy.array
        A 2D numpy array of shape ``(n_cells, n_features)``, which represents the standardized z-score expression matrix.
        Returns ``None`` if the signature background could not be computed.
    Examples
    ---------
    >>> pg.calculate_z_score(data)
    >>> pg.calculate_z_score(data, n_bins=100)
    """
    # Work on the underlying UnimodalData when a MultimodalData wrapper is given.
    if isinstance(data, MultimodalData):
        data = data._unidata
    # The helper is expected to populate var["mean"], var["bins"] and
    # obsm["sig_bkg_mean"/"sig_bkg_std"]; bail out if it reports failure.
    # NOTE(review): exact contract of _check_and_calc_sig_background not
    # visible here -- confirm against its definition.
    if not _check_and_calc_sig_background(data, n_bins):
        return None
    # z = (expression - per-gene mean - per-cell background mean of the gene's
    # expression bin) / per-cell background std of that bin, all in float32.
    # NOTE(review): data.X is densified here, so peak memory is
    # O(n_cells * n_features).
    z_score_mat = (data.X.toarray().astype(np.float32) - data.var["mean"].values.astype(np.float32) - data.obsm["sig_bkg_mean"][:, data.var["bins"].cat.codes].astype(np.float32)) / data.obsm["sig_bkg_std"][:, data.var["bins"].cat.codes].astype(np.float32)
    return z_score_mat
def encode(*args, **kwargs):
    """
    A helper function to encode an element.

    @param args: The python data to be encoded.
    @kwarg encoding: AMF encoding type. One of L{ENCODING_TYPES}.
    @return: A L{util.BufferedByteStream} object that contains the data.
    """
    encoding = kwargs.pop('encoding', DEFAULT_ENCODING)
    encoder = get_encoder(encoding, **kwargs)
    # Plain loop instead of a throwaway list comprehension: writeElement is
    # called purely for its side effect on the encoder's stream.
    for element in args:
        encoder.writeElement(element)
    stream = encoder.stream
    # Rewind so callers can read the encoded bytes from the start.
    stream.seek(0)
    return stream
from typing import Tuple
def fenergy_symmetric_bar(
    work_ab: ArrayLike,
    work_bc: ArrayLike,
    uncertainty_method: str = "BAR",
) -> Tuple[float, float]:
    """BAR estimate for symmetric periodic protocols.

    Args:
        work_ab: Work measurements from the first half of the protocol.
        work_bc: Work measurements from the mirror-image second half.
        uncertainty_method: Error estimator to use (BAR, MBAR, or Logistic).

    Returns:
        The estimated free energy difference to the protocol midpoint and
        an estimated error.
    """
    forward = np.asarray(work_ab, dtype=np.float64)
    backward = np.asarray(work_bc, dtype=np.float64)
    # Reverse-sample weights from the normalized forward Boltzmann factors.
    reverse_weights = np.exp(-forward - fenergy_logmeanexp(forward))
    return fenergy_bar(forward, backward, None, reverse_weights, uncertainty_method)
def format_resolution(resolution):
    """For debugging: flatten the resolution dict from resolve_citations()
    down to matched_text() strings, like
    {'1 U.S. 1': ['1 U.S. 1', '1 U.S., at 2']}.
    """
    formatted = {}
    for resource, cites in resolution.items():
        formatted[resource.citation.matched_text()] = [
            cite.matched_text() for cite in cites
        ]
    return formatted
def mod_arr_fit(ktp_dct, mess_path,
                fit_type='single', fit_method='dsarrfit',
                t_ref=1.0, a_conv_factor=1.0, inp_param_dct=None):
    """
    Routine for a single reaction:
        (1) Grab high-pressure and pressure-dependent rate constants
            from a MESS output file
        (2) Fit rate constants to an Arrhenius expression

    :param ktp_dct: {pressure: (temps, rate_constants)} rate data
    :param mess_path: path handed to the dsarrfit backend
    :param fit_type: 'single' or 'double' Arrhenius form
    :param fit_method: fitting backend identifier (e.g. 'dsarrfit')
    :param t_ref: reference temperature for the fits
    :param a_conv_factor: unit-conversion factor applied to the A parameter
    :param inp_param_dct: optional {pressure: params} initial guesses
        (only used for double fits)
    :return: (fit_param_dct, fit_temp_dct, fit_success) where the first two
        are keyed by pressure and fit_success is True iff every pressure
        produced non-empty fit parameters
    """
    assert fit_type in ('single', 'double'), 'Only single/double fits'
    if inp_param_dct is not None:
        # Every pressure being fit must have a corresponding guess entry.
        assert set(list(ktp_dct.keys())) <= set(list(inp_param_dct.keys())), (
            'Pressure in ktp not in guess: \n {} \n{}'.format(
                list(ktp_dct.keys()), list(inp_param_dct.keys()))
        )
    # Dictionaries to store info; indexed by pressure (given in fit_ps)
    fit_param_dct = {}
    fit_temp_dct = {}
    # Calculate the fitting parameters from the filtered T,k lists
    for pressure, tk_arr in ktp_dct.items():
        # Set the temperatures and rate constants
        temps = tk_arr[0]
        rate_constants = tk_arr[1]
        # Fit rate constants using desired Arrhenius fit
        if fit_type == 'single':
            fit_params = ratefit.fit.arrhenius.single(
                temps, rate_constants, t_ref, fit_method,
                dsarrfit_path=mess_path, a_conv_factor=a_conv_factor)
        elif fit_type == 'double':
            # Generate guess parameters
            if inp_param_dct is not None:
                arr_guess = inp_param_dct[pressure]
                # First three values seed Arrhenius term 1, the rest term 2.
                arr1_guess, arr2_guess = arr_guess[:3], arr_guess[3:]
            else:
                arr1_guess, arr2_guess = (8.1e-11, -0.01, 2000.0), ()
            fit_params = ratefit.fit.arrhenius.double(
                temps, rate_constants, t_ref, fit_method,
                arr1_guess=arr1_guess, arr2_guess=arr2_guess,
                dsarrfit_path=mess_path, a_conv_factor=a_conv_factor)
        # Store the fitting parameters in a dictionary
        fit_param_dct[pressure] = fit_params
        # Store the temperatures used to fit in a dictionary
        fit_temp_dct[pressure] = [min(temps), max(temps)]
    # Check if the desired fits were successful at each pressure
    fit_success = all(params for params in fit_param_dct.values())
    return fit_param_dct, fit_temp_dct, fit_success
from typing import Union
from typing import Sequence
from typing import List
def _choose_image_ids(selected: Union[None, int, Sequence[int]],
available: List[int]) -> List[int]:
"""Choose which image ids to load from disk."""
# Load all.
if selected is None:
return available
# Load a random selection.
if isinstance(selected, int):
if selected > len(available):
raise ValueError(f'Unable to sample {selected} random indices from '
f'{len(available)} available.')
rng = np.random.RandomState(42)
ids = rng.choice(available, size=selected, replace=False)
return ids.tolist()
# Load a specific set of indices.
if isinstance(selected, (list, tuple)):
if any([i >= len(available) for i in selected]):
raise ValueError(f'Unable to select indices {selected} from '
f'{len(available)} available.')
return [available[i] for i in selected]
raise NotImplementedError(
f'Unable to choose image_ids using selected of type {type(selected)}: '
f'{selected}') | 2f12c0f840ec4daede35ac3f65e745ad8681c19a | 31,342 |
import mite as m2
import M2kinter as m2
def _open_file(name, mode):
"""
Opens a file in the specified mode. If the mite or M2kinter module
is available the path given is not absolute, the writepath or
datapath (depending on the specified mode) is searched first.
"""
if not name:
raise OpalFileException('File name cannot be empty')
name = os.path.expanduser(os.path.expandvars(name))
infile = None
if name[0] != os.sep:
try:
except:
try:
except:
m2 = None
if m2:
if mode[0] == 'w':
paths = m2.writepath()
else:
assert mode[0] == 'r'
paths = m2.datapath()
for path in paths:
if path:
if path[-1] != os.sep:
path += os.sep
if os.path.isfile(path + name) or mode[0] == 'w':
infile = open(path + name, mode)
if infile:
break
if not infile and (os.path.isfile(name) or mode[0] == 'w'):
infile = open(name, mode)
return infile | 4aee4a2a54e5f9bd1ba72810b72deb50d0f69d54 | 31,343 |
def canonical_message_builder(content, fmt):
    """Builds the canonical message to be verified.

    Sorts the fields, as required by AWS.

    Args:
        content (dict): Parsed body of the response
        fmt (list): List of the fields that need to go into the message

    Returns (str):
        canonical message
    """
    # Collect parts and join once instead of quadratic string concatenation.
    parts = []
    for field in sorted(fmt):
        # Fields absent from the response are skipped deliberately:
        # build the message with whatever is available.
        if field in content:
            parts.append(field + "\n" + content[field] + "\n")
    return "".join(parts)
import random
def get_random_image(shape):
    """
    Build a random image: half the time a solid color, otherwise a
    two-color gradient with random orientation; either result may then
    have noise added (50% chance).

    Expects something like shape=(480,640,3).

    :param shape: tuple of shape for numpy array,
                  for example from my_array.shape
    :type shape: tuple of ints
    :return random_image:
    :rtype: np.ndarray
    """
    if random.random() < 0.5:
        image = get_random_solid_color_image(shape)
    else:
        color_a = get_random_solid_color_image(shape)
        color_b = get_random_solid_color_image(shape)
        # Random orientation for the gradient.
        is_vertical = bool(np.random.uniform() > 0.5)
        image = gradient_image(color_a, color_b, vertical=is_vertical)
    if random.random() < 0.5:
        return image
    return add_noise(image)
import torch
def evaluate(model, val_loader, device):
    """
    Compute classification accuracy of `model` on a validation set.

    model: CNN network
    val_loader: a Dataloader object yielding (images, targets) batches
    device: evaluate on cpu or gpu device
    return: classification accuracy of the model on the val dataset
    """
    model.eval()  # switch to inference mode
    correct, total = 0, 0
    # Gradient bookkeeping is unnecessary during evaluation.
    with torch.no_grad():
        for images, targets in val_loader:
            # Cast to half precision and move the batch to the device.
            images = images.half().to(device)
            targets = targets.to(device)
            outputs = model(images)
            # torch.max over dim=1 returns (values, argmax indices);
            # the indices are the predicted class labels.
            predicted = torch.max(outputs.data, dim=1)[1]
            correct += (predicted == targets).sum().item()
            total += targets.size(0)
    accuracy = correct / total
    print('Accuracy on Test Set: {:.4f} %'.format(100 * accuracy))
    return accuracy
def estimateInharmonicity(inputFile = '../../sounds/piano.wav', t1=0.1, t2=0.5, window='hamming', 
            M=2048, N=2048, H=128, f0et=5.0, t=-90, minf0=130, maxf0=180, nH = 10):
    """
    Function to estimate the extent of inharmonicity present in a sound
    Input:
        inputFile (string): wav file including the path
        t1 (float): start time of the segment considered for computing inharmonicity
        t2 (float): end time of the segment considered for computing inharmonicity
        window (string): analysis window
        M (integer): window size used for computing f0 contour
        N (integer): FFT size used for computing f0 contour
        H (integer): Hop size used for computing f0 contour
        f0et (float): error threshold used for the f0 computation
        t (float): magnitude threshold in dB used in spectral peak picking
        minf0 (float): minimum fundamental frequency in Hz
        maxf0 (float): maximum fundamental frequency in Hz
        nH (integer): number of integers considered for computing inharmonicity
    Output:
        meanInharm (float or np.float): mean inharmonicity over all the frames between the time interval
        t1 and t2.
    """
    # 0. Read the audio file and obtain an analysis window
    fs, x = UF.wavread(inputFile)
    w = get_window(window, M)
    # 1. Use harmonic model to compute the harmonic frequencies and magnitudes
    xhfreq, xhmag, xhphase = HM.harmonicModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope=0.01, minSineDur=0.0)
    # 2. Extract the time segment in which you need to compute the inharmonicity.
    # Frame indices at hop H: ceil for the start, floor for the end, so the
    # selected frames lie entirely inside [t1, t2].
    lt1 = int(np.ceil(fs*t1/float(H)))
    lt2 = int(np.floor(fs*t2/float(H)))
    x_seg = xhfreq[lt1: lt2]
    # 3. Compute the mean inharmonicity of the segment
    I = np.zeros(x_seg.shape[0])
    for l in range(0, x_seg.shape[0]):
        # Keep only harmonics actually detected in this frame (freq > 0),
        # then drop the first entry (the reference fundamental track).
        non_zero_freqs = np.where(x_seg[l, :] > 0.0)[0]
        non_zero_freqs = np.delete(non_zero_freqs, 0)
        for r in non_zero_freqs:
            # |f_r - (r+1)*f0| / (r+1): deviation of harmonic r from an
            # exact integer multiple of the fundamental x_seg[l, 0].
            I[l] += (np.abs(x_seg[l, r] - (r+1) * x_seg[l, 0]))/float(r+1)
        I[l] = 1.0 / nH * I[l]
    # Average the per-frame inharmonicity over the analyzed segment.
    mean_inharm = 1.0/(lt2 - lt1) * np.sum(I)
    return mean_inharm
import attr
def _get_default_secret(var, default):
    """
    Return the default value for a secret.

    attr.Factory defaults map to attr.NOTHING ("no usable default");
    a Raise marker raises MissingSecretError for the given variable;
    any other value passes through unchanged.
    """
    if isinstance(default, attr.Factory):
        return attr.NOTHING
    if isinstance(default, Raise):
        raise MissingSecretError(var)
    return default
import copy
def merge_similar_bounds(json_data: dict, file_names: list, bounds_list: list) -> dict:
    """Merge entries of `json_data` whose bounds are identical.

    Parameters
    ----------
    json_data : dict
        Dictionary the data information was extracted from.
    file_names : list
        Keys list extracted from the dictionary (parallel to bounds_list).
    bounds_list : list
        Bounds list extracted from the dictionary (parallel to file_names).

    Returns
    -------
    dict
        New dictionary where files sharing identical bounds are merged
        into a single entry keyed by the comma-joined file names.

    Notes
    -----
    Fixes two defects of the previous implementation: duplicate positions
    were looked up in the de-duplicated list (mis-pairing files once an
    earlier duplicate existed), and three or more files sharing one bound
    crashed after the first pairwise merge deleted their keys.
    """
    # deepcopy outside the try block so a copy failure surfaces instead of
    # producing a NameError on return.
    new_json = copy.deepcopy(json_data)
    try:
        # Group positions of equal bounds, preserving first-seen order.
        # Equality-based grouping (not hashing) because bounds may be lists.
        groups = []
        for index, bound in enumerate(bounds_list):
            for existing_bound, indices in groups:
                if existing_bound == bound:
                    indices.append(index)
                    break
            else:
                groups.append((bound, [index]))
        for _, indices in groups:
            if len(indices) < 2:
                continue
            names = [file_names[i] for i in indices]
            base = new_json[names[0]]
            merged = {
                'bounds': base['bounds'],
                'years': list(base['years']),
                'points': list(base['points']),
                'access_url': list(base['access_url']),
                'len': base['len'],
            }
            # Fold every later file with the same bounds into the merged record.
            for name in names[1:]:
                extra = new_json[name]
                merged['years'].extend(extra['years'])
                merged['points'].extend(extra['points'])
                merged['access_url'].extend(extra['access_url'])
                merged['len'] += extra['len']
            for name in names:
                del new_json[name]
            new_json[','.join(names)] = merged
        logger.info('Successfully merged files with related bounds')
    except Exception:
        logger.exception('Failed to merge bound related files')
    return new_json
import base64
def is_base64(s):
    """Return True if the input decodes as valid base64, False otherwise.

    Accepts str or bytes; surrounding quote characters are stripped first.
    Validity is checked via a decode/re-encode round trip.  Any other input
    type returns False.
    """
    if isinstance(s, str):
        try:
            # Fixed: non-ASCII text cannot be base64; report False cleanly.
            raw = bytes(s.strip("'\""), 'ascii')
        except UnicodeEncodeError:
            return False
    elif isinstance(s, bytes):
        # Fixed: the old code called s.strip("'\"") on bytes, which raised
        # an uncaught TypeError, so the documented bytes path never worked.
        raw = s.strip(b"'\"")
    else:
        # Fixed: non str/bytes input used to crash with AttributeError
        # instead of returning False.
        return False
    try:
        return base64.b64encode(base64.b64decode(raw)) == raw
    except ValueError:  # binascii.Error is a ValueError subclass
        return False
def get_coco(opt, coco_path):
    """Build the COCO train/val dataset pair for multi-pose training.

    Note: coco_path is currently unused; both custom datasets are
    constructed from opt alone.  Also sets opt.val_interval to 10.
    """
    datasets = tuple(
        CenterMultiPoseDataset(opt, split=which)  # custom dataset
        for which in ('train', 'val')
    )
    opt.val_interval = 10
    return datasets
def _get_output_columns(nodes, context):
    """Get the output columns for a list of SqlNodes.

    Args:
        nodes: List[SqlNode], the nodes to get output columns from.
        context: CompilationContext, global compilation state and metadata.

    Returns:
        List[Column], list of SqlAlchemy Columns to output for this query.
    """
    # Look up each node's output columns and relabel them with their
    # requested output names, flattening into a single list.
    return [
        sql_context_helpers.get_column(
            sql_output.field_name, node, context
        ).label(sql_output.output_name)
        for node in nodes
        for sql_output in sql_context_helpers.get_outputs(node, context)
    ]
from typing import final
def notif(message):
    """
    Parse a comma-separated stock query from `message` and report availability.

    Expects exactly two comma-separated Python-literal arguments which are
    passed to final.ajio_care.find_stock(); returns an availability string
    when stock is positive, otherwise 0.  Returns None implicitly when the
    message does not split into exactly two parts.
    """
    # SECURITY NOTE(review): eval() below executes arbitrary code from the
    # user-supplied message -- replace with ast.literal_eval or explicit
    # parsing before exposing this to untrusted input.
    #message= mess.text
    #print(message)
    #print(type(message))
    query= str(message).split(',')
    #print(query)
    if(len(query)==2):
        #print(eval(query[1]))
        list_str= final.ajio_care.find_stock(eval(query[0]),eval(query[1]))
        #return list_str
        if (list_str[0] > 0): #instock
            return str(list_str[0])+" is available"
        else :
            return 0
import optparse
import sys
def parse_args():
    """
    Parses command line arguments.

    Returns (options, args); unless --list is given, args must contain the
    single DEST_DIR argument.  Exits with status 2 on usage errors.
    """
    parser = optparse.OptionParser(
        version=nodeenv_version,
        usage="%prog [OPTIONS] ENV_DIR")

    parser.add_option('-n', '--node', dest='node',
        metavar='NODE_VER', default=get_last_stable_node_version(),
        help='The node.js version to use, e.g., '
        '--node=0.4.3 will use the node-v0.4.3 '
        'to create the new environment. The default is last stable version.')

    parser.add_option('-j', '--jobs', dest='jobs', default=2,
        help='Sets number of parallel commands at node.js compilation. '
        'The default is 2 jobs.')

    parser.add_option('-v', '--verbose',
        action='store_true', dest='verbose', default=False,
        help="Verbose mode")

    parser.add_option('-q', '--quiet',
        action='store_true', dest='quiet', default=False,
        help="Quiet mode")  # fixed typo: was "Quete mode"

    parser.add_option('-r', '--requirement',
        dest='requirements', default='', metavar='FILENAME',
        help='Install all the packages listed in the given requirements file. '
        'Not compatible with --without-npm option.')

    parser.add_option('--prompt', dest='prompt',
        help='Provides an alternative prompt prefix for this environment')

    parser.add_option('-l', '--list', dest='list',
        action='store_true', default=False,
        help='Lists available node.js versions')

    parser.add_option( '--without-ssl', dest='without_ssl',
        action='store_true', default=False,
        help='Build node.js without SSL support')

    parser.add_option( '--debug', dest='debug',
        action='store_true', default=False,
        help='Build debug variant of the node.js')

    parser.add_option( '--profile', dest='profile',
        action='store_true', default=False,
        help='Enable profiling for node.js')

    parser.add_option( '--without-npm', dest='without_npm',
        action='store_true', default=False,
        # fixed: help text said the opposite of what the flag does
        help='Do not install npm in the new virtual environment')

    parser.add_option('--npm', dest='npm',
        metavar='NODE_VER', default='latest',
        help='The npm version to use, e.g., '
        '--npm=0.3.18 will use the npm-0.3.18.tgz '
        'tarball to install. The default is last available version.')

    parser.add_option( '--no-npm-clean', dest='no_npm_clean',
        action='store_true', default=False,
        help='Skip the npm 0.x cleanup. Do cleanup by default.')

    options, args = parser.parse_args()

    # Validate positional arguments: exactly one DEST_DIR unless listing.
    if not options.list:
        if not args:
            print('You must provide a DEST_DIR')
            parser.print_help()
            sys.exit(2)

        if len(args) > 1:
            print('There must be only one argument: DEST_DIR (you gave %s)' % (
                ' '.join(args)))
            parser.print_help()
            sys.exit(2)

    # Requirements files are installed through npm, so the two options clash.
    if options.requirements and options.without_npm:
        print('These options are not compatible: --requirements, --without-npm')
        parser.print_help()
        sys.exit(2)

    return options, args
def float32(x):
    """Cast a basic scalar to a 32-bit float.

    :param x: a basic scalar type (int, float, ...)
    :return: numpy.float32 representation of x
    """
    return np.float32(x)
from pathlib import Path
def collect_test_names():
    """Find all valid test names in TEST_DATA_DIR.

    A test name is valid when both a C source file and its associated
    gcc AST json file exist.
    """
    data_dir = Path(TEST_DATA_DIR)
    # A file's stem is its name without the extension.
    c_names = {p.stem for p in data_dir.glob("*.c")}
    ast_names = {p.stem.replace("_gcc_ast", "") for p in data_dir.glob("*_gcc_ast.json")}
    # Only names present in both collections are runnable tests.
    return c_names & ast_names
def read_from_file(filename):
    """Read a graph description from `filename` and return the Graph object.

    File format: the first line is "G" (undirected) or "D" (directed); the
    second line is a parenthesized, comma-separated vertex list; each
    further line is an edge "(v1,v2)" or "(v1,v2,weight)".

    :raises Exception: if the header or an edge line is malformed.
    """
    # Context manager guarantees the file is closed even if parsing below
    # raises (the original leaked the handle on parse errors).
    with open(filename, "r") as file:
        lines = file.readlines()
    # Check if it is a graph or digraph
    graph_or_digraph_str = lines[0].strip() if len(lines) > 0 else None
    if graph_or_digraph_str != "G" and graph_or_digraph_str != "D":
        raise Exception("File must start with G or D.")
    is_bidirectional = graph_or_digraph_str == "G"
    g = Graph()
    # Add all vertices
    for vertex_key in lines[1].strip("() \n").split(","):
        g.add_vertex(vertex_key)
    # Add all edges
    for line in lines[2:]:
        # Split components of edge
        new_edge = line.strip("() \n").split(",")
        if len(new_edge) < 2 or len(new_edge) > 3:
            raise Exception("Lines adding edges must include 2 or 3 values")
        # Get vertices
        vertex1, vertex2 = new_edge[:2]
        # Optional third field is the integer edge weight.
        weight = int(new_edge[2]) if len(new_edge) == 3 else None
        # Add edge(s); undirected graphs store the reverse edge explicitly.
        g.add_edge(vertex1, vertex2, weight)
        if is_bidirectional:
            g.add_edge(vertex2, vertex1, weight)
    return g
from quaternion.calculus import spline_definite_integral as sdi
def inner_product(t, abar, b, axis=None, apply_conjugate=False):
    """Time-domain complex inner product <a, b> between two waveforms.

    This is implemented using spline interpolation, calling
    quaternion.calculus.spline_definite_integral.

    Parameters
    ----------
    t : array_like
        Time samples for waveforms abar and b.
    abar : array_like
        The conjugate of the 'a' waveform (or simply 'a' itself when
        apply_conjugate=True is passed).  Must have the same shape as b.
    b : array_like
        The 'b' waveform in the inner product.  Must have the same shape
        as abar.
    axis : int, optional
        When abar and b are multidimensional, integrate over this axis;
        by default it is inferred by `spline_definite_integral`.
    apply_conjugate : bool, optional
        When True, conjugate abar before integrating.  Default False,
        meaning the caller already performed the conjugation.

    Returns
    -------
    inner_product : ndarray
        The integral along 'axis'.
    """
    conjugated = np.conjugate(abar) if apply_conjugate else abar
    return sdi(conjugated * b, t, axis=axis)
from typing import List
def build_datamodel(good_pbks: List[str], is_supply: bool) -> DataModel:
    """
    Build a data model for supply or demand (i.e. offered or requested goods).

    :param good_pbks: the list of good public keys
    :param is_supply: True builds the supply model, False the demand model
    :return: the data model.
    """
    # One integer quantity attribute per good, plus a float price attribute.
    attributes = [Attribute(good_pbk, int, False) for good_pbk in good_pbks]
    attributes.append(Attribute("price", float, False))
    model_name = TAC_SUPPLY_DATAMODEL_NAME if is_supply else TAC_DEMAND_DATAMODEL_NAME
    return DataModel(model_name, attributes)
def tryReduceOr(sig, val):
    """
    Return sig and val reduced by | operator or None
    if it is not possible to statically reduce expression

    Reduction rules for `sig | val`:
      * val fully undefined (no valid bits)  -> val
      * val fully defined and all-ones       -> val  (x | 1s == 1s)
      * val fully defined and zero           -> sig  (x | 0  == x)
      * anything else -> None (implicit fall-through below)
    """
    # All-ones mask for the signal's type.
    m = sig._dtype.all_mask()
    if not val.vldMask:
        return val
    if val._isFullVld():
        v = val.val
        if v == m:
            return val
        elif v == 0:
            return sig
def get_mod_from_id(mod_id, mod_list):
    """
    Return the mod with the given identifier, or None if it isn't found.

    Parameters
    ----------
    mod_id : str
        The mod identifier to look for
    mod_list : list[DatRecord]
        List of mods to search in (or dat file)

    Returns
    -------
    DatRecord or None
        The matching mod if found, None otherwise
    """
    # next() with a default avoids the explicit search loop.
    return next((mod for mod in mod_list if mod['Id'] == mod_id), None)
def resize(image):
    """
    Resize the image to the input shape used by the network model.

    Fixed: cv2.resize's third positional argument is `dst`, not the
    interpolation flag, so passing cv2.INTER_AREA positionally did not
    select the interpolation method; it must be passed as the
    `interpolation` keyword argument.
    """
    return cv2.resize(image, (IMAGE_WIDTH, IMAGE_HEIGHT), interpolation=cv2.INTER_AREA)
def exp_f(name):
    """Similar to E but trains to full 3001 epochs."""
    # Experiment note kept from the original author:
    print("e82 but with seq length 2000 and 5 appliances and learning rate 0.01 and train and validation on all 5 houses")
    # NILM data source: 5 target appliances from the UK-DALE dataset,
    # training and validating on houses 1-5.
    source = RealApplianceSource(
        filename='/data/dk3810/ukdale.h5',
        appliances=[
            ['fridge freezer', 'fridge', 'freezer'], 
            'hair straighteners', 
            'television',
            'dish washer',
            ['washer dryer', 'washing machine']
        ],
        max_appliance_powers=[300, 500, 200, 2500, 2400],  # watts, per appliance
        on_power_thresholds=[80, 20, 20, 20, 600],  # watts above which "on"
        min_on_durations=[60, 60, 60, 300, 300],  # seconds
        window=("2013-05-01", "2015-01-01"),
        seq_length=2000,
        output_one_appliance=False,
        boolean_targets=False,
        min_off_duration=60,
        subsample_target=5,  # target downsampled 5x (matches conv stride below)
        train_buildings=[1,2,3,4,5],
        validation_buildings=[1,2,3,4,5]
    )
    # Network: BLSTM -> strided Conv1D (5x temporal downsample) -> BLSTM
    # -> dense sigmoid output, trained with Nesterov momentum at lr=0.01.
    net = Net(
        experiment_name=name + 'f',
        source=source,
        save_plot_interval=SAVE_PLOT_INTERVAL,
        loss_function=crossentropy,
        updates=partial(nesterov_momentum, learning_rate=0.01),
        layers_config=[
            {
                'type': BLSTMLayer,
                'num_units': 60,
                'W_in_to_cell': Uniform(5)
            },
            {
                'type': DimshuffleLayer,
                # (batch, time, feat) -> (batch, feat, time) for 1D conv
                'pattern': (0, 2, 1)
            },
            {
                'type': Conv1DLayer,
                'num_filters': 80,
                'filter_length': 5,
                'stride': 5,
                'nonlinearity': sigmoid
            },
            {
                'type': DimshuffleLayer,
                # back to (batch, time, feat) for the recurrent layer
                'pattern': (0, 2, 1)
            },
            {
                'type': BLSTMLayer,
                'num_units': 80,
                'W_in_to_cell': Uniform(5)
            },
            {
                'type': DenseLayer,
                'num_units': source.n_outputs,
                'nonlinearity': sigmoid
            }
        ]
    )
    return net
def indices_2_one_hot(indices, n):
    """
    Convert a list of indices into a one-hot encoding.

    :param indices: sequence of non-negative indices, each < n
    :param n: integer. Size of the vocabulary
    :return: numpy int8 array with shape (len(indices), n)
    :raises ValueError: if any index falls outside the range [0, n)
    """
    idx = np.asarray(indices, dtype=np.intp)
    # Reject negative indices too: previously they silently wrapped around
    # and set a column counted from the end of the row.
    if idx.size and (idx.min() < 0 or idx.max() >= n):
        raise ValueError("Index out of bounds when converting to one hot")
    one_hot = np.zeros((len(indices), n), dtype=np.int8)
    # Vectorized scatter: one fancy-index assignment replaces the Python loop.
    one_hot[np.arange(idx.size), idx] = 1
    return one_hot
from typing import Set
def extract_tables(query: str) -> Set[Table]:
    """
    Helper function returning the set of tables referenced by a SQL query.
    """
    parsed = ParsedQuery(query)
    return parsed.tables
def master_operation(matrix):
    """
    Split the initial matrix into tasks and distribute them among slave operations.

    MPI task-farming: rank 0 (this function) is the master; ranks 1..N-1 are
    workers that expand partial sequences.  Returns the first completed
    sequence as (col, row) pairs, or [] if none was found.
    """
    workers = MPI.COMM_WORLD.Get_size()
    accumulator = []   # completed candidate sequences
    task_queue = []    # partial sequences still to be expanded by workers
    # Seed the search from the origin cell when it is "open" (falsy).
    if not matrix[0][0]:
        task_queue.append([(0, 0)])
    while True:
        sent_workers = []
        # Hand one pending task to each worker in turn (tag 1 = work message).
        for pid in range(1, workers):
            if len(task_queue):
                start = task_queue.pop(0)
                sent_workers.append(pid)
                MPI.COMM_WORLD.send(dict(start=start, status='process'), dest=pid, tag=1)
        # No queued tasks and nothing in flight: the search is exhausted.
        if not len(task_queue) and not len(sent_workers):
            break
        for pid in sent_workers:
            data = MPI.COMM_WORLD.recv(source=pid, tag=pid)
            # Once a full solution exists, drain replies but ignore them.
            if len(accumulator):
                continue
            # A single-element result is a completed sequence; several
            # elements are further partial expansions to enqueue.
            if len(data['result']) == 1:
                accumulator.append(data['result'][0])
                task_queue = []
            elif len(data['result']) > 1:
                task_queue += data['result']
    # Tell every worker to shut down (non-blocking send).
    for pid in range(1, workers):
        MPI.COMM_WORLD.isend(dict(status='terminate'), dest=pid, tag=1)
    # NOTE(review): at most one sequence ever reaches the accumulator with
    # the logic above, so this sort is effectively a no-op; kept as-is.
    accumulator.sort(key=lambda sequence: len(sequence))
    return [(col, row) for (row, col) in accumulator[0]] if len(accumulator) else []
def _weight_mean_color(graph, src, dst, n):
"""Callback to handle merging nodes by recomputing mean color.
The method expects that the mean color of `dst` is already computed.
Parameters
----------
graph : RAG
The graph under consideration.
src, dst : int
The vertices in `graph` to be merged.
n : int
A neighbor of `src` or `dst` or both.
Returns
-------
data : dict
A dictionary with the `"weight"` attribute set as the absolute
difference of the mean color between node `dst` and `n`.
"""
diff = graph.nodes[dst]['mean color'] - graph.nodes[n]['mean color']
diff = np.linalg.norm(diff)
return {'weight': diff} | 13fe474363578f704dfe8e16be725628a6e3ca5f | 31,367 |
def predict(model, pTestSet, pModelParams, pNoConvertBack):
    """
    Predict read counts for a test set with a trained model.

    Attributes:
        model -- trained model providing predict() and score()
        pTestSet -- pandas DataFrame of samples to be predicted
        pModelParams -- dict of the parameters the model was trained with
            (feature selection, target conversion, normalization settings)
        pNoConvertBack -- if True, do not rescale predictions back to the
            normalized input range

    Returns:
        (predictionDf, score) -- the valid input rows with an added
        'predReads' column, and the model score against the target column,
        or None when the test set carries no read counts.
    """
    # copy the test set, before invalidated rows and/or the columns not
    # required for prediction are dropped
    predictionDf = pTestSet.copy(deep=True)
    ### check if the test set contains reads, only then can we compute score later on
    # Bugfix: the old check `pTestSet['reads'] == np.nan` is always False
    # (NaN never compares equal to anything), so missing targets were never
    # detected; use isna() instead.
    testSetHasTargetValues = not pTestSet['reads'].isna().any()
    if not testSetHasTargetValues:
        predictionDf['reads'] = 0.0
    ### remove invalidated rows
    validMask = predictionDf['valid'] == True
    predictionDf = predictionDf[validMask]
    if predictionDf.empty:
        msg = "Aborting. No valid samples to predict"
        raise SystemExit(msg)
    ### Eliminate NaNs - there should be none
    predictionDf.replace([np.inf, -np.inf], np.nan, inplace=True)
    if not predictionDf[predictionDf.isna().any(axis=1)].empty:
        msg = "Warning: There are {:d} rows in the testSet which contain NaN\n"
        msg = msg.format(predictionDf[predictionDf.isna().any(axis=1)].shape[0])
        msg += "The NaNs are in column(s) {:s}\n"
        msg = msg.format(", ".join(predictionDf[predictionDf.isna().any(axis=1)].columns))
        msg += "Replacing by zeros. Check input data!"
        print(msg)
        predictionDf.fillna(value=0, inplace=True)
    ### Hide Columns that are not needed for prediction
    dropList = ['first', 'second', 'chrom', 'reads', 'avgRead', 'valid']
    noDistance = pModelParams.get('noDistance') == True
    noMiddle = pModelParams.get('noMiddle') == True
    noStartEnd = pModelParams.get('noStartEnd') == True
    if noDistance:
        dropList.append('distance')
    if noMiddle:
        if pModelParams['method'] == 'oneHot':
            dropList.append('middleProt')
        elif pModelParams['method'] == 'multiColumn':
            # protein columns come in three groups (start / middle / end)
            numberOfProteins = int((predictionDf.shape[1] - 6) / 3)
            for protein in range(numberOfProteins):
                dropList.append(str(protein + numberOfProteins))
        else:
            raise NotImplementedError()
    if noStartEnd:
        if pModelParams['method'] == 'oneHot':
            dropList.append('startProt')
            dropList.append('endProt')
        elif pModelParams['method'] == 'multiColumn':
            numberOfProteins = int((predictionDf.shape[1] - 6) / 3)
            for protein in range(numberOfProteins):
                dropList.append(str(protein))
                dropList.append(str(protein + 2 * numberOfProteins))
        else:
            raise NotImplementedError()
    test_X = predictionDf.drop(columns=dropList, errors='ignore')
    ### convert reads to log reads
    predictionDf['standardLog'] = np.log(predictionDf['reads'] + 1)
    ### predict
    print("Valid prediction samples: {:d}".format(test_X.shape[0]))
    predReads = model.predict(test_X)
    if np.min(predReads) < 0:
        maxPred = np.max(predReads)
        np.clip(predReads, 0, None, out=predReads)
        msg = "Warning: Some predicted read counts were negative.\n"
        msg += "Clamping to range 0...{:.3f}".format(maxPred)
        print(msg)
    predictionDf['predReads'] = predReads
    ### clamp prediction output to normed input range, if desired
    # Use .get() so models trained without normalization settings fall
    # through here instead of raising KeyError.
    normValue = pModelParams.get('normReadCountValue')
    if not pNoConvertBack \
            and pModelParams.get('normReadCount') == True \
            and normValue is not None and normValue > 0:
        scaler = MinMaxScaler(feature_range=(0, normValue), copy=False)
        predictionDf[['predReads']] = scaler.fit_transform(predictionDf[['predReads']])
        thresMask = predictionDf['predReads'] < pModelParams['normReadCountThreshold']
        predictionDf.loc[thresMask, 'predReads'] = 0.0
        msg = "normalized predicted values to range 0...{:.3f}, threshold {:.3f}"
        msg = msg.format(normValue, pModelParams['normReadCountThreshold'])
        print(msg)
    ### convert back if necessary
    if pModelParams['conversion'] == 'none':
        target = 'reads'
    elif pModelParams['conversion'] == 'standardLog':
        target = 'standardLog'
        predictionDf['predReads'] = np.exp(predictionDf['predReads']) - 1
    if testSetHasTargetValues:
        score = model.score(test_X, predictionDf[target])
    else:
        score = None
    return predictionDf, score
def poisson_log_likelihood(x, log_rate):
    """Compute the log likelihood under Poisson distribution.

        log poisson(k, r) = log(r^k * e^(-r) / k!)
                          = k log(r) - r - log k!
        log poisson(k, r=exp(l)) = k * l - exp(l) - lgamma(k + 1)

    Args:
      x: binned spike count data.
      log_rate: The (log) rate that define the likelihood of the data
        under the LFADS model.

    Returns:
      The log-likelihood of the data under the model (up to a constant factor).
    """
    rate = np.exp(log_rate)
    log_factorial = lax.lgamma(x + 1.0)
    return x * log_rate - rate - log_factorial
import math
def distance(point1, point2):
    """Return the Euclidean distance between two 2-D points."""
    delta_x = point1[0] - point2[0]
    delta_y = point1[1] - point2[1]
    return math.sqrt(delta_x ** 2 + delta_y ** 2)
import os
import json
def get_filesystem_perf_results(result_dir, pred_type='classification'):
    """
    Retrieve model metadata and performance metrics stored in the filesystem from a hyperparameter search run.

    Walks `result_dir` recursively for directories holding both
    'model_metadata.json' and 'model_metrics.json', extracts hyperparameters
    and best-epoch metrics per model, and returns one row per model in a
    DataFrame sorted by validation model_choice_score (descending).

    NOTE(review): `nan` is used unqualified below; the module must provide
    it (e.g. `from numpy import nan`) -- confirm at file level.
    """
    model_uuid_list = []
    model_type_list = []
    max_epochs_list = []
    learning_rate_list = []
    dropouts_list = []
    layer_sizes_list = []
    featurizer_list = []
    dataset_key_list = []
    splitter_list = []
    rf_estimators_list = []
    rf_max_features_list = []
    rf_max_depth_list = []
    best_epoch_list = []
    model_score_type_list = []
    feature_transform_type_list = []
    subsets = ['train', 'valid', 'test']
    # Metric set depends on whether the search was regression or classification.
    if pred_type == 'regression':
        metrics = ['r2_score', 'rms_score', 'mae_score']
    else:
        metrics = ['roc_auc_score', 'prc_auc_score', 'precision', 'recall_score',
                   'accuracy_score', 'npv', 'matthews_cc', 'kappa', 'cross_entropy', 'confusion_matrix']
    score_dict = {}
    for subset in subsets:
        score_dict[subset] = {}
        for metric in metrics:
            score_dict[subset][metric] = []
    score_dict['valid']['model_choice_score'] = []
    # Navigate the results directory tree
    model_list = []
    metrics_list = []
    for dirpath, dirnames, filenames in os.walk(result_dir):
        if ('model_metadata.json' in filenames) and ('model_metrics.json' in filenames):
            meta_path = os.path.join(dirpath, 'model_metadata.json')
            with open(meta_path, 'r') as meta_fp:
                meta_dict = json.load(meta_fp)
            model_list.append(meta_dict)
            metrics_path = os.path.join(dirpath, 'model_metrics.json')
            with open(metrics_path, 'r') as metrics_fp:
                metrics_dicts = json.load(metrics_fp)
            metrics_list.append(metrics_dicts)
    print("Found data for %d models under %s" % (len(model_list), result_dir))
    for metadata_dict, metrics_dicts in zip(model_list, metrics_list):
        model_uuid = metadata_dict['model_uuid']
        #print("Got metadata for model UUID %s" % model_uuid)
        # Get list of training run metrics for this model
        #print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
        # Require metrics for all three subsets (train/valid/test).
        if len(metrics_dicts) < 3:
            print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
            continue
        # Keep only the 'best' (best-epoch) metrics per subset.
        subset_metrics = {}
        for metrics_dict in metrics_dicts:
            if metrics_dict['label'] == 'best':
                subset = metrics_dict['subset']
                subset_metrics[subset] = metrics_dict['prediction_results']
        model_uuid_list.append(model_uuid)
        model_params = metadata_dict['model_parameters']
        model_type = model_params['model_type']
        model_type_list.append(model_type)
        model_score_type = model_params['model_choice_score_type']
        model_score_type_list.append(model_score_type)
        featurizer = model_params['featurizer']
        featurizer_list.append(featurizer)
        split_params = metadata_dict['splitting_parameters']
        splitter_list.append(split_params['splitter'])
        dataset_key_list.append(metadata_dict['training_dataset']['dataset_key'])
        feature_transform_type = metadata_dict['training_dataset']['feature_transform_type']
        feature_transform_type_list.append(feature_transform_type)
        # NN and RF store different hyperparameters; fill the other model
        # family's columns with NaN so the columns stay aligned.
        if model_type == 'NN':
            nn_params = metadata_dict['nn_specific']
            max_epochs_list.append(nn_params['max_epochs'])
            best_epoch_list.append(nn_params['best_epoch'])
            learning_rate_list.append(nn_params['learning_rate'])
            layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
            dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
            rf_estimators_list.append(nan)
            rf_max_features_list.append(nan)
            rf_max_depth_list.append(nan)
        if model_type == 'RF':
            rf_params = metadata_dict['rf_specific']
            rf_estimators_list.append(rf_params['rf_estimators'])
            rf_max_features_list.append(rf_params['rf_max_features'])
            rf_max_depth_list.append(rf_params['rf_max_depth'])
            max_epochs_list.append(nan)
            best_epoch_list.append(nan)
            learning_rate_list.append(nan)
            layer_sizes_list.append(nan)
            dropouts_list.append(nan)
        for subset in subsets:
            for metric in metrics:
                score_dict[subset][metric].append(subset_metrics[subset][metric])
        score_dict['valid']['model_choice_score'].append(subset_metrics['valid']['model_choice_score'])
    # Assemble one row per model; metric columns are named '<subset>_<metric>'.
    perf_df = pd.DataFrame(dict(
                    model_uuid=model_uuid_list,
                    model_type=model_type_list,
                    dataset_key=dataset_key_list,
                    featurizer=featurizer_list,
                    splitter=splitter_list,
                    model_score_type=model_score_type_list,
                    feature_transform_type=feature_transform_type_list,
                    learning_rate=learning_rate_list,
                    dropouts=dropouts_list,
                    layer_sizes=layer_sizes_list,
                    best_epoch=best_epoch_list,
                    max_epochs=max_epochs_list,
                    rf_estimators=rf_estimators_list,
                    rf_max_features=rf_max_features_list,
                    rf_max_depth=rf_max_depth_list))
    perf_df['model_choice_score'] = score_dict['valid']['model_choice_score']
    for subset in subsets:
        for metric in metrics:
            metric_col = '%s_%s' % (subset, metric)
            perf_df[metric_col] = score_dict[subset][metric]
    # Best models first.
    sort_by = 'model_choice_score'
    perf_df = perf_df.sort_values(sort_by, ascending=False)
    return perf_df
import math
def order_of_magnitude(value):
    """
    Returns the order of magnitude of the most significant digit of the
    specified number. A value of zero signifies the ones digit, as would be
    the case in [Number]*10^[Order].

    The previous implementation (``int(log10(x) + offset)``) was off by one
    for exact negative powers of ten -- e.g. it returned -2 for 0.1 instead
    of -1. ``floor(log10(x))`` handles every positive magnitude uniformly.

    :param value: a non-zero number.
    :return: int order of magnitude.
    :raises ValueError: if value is zero (log10 is undefined at zero).
    """
    x = abs(float(value))
    if x == 0.0:
        # math.log10(0) would raise "math domain error"; give a clearer message.
        raise ValueError("order of magnitude is undefined for zero")
    return int(math.floor(math.log10(x)))
def compute_discounted_R(R, discount_rate=1):
    """Returns discounted rewards.

    Walks the reward sequence backwards, accumulating
    ``running = running * discount_rate + R[t]`` so each entry holds the
    discounted sum of its future rewards, then applies the fixed affine
    normalization ``(r + 68) / 88``.

    Args:
        R (1-D array): a list of `reward` at each time step
        discount_rate (float): Will discount the future value by this rate

    Returns:
        discounted_r (1-D array): same shape as input `R`
                                  but the values are normalized and discounted

    Examples:
        #>>> R = [1, 1, 1]
        #>>> compute_discounted_R(R, .99) # before normalization
        [1 + 0.99 + 0.99**2, 1 + 0.99, 1]
    """
    discounted = np.zeros_like(R, dtype=np.float32)
    running_total = 0
    for step in reversed(range(len(R))):
        running_total = running_total * discount_rate + R[step]
        discounted[step] = running_total
    # Fixed affine normalization (constants preserved from the original).
    discounted += 68
    discounted /= 88
    return discounted
def setup_dom_for_char(character, create_dompc=True, create_assets=True,
                       region=None, srank=None, family=None, liege_domain=None,
                       create_domain=True, create_liege=True, create_vassals=True,
                       num_vassals=2):
    """
    Creates both a PlayerOrNpc instance and an AssetOwner instance for
    a given character. If region is defined and create_domain is True,
    we create a domain for the character. Family is the House that will
    be created (or retrieved, if it already exists) as an owner of the
    domain, while 'fealty' will be the Organization that is set as their
    liege.

    Args:
        character: in-game character object; must have a player_ob.
        create_dompc: create a fresh Dominion PC; else reuse the player's.
        create_assets: grant starting money based on social rank.
        region: region for the new domain; without it no domain is made.
        srank: social rank; defaults to the character's stored rank.
        family: house name; defaults to the character's stored family.
        liege_domain: existing domain to serve as liege (suppresses
            creating a new liege).
        create_domain, create_liege, create_vassals: feature toggles.
        num_vassals: number of vassal houses to create.

    Returns:
        The new Domain when one is created, otherwise the PlayerOrNpc.

    Raises:
        TypeError: if the character has no player object.
    """
    pc = character.player_ob
    if not pc:
        raise TypeError("No player object found for character %s." % character)
    if create_dompc:
        dompc = setup_dom_for_player(pc)
    else:
        dompc = pc.Dominion
    if not srank:
        srank = character.db.social_rank
    if create_assets:
        # Starting money scales with social rank.
        amt = starting_money(srank)
        setup_assets(dompc, amt)
    # if region is provided, we will setup a domain unless explicitly told not to
    if create_domain and region:
        if character.db.gender and character.db.gender.lower() == 'male':
            male = True
        else:
            male = False
        if not family:
            family = character.db.family or "%s Family" % character
        # We make vassals if our social rank permits it
        if create_vassals:
            create_vassals = srank < 6
        # if we're setting them as vassals to a house, then we don't create a liege
        liege = None
        if liege_domain:
            create_liege = False
            liege = liege_domain.ruler
        ruler, liege, vassals = setup_family(dompc, family, create_liege=create_liege, create_vassals=create_vassals,
                                             character=character, srank=srank, region=region, liege=liege,
                                             num_vassals=num_vassals)
        # if we created a liege, finish setting them up
        if create_liege:
            name = "%s's Liege" % character
            setup_dom_for_npc(name, srank=srank - 1, region=region, ruler=liege)
        # return the new domain if we were supposed to create one
        return setup_domain(dompc, region, srank, male, ruler)
    else:  # if we're not setting up a new domain, return Dominion object
        return dompc
def cumulative_sum(t):
    """
    Return a new list where the ith element is the sum of all elements up to
    that position in the list. Ex: [1, 2, 3] returns [1, 3, 6]

    Uses itertools.accumulate, which also handles an empty input (the
    previous version raised IndexError on []).
    """
    from itertools import accumulate
    return list(accumulate(t))
def WIS(x, q, x_q, norm=False, log=False, smooth=False):
    """
    Weighted Interval Score of a quantile forecast against an observation.

    Parameters
    ----------
    x : float
        Observed value.
    q : sequence of float
        Quantile levels; assumed sorted ascending and symmetric around the
        median, so level q[i] pairs with q[-i-1] to form a central interval.
    x_q : sequence of float
        Predicted quantile values corresponding to ``q``.
    norm : bool
        If True, divide the score by ``x``.
    log : bool
        If True, return the natural log of the score.
    smooth : bool
        If True, use the smoothed interval score ``smooth_IS`` instead of ``IS``.

    Returns
    -------
    float
        The (optionally normalized / log-transformed) weighted interval score.
    """
    # todo sort q and x_q based on q
    K = len(q) // 2
    # alpha level of each of the K central prediction intervals
    alps = np.array([1 - q[-i - 1] + q[i] for i in range(K)])
    # lower/upper predicted bounds of each interval
    Fs = np.array([[x_q[i], x_q[-i - 1]] for i in range(K)])
    # NOTE(review): the median is taken at index K + 1; for an odd-length
    # quantile grid the median is usually x_q[K] -- confirm this indexing.
    m = x_q[K + 1]
    w0 = 0.5
    wk = alps / 2.0
    if smooth:
        ret = 1.0 / (K + 1.0) * (w0 * 2 * np.abs(x - m) + np.sum(wk * smooth_IS(x, Fs[:, 0], Fs[:, 1], alps)))
    else:
        ret = 1.0 / (K + 1.0) * (w0 * 2 * np.abs(x - m) + np.sum(wk * IS(x, Fs[:, 0], Fs[:, 1], alps)))
    if norm:
        ret /= x
    if log:
        ret = np.log(ret)
    return ret
from typing import Tuple
def update_documents_in_collection(resource) -> Tuple[Response, int]:
    """Endpoint for updating multiple documents.

    The JSON request body carries the modifications; the query-string
    parameters (minus projection/pagination controls) are mapped to the
    query that selects the documents to update.

    Returns:
        200 with matched/modified counts on success;
        404 when the collection name is unknown;
        403 when an empty query would (fatally) touch every document.
    """
    try:
        collection_name = services.check_resource_name(resource)
        request_args = request.args.copy()
        # These args control projection/pagination only, not the filter.
        filters = ["_projection", "_sort", "_limit", "_skip"]
        for item in filters:
            request_args.pop(item, None)
        modifications = request.json
        query = services.map_to_query_operator(request_args)
        matched_items, modified_items = services.update_many_documents(
            db, collection_name, query, modifications  # type: ignore
        )
        return jsonify(matched_items=matched_items, modified_items=modified_items), 200
    except exceptions.ResourceNameNotFoundError as e:
        abort(404, description=e)
    except exceptions.EmptyQueryFatalActionError as e:
        abort(403, description=e)
def generate_prior_data(Pi, a_prior, b_prior):
    """Return column data sources needed to generate the prior distribution.

    Evaluates the Beta(a_prior, b_prior) pdf on a 1000-point grid over
    [0, 1] and also builds the closed polygon used to shade the area under
    the curve.

    Returns
    -------
    (s1, s2, s3) : tuple of ColumnDataSource
        s1 holds the pdf curve, s2 the parameter record, s3 the
        area-under-curve patch.
    """
    # Prior probability distribution on a dense grid
    grid = np.linspace(0, 1, 1000)
    pdf = beta(a_prior, b_prior).pdf(grid)
    # Close the polygon for the shaded patch by dropping back to the baseline.
    patch_x = np.hstack((grid, [1, 0]))
    patch_y = np.hstack((pdf, [0, 0]))
    curve_source = ColumnDataSource(data={'x': grid, 'p': pdf})
    param_source = ColumnDataSource(data={'params':
                                          [Pi, a_prior, b_prior, a_prior, b_prior]})
    patch_source = ColumnDataSource(data={'x': patch_x, 'y': patch_y})
    return curve_source, param_source, patch_source
def InteractionFingerprintAtomic(ligand, protein, strict=True):
    """Interaction fingerprint accomplished by converting the molecular
    interaction of ligand-protein into bit array according to
    the residue of choice and the interaction. For every residue
    (One row = one residue) there are eight bits which represent
    eight type of interactions:

    - (Column 0) hydrophobic contacts
    - (Column 1) aromatic face to face
    - (Column 2) aromatic edge to face
    - (Column 3) hydrogen bond (protein as hydrogen bond donor)
    - (Column 4) hydrogen bond (protein as hydrogen bond acceptor)
    - (Column 5) salt bridges (protein positively charged)
    - (Column 6) salt bridges (protein negatively charged)
    - (Column 7) salt bridges (ionic bond with metal ion)

    NOTE(review): despite the residue wording above, this atomic variant
    accumulates counts per unique protein *atom id* (one row per entry of
    ``protein.atom_dict['id']``).

    Parameters
    ----------
    ligand, protein : oddt.toolkit.Molecule object
        Molecules, which are analysed in order to find interactions.
    strict : bool (deafult = True)
        If False, do not include condition, which informs whether atoms
        form 'strict' H-bond (pass all angular cutoffs).

    Returns
    -------
    InteractionFingerprint : numpy array
        Vector of calculated IFP (size = no residues * 8 type of interaction)
    """
    atomids = np.unique(protein.atom_dict['id'])
    IFP = np.zeros((len(atomids), 8), dtype=np.uint8)
    # hydrophobic contacts (column = 0)
    hydrophobic = hydrophobic_contacts(protein, ligand)[0]['id']
    # print('hydrophobic', hydrophobic)
    # NOTE(review): ids are sorted descending before searchsorted over the
    # ascending `atomids` (pattern repeated below) -- verify this index
    # mapping is the intended one.
    np.add.at(IFP, (np.searchsorted(atomids, np.sort(hydrophobic)[::-1]), 0), 1)
    # aromatic face to face (Column = 1), aromatic edge to face (Column = 2)
    rings, _, strict_parallel, strict_perpendicular = pi_stacking(
        protein, ligand)
    # print('rings', rings)
    if len(rings) > 0:
        np.add.at(IFP, (np.searchsorted(
            atomids, np.sort(rings[strict_parallel]['id'])[::-1]), 1), 1)
        np.add.at(IFP, (np.searchsorted(
            atomids, np.sort(rings[strict_perpendicular]['id'])[::-1]), 2), 1)
    # h-bonds, protein as a donor (Column = 3)
    _, donors, strict0 = hbond_acceptor_donor(ligand, protein)
    # strict=False disables the angular-cutoff mask (None selects all rows).
    if strict is False:
        strict0 = None
    # print('donors', donors)
    np.add.at(IFP, (np.searchsorted(
        atomids, np.sort(donors[strict0]['id'])[::-1]), 3), 1)
    # h-bonds, protein as an acceptor (Column = 4)
    acceptors, _, strict1 = hbond_acceptor_donor(protein, ligand)
    if strict is False:
        strict1 = None
    # print('acceptors', acceptors)
    np.add.at(IFP, (np.searchsorted(
        atomids, np.sort(acceptors[strict1]['id'])[::-1]), 4), 1)
    # salt bridges, protein positively charged (Column = 5)
    plus, _ = salt_bridge_plus_minus(protein, ligand)
    # print('plus', plus)
    np.add.at(IFP, (np.searchsorted(atomids, np.sort(plus['id'])[::-1]), 5), 1)
    # salt bridges, protein negatively charged (Colum = 6)
    _, minus = salt_bridge_plus_minus(ligand, protein)
    # print('minus', minus)
    np.add.at(IFP, (np.searchsorted(atomids, np.sort(minus['id'])[::-1]), 6), 1)
    # salt bridges, ionic bond with metal ion (Column = 7)
    _, metal, strict2 = acceptor_metal(protein, ligand)
    # print('metal', metal)
    if strict is False:
        strict2 = None
    np.add.at(IFP, (np.searchsorted(
        atomids, np.sort(metal[strict2]['id'])[::-1]), 7), 1)
    # Flatten (n_atoms, 8) into the 1-D fingerprint vector.
    return IFP.flatten()
from typing import Union
def add_subject_conditions(
    data: pd.DataFrame, condition_list: Union[SubjectConditionDict, SubjectConditionDataFrame]
) -> pd.DataFrame:
    """Add subject conditions to dataframe.

    This function expects a dataframe with data from multiple subjects and
    information on which subject belongs to which condition.

    Parameters
    ----------
    data : :class:`~pandas.DataFrame`
        dataframe where new index level ``condition`` with subject conditions should be added to
    condition_list : ``SubjectConditionDict`` or ``SubjectConditionDataFrame``
        :obj:`~biopsykit.utils.datatype_helper.SubjectConditionDict` or
        :obj:`~biopsykit.utils.datatype_helper.SubjectConditionDataFrame` with information on which subject belongs to
        which condition

    Returns
    -------
    :class:`~pandas.DataFrame`
        dataframe with new index level ``condition`` indicating which subject belongs to which condition
    """
    # Normalize a SubjectConditionDataFrame into the dict form.
    if is_subject_condition_dataframe(condition_list, raise_exception=False):
        condition_list = condition_list.groupby("condition").groups
    # Validate (raises on malformed input).
    is_subject_condition_dict(condition_list)
    per_condition = {
        condition: data.loc[subject_ids]
        for condition, subject_ids in condition_list.items()
    }
    return pd.concat(per_condition, names=["condition"])
def chinese_remainder(n1: int, r1: int, n2: int, r2: int) -> int:
    """Solve x ≡ r1 (mod n1), x ≡ r2 (mod n2) via the Chinese Remainder Theorem.

    >>> chinese_remainder(5,1,7,3)
    31

    Explanation: 31 is the smallest non-negative number that leaves
    remainder 1 when divided by 5 and remainder 3 when divided by 7.
    """
    # Bezout coefficients for n1 and n2 (requires gcd(n1, n2) == 1).
    (x, y) = extended_euclid(n1, n2)
    modulus = n1 * n2
    combined = r2 * x * n1 + r1 * y * n2
    # Fold into the canonical range [0, modulus).
    return (combined % modulus + modulus) % modulus
def likelihood(angle, displacement, ln_variance, z, s, debug=False):
    """
    Returns theano function from the angle, displacement, ln_variance2 theano.scalars

    Builds the log-likelihood of 2-D points `z` (with per-point covariance
    `s`) lying on a line parametrized by its normal direction (from `angle`)
    and perpendicular `displacement`, with intrinsic scatter
    exp(ln_variance). Follows http://dfm.io/posts/fitting-a-plane/.

    NOTE(review): `variance` enters both inside `sigma2` (via `s + variance`)
    and again in `factor = sigma2 + variance`; the commented-out code below
    suggests this section was under revision -- confirm which term is
    intended. The `debug` flag is unused on the active path.
    """
    variance = tt.exp(ln_variance)
    # gradient = tt.tan(angle)
    # Unit normal to the line.
    v = tt.stacklists([[-np.sin(angle)], [np.cos(angle)]])
    # Perpendicular offset of each point from the line.
    delta = tt.dot(v.T, z.T) - displacement
    sigma2 = tt.dot(v.T, tt.dot(s + variance, v).T)  # put variance2 in the sigma2 term (http://dfm.io/posts/fitting-a-plane/)
    factor = sigma2 + variance
    return -(tt.log(factor) / 2).sum() - ((delta**2) / 2 / factor).sum()
    # variance_matrix = tt.stacklists([[gradient**2, -gradient], [-gradient, 1]])  # http://dfm.io/posts/fitting-a-plane/
    # variance_matrix *= variance / (1 + (gradient**2))
    #
    #
    # ll = -tt.sum(tt.log(sigma2) / 2) - tt.sum(delta*delta / 2 / sigma2)
    # if debug:
    #     return ll, v, sigma2, delta
    # return ll
from typing import Iterable
def query_factorize_industry_df(factorize_arr, market=None):
    """
    Query industries by their factorize sequence numbers and return the
    combined industry data as a pd.DataFrame.

    `ABuIndustries.match_industries_factorize` can be used to look up the
    factorize sequence assigned to each industry; those sequences form the
    industry combination that is queried here.

    eg: find the industries of Chinese companies among all US-market
    industries:

        input:  ABuIndustries.match_industries_factorize('中国',
                    market=EMarketTargetType.E_MARKET_TARGET_US)
        output: [(1, '中国食品、饮料与烟草'),
                 (8, '中国网络游戏'),
                 ...
                 (31, '中国制药、生物科技和生命科学'),
                 (32, '中国金融'),
                 (33, '中国互联网软件服务'),
                 ...
                 (258, '中国酒店餐饮')]

    Then ``ABuIndustries.query_factorize_industry_df((31, 32, 33))`` returns
    a pd.DataFrame containing all stock information for those three
    industries.

    :param factorize_arr: eg: (31, 32, 33) or [31, 32, 33] or 31
    :param market: market to query, eg: EMarketTargetType.E_MARKET_TARGET_US
    :return: pd.DataFrame of the combined industries, or None if nothing
             was found
    """
    if not isinstance(factorize_arr, Iterable):
        # A single factorize sequence: wrap it in a list so both cases are
        # handled uniformly below.
        factorize_arr = [factorize_arr]
    # Obtain the operation handle for the requested market.
    industries_market_op = industries_market(market=market)
    industry_df = None
    for ind in factorize_arr:
        query_industry = industries_market_op.query_industry_factorize(ind)
        # Concatenate every queried industry DataFrame together.
        industry_df = query_industry if industry_df is None else pd.concat([query_industry, industry_df])
    if industry_df is not None:
        # Drop duplicates, e.g. a stock belonging to both industry b and c.
        # noinspection PyUnresolvedReferences
        industry_df.drop_duplicates(inplace=True)
    return industry_df
import yaml
def rbac_assign_roles(email, roles, tenant=None):
    """Assign a list of roles to the user identified by email.

    Shells out to the araalictl `user-role` command; the optional tenant
    scopes the assignment to that tenant.

    :param email: user e-mail address.
    :param roles: iterable of role names (joined comma-separated).
    :param tenant: optional tenant identifier.
    :return: parsed YAML output of the command.
    :raises AssertionError: if the command exits non-zero.
    """
    tstr = " -tenant=%s " % (tenant) if tenant else ""
    roles = ",".join(roles)
    rc = run_command("%s user-role -op assign -user-email %s -roles %s %s" % (
        g_araalictl_path, email, roles, tstr),
        result=True, strip=False)
    assert rc[0] == 0, rc[1]
    return yaml.load(rc[1], yaml.SafeLoader)
def haversine_np(lon1, lat1, lon2, lat2):
    """
    Calculate the great circle distance (in kilometers) between two points
    on the earth, specified in decimal degrees. Vectorized: accepts scalars
    or equally-shaped numpy arrays.

    Reference:
    https://stackoverflow.com/a/29546836/7657658
    https://gist.github.com/mazzma12/6dbcc71ab3b579c08d66a968ff509901
    """
    lon1, lat1, lon2, lat2 = (np.radians(v) for v in (lon1, lat1, lon2, lat2))
    half_dlon = (lon2 - lon1) / 2.0
    half_dlat = (lat2 - lat1) / 2.0
    # Haversine of the central angle.
    hav = np.sin(half_dlat) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(half_dlon) ** 2
    central_angle = 2 * np.arcsin(np.sqrt(hav))
    # Mean earth radius in km.
    return 6371 * central_angle
import typing
import os
def basename_wo_ext(
        path: typing.Union[str, bytes],
        *,
        ext: str = None
) -> str:
    """File basename without its file extension.

    Args:
        path: file or directory name
        ext: explicit extension to strip (with or without the leading dot);
            if it does not match, the basename is returned unchanged

    Returns:
        basename of the directory or file without the extension

    Example:
        >>> path = '/test/file.wav'
        >>> basename_wo_ext(path)
        'file'
    """
    path = safe_path(path)
    path = os.path.basename(path)
    if ext is None:
        return os.path.splitext(path)[0]
    # Normalize 'mp3' -> '.mp3' before stripping.
    suffix = ext if ext.startswith('.') else '.' + ext
    if path.endswith(suffix):
        path = path[:-len(suffix)]
    return path
import re
def extract_current_step(current_status_string):
    """Attempt to extract the current step number from the given docker
    build status string.

    Handles both marker formats with a single pattern:

        Older format: `Step 12 :`
        Newer format: `Step 4/13 :`

    Returns the step number as an int, or None when the string carries no
    step marker.
    """
    step_increment = re.search(r"Step ([0-9]+)(?:/[0-9]+)? :", current_status_string)
    if step_increment:
        return int(step_increment.group(1))
    return None
import warnings
def unique1d(ar1, return_index=False, return_inverse=False):
    """
    Find the unique elements of an array.

    Parameters
    ----------
    ar1 : array_like
        This array will be flattened if it is not already 1-D.
    return_index : bool, optional
        If True, also return the indices against `ar1` that result in the
        unique array.
    return_inverse : bool, optional
        If True, also return the indices against the unique array that
        result in `ar1`.

    Returns
    -------
    unique : ndarray
        The unique values.
    unique_indices : ndarray, optional
        The indices of the unique values. Only provided if `return_index` is
        True.
    unique_inverse : ndarray, optional
        The indices to reconstruct the original array. Only provided if
        `return_inverse` is True.

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions
                            for performing set operations on arrays.

    Examples
    --------
    >>> np.unique1d([1, 1, 2, 2, 3, 3])
    array([1, 2, 3])
    >>> a = np.array([[1, 1], [2, 3]])
    >>> np.unique1d(a)
    array([1, 2, 3])

    Reconstruct the input from unique values:

    >>> np.unique1d([1,2,6,4,2,3,2], return_index=True)
    >>> x = [1,2,6,4,2,3,2]
    >>> u, i = np.unique1d(x, return_inverse=True)
    >>> u
    array([1, 2, 3, 4, 6])
    >>> i
    array([0, 1, 4, 3, 1, 2, 1])
    >>> [u[p] for p in i]
    [1, 2, 6, 4, 2, 3, 2]
    """
    if return_index:
        warnings.warn("The order of the output arguments for "
                      "`return_index` has changed. Before, "
                      "the output was (indices, unique_arr), but "
                      "has now been reversed to be more consistent.")

    ar = np.asanyarray(ar1).flatten()
    if ar.size == 0:
        # Fix: `np.bool` was removed in NumPy 1.24; the builtin `bool` is
        # the documented replacement and behaves identically as a dtype.
        if return_inverse and return_index:
            return ar, np.empty(0, bool), np.empty(0, bool)
        elif return_inverse or return_index:
            return ar, np.empty(0, bool)
        else:
            return ar

    if return_inverse or return_index:
        perm = ar.argsort()
        aux = ar[perm]
        # True at the first occurrence of each distinct value in sorted order.
        flag = np.concatenate(([True], aux[1:] != aux[:-1]))
        if return_inverse:
            # Position of each element within the unique array.
            iflag = np.cumsum(flag) - 1
            iperm = perm.argsort()
            if return_index:
                return aux[flag], perm[flag], iflag[iperm]
            else:
                return aux[flag], iflag[iperm]
        else:
            return aux[flag], perm[flag]

    else:
        ar.sort()
        flag = np.concatenate(([True], ar[1:] != ar[:-1]))
        return ar[flag]
def login_required(func):
    """
    Decorator that requires the current user to be logged in AND active.

    Anonymous or inactive users are routed through the login manager's
    unauthorized handler; the check is skipped entirely when login is
    disabled on the app.

    :param func: view function to protect
    :return: wrapped view function
    """
    @wraps(func)
    def decorated_view(*args, **kwargs):
        if current_app.login_manager._login_disabled:
            return func(*args, **kwargs)
        # Reject when the user is not authenticated OR not active. The
        # previous `not A and not B` let authenticated-but-inactive users
        # through, contradicting the documented intent.
        elif not (current_user.is_authenticated and current_user.is_active):
            return current_app.login_manager.unauthorized()
        return func(*args, **kwargs)
    return decorated_view
def prepare_statement(template, values):
    """Correctly escape things and keep as unicode.

    pyscopg2 has a default encoding of `latin-1`:
    https://github.com/psycopg/psycopg2/issues/331
    """
    def _quote(value):
        # Force utf-8 so getquoted() round-trips non-latin-1 text.
        adapted = adapt(value)
        adapted.encoding = 'utf-8'
        return adapted.getquoted().decode()
    return template.format(*(_quote(value) for value in values))
def addgroup(request):
    """Render the add-group form page."""
    context = {}
    return render(request, 'addgroup.htm', context=context)
def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:
    """Fixes the `_CachedFunction.__call__` signature to be correct.

    It already has *almost* the correct signature, except:

        1. the `self` argument needs to be marked as "bound";
        2. any `cache_context` argument should be removed;
        3. an optional keyword argument `on_invalidated` should be added.
    """
    # First we mark this as a bound function signature.
    signature = bind_self(ctx.default_signature)

    # Secondly, we remove any "cache_context" args.
    #
    # Note: We should be only doing this if `cache_context=True` is set, but if
    # it isn't then the code will raise an exception when its called anyway, so
    # its not the end of the world.
    context_arg_index = None
    for idx, name in enumerate(signature.arg_names):
        if name == "cache_context":
            context_arg_index = idx
            break

    arg_types = list(signature.arg_types)
    arg_names = list(signature.arg_names)
    arg_kinds = list(signature.arg_kinds)

    # Bugfix: `if context_arg_index:` skipped the removal when the argument
    # sat at index 0 (possible once bind_self has stripped `self`), so test
    # explicitly against None.
    if context_arg_index is not None:
        arg_types.pop(context_arg_index)
        arg_names.pop(context_arg_index)
        arg_kinds.pop(context_arg_index)

    # Third, we add an optional "on_invalidate" argument.
    #
    # This is a callable which accepts no input and returns nothing.
    calltyp = CallableType(
        arg_types=[],
        arg_kinds=[],
        arg_names=[],
        ret_type=NoneType(),
        fallback=ctx.api.named_generic_type("builtins.function", []),
    )

    arg_types.append(calltyp)
    arg_names.append("on_invalidate")
    arg_kinds.append(ARG_NAMED_OPT)  # Arg is an optional kwarg.

    signature = signature.copy_modified(
        arg_types=arg_types,
        arg_names=arg_names,
        arg_kinds=arg_kinds,
    )

    return signature
def load_valid_data_full():
    """
    Load the full validation split from the HDF5 file at HDF5_PATH_VALID.

    Returns:
        (images_valid, labels_valid, data_num_valid): validation images,
        one-hot 2-class labels transposed to shape (N, 2), and the number
        of validation samples.

    NOTE(review): the h5py file handle is never closed, and `acns_valid`
    is read but not returned -- confirm whether both are intentional.
    """
    hdf5_file_valid = h5py.File(HDF5_PATH_VALID, "r")
    data_num_valid = hdf5_file_valid["valid_img"].shape[0]
    images_valid = np.array(hdf5_file_valid["valid_img"][:])  # your test set features
    labels_valid = np.array(hdf5_file_valid["valid_labels"][:])  # your test set labels
    acns_valid = np.array(hdf5_file_valid["valid_acns"][:])
    # Convert integer class labels to one-hot (2 classes), samples as rows.
    labels_valid = vol_inception_utils.convert_to_one_hot(labels_valid, 2).T
    return images_valid, labels_valid, data_num_valid
import json
def import_slab_structures(filename):
    """Read 2D water structures from file and return a dictionary of it.

    Parameters
    ----------
    filename : str
        Filename of the file containing the bulk structures

    Returns
    -------
    filedict : dict
        Dictionary of the structures; each key maps to a list of
        (metadata, pymatgen Structure) tuples, where the last entry of each
        metadata list is a POSCAR path relative to the JSON file's directory.
    """
    filedict = {}
    with open(filename, "r") as o:
        data = json.load(o)
    for i, items in data.items():
        filedict[i] = []
        for v in items:
            #struct = Poscar.from_file("./"+v[-1]).structure
            # Resolve the POSCAR path relative to the JSON file's directory.
            struct = Poscar.from_file(('/').join(filename.split('/')[:-1]) + '/' + v[-1]).structure
            filedict[i].append((v, struct))
    return filedict
import re
def handle_email(text):
    """Replace e-mail addresses in *text* with the EMAIL placeholder.

    The pattern covers full addresses (dots, plus signs and hyphens in the
    local part, dotted domains), so `user.name@example.com` is replaced in
    one piece; the previous `\\w+@\\w+` pattern matched only
    `name@example`, leaking `.com` into the output.

    Args:
        text: input string that may contain e-mail addresses.

    Returns:
        The text with each address replaced by ``Replacement.EMAIL.value``.
    """
    return re.sub(r'[\w.+-]+@[\w-]+(?:\.[\w-]+)*', Replacement.EMAIL.value, text)
def LowerCustomDatatypes():
    """Lower custom datatypes.

    Thin wrapper around the C++ FFI transform pass.
    See tvm::datatypes::Registry for more information on adding custom datatypes.

    Returns
    -------
    fpass : tvm.ir.transform.Pass
        The result pass
    """
    return _ffi_api.LowerCustomDatatypes()
import typing
def apply_if_or_value(
        maybe_value: typing.Optional[typing.Any],
        operation: typing.Callable[[typing.Any], typing.Any],
        fallback_value: typing.Any,
) -> typing.Any:
    """Apply *operation* to *maybe_value*, or return *fallback_value* if it is None.

    Almost a convenience composition of or_value over apply_if, except that
    it *will* return None and not fallback_value if invoking operation on
    maybe_value returns None.

    Parameters
    ----------
    maybe_value
        The value to test and feed to operation if it is not None.
    operation: callable object
        The operation to apply to maybe_value if it is not None.
    fallback_value
        The value to return if maybe_value is None.

    Returns
    -------
    result
        The result of invoking operation on maybe_value, or fallback_value.
    """
    if maybe_value is None:
        return fallback_value
    return operation(maybe_value)
import torch
def make_pyg_dataset_from_dataframe(
    df: pd.DataFrame, list_n: list, list_e: list, paired=False, mode: str = "all"
) -> list:
    """Take a DataFrame, a list of node feature names and a list of edge
    feature names and return a list of PyG Data objects.

    Parameters
    ----------
    df
        DataFrame containing "protonated" and "deprotonated" columns with mol
        objects, as well as "pKa", "marvin_atom" and "ID" columns
    list_n
        list of node features to be used
    list_e
        list of edge features to be used
    paired
        if True, build paired (protonated + deprotonated) graphs; if False,
        build single-molecule graphs from the column selected by ``mode``
    mode
        when ``paired`` is False, which column to use: "protonated" or
        "deprotonated"

    Returns
    -------
    list
        contains all molecules from df as pyG Graph data

    Raises
    ------
    RuntimeError
        if ``paired`` is False and ``mode`` is not one of
        "protonated" / "deprotonated"
    """
    print(f"Generating data with paired boolean set to: {paired}")
    # Validate up front so the loop below never sees a bad mode.
    if paired is False and mode not in ["protonated", "deprotonated"]:
        raise RuntimeError(f"Wrong combination of {mode} and {paired}")
    selected_node_features = make_features_dicts(NODE_FEATURES, list_n)
    selected_edge_features = make_features_dicts(EDGE_FEATURES, list_e)

    dataset = []
    if paired:
        for i in df.index:
            m = mol_to_paired_mol_data(
                df.protonated[i],
                df.deprotonated[i],
                df.marvin_atom[i],
                selected_node_features,
                selected_edge_features,
            )
            m.reference_value = torch.tensor([df.pKa[i]], dtype=torch.float32)
            m.ID = df.ID[i]
            m.to(device=DEVICE)  # NOTE: put everything on the GPU
            dataset.append(m)
    else:
        print(f"Generating data with {mode} form")
        # The two branches only differed in which column was read, so pick
        # the column once instead of duplicating the whole loop body.
        mols = df.protonated if mode == "protonated" else df.deprotonated
        for i in df.index:
            # The molecular charge returned alongside the graph is unused.
            m, _ = mol_to_single_mol_data(
                mols[i],
                df.marvin_atom[i],
                selected_node_features,
                selected_edge_features,
            )
            m.reference_value = torch.tensor([df.pKa[i]], dtype=torch.float32)
            m.ID = df.ID[i]
            m.to(device=DEVICE)  # NOTE: put everything on the GPU
            dataset.append(m)
    return dataset
def ucfirst(string: str):
    """Return the string with the first character in upper case.

    Delegates to the module-level helper ``_change_first_case`` with
    ``upper=True`` (helper not visible here -- presumably it also backs a
    lower-case counterpart; confirm against its definition).
    """
    return _change_first_case(string, upper=True)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.