content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def bassGura(A, B, desiredPoles, C=None, E=None):
    r"""
    Use the Bass-Gura formalism to compute the gain matrix, G, that places the
    poles of the closed-loop meta-system :math:`\hat A - \hat B G` at desired
    locations.
    Inputs:
        A (numpy matrix/array, type=real) - system state matrix
        B (numpy matrix/array, type=real) - system control matrix
        desiredPoles (numpy matrix/array, type=complex) - desired pole locations
        C (numpy matrix/array, type=real) - (default=None) system measurement matrix
        E (numpy matrix/array, type=real) - (default=None) exogeneous input matrix
    Returns:
        (numpy matrix/array, type=real) - full gain matrix
    Raises:
        TypeError - if exactly one of C, E is provided (they must be given together)
        ValueError - (reraises) if one of the called methods raises ValueError
    """
    A = A.astype('float')
    B = B.astype('float')
    if C is None and E is None:
        # No exogenous input: the plain Bass-Gura gain is the full answer.
        return bassGuraNoExo(A, B, desiredPoles)
    if (C is None) != (E is None):
        # Exactly one of C, E was supplied -- an inconsistent request.
        raise TypeError('C, E must either both be None or both be numpy arrays')
    C = C.astype('float')
    E = E.astype('float')
    # Pole-placement gain first, then the exogenous-input compensation gain.
    G = bassGuraNoExo(A, B, desiredPoles)
    G0 = bassGuraExo(A, B, C, G, E)
    return np.hstack([G, G0])
import os
def dwarf(allstar,mhrange=[-2.5,1.0],loggrange=[3.8,5.5],teffrange=[3000,7500],apokasc_cat='APOKASC_cat_v4.4.2.fits',out='logg',calib=False) :
    """ logg calibration for dwarfs, from asteroseismic and isochrones

    Args:
        allstar : ASPCAP allStar-like structure with PARAM/FPARAM, SNR, APOGEE_ID columns
        mhrange (list) : [min,max] metallicity window for the calibration sample
        loggrange (list) : [min,max] log g window for the calibration sample
        teffrange (list) : [min,max] Teff window for the calibration sample
        apokasc_cat (str) : APOKASC catalog filename under $APOGEE_DIR/data/apokasc/
        out (str or None) : root name for output plot files; None suppresses saving
        calib (bool) : if True use the calibrated 'PARAM' array, else raw 'FPARAM'

    Returns:
        dict : calibration ranges plus 2D surface-fit parameters ('msfit') and
        error-fit parameters ('errpar')
    """
    if calib :
        param = 'PARAM'
    else :
        param = 'FPARAM'
    # Select the calibration sample: good stars within the requested ranges
    gd=apselect.select(allstar,badval=['STAR_BAD'],mh=mhrange,logg=loggrange,teff=teffrange,raw=True)
    allstar=allstar[gd]
    # Keep only combined (VISIT == 0) entries if the column exists
    try:
        gd=np.where(allstar['VISIT'] == 0)[0]
        allstar=allstar[gd]
    except: pass
    # match ASPCAP with APOKASC, and get RC/RGB stars
    apokasc=fits.open(os.environ['APOGEE_DIR']+'/data/apokasc/'+apokasc_cat)[1].data
    # strip off .XXXX if we have it, e.g. from calibration fields where we have added .FIELD
    apogee_id = np.array(np.core.defchararray.split(allstar['APOGEE_ID'],'.').tolist())[:,0]
    i1,i2=match.match(apogee_id,apokasc['2MASS_ID'])
    # now get isochrone logg from lower main sequence
    isologg=isochrone(allstar,snrbd=50)
    isochrone_id = np.array(np.core.defchararray.split(isologg['APOGEE_ID'],'.').tolist())[:,0]
    j1,j2=match.match(apogee_id,isochrone_id)
    # plots of gravity differences
    fig,ax=plots.multi(2,2)
    plots.plotc(ax[0,0],allstar[param][i1,1],allstar[param][i1,1]-apokasc['LOGG_DW'][i2],allstar[param][i1,0],yr=[-1,1],
                xt='log g',yt=r'$\Delta$logg',zt='Teff',colorbar=True,xr=[3,6],zr=[4000,7000])
    plots.plotc(ax[0,1],allstar[param][i1,3],allstar[param][i1,1]-apokasc['LOGG_DW'][i2],allstar[param][i1,0],yr=[-1,1],
                xt='[M/H]',yt=r'$\Delta$logg',zt='Teff',colorbar=True,xr=[-2,0.5],zr=[4000,7000])
    plots.plotc(ax[1,0],allstar[param][i1,0],allstar[param][i1,1]-apokasc['LOGG_DW'][i2],10.**allstar[param][i1,2],yr=[-1,1],
                xt='Teff',yt=r'$\Delta$logg',zt='vmicro',colorbar=True,xr=[3000,8000],zr=[0.5,2.5])
    plots.plotc(ax[1,1],allstar[param][i1,0],allstar[param][i1,1]-apokasc['LOGG_DW'][i2],allstar[param][i1,3],yr=[-1,1],
                xt='Teff',yt=r'$\Delta$logg',zt='[M/H]',colorbar=True,xr=[3000,8000],zr=[-2,0.5])
    # only add main sequence in Teff plot
    plots.plotc(ax[1,1],allstar[param][j1,0],allstar[param][j1,1]-isologg['ISOLOGG'][j2],allstar[param][j1,3],zr=[-2,0.5])
    plt.tight_layout()
    # 2D fit as f(Teff,[M/H]), using both APOKASC and isochrone log g
    gd=np.where(apokasc['LOGG_DW'][i2] > -99)[0]
    tfit=allstar[param][i1[gd],0]
    mhfit=allstar[param][i1[gd],3]
    diff=allstar[param][i1[gd],1]-apokasc['LOGG_DW'][i2[gd]]
    snrfit=allstar['SNR'][i1[gd]]
    # do fit from high S/N, but get uncertainties from all
    gd=np.where(allstar['SNR'][j1] > 300)[0]
    msfit = fit.fit2d(np.append(tfit,allstar[param][j1[gd],0]),
                      np.append(mhfit,allstar[param][j1[gd],3]),
                      np.append(diff,allstar[param][j1[gd],1]-isologg['ISOLOGG'][j2[gd]]),degree=1,reject=0.3)
    # for uncertainties, all all S/N
    tfit=np.append(tfit,allstar[param][j1,0])
    mhfit=np.append(mhfit,allstar[param][j1,3])
    diff=np.append(diff,allstar[param][j1,1]-isologg['ISOLOGG'][j2])
    snrfit=np.append(snrfit,allstar['SNR'][j1])
    # error model fit; S/N clipped at 249 so the highest-S/N bin saturates
    mserrpar = err.errfit(tfit,np.clip(snrfit,0.,249.),mhfit,diff-msfit(tfit,mhfit),
                          out=out+'_ms',title='log g',zr=[0,0.2])
    #mserrpar=np.zeros([4])
    # plot the relation at [M/H] = 0, -1, +0.5
    tfit=np.arange(teffrange[0],teffrange[1],10)
    mhfit=tfit*0.
    plots.plotl(ax[1,1],tfit,msfit(tfit,mhfit),color='orange',linewidth=1.5)
    mhfit=tfit*0-1.
    plots.plotl(ax[1,1],tfit,msfit(tfit,mhfit),color='c',linewidth=1.5)
    mhfit=tfit*0+0.5
    plots.plotl(ax[1,1],tfit,msfit(tfit,mhfit),color='r',linewidth=1.5)
    ax[1,1].grid()
    if out is not None:
        fig.savefig(out+'_dwarfs.png')
        plt.close()
    # HR diagram plot color coded by asteroseismic gravity differences
    hrfig,hrax=plots.multi(1,2,hspace=0.001)
    gd=np.where(apokasc['APOKASC2_LOGG'][i2] > -99)[0]
    plots.plotc(hrax[0],allstar[param][i1[gd],0],allstar[param][i1[gd],1],allstar[param][i1[gd],1]-apokasc['APOKASC2_LOGG'][i2[gd]],
                xr=[8000,3000],yr=[6,0],zr=[-0.5,0.5],colorbar=True,zt=r'$\Delta$ logg',xt='Teff',yt='logg')
    plots.plotc(hrax[0],allstar[param][j1,0],allstar[param][j1,1],allstar[param][j1,1]-isologg['ISOLOGG'][j2],zr=[-0.5,0.5])
    plots.plotc(hrax[1],allstar[param][i1[gd],0],apokasc['APOKASC2_LOGG'][i2[gd]],allstar[param][i1[gd],1]-apokasc['APOKASC2_LOGG'][i2[gd]],
                xr=[8000,3000],yr=[6,0],zr=[-0.5,0.5],colorbar=True,zt=r'$\Delta$ logg',xt='Teff',yt='APOKASC logg')
    # use asteroseismic logg on y axis
    gd=np.where(apokasc['LOGG_DW'][i2] > -99)[0]
    plots.plotc(hrax[0],allstar[param][i1[gd],0],allstar[param][i1[gd],1],allstar[param][i1[gd],1]-apokasc['LOGG_DW'][i2[gd]],
                xr=[8000,3000],yr=[6,0],zr=[-0.5,0.5])
    plots.plotc(hrax[1],allstar[param][i1[gd],0],apokasc['LOGG_DW'][i2[gd]],allstar[param][i1[gd],1]-apokasc['LOGG_DW'][i2[gd]],
                xr=[8000,3000],yr=[6,0],zr=[-0.5,0.5])
    if out is not None:
        hrfig.savefig(out+'_all.png')
        plt.close()
    return {'calloggmin' : loggrange[0], 'calloggmax' : loggrange[1], 'loggmin' : loggrange[0], 'loggmax' : loggrange[1],
            'mhmin' : mhrange[0], 'mhmax' : mhrange[1], 'temin': teffrange[0], 'temax' : teffrange[1],
            'msfit' : msfit.parameters, 'errpar' : mserrpar }
def get_marks(record, transcript):
    """ (str, str) -> list of float
    Return a list of final course marks from transcript that can be used for
    admission, computed based on information in record.
    >>> record = 'Eyal de Lara,Fort McMurray Composite High,2016,MAT,90,92,ENG,92,NE,BIO,77,85,BSci'
    >>> transcript = 'MAT,90,92,ENG,92,NE,BIO,77,85'
    >>> get_marks(record, transcript)
    [91.0, 92.0, 81.0]
    >>> record = 'Paul Gries,Ithaca High School,1986,BIO,61,70,CHM,80,90,CAT,95,96,BEng'
    >>> transcript = 'BIO,61,70,CHM,80,90,CAT,95,96'
    >>> get_marks(record, transcript)
    [65.5, 85.0]
    """
    admissible_codes = ['BIO', 'CHM', 'ENG', 'MAT', 'SCI']
    final_marks = []
    for position in range(1, NUM_APPLICATION_COURSES + 1):
        course = af.extract_course(transcript, position)
        # Only courses whose code is on the admissible list contribute a mark.
        for code in admissible_codes:
            marks_pair = af.get_both_marks(course, code)
            if marks_pair:
                # marks_pair is '<course>,<exam>'; slice out the two halves.
                coursework = marks_pair[:MARK_LENGTH]
                exam = marks_pair[MARK_LENGTH + 1:]
                final_marks.append(af.get_final_mark(record, coursework, exam))
    return final_marks
from typing import Any
from typing import Tuple
from re import T
def zip(*iterables: Any) -> "Iter[Tuple[T, ...]]":
    """ Replacement for the builtin ``zip`` function. This version returns
    an instance of Iter_ to allow further iterable chaining."""
    # Intentionally shadows builtins.zip; delegates to the stashed builtin
    # (``_zip``) and wraps the result in ``Iter`` so callers can keep chaining.
    return Iter(_zip(*iterables))
def loadConversations(fileName, lines,
                      fields=["character1ID", "character2ID", "movieID", "utteranceIDs"],
                      delimiter=" +++$+++ "):
    """
    Load movie conversations and attach the referenced line objects.
    Args:
        fileName (str): file to load
        lines (dict): mapping of line id -> line object, used to resolve utterance ids
        fields (list<str>): fields to extract, in file-column order
        delimiter (str): column separator used in the file
    Return:
        list<dict>: one dict per conversation, with the extracted fields plus
        a "lines" key holding the resolved line objects
    """
    import ast  # local import: only needed to parse the utterance-id list literal
    conversations = []
    with open(fileName, 'r', encoding='iso-8859-1') as f:
        for line in f:
            values = line.split(delimiter)
            # Extract fields
            convObj = {}
            for i, field in enumerate(fields):
                convObj[field] = values[i]
            # Parse the string list (convObj["utteranceIDs"] == "['L598485', 'L598486', ...]").
            # ast.literal_eval only accepts Python literals, unlike eval() which
            # would execute arbitrary code from the data file.
            lineIds = ast.literal_eval(convObj["utteranceIDs"])
            # Reassemble lines
            convObj["lines"] = []
            for lineId in lineIds:
                convObj["lines"].append(lines[lineId])
            conversations.append(convObj)
    return conversations
from typing import OrderedDict
def optimize_schedule_rotations(schedule, sregistry):
    """
    Transform the schedule such that the tensor temporaries "rotate" along
    the outermost Dimension. This trades a parallel Dimension for a smaller
    working set size.
    """
    # The rotations Dimension is the outermost
    ridx = 0
    rmapper = defaultdict(list)
    processed = []
    # Group scheduled aliases that write to the same IterationSpace
    for k, group in groupby(schedule, key=lambda i: i.writeto):
        g = list(group)
        try:
            candidate = k[ridx]
        except IndexError:
            # Degenerate alias (a scalar)
            processed.extend(g)
            continue
        d = candidate.dim
        try:
            ds = schedule.dmapper[d]
        except KeyError:
            # Can't do anything if `d` isn't a BlockDimension
            processed.extend(g)
            continue
        n = candidate.min_size
        assert n > 0
        # Build the rotation machinery: `ii` cycles over the rotation window,
        # `cd` iterates the rotations, `dsi` indexes into the rotated buffer
        iis = candidate.lower
        iib = candidate.upper
        ii = ModuloDimension('%sii' % d, ds, iis, incr=iib)
        cd = CustomDimension(name='%s%s' % (d, d), symbolic_min=ii, symbolic_max=iib,
                             symbolic_size=n)
        dsi = ModuloDimension('%si' % ds, cd, cd + ds - iis, n)
        mapper = OrderedDict()
        for i in g:
            # Update `indicess` to use `xs0`, `xs1`, ...
            mds = []
            for indices in i.indicess:
                v = indices[ridx]
                try:
                    md = mapper[v]
                except KeyError:
                    # First time this index value is seen: mint a fresh
                    # ModuloDimension for it (name uniqueness via sregistry)
                    name = sregistry.make_name(prefix='%sr' % d.name)
                    md = mapper.setdefault(v, ModuloDimension(name, ds, v, n))
                mds.append(md)
            indicess = [indices[:ridx] + [md] + indices[ridx + 1:]
                        for md, indices in zip(mds, i.indicess)]
            # Update `writeto` by switching `d` to `dsi`
            intervals = k.intervals.switch(d, dsi).zero(dsi)
            sub_iterators = dict(k.sub_iterators)
            sub_iterators[d] = dsi
            writeto = IterationSpace(intervals, sub_iterators)
            # Transform `alias` by adding `i`
            pivot = i.pivot.xreplace({d: d + cd})
            # Extend `ispace` to iterate over rotations
            d1 = writeto[ridx+1].dim  # Note: we're by construction in-bounds here
            intervals = IntervalGroup(Interval(cd, 0, 0), relations={(d, cd, d1)})
            rispace = IterationSpace(intervals, {cd: dsi}, {cd: Forward})
            aispace = i.ispace.zero(d)
            aispace = aispace.augment({d: mds + [ii]})
            ispace = IterationSpace.union(rispace, aispace)
            processed.append(ScheduledAlias(pivot, writeto, ispace, i.aliaseds,
                                            indicess, i.score))
        # Update the rotations mapper
        rmapper[d].extend(list(mapper.values()))
    return schedule.rebuild(*processed, rmapper=rmapper)
def parse_config(config_fn):
    """Deprecated: thin alias for ``parse_cfg_file``.
    Called from pbsmrtpipe, for now.

    :param config_fn: path of the config file to parse
    :return: whatever ``parse_cfg_file`` returns for this file
    """
    return parse_cfg_file(config_fn)
def transformer_decoder_layers(name,
                               n_layers,
                               decoder_input,
                               **kwargs):
    """A transformation block composed of transformer decoder layers.

    Stacks `n_layers` decoder layers under a reusable variable scope and
    applies the hparams-configured output preprocessing at the end.
    """
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        hparams = kwargs["hparams"]
        x = decoder_input
        with tf.variable_scope("decoder", reuse=tf.AUTO_REUSE):
            # Each layer consumes the previous layer's output.
            for idx in range(n_layers):
                x = transformer_decoder_layer(
                    decoder_input=x,
                    layer_idx=idx,
                    **kwargs)
            x = common_layers.layer_preprocess(x, hparams)
        return x
import os
import logging
import re
def group_filenames(filenames):
    """Given a list of JWST filenames, group together files from the
    same exposure. These files will share the same ``program_id``,
    ``observation``, ``visit``, ``visit_group``, ``parallel_seq_id``,
    ``activity``, ``exposure``, and ``suffix``. Only the ``detector``
    will be different. Currently only NIRCam files for a given exposure
    will be grouped together. For other instruments multiple files for
    a given exposure will be kept separate from one another and no
    mosaic will be made. Stage 3 files will remain as individual
    files, and will not be grouped together with any other files.
    Parameters
    ----------
    filenames : list
        list of filenames
    Returns
    -------
    grouped : list
        grouped list of filenames where each element is a list and
        contains the names of filenames with matching exposure
        information.
    """
    # Some initializations
    grouped, matched_names = [], []
    filenames.sort()
    # Loop over each file in the list of good files
    for filename in filenames:
        # Holds list of matching files for exposure
        subgroup = []
        # Generate string to be matched with other filenames
        try:
            filename_dict = filename_parser(os.path.basename(filename))
        except ValueError:
            logging.warning('Could not parse filename for {}'.format(filename))
            # Skip only this file; a `break` here would silently drop every
            # remaining filename in the (sorted) list.
            continue
        # If the filename was already involved in a match, then skip
        if filename not in matched_names:
            # For stage 3 filenames, treat individually
            if 'stage_3' in filename_dict['filename_type']:
                matched_names.append(filename)
                subgroup.append(filename)
            # Group together stage 1 and 2 filenames
            elif filename_dict['filename_type'] == 'stage_1_and_2':
                # Determine detector naming convention
                if filename_dict['detector'].upper() in NIRCAM_SHORTWAVE_DETECTORS:
                    detector_str = 'NRC[AB][1234]'
                elif filename_dict['detector'].upper() in NIRCAM_LONGWAVE_DETECTORS:
                    detector_str = 'NRC[AB]5'
                else:  # non-NIRCam detectors
                    detector_str = filename_dict['detector'].upper()
                # Build pattern to match against
                base_output_name = get_base_output_name(filename_dict)
                match_str = '{}{}_{}.fits'.format(base_output_name, detector_str, filename_dict['suffix'])
                match_str = os.path.join(os.path.dirname(filename), match_str)
                pattern = re.compile(match_str, re.IGNORECASE)
                # Try to match the substring to each good file
                for file_to_match in filenames:
                    if pattern.match(file_to_match) is not None:
                        matched_names.append(file_to_match)
                        subgroup.append(file_to_match)
            else:
                # filename_dict['filename_type'] may be 'guider' or 'time_series', for instance. Treat individually.
                matched_names.append(filename)
                subgroup.append(filename)
        if len(subgroup) > 0:
            grouped.append(subgroup)
    return grouped
def check_can_unpublish_activity(user, activity_rights):
    """Checks whether the user can unpublish given activity.
    Args:
        user: UserActionsInfo. Object having user_id, role and actions for
            given user.
        activity_rights: ActivityRights or None. Rights object for the given
            activity.
    Returns:
        bool. Whether the user can unpublish given activity.
    """
    # No rights object, or a community-owned activity, can never be unpublished.
    if activity_rights is None or activity_rights.community_owned:
        return False
    # Otherwise the activity must be live and the user must hold the
    # unpublish-any-public-activity permission.
    return (
        activity_rights.is_published()
        and role_services.ACTION_UNPUBLISH_ANY_PUBLIC_ACTIVITY in user.actions)
def gql_add_user_attendance(time: str, user_id: int = None, date: str = None, attendance_id: int = None,
                            is_clock_in: bool = True, comment: str = None):
    """
    GQL mutation to insert/update attendance.
    :param time: time string in HH:MM:SS format
    :param user_id: unique user id (required when clocking in)
    :param date: date string in YYYY-MM-DD format (required when clocking in)
    :param attendance_id: unique attendance id (required when clocking out)
    :param is_clock_in: True inserts a new record (clock in); False updates an
        existing record (clock out). Default True.
    :param comment: comment on attendance record
    :return: GQL result dict, an error string for missing arguments, or the
        raised exception object if the GQL call fails
    """
    if is_clock_in:
        # Clock in: insert a brand-new attendance row.
        if user_id is None or date is None or time is None:
            return "ERROR: arguments missing, user_id, date and time are required."
        mutation = gql(
            '''
            mutation addNewAttendance($user_id: Int!, $time: time!, $date: date!, $comment: String) {
                insert_attendance_one(object: {clock_in: $time, user_id: $user_id, date: $date, comment: $comment}) {
                    id
                    user_id
                    date
                }
            }
            '''
        )
        payload = {
            "user_id": user_id,
            "date": date,
            "time": time,
            "comment": comment
        }
    else:
        # Clock out: update the existing row identified by attendance_id.
        if attendance_id is None or time is None:
            return "ERROR: arguments missing, attendance_id and time are required."
        mutation = gql(
            '''
            mutation updateAttendance($id: Int!, $time: time!, $comment: String) {
                update_attendance_by_pk(pk_columns: {id: $id}, _set: {clock_out: $time, comment: $comment}) {
                    id
                    user_id
                    date
                }
            }
            '''
        )
        payload = {
            "id": attendance_id,
            "time": time,
            "comment": comment
        }
    try:
        return client.execute(mutation, variable_values=payload)
    except Exception as error:
        return error
def make_pipeline(steps, params):
    """
    Build an sklearn Pipeline from named steps and configure its parameters.
    Args:
        steps (dict): Mapping of step name -> fully-qualified estimator name,
            in pipeline order. Each value is instantiated via ``make_object``.
        params (dict): string -> object. Parameters passed to the fit method of
                       each step, where each parameter name is prefixed
                       such that parameter `p` for step `s` has key `s__p`.
    Returns:
        pipeline: sklearn Pipeline object.
    Raises:
        ValueError: if ``params`` contains keys that are not valid for the
            assembled pipeline (the original sklearn error is chained).
    """
    step_estimators = [
        (step_name, make_object(step_fullname))
        for step_name, step_fullname in steps.items()
    ]
    pipeline = Pipeline(steps=step_estimators)
    try:
        pipeline.set_params(**params)
    except ValueError as exc:
        # Chain the original error so the offending parameter stays visible.
        raise ValueError("Could not set params of pipeline. Check the validity. ") from exc
    return pipeline
def format_fields(field_data, include_empty=True):
    """Format field labels and values into aligned "label: value" lines.
    Parameters
    ----------
    field_data : |list| of |tuple|
        2-tuples of field labels and values.
    include_empty : |bool|, optional
        Whether fields whose values are |None| or an empty |str| should
        be included in the formatted fields.
    Returns
    -------
    str
        Formatted field labels and values, one field per line. Values are
        padded so they line up under the widest label, e.g.::

            Name: Jane
            Age:  30
            DOB:  None

        With ``include_empty=False`` the ``DOB`` line above is omitted.
    """
    # Width of the widest (raw) label drives the alignment column.
    widest = max((len(label) for label, _ in field_data), default=0)
    formatted = []
    for label, value in field_data:
        text = str(value)
        if include_empty or text.strip() not in ('', 'None'):
            # Pad relative to the stripped label, matching the join below.
            padding = ' ' * (widest - len(label.strip()))
            formatted.append(label + ':' + padding + ' ' + text)
    return '\n'.join(formatted)
def loadDecomposition(name):
    """Load a tree decomposition in the PACE-2016 format.

    The file may contain:
      * ``c`` comment lines (ignored),
      * one ``s`` header line giving the number of bags and the treewidth,
      * ``b <id> <v1> <v2> ...`` lines defining each bag's vertex set,
      * ``<x> <y>`` edge lines connecting bags in the decomposition tree.

    Returns the list of Bag objects (1-indexed; index 0 is a placeholder),
    with parent pointers set starting from bag 1.
    """
    num_bags = 0   # number of bags declared in the 's' header
    treewidth = 0  # declared treewidth (read but not returned)
    bags = []
    # 'with' guarantees the file is closed even if a malformed line raises.
    with open(name, "r") as f:
        for raw_line in f:
            s = raw_line.split()
            if len(s) < 1:
                continue
            if s[0] == "c":
                continue
            elif s[0] == "s":
                num_bags = int(s[2])
                treewidth = int(s[3])
                bags = [Bag(x) for x in range(num_bags + 1)]
            elif s[0] == "b":
                b = int(s[1])
                bags[b].bag = set(int(x) for x in s[2:])
            else:
                # A tree edge between two bags; store it in both directions.
                (x, y) = (int(s[0]), int(s[1]))
                bags[x].children.add(y)
                bags[y].children.add(x)
    setParents(bags, 1)
    return bags
def testing_report():
    """ Testing Site Daily Summary Report: RESTful CRUD Controller """
    # Delegates entirely to the framework's generic CRUD controller.
    return crud_controller()
def virtual_network_present(
    name,
    address_prefixes,
    resource_group,
    dns_servers=None,
    tags=None,
    connection_auth=None,
    **kwargs
):
    """
    .. versionadded:: 2019.2.0
    Ensure a virtual network exists.
    :param name:
        Name of the virtual network.
    :param resource_group:
        The resource group assigned to the virtual network.
    :param address_prefixes:
        A list of CIDR blocks which can be used by subnets within the virtual network.
    :param dns_servers:
        A list of DNS server addresses.
    :param tags:
        A dictionary of strings can be passed as tag metadata to the virtual network object.
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    Example usage:
    .. code-block:: yaml
        Ensure virtual network exists:
            azurearm_network.virtual_network_present:
                - name: vnet1
                - resource_group: group1
                - address_prefixes:
                    - '10.0.0.0/8'
                    - '192.168.0.0/16'
                - dns_servers:
                    - '8.8.8.8'
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    """
    # Standard Salt state return structure.
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    if not isinstance(connection_auth, dict):
        ret[
            "comment"
        ] = "Connection information must be specified via connection_auth dictionary!"
        return ret
    # Look up any existing virtual network with this name.
    vnet = __salt__["azurearm_network.virtual_network_get"](
        name, resource_group, azurearm_log_level="info", **connection_auth
    )
    if "error" not in vnet:
        # The vnet exists: diff each configurable property against the
        # desired state and record the deltas in ret["changes"].
        tag_changes = __utils__["dictdiffer.deep_diff"](
            vnet.get("tags", {}), tags or {}
        )
        if tag_changes:
            ret["changes"]["tags"] = tag_changes
        dns_changes = set(dns_servers or []).symmetric_difference(
            set(vnet.get("dhcp_options", {}).get("dns_servers", []))
        )
        if dns_changes:
            ret["changes"]["dns_servers"] = {
                "old": vnet.get("dhcp_options", {}).get("dns_servers", []),
                "new": dns_servers,
            }
        addr_changes = set(address_prefixes or []).symmetric_difference(
            set(vnet.get("address_space", {}).get("address_prefixes", []))
        )
        if addr_changes:
            ret["changes"]["address_space"] = {
                "address_prefixes": {
                    "old": vnet.get("address_space", {}).get("address_prefixes", []),
                    "new": address_prefixes,
                }
            }
        if kwargs.get("enable_ddos_protection", False) != vnet.get(
            "enable_ddos_protection"
        ):
            ret["changes"]["enable_ddos_protection"] = {
                "old": vnet.get("enable_ddos_protection"),
                "new": kwargs.get("enable_ddos_protection"),
            }
        if kwargs.get("enable_vm_protection", False) != vnet.get(
            "enable_vm_protection"
        ):
            ret["changes"]["enable_vm_protection"] = {
                "old": vnet.get("enable_vm_protection"),
                "new": kwargs.get("enable_vm_protection"),
            }
        # No deltas: the state is already satisfied.
        if not ret["changes"]:
            ret["result"] = True
            ret["comment"] = "Virtual network {} is already present.".format(name)
            return ret
        # In test mode, report the pending update without applying it.
        if __opts__["test"]:
            ret["result"] = None
            ret["comment"] = "Virtual network {} would be updated.".format(name)
            return ret
    else:
        # The vnet does not exist yet: everything is a "new" change.
        ret["changes"] = {
            "old": {},
            "new": {
                "name": name,
                "resource_group": resource_group,
                "address_space": {"address_prefixes": address_prefixes},
                "dhcp_options": {"dns_servers": dns_servers},
                "enable_ddos_protection": kwargs.get("enable_ddos_protection", False),
                "enable_vm_protection": kwargs.get("enable_vm_protection", False),
                "tags": tags,
            },
        }
        if __opts__["test"]:
            ret["comment"] = "Virtual network {} would be created.".format(name)
            ret["result"] = None
            return ret
    # Create or update through the execution module; the same call handles
    # both cases. Auth parameters ride along with any extra kwargs.
    vnet_kwargs = kwargs.copy()
    vnet_kwargs.update(connection_auth)
    vnet = __salt__["azurearm_network.virtual_network_create_or_update"](
        name=name,
        resource_group=resource_group,
        address_prefixes=address_prefixes,
        dns_servers=dns_servers,
        tags=tags,
        **vnet_kwargs
    )
    if "error" not in vnet:
        ret["result"] = True
        ret["comment"] = "Virtual network {} has been created.".format(name)
        return ret
    ret["comment"] = "Failed to create virtual network {}! ({})".format(
        name, vnet.get("error")
    )
    return ret
def parse_config_yaml(current_config, first_run=False):
    """This function parses the configuration values contained in the
    configuration dictionary obtained by reading the YAML file.

    Raises ParseError on unknown, malformatted, or missing options; returns
    the result of validate_config() on the flattened option dict.
    """
    config_options = current_config
    used_options = {}
    if config_options is None:
        return validate_config(
            cfg_error=True, cfg_error_ctx="The configuration file cannot be empty!"
        )
    # Aggressively parsing the current configuration options allowing
    # neither empty nor unknown options
    for key, value in config_options.items():
        # --- [general] section: flat key/value options only -------------
        if key == "general" and isinstance(value, dict):
            for gen_key, gen_value in config_options[key].items():
                if gen_key == "log file path" and not isinstance(gen_value, dict):
                    used_options[gen_key] = gen_value
                elif gen_key == "download" and not isinstance(gen_value, dict):
                    used_options[gen_key] = gen_value
                elif gen_key == "image editing" and not isinstance(gen_value, dict):
                    used_options[gen_key] = gen_value
                elif gen_key == "font" and not isinstance(gen_value, dict):
                    used_options[gen_key] = gen_value
                elif gen_key == "update frequency" and not isinstance(gen_value, dict):
                    used_options[gen_key] = gen_value
                else:
                    # Distinguish "unknown key" from "known key with a dict value"
                    raise ParseError(
                        "Unknown or malformatted option <%s> in %s section!"
                        % (gen_key, key)
                        if not isinstance(gen_value, dict)
                        else "Invalid value <%s> for option <%s> in '%s' section!"
                        % (gen_value, gen_key, key)
                    )
        # --- [interface] section: only the nested [api] subsection ------
        elif key == "interface" and isinstance(config_options[key], dict):
            for inter_key, inter_value in value.items():
                if inter_key == "api" and isinstance(inter_value, dict):
                    for api_key, api_value in value[inter_key].items():
                        if api_key == "google search api key" and not isinstance(
                            api_value, dict
                        ):
                            used_options[api_key] = api_value
                        elif api_key == "imdb custom search id" and not isinstance(
                            api_value, dict
                        ):
                            used_options[api_key] = api_value
                        else:
                            raise ParseError(
                                "Unknown or malformatted option <%s> in %s section!"
                                % (api_key, key)
                                if not isinstance(api_value, dict)
                                else "Invalid value <%s> for option <%s> in '%s' section!"
                                % (api_value, api_key, key)
                            )
                else:
                    raise ParseError(
                        "Unknown or malformatted option <%s> in '%s' section!"
                        % (inter_key, key)
                    )
        else:
            raise ParseError(
                "Unknown or malformatted section <%s> in the configuration file!" % key
            )
    # API keys are only re-checked on first run.
    return validate_config(used_options, check_api=first_run)
def acf(x, length=20):
    """Compute the autocorrelation of *x* for lags 0..length-1.

    Lag 0 is 1 by definition; each further lag is the Pearson correlation
    between the series and a shifted copy of itself.
    """
    coefficients = [1.0]
    for lag in range(1, length):
        coefficients.append(np.corrcoef(x[:-lag], x[lag:])[0, 1])
    return np.array(coefficients)
def normalizeInfinity(a):
    """
    Normalize array a so that the maximum absolute value is 1.
    Parameters
    ----------
    a : ndarray of float
        The array to be normalized.
    Returns
    -------
    ndarray of float, same shape as a
        The normalized array (a divided by its infinity norm).
    """
    peak = xplib.xp.max(xplib.xp.abs(a))
    return a / peak
def getConfigPath():
    """
    Return the config location: the user-supplied path when one was given
    on the command line, otherwise the built-in default.
    """
    global args, ConfigPathDefault
    if not args.config_location:
        return ConfigPathDefault
    return args.config_location
import re
def only_bf(txt):
    """ Strip a string from all characters, except brainfuck chars """
    # Negated class keeps only the eight brainfuck tokens . , < > + - [ ]
    # The '-' must be escaped: the original pattern's '\+-\]' formed a
    # character *range* from '+' to ']' that wrongly kept digits, uppercase
    # letters and other punctuation.
    return re.sub(r"[^.,<>+\-\[\]]", "", txt)
import subprocess
def run_hidef_cmd(cmd):
    """
    Runs hidef command as a command line process
    :param cmd_to_run: command to run as list
    :type cmd_to_run: list
    :return: (return code, standard out, standard error)
    :rtype: tuple
    """
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    # communicate() waits for the process and drains both pipes (bytes).
    stdout_data, stderr_data = proc.communicate()
    return proc.returncode, stdout_data, stderr_data
def create_ice_connection(user_token):
    """Creates an instance of the ICE API using common settings.

    Returns the configured IceApi instance, or None when required settings
    are missing or construction fails (the failure is logged).
    """
    # Use getattr to load settings without raising AttributeError
    key_id = getattr(settings, "ICE_KEY_ID", None)
    url = getattr(settings, "ICE_URL", None)
    verify = getattr(settings, "ICE_VERIFY_CERT", False)
    timeout = getattr(settings, "ICE_REQUEST_TIMEOUT", None)
    if not (key_id and url):
        return None
    try:
        auth = HmacAuth(key_id=key_id, username=user_token)
        ice = IceApi(auth=auth, base_url=url, verify_ssl_cert=verify)
        if timeout:
            ice.timeout = timeout
        ice.write_enabled = True
        return ice
    except Exception as e:
        logger.error("Failed to create ICE connection: %s", e)
        return None
def make_uris(sids, start, end):
    """Make the URIs for all stations
    Args:
        sids (list): List of IACO station identifiers (str)
        start (str): Starting date in YYYY-mm-dd
        end (str): ending date in YYYY-mm-dd
    Returns:
        a list of URIs, one for each dataset
    """
    y1, m1, d1 = start.split("-")
    y2, m2, d2 = end.split("-")
    base = (
        BASE_URI + "cgi-bin/request/asos.py?"
        # speed (mph), direction, and gust
        + "data=sped&data=drct&data=gust_mph&"
        # comma-separated (no debug header) no latlon or elev
        + "&tz=Etc/UTC&format=onlycomma&latlon=no&elev=no&"
        # hourly "routine + SPECIals" (report type 2); type 1 is MADIS HFMETAR
        + "report_type=2&"
        + f"year1={y1}&month1={m1}&day1={d1}&"
        + f"year2={y2}&month2={m2}&day2={d2}&"
    )
    return [f"{base}station={sid}" for sid in sids]
import os
def is_valid_slack_app(qs):
"""
Checks to see if the request is being made by a valid Slack app.
"""
data = parse_qs(qs)
return safe_list_get(data["api_app_id"], 0) == os.environ.get("SLACK_APP_ID") | 91628d0453c073b5a749eea6cfdea3b6f0ffdccd | 3,629,324 |
def sr_inverse_org(J, k=1.0):
    """Return SR-inverse of given J
    Definition of SR-inverse is following.
    :math:`J^* = J^T(JJ^T + kI_m)^{-1}`
    Parameters
    ----------
    J : numpy.ndarray
        jacobian
    k : float
        coefficients
    Returns
    -------
    sr_inverse : numpy.ndarray
        calculated SR-inverse
    """
    n_rows = J.shape[0]
    # Damped Gram matrix J J^T + k I keeps the inversion well-conditioned
    # near singularities.
    damped = np.matmul(J, J.T) + k * np.eye(n_rows)
    return np.matmul(J.T, np.linalg.inv(damped))
def get_embeddingset(veclist, textlist):
    """Build a whatlies.EmbeddingSet from language-model encodings.
    Args:
        veclist (numpy.ndarray): ndarray of all encodings
        textlist (list): vector of encoded texts
    Returns:
        whatlies.EmbeddingSet: whatlies EmbeddingSet for easier transformation
    """
    # Pair each text with its encoding row by position.
    embeddings = [get_embedding(veclist[idx], text) for idx, text in enumerate(textlist)]
    return EmbeddingSet(*embeddings)
from typing import Sequence
from typing import List
def render_quoted_form(compiler, form, level):
    """
    Render a quoted form as a new hy Expression.
    `level` is the level of quasiquoting of the current form. We can
    unquote if level is 0.
    Returns a two-tuple (`expression`, `splice`).
    The `splice` return value is used to mark `unquote-splice`d forms.
    We need to distinguish them as want to concatenate them instead of
    just nesting them.
    """
    op = None
    if isinstance(form, Expression) and form and isinstance(form[0], Symbol):
        op = unmangle(mangle(form[0]))
        if level == 0 and op in ("unquote", "unquote-splice"):
            # At quasiquote depth 0 an unquote escapes quoting: return the
            # inner form as-is (to be compiled, not quoted).
            if len(form) != 2:
                raise HyTypeError(
                    "`%s' needs 1 argument, got %s" % op,
                    len(form) - 1,
                    compiler.filename,
                    form,
                    compiler.source,
                )
            return form[1], op == "unquote-splice"
        elif op == "quasiquote":
            # Nested quasiquote increases the depth ...
            level += 1
        elif op in ("unquote", "unquote-splice"):
            # ... and an unquote at depth > 0 decreases it.
            level -= 1
    # Rebuild the model as a constructor call: TypeName(contents, ...)
    hytype = form.__class__
    name = ".".join((hytype.__module__, hytype.__name__))
    body = [form]
    if isinstance(form, Sequence):
        contents = []
        for x in form:
            f_contents, splice = render_quoted_form(compiler, x, level)
            if splice:
                # Spliced forms are unpacked into the sequence; `(or x [])`
                # makes a None result splice as nothing instead of erroring.
                f_contents = Expression(
                    [
                        Symbol("unpack-iterable"),
                        Expression([Symbol("or"), f_contents, List()]),
                    ]
                )
            contents.append(f_contents)
        body = [List(contents)]
        # Preserve model-specific attributes through the round-trip.
        if isinstance(form, FString) and form.brackets is not None:
            body.extend([Keyword("brackets"), form.brackets])
        elif isinstance(form, FComponent) and form.conversion is not None:
            body.extend([Keyword("conversion"), String(form.conversion)])
    elif isinstance(form, Symbol):
        body = [String(form)]
    elif isinstance(form, Keyword):
        body = [String(form.name)]
    elif isinstance(form, String):
        if form.brackets is not None:
            body.extend([Keyword("brackets"), form.brackets])
    ret = Expression([Symbol(name), *body]).replace(form)
    return ret, False
def get_index_portfolio_deposit_file(date: str, ticker: str) -> list:
    """Return the constituent stocks of an index as a list of ticker strings.

    Args:
        date (str): query date (YYMMDD)
        ticker (str): index ticker; the first character is the market/group
            code and the remaining characters are the index code
    Returns:
        list: e.g. ['005930', '000660', '051910', ...]; empty list when the
        query returns no rows
    """
    df = 지수구성종목().fetch(date, ticker[1:], ticker[0])
    if df.empty:
        return []
    return df['ISU_SRT_CD'].tolist()
from typing import Optional
def get_account_by_path(
    path_string: str, dongle: Optional[Dongle] = None
) -> LedgerAccount:
    """Return an account for a specific `BIP-44`_ derivation path
    :param path_string: (:code:`str`) - HID derivation path for the account to
        sign with.
    :param dongle: (:class:`ledgerblue.Dongle.Dongle`) - The Dongle instance to
        use to communicate with the Ledger device
    :return: :class:`ledgereth.objects.LedgerAccount` instance for the given
        account
    .. _`BIP-44`: https://en.bitcoin.it/wiki/BIP_0044
    """
    dongle = init_dongle(dongle)
    path = parse_bip32_path(path_string)
    # APDU payload: one count byte (number of 4-byte path components)
    # followed by the packed path itself.
    # NOTE(review): Lc is set to len(path), not len(data) (= len(path) + 1);
    # presumably dongle_send_data compensates — confirm against its contract.
    lc = len(path).to_bytes(1, "big")
    data = (len(path) // 4).to_bytes(1, "big") + path
    response = dongle_send_data(dongle, "GET_ADDRESS_NO_CONFIRM", data, Lc=lc)
    return LedgerAccount(path_string, decode_response_address(response))
import os
def _search_path(file_path):
    """Resolve a bare filename to a full path inside the playback library.

    If *file_path* already contains a directory component it is returned
    unchanged.  Otherwise the library directory is read from the rosparam
    ``/baxter/playback_library_dir``, defaulting to
    ``baxter_general_toolkit/trajectory_playback_library``.
    """
    directory, _ = os.path.split(file_path)
    if directory:
        # Already a path with a directory component; nothing to resolve.
        return file_path
    default_dir = os.path.join(
        rospkg.RosPack().get_path('baxter_general_toolkit'),
        'trajectory_playback_library')
    library_dir = rospy.get_param('/baxter/playback_library_dir', default_dir)
    return os.path.join(library_dir, file_path)
def down_capture_nb(returns, factor_returns, ann_factor):
    """2-dim version of `down_capture_1d_nb`: apply it independently per column.

    Parameters
    ----------
    returns : 2-D array, one asset per column
    factor_returns : 2-D array, same shape as `returns`
    ann_factor : annualization factor passed through to the 1-D routine

    Returns
    -------
    1-D float64 array with one down-capture value per column.
    """
    # np.float_ was removed in NumPy 2.0; np.float64 is the exact equivalent.
    result = np.empty(returns.shape[1], dtype=np.float64)
    for col in range(returns.shape[1]):
        result[col] = down_capture_1d_nb(returns[:, col], factor_returns[:, col], ann_factor)
    return result
import re
import string
def validate_word(word, text):
    """Check if something is a valid "word" submission with previous existing text.

    Return (valid, formatted_word, message), where valid is a boolean, formatted_word is
    the word ready to be added to existing text (adding a space if applicable for example), and
    message is an error message if the word was not valid.

    It can be a word, or ?, !, . for now. Can make it a little more complicated later.
    """
    # All regex patterns below are raw strings: the original "\-" inside a
    # plain string is an invalid escape sequence (DeprecationWarning today,
    # a SyntaxError in future Python versions).
    # An empty story must start with a capitalized word.
    if not text:
        if re.fullmatch(r"[a-zA-Z']+", word):
            return (True, string.capwords(word), "")
        else:
            return (False, "", "Story must begin with a word")
    if word == "":
        return (False, "", "You have to write something!")
    # Plain word: capitalize it when it begins a new sentence.
    if re.fullmatch(r"[a-zA-Z']+", word):
        if text[-1] in ["?", ".", "!", "\n"]:
            return (True, (' ' + string.capwords(word)), "")
        else:
            return (True, (' ' + word), "")
    # Hyphenated continuation, e.g. "-ness" appended after "kind".
    if re.fullmatch(r"\-[a-zA-Z']+", word):
        if not text[-1].isalpha():
            return (False, "", "You can only hyphenate after a word.")
        if re.search(r"\-'", word):
            return (False, "", "An apostrophe cannot directly follow a hyphen.")
        else:
            return (True, word, "")
    # Comma must be followed by exactly one space and a word, e.g. ", and".
    if re.search(",", word):
        if re.fullmatch(r", [a-zA-Z']+", word):
            if text[-1].isalpha():
                return (True, word, "")
            else:
                return (False, "", "A comma can only come after a word.")
        else:
            return (False, "", "Invalid comma use.")
    if word in ["?", ".", "!"]:
        if text[-1].isalpha():
            return (True, word, "")
        else:
            return (False, "", "Sentence-ending punctuation can only go after a word.")
    if " " in word:
        return (False, "", "Word cannot contain spaces except after a comma.")
    else:
        return (False, "", "Not a valid word for some reason (disallowed characters?)")
def process_content_updates(results):
    """Extract the latest App-ID content version from firewall XML output.

    Args:
        results (Element): XML results from firewall

    Returns:
        max_app_version (str): A string containing the latest App-ID version

    NOTE(review): versions are compared lexicographically via ``max`` on
    strings — confirm this ordering is safe for the firewall's version format.
    """
    app_version_list = [
        entry.find('./version').text
        for entry in results.findall('./result/content-updates/entry')
    ]
    return max(app_version_list)
import pickle
def load_synthetic(name):
    """
    Load expression data from a pickle file with the given name
    (produced by the save_synthetic function).

    :param name: name of the pickle file in SYNTHETIC_DIR containing the expression data
    :return: np.array of expression with Shape=(nb_samples, nb_genes) and list of gene
             symbols matching the columns of expr
    """
    path = '{}/{}.pkl'.format(SYNTHETIC_DIR, name)
    with open(path, 'rb') as handle:
        payload = pickle.load(handle)
    return payload['expr'], payload['gene_symbols']
def cell_count(ring):
    """Total number of cells enclosed up to and including `ring`.

    Equivalent to summing contour_len(0..ring); the closed form is
    (2*ring + 1)**2, which also equals 1 + 4*ring*(ring + 1).

    >>> cell_count(0)
    1
    >>> cell_count(1)
    9
    >>> cell_count(2)
    25
    >>> cell_count(3)
    49
    """
    # The original special-cased ring == 0, but 1 + 4*(ring+1)*ring already
    # evaluates to 1 there, so a single formula suffices.
    return (2 * ring + 1) ** 2
def exponential_search(ordered_list, x):
    """
    Exponential search: double an upper bound until it passes `x`, then
    delegate the final lookup to binary_search_tn within that bound.
    Taken from: https://en.wikipedia.org/wiki/Exponential_search
    """
    n = len(ordered_list)
    bound = 1
    while 2 * bound < n and ordered_list[2 * bound - 1] < x:
        bound *= 2
    return binary_search_tn(ordered_list, x, bound)
import base64
def verify(payload, signature, public_key):
    """
    Verify payload using (base64 encoded) signature and verification key.
    public_key should be obtained from load_public_key.
    Uses RSA-PSS with SHA-512 and maximum salt length.

    The corresponding openssl command to create signatures that this function can verify is:

    ::

        openssl dgst -sha512 -sigopt rsa_padding_mode:pss -sigopt rsa_pss_saltlen:-2 -sign <PRIVATE_KEY> <PAYLOAD> | openssl base64 -A

    :param payload: the payload
    :type payload: str
    :param signature: base64 encoded signature
    :type signature: bytes
    :param public_key: a public key from ``lib50.crypto.load_public_key``
    :return: True iff the payload could be verified
    :type: bool
    """
    pss_padding = padding.PSS(
        mgf=padding.MGF1(hashes.SHA512()),
        salt_length=padding.PSS.MAX_LENGTH)
    try:
        public_key.verify(signature=base64.b64decode(signature),
                          data=payload,
                          padding=pss_padding,
                          algorithm=hashes.SHA512())
    except InvalidSignature:
        # Signature does not match the payload under this key.
        return False
    else:
        return True
import os
def get_models_from_api_spec(
    api_spec: dict, model_dir: str = "/mnt/model"
) -> CuratedModelResources:
    """
    Only effective for predictor:model_path, predictor:models:paths or for predictor:models:dir when the dir is a local path.
    It does not apply for when predictor:models:dir is set to an S3 model path.

    Args:
        api_spec: Parsed API spec; must contain a "predictor" section.
        model_dir: Local directory where models are mounted/downloaded.

    Returns:
        CuratedModelResources describing each model: name, path, discovered
        versions, signature key, and whether it is on S3/GCS or local.
    """
    predictor = api_spec["predictor"]
    # No models configured at all: return an empty resource set.
    if not predictor["model_path"] and not predictor["models"]:
        return CuratedModelResources([])
    predictor_type = predictor_type_from_api_spec(api_spec)
    # for predictor.model_path
    models = []
    if predictor["model_path"]:
        model = {
            "name": cortex.consts.SINGLE_MODEL_NAME,
            "model_path": predictor["model_path"],
            "signature_key": predictor["signature_key"],
        }
        models.append(model)
    # for predictor.models.paths
    if predictor["models"] and predictor["models"]["paths"]:
        for model in predictor["models"]["paths"]:
            models.append(
                {
                    "name": model["name"],
                    "model_path": model["model_path"],
                    "signature_key": model["signature_key"],
                }
            )
    # building model resources for predictor.model_path or predictor.models.paths
    model_resources = []
    for model in models:
        model_resource = {}
        model_resource["name"] = model["name"]
        model_resource["s3_path"] = model["model_path"].startswith("s3://")
        model_resource["gcs_path"] = model["model_path"].startswith("gs://")
        model_resource["local_path"] = (
            not model_resource["s3_path"] and not model_resource["gcs_path"]
        )
        # Fall back to the models-level signature key when this entry has none.
        if not model["signature_key"] and predictor["models"]:
            model_resource["signature_key"] = predictor["models"]["signature_key"]
        else:
            model_resource["signature_key"] = model["signature_key"]
        if model_resource["s3_path"] or model_resource["gcs_path"]:
            model_resource["model_path"] = model["model_path"]
            _, versions, _, _, _, _, _ = find_all_cloud_models(
                False, "", predictor_type, [model_resource["model_path"]], [model_resource["name"]]
            )
            # Skip cloud models for which no versions were discovered.
            if model_resource["name"] not in versions:
                continue
            model_resource["versions"] = versions[model_resource["name"]]
        else:
            # Local model: versions are the subdirectories of model_dir/<name>.
            model_resource["model_path"] = os.path.join(model_dir, model_resource["name"])
            model_resource["versions"] = os.listdir(model_resource["model_path"])
        model_resources.append(model_resource)
    # building model resources for predictor.models.dir
    if (
        predictor["models"]
        and predictor["models"]["dir"]
        and not predictor["models"]["dir"].startswith("s3://")
        and not predictor["models"]["dir"].startswith("gs://")
    ):
        for model_name in os.listdir(model_dir):
            model_resource = {}
            model_resource["name"] = model_name
            model_resource["s3_path"] = False
            model_resource["gcs_path"] = False
            model_resource["local_path"] = True
            model_resource["signature_key"] = predictor["models"]["signature_key"]
            model_resource["model_path"] = os.path.join(model_dir, model_name)
            model_resource["versions"] = os.listdir(model_resource["model_path"])
            model_resources.append(model_resource)
    return CuratedModelResources(model_resources)
from re import T
import torch
def prepare_segment(net, source, dev='cuda'):
    """
    Prepares target image before foreground and background separation

    Parameters:
        net (pytorch_vision.model): Segmentation Model
        source (cv2.Mat): Hand image
        dev (str): Computation device, default GPU

    Returns:
        (cv2.Mat): Hand image with new bg
    """
    # BUG FIX: the module-level `from re import T` binds re.TEMPLATE (an int
    # flag) which has no Compose/Resize attributes; the transforms below need
    # torchvision.transforms, imported locally here.
    from torchvision import transforms as T

    # CV2 to PIL Image
    img = cv2.cvtColor(source, cv2.COLOR_BGR2RGB)
    img = Image.fromarray(img)
    # Standardized image size & normalize for better inference
    transformer = T.Compose([T.Resize(400),
                             T.ToTensor(),
                             T.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
                             ])
    img_transformed = transformer(img).unsqueeze(0).to(dev)
    # Copy data image into computation device memory
    img_model = net.to(dev)(img_transformed)['out']
    # Compute using CPU tensor convert torch tensor to numpy array
    img_tensor = torch.argmax(img_model.squeeze(), dim=0).detach().cpu().numpy()
    return decode_segment_map(img_tensor, source)
import csv
import json
def extract_information_per_turker(filename):
    """
    Extracts dictionary with turker's details

    Args:
        filename: path to a CSV of HIT results containing at least the
            columns 'WorkerId', 'Answer.ClientLocation', 'Answer.CountTries',
            'AcceptTime' and 'SubmitTime'.

    Returns:
        user_information: dictionary with user details keyed by worker id
            (location, tries/tasks counters, response-time statistics)
    """
    user_information = {}
    with open(filename) as file:
        csv_reader = csv.DictReader(file)
        for row in csv_reader:
            worker_id = row['WorkerId']
            location_info = json.loads(row['Answer.ClientLocation'])
            quality_info = json.loads(row['Answer.CountTries'])
            # First time this worker is seen: record a best-effort location
            # and initialize the per-worker counters.
            if worker_id not in user_information:
                if 'backUpLocation' in location_info:
                    user_information[worker_id] = location_info['backUpLocation']
                elif 'latitude' in location_info and 'longitude' in location_info:
                    user_information[worker_id] = {'latitude': location_info['latitude'],
                                                   'longitude': location_info['longitude']}
                else:
                    user_information[worker_id] = {'latitude': 'unknown', 'longitude': 'unknown'}
                user_information[worker_id]['responseTimes'] = []
                user_information[worker_id]['countTries'] = 0
                user_information[worker_id]['tasks'] = 0
            user_information[worker_id]['countTries'] += quality_info['countTries']
            user_information[worker_id]['tasks'] += 1
            user_information[worker_id]['triesPerTask'] = user_information[worker_id]['countTries'] / \
                                                          user_information[worker_id]['tasks']
            # NOTE: accept/submit times are overwritten on every row, so only
            # the last processed assignment's timestamps are kept per worker.
            user_information[worker_id]['acceptTime'] = row['AcceptTime']
            user_information[worker_id]['submitTime'] = row['SubmitTime']
            # Fourth whitespace-separated field is assumed to be the HH:MM:SS
            # part of the timestamp -- TODO confirm against the export format.
            time_of_start = row['AcceptTime'].split()[3]
            time_of_submit = row['SubmitTime'].split()[3]
            user_information[worker_id]['responseTimes'].append(
                get_time_difference(time_of_start, time_of_submit).total_seconds())
    for worker_id in user_information.keys():
        user_information[worker_id]['averageResponseTime'] = np.mean(user_information[worker_id]['responseTimes'])
    return user_information
def upload_str(name: str, content: str, bucket_name: str):
    """
    Upload a UTF-8 string to a GCS bucket and return the public URL.

    :param name: Name of file including directories i.e. /my/path/file.txt
    :param content: UTF-8 encoded file content
    :param bucket_name: Name of GCS bucket, i.e. deepdriveio
    :return: Url of the public file
    """
    bucket = storage.Client().get_bucket(bucket_name)
    # Reuse the existing blob when present, otherwise create a new handle.
    blob = bucket.get_blob(name)
    if blob is None:
        blob = bucket.blob(name)
    blob.upload_from_string(content)
    return f'https://storage.googleapis.com/{bucket_name}/{name}'
import os
def main(orig_dir, new_dir, blocksize, comparison_operator):
    """ main source of pain

    Compare the contents of two directory trees.

    Args:
        orig_dir: path of the original directory
        new_dir: path of the directory to compare against
        blocksize: chunk size used when hashing file contents
        comparison_operator: one of "sha256" (hash every file in both trees),
            "common_only" (hash only files present in both trees) or
            "filecmp" (stat-based comparison of common files)

    Returns:
        (orig_files, new_files, common_files, changed_files, unchanged_files,
        hashmapping); hashmapping is None unless comparison_operator == "sha256".

    Raises:
        comparisonException: if a directory does not exist or the operator is unknown.
    """
    if not os.path.isdir(orig_dir):
        raise comparisonException("Directory {d} does not exist".format(d=orig_dir))
    if not os.path.isdir(new_dir):
        raise comparisonException("Directory {d} does not exist".format(d=new_dir))
    changed_files = []
    unchanged_files = []
    print("Initializing comparison, this might take a while...")
    # dirInfo objects:
    print("Listing files in {0}".format(orig_dir))
    orig_dir_obj = dirInfo(orig_dir, blocksize)
    print("Listing files in {0}".format(new_dir))
    new_dir_obj = dirInfo(new_dir, blocksize)
    # fileInfo objects:
    print("Generating information of files in {0}".format(orig_dir))
    orig_dir_file_objs = orig_dir_obj.generate_fileInfo_objects()
    print("Generating information of files in {0}".format(new_dir))
    new_dir_file_objs = new_dir_obj.generate_fileInfo_objects()
    print("Comparing the contents of {od} and {nd}".format(od=orig_dir, nd=new_dir))
    orig_files, new_files, common_files = compare_directories(orig_dir_obj.get_files_in_dir(), new_dir_obj.get_files_in_dir())
    if comparison_operator == "sha256":
        print("Checking differences between ALL files in both directories via sha256 hashes... This might take a while!")
        #print("Comparing common files by their hashes...")
        # compare existing files by their hashes:
        changed_files, unchanged_files, hashmapping = compare_full_dirs(orig_dir_obj, new_dir_obj)
    elif comparison_operator == "common_only":
        print("Comparing common files by their sha256 hashes")
        #changed_files, unchanged_files = _compare_files(orig)
        changed_files, unchanged_files = compare_files(orig_dir_file_objs, new_dir_file_objs, common_files, "hash")
        hashmapping = None
    elif comparison_operator == "filecmp":
        print("Comparing common files via filecmp.cmp")
        changed_files, unchanged_files = compare_files(orig_dir_file_objs, new_dir_file_objs, common_files, "stat")
        hashmapping = None
    else:
        raise comparisonException("Unknown comparison operator: {0}".format(comparison_operator))
    return orig_files, new_files, common_files, changed_files, unchanged_files, hashmapping
def inverso(x):
    """
    Return the multiplicative inverse of a number.

    .. math::
        \\frac{1}{x}

    Args:
        x (float): Number to invert (must be non-zero).

    Returns:
        float: The inverse of ``x``.
    """
    return 1 / x
from typing import Union
def _scale(scale:bool,X:Union[pd.Series,np.ndarray]):
"""
Normalization of the Time Serie
Parameters
----------
scale : bool
X : pd.Serie or Numpy Array
Returns
-------
global_mean : float, mean of the Time Serie
global_sdt : float, sdt of the Time Serie
X : numpy array, 1D Array
"""
if isinstance(X,pd.Series):
X=X.values.copy()
if(scale):
global_mean=np.mean(X)
global_sdt=np.std(X)
X=(X-global_mean)/global_sdt
return global_mean,global_sdt,X
else:
return 0.0,1.1,X | b86149deddefc22f1776f5de424f413790f8b2d2 | 3,629,344 |
def update_user_count_eponymous(set_of_contributors, anonymous_coward_comments_counter):
    """
    Eponymous user count update.

    Input:  - set_of_contributors: A python set of user ids.
            - anonymous_coward_comments_counter: The number of comments posted by
              anonymous user(s); unused here, kept for interface compatibility.
    Output: - user_count: The number of eponymous users active in the information cascade.
    """
    return len(set_of_contributors)
import argparse
def get_args():
    """get command-line arguments"""
    parser = argparse.ArgumentParser(
        description='Translate DNA/RNA to proteins',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('positional', metavar='str', help='DNA/RNA sequence')
    parser.add_argument('-c', '--codons', metavar='str', type=str,
                        required=True, help='A file with codon translations')
    parser.add_argument('-o', '--out', metavar='str', type=str,
                        default='out.txt', help='Output filename')
    return parser.parse_args()
def translation(component, direction=1, kernel=interpolation.lanczos, padding=3):
    """Shift the morphology by a given amount

    Parameters
    ----------
    component : object exposing `shift` as a (dy, dx) pair and a `morph`
        array; `morph` is resampled in place.
    direction : int
        Multiplier applied to the stored shift (use -1 to shift the other way).
    kernel : callable
        Interpolation kernel (default: interpolation.lanczos).
    padding : int
        Unused in this function -- TODO confirm whether it should be forwarded.

    Returns
    -------
    component : the same object, with `morph` updated in place.
    """
    dy, dx = component.shift
    dy *= direction
    dx *= direction
    # NOTE(review): `_kernel` is computed but never used; fft_resample is
    # called without it. Confirm whether the kernel should be passed through
    # to the resampling step.
    _kernel, _, _ = interpolation.get_separable_kernel(dy, dx, kernel=kernel)
    component.morph[:] = interpolation.fft_resample(component.morph, dy, dx)
    return component
from typing import Optional
def workflow(
    user_id: Optional[int] = None,
    screen_name: Optional[str] = None,
    total_followers: int = 100000,
    batch_size: int = 5000,
    output_csv: Optional[str] = None
) -> pd.DataFrame:
    """
    Pull a user's Twitter following and map each follower to a row
    of their identity attributes. Return as a pandas DataFrame.

    Parameters
    ----------
    user_id : Optional[int]
        The Twitter API ID for the user whose followers we should pull
    screen_name : Optional[str]
        Twitter handle for the user whose followers we should pull
    total_followers : int
        The total number of followers that this user has.
        Defaults to 100,000.
    batch_size : int
        How many users should be held in memory at any given time.
        Maximum value of 5000. Defaults to 5000.
    output_csv : Optional[str]
        A file to output the DataFrame of attributes to. If None, do not
        save the DataFrame to disk. Defaults to None.
    """
    client = api_key_list(get_all_api_keys())
    df = pd.DataFrame(client.get_followers(
        user_id=user_id,
        screen_name=screen_name,
        min_count=total_followers,
        batch_size=batch_size,
        streaming_fn=_get_user_attributes
    ))
    # BUG FIX: to_csv previously ran unconditionally; with output_csv=None,
    # pandas renders the entire frame to an in-memory CSV string, which
    # contradicts the documented "do not save" behavior and wastes memory.
    if output_csv is not None:
        df.to_csv(output_csv)
    return df
def dcaFuse(X, Y, L):
    """
    Discriminant Correlation Analysis feature fusion.

    Args:
        X (p*n): first feature set, one sample per column
        Y (q*n): second feature set, one sample per column
        L (n): class label per sample

    Returns:
        Xs, Ys: transformed feature sets in the correlated subspace
        Ax, Ay: the corresponding projection matrices
    """
    # X = np.array([[1,2,3,4],
    #              [4,5,6,7],
    #              ])
    # Y = np.array([[4,4,4,4],
    #               [1,1,1,1]]
    #               )
    # L = np.array([0,1,2,2])
    p, n = X.shape
    q = Y.shape[0]
    # Normalize
    # Per-feature z-scoring across the n samples.
    X = (X - np.tile(np.mean(X, axis=1, dtype=np.float64).reshape([p,1]), n)) / np.tile(np.std(X, axis=1, dtype=np.float64).reshape([p,1]), n)
    Y = (Y - np.tile(np.mean(Y, axis=1, dtype=np.float64).reshape([q,1]), n)) / np.tile(np.std(Y, axis=1, dtype=np.float64).reshape([q,1]), n)
    classes = np.unique(L)
    c = len(classes)
    nSample = np.zeros([c])
    cellX = []
    cellY = []
    # Group the sample columns by class label.
    for i in range(c):
        idx = np.squeeze(np.argwhere(L == classes[i]), axis=1)
        cellX.append(X[:,idx])
        cellY.append(Y[:,idx])
        nSample[i] = len(idx)
    meanX = np.mean(X, axis=1)
    meanY = np.mean(Y, axis=1)
    classMeanX = np.zeros([p,c])
    classMeanY = np.zeros([q,c])
    for i in range(c):
        classMeanX[:,i] = np.mean(cellX[i], axis=1)
        classMeanY[:,i] = np.mean(cellY[i], axis=1)
    # Phib columns span the between-class scatter (weighted class-mean deviations).
    PhibX = np.zeros([p,c])
    PhibY = np.zeros([q,c])
    for i in range(c):
        PhibX[:,i] = np.sqrt(nSample[i]) * (classMeanX[:,i] - meanX)
        PhibY[:,i] = np.sqrt(nSample[i]) * (classMeanY[:,i] - meanY)
    del L, idx, cellX, cellY, meanX, meanY, classMeanX, classMeanY
    """
    Diagolalize the between-class scatter matrix (Sb) for X and Y
    """
    cx, Wbx = Diag_Bx(PhibX)
    cy, Wby = Diag_Bx(PhibY)
    """
    Project data in a space, where the between-class scatter matrices are
    identity and the classes are separated
    """
    r = min(cx, cy)
    Wbx = Wbx[:, :r]
    Wby = Wby[:, :r]
    Xp = np.dot(Wbx.T, X)
    Yp = np.dot(Wby.T, Y)
    """
    Unitize the between-set covariance matrix (Sxy)
    Note that Syx == Sxy'
    """
    Sxy = np.dot(Xp, Yp.T)   # Between-set covariance matrix
    # NOTE(review): scipy's svd returns (U, s, Vh) where Vh is the transpose
    # of the right singular vectors; Wcy here receives Vh. Confirm whether a
    # transpose is intended (MATLAB's svd returns V, not V').
    Wcx, S_val, Wcy = svd(Sxy)
    S = np.diag(S_val)
    Wcx = np.dot(Wcx, fractional_matrix_power(S, -0.5))
    Wcy = np.dot(Wcy, fractional_matrix_power(S, -0.5))
    Xs = np.dot(Wcx.T, Xp)
    Ys = np.dot(Wcy.T, Yp)
    Ax = np.dot(Wcx.T, Wbx.T)
    Ay = np.dot(Wcy.T, Wby.T)
    return Xs, Ys, Ax, Ay
def flowRate(t, tau=5.0):
    """Return the pump flow rate at time ``t`` during coastdown.

    Models an exponential coastdown from an initial rate of 21.7:
    ``21.7 * exp(-t / tau)``.

    Parameters
    ----------
    t : float or array-like
        Time since the start of coastdown.
    tau : float, optional
        Pump coastdown time constant in seconds (default 5.0).  Previously
        hard-coded; exposed as a parameter so callers no longer need to edit
        this function to change it.
    """
    return 21.7 * np.exp(-t / tau)
def generator_from_atom_argument(arg: AtomSpec) -> IndexGenerator:
    """
    Returns a generator function for selecting a subset of sites from a pymatgen :obj:`Structure` object.

    Args:
        arg (various): Argument used to construct the generator function.
            May be a callable (returned unchanged), a string, or a
            list/tuple of strings or of ints.

    Returns:
        (func): Generator function that takes one argument (:obj:`Structure`) and
        returns an appropriate list of site indices.

    Raises:
        TypeError: if `arg` is not a callable, a str, or a list/tuple of
            str/int elements.
    """
    if callable(arg):
        return arg
    # isinstance (rather than `type(...) is ...`) also accepts subclasses.
    if isinstance(arg, str):
        return partial(_get_indices_from_str, arg=arg)
    if isinstance(arg, (list, tuple)):
        # Dispatch on the type of the first element, as before.
        if isinstance(arg[0], str):
            return partial(_get_indices_from_list_str, arg=arg)
        if isinstance(arg[0], int):
            return partial(_get_indices_from_list_int, arg=arg)
        raise TypeError(
            "list/tuple elements must be str or int, got %r" % type(arg[0]))
    raise TypeError("cannot build an index generator from %r" % type(arg))
def get_total_memory():
    """
    get_total_memory()

    Returns the total memory in GBs
    """
    total_memory = shell_tools.run('sysctl -a | grep hw.memsize')['stdout']
    # BUG FIX: bytes -> GB requires dividing by 1024**3; the previous
    # divisor was (1024 * 3) == 3072, giving a value ~350,000x too large.
    return int(total_memory.split('=')[-1]) / (1024 ** 3)
import os
def generate_arg_defaults():
    """Return a dict of programmatically determined argument defaults."""
    import getpass
    # getpass.getuser() falls back to environment variables when there is no
    # controlling terminal, where os.getlogin() raises OSError (e.g. in CI,
    # cron, or containers).
    return {'user': getpass.getuser(),
            'uid': os.getuid(),
            'gid': os.getgid(),
            'tag': 'latest',
            'dockerfile': './docker/escadrille'}
from labml_nn.transformers.primer_ez import MultiDConvHeadAttention
def _d_conv_mha(c: TransformerConfigs):
    """
    Add the [option](https://docs.labml.ai/api/configs.html#labml.configs.option)
    of [**Multi-DConv-Head Attention**](index.html) to
    [configurable transformer](../configs.html#TransformerConfigs)
    """
    attention = MultiDConvHeadAttention(
        c.n_heads,
        c.d_model,
        dropout_prob=c.dropout,
    )
    return attention
def c_commands(context, commands):
    """
    The c in c_commands refers to the c file.
    This function filters a list of commands for the generated .c file.
    WGL core functions are not dynamically loaded but need to be linked,
    so this function filters out WGL core functions for the .c file.

    :param context: jinja context
    :param commands: list of commands
    :return: commands filtered
    """
    spec = context['spec']
    # Only the WGL spec needs filtering; everything else passes through.
    if spec.name != WGL.NAME:
        return commands
    feature_set = context['feature_set']
    core = feature_set.features[0].get_requirements(spec, feature_set=feature_set)
    # Idiomatic membership test (`not in` instead of `not x in`).
    return [command for command in commands if command not in core]
import asyncio
async def get_job_status(job_id: int, request: Request, response: Response):
    """Get the status of a previously-submitted job.

    **Arguments:**
    - **job_id**: Identifier of the submitted job, as returned by the "Submit Job" endpoint.
    """
    loop = asyncio.get_running_loop()
    try:
        # gitlab.get_job_state is blocking; run it in the default executor
        # so the event loop stays responsive.
        job_state, _ = await loop.run_in_executor(None, gitlab.get_job_state, job_id, False)
    except gitlab.GitlabHttpError:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)
    if job_state.status == "success":
        # Point the client at the results endpoint for completed jobs.
        job_state.web_url = f"{request.url}/results"
        response.headers["LOCATION"] = job_state.web_url
        return job_state
    # TODO: Re-evaluate when get_job_result works.
    # NOTE(review): the branch below is unreachable -- "success" already
    # returned above, so control always takes the != "success" path and the
    # RedirectResponse is dead code kept as a placeholder per the TODO.
    if job_state.status != "success":
        return job_state
    else:
        return RedirectResponse(url=job_state.web_url, status_code=status.HTTP_303_SEE_OTHER)
import os
import re
def replace_tool(string_file, old_tool_file, new_tool_file, old_tool_name='', new_tool_name='', N=0):
    """Swaps old_tool_file for new_tool_file in string_file. Also replaces the tools Name field.

    Parameters
    ----------
    string_file : str
        Path to an Adams Drill string file.  Accepts full path or cdb aliases.
    old_tool_file : str
        Path to an Adams Drill tool property file that exists in string_file.  Accepts full path or cdb aliases.
    new_tool_file : str
        Path to an Adams Drill tool property file to replace old_tool_file.  Accepts full path or cdb aliases.
    N : int
        Number of replacements to make.  Default is 0 which will replace all instances.
    old_tool_name : str
        Name of the tool to replace.  Default is the filename.
    new_tool_name : str
        Name of the new tool.  Default is the filename.

    Returns
    -------
    int
        Number of replacements that were made
    """
    # Convert tool filenames to full paths
    string_file = get_full_path(string_file)
    if old_tool_name == '':
        old_tool_name = os.path.splitext(os.path.split(old_tool_file)[-1])[0]
    if new_tool_name == '':
        new_tool_name = os.path.splitext(os.path.split(new_tool_file)[-1])[0]
    # Open the original string file for reading and a new string file for writing
    fid_old = open(string_file,'r')
    fid_new = open(string_file.replace('.str','.tmp'),'w')
    # Initiate the number of replacements made
    n = 0
    # If the tool is a hole
    # BUG FIX: os.path.splitext returns a (root, ext) tuple; comparing the
    # whole tuple to '.hol' was always False, so the hole branch never ran.
    # Index [1] selects the extension.
    if os.path.splitext(old_tool_file)[1] == '.hol':
        for line in fid_old:
            # Loop through the string file to find the hole property file line
            if re.match(' *Hole_Property_File *= *.*', line, flags=re.IGNORECASE):
                # If at a hole property file line, convert new_tool_file to cdb or full notation depending on what's already in the file
                new_tool_file = get_cdb_path(new_tool_file) if '<' in line else get_full_path(new_tool_file)
                # Write the modified line
                fid_new.write(" Hole_Property_File  =  '{}'\n".format(new_tool_file))
                # Increment the number of replacements made
                n += 1
            else:
                # If not at a hole property file line, write the line unchanged
                fid_new.write(line)
    else:
        # If the tool is not a hole, initialize a flag indicating a replacement should be made on the next line
        replace = False
        for line in fid_old:
            # Loop through the string file to find and replace the corresponding tool block
            if re.match(' *Type *= *.*', line, flags=re.IGNORECASE):
                fid_new.write(line)
            elif re.match(' *Stack_Order *= *.*', line, flags=re.IGNORECASE):
                stack_order = int(line.replace(' ','').replace('\n','').split('=')[1])
                fid_new.write(line)
            elif re.match(' *Name *= *\'{}.*\'.*'.format(old_tool_name), line, flags=re.IGNORECASE) and (n<N or N==0):
                # If this is the tool name line for the tool to replace
                if re.match(' *Name *= *\'{}_{:02d}\'.*'.format(old_tool_name, stack_order), line, flags=re.IGNORECASE):
                    # If the tool name has a stack order appended
                    fid_new.write(" Name  =  '{}_{:02d}'\n".format(new_tool_name, stack_order))
                else:
                    # If the tool name does not have a stack order appended
                    fid_new.write(" Name  =  '{}'\n".format(new_tool_name))
                # Indicate that a replacement should be made on the next property file line
                replace = True
            elif re.match(' *Property_File *= *.*', line, flags=re.IGNORECASE) and replace:
                # If at a property file line, convert new_tool_file to cdb or full notation depending on what's already in the file
                new_tool_file = get_cdb_path(new_tool_file) if '<' in line else get_full_path(new_tool_file)
                # Write the modified line
                fid_new.write(" Property_File  =  '{}'\n".format(new_tool_file))
                # Indicate that a replacement should not be made on the next property file line
                replace = False
                # Increment the number of replacements made
                n += 1
            else:
                # If not at a property file line, write the line unchanged
                fid_new.write(line)
    # Close the string files, delete the original one, and rename the new one
    fid_old.close()
    fid_new.close()
    os.remove(string_file)
    os.rename(string_file.replace('.str','.tmp'), string_file)
    return n
from util import Stack
def depthFirstSearch(problem):
    """
    Search the deepest nodes in the search tree first.

    Your search algorithm needs to return a list of actions that reaches the
    goal. Make sure to implement a graph search algorithm.

    To get started, you might want to try some of these simple commands to
    understand the search problem that is being passed in:
    """
    # stackXY: ((x,y),[path]) #
    stackXY = Stack()
    visited = [] # Visited states
    path = [] # Every state keeps it's path from the starting state
    # Check if initial state is goal state #
    if problem.isGoalState(problem.getStartState()):
        return []
    # Start from the beginning and find a solution, path is an empty list #
    stackXY.push((problem.getStartState(),[]))
    while(True):
        # Terminate condition: can't find solution #
        if stackXY.isEmpty():
            return []
        # Get informations of current state #
        xy,path = stackXY.pop() # Take position and path
        # NOTE(review): states are marked visited only here at pop time, and
        # membership in the stack is not checked at push time below, so the
        # same state can be pushed (and expanded) more than once -- confirm
        # whether a `if xy in visited: continue` guard is wanted.
        visited.append(xy)
        # Comment this and uncomment 125. This only works for autograder #
        # In lectures we check if a state is a goal when we find successors #
        # Terminate condition: reach goal #
        if problem.isGoalState(xy):
            return path
        # Get successors of current state #
        succ = problem.getSuccessors(xy)
        # Add new states in stack and fix their path #
        if succ:
            for item in succ:
                if item[0] not in visited:
                    # Lectures code:
                    # All impementations run in autograder and in comments i write
                    # the proper code that i have been taught in lectures
                    # if item[0] not in visited and item[0] not in (state[0] for state in stackXY.list):
                    #   if problem.isGoalState(item[0]):
                    #       return path + [item[1]]
                    newPath = path + [item[1]] # Calculate new path
                    stackXY.push((item[0],newPath))
from distributed.client import default_client
import tokenize
def read_bytes(urlpath, delimiter=None, not_zero=False, blocksize=2**27,
               sample=True, compression=None, **kwargs):
    """ Convert path to a list of delayed values

    The path may be a filename like ``'2015-01-01.csv'`` or a globstring
    like ``'2015-*-*.csv'``.

    The path may be preceded by a protocol, like ``s3://`` or ``hdfs://`` if
    those libraries are installed.

    This cleanly breaks data by a delimiter if given, so that block boundaries
    start directly after a delimiter and end on the delimiter.

    Parameters
    ----------
    urlpath: string
        Absolute or relative filepath, URL (may include protocols like
        ``s3://``), or globstring pointing to data.
    delimiter: bytes
        An optional delimiter, like ``b'\\n'`` on which to split blocks of
        bytes.
    not_zero: bool
        Force seek of start-of-file delimiter, discarding header.
    blocksize: int (=128MB)
        Chunk size in bytes
    compression: string or None
        String like 'gzip' or 'xz'. Must support efficient random access.
    sample: bool or int
        Whether or not to return a header sample. If an integer is given it is
        used as sample size, otherwise the default sample size is 10kB.
    **kwargs: dict
        Extra options that make sense to a particular storage connection, e.g.
        host, port, username, password, etc.

    Examples
    --------
    >>> sample, blocks = read_bytes('2015-*-*.csv', delimiter=b'\\n')  # doctest: +SKIP
    >>> sample, blocks = read_bytes('s3://bucket/2015-*-*.csv', delimiter=b'\\n')  # doctest: +SKIP

    Returns
    -------
    A sample header and list of ``dask.Delayed`` objects or list of lists of
    delayed objects if ``fn`` is a globstring.
    """
    fs, paths, myopen = get_fs_paths_myopen(urlpath, compression, 'rb',
                                            None, **kwargs)
    # Distributed client is created lazily, only when the filesystem reports
    # block locations that can be used for data-locality scheduling.
    client = None
    if len(paths) == 0:
        raise IOError("%s resolved to no files" % urlpath)
    if blocksize is not None:
        if not is_integer(blocksize):
            raise TypeError("blocksize must be an integer")
        blocksize = int(blocksize)
    blocks, lengths, machines = fs.get_block_locations(paths)
    if blocks:
        # Filesystem (e.g. HDFS) told us where the blocks physically live.
        offsets = blocks
    elif blocksize is None:
        # One block per file: read each file whole.
        offsets = [[0]] * len(paths)
        lengths = [[None]] * len(offsets)
        machines = [[None]] * len(offsets)
    else:
        # Split each file into fixed-size byte ranges of `blocksize`.
        offsets = []
        lengths = []
        for path in paths:
            try:
                size = fs.logical_size(path, compression)
            except KeyError:
                raise ValueError('Cannot read compressed files (%s) in byte chunks,'
                                 'use blocksize=None' % infer_compression(urlpath))
            off = list(range(0, size, blocksize))
            length = [blocksize] * len(off)
            if not_zero:
                # Skip the first byte so the first block seeks past the header.
                off[0] = 1
                length[0] -= 1
            offsets.append(off)
            lengths.append(length)
        machines = [[None]] * len(offsets)
    out = []
    for path, offset, length, machine in zip(paths, offsets, lengths, machines):
        # ukey captures file identity/mtime so the token changes when the file does.
        ukey = fs.ukey(path)
        keys = ['read-block-%s-%s' %
                (o, tokenize(path, compression, offset, ukey, kwargs, delimiter))
                for o in offset]
        L = [delayed(read_block_from_file)(myopen(path, mode='rb'), o,
                                           l, delimiter, dask_key_name=key)
             for (o, key, l) in zip(offset, keys, length)]
        out.append(L)
        if machine is not None:   # blocks are in preferred locations
            if client is None:
                try:
                    client = default_client()
                except (ImportError, ValueError):  # no distributed client
                    client = False
            if client:
                # Ask the scheduler to prefer the workers holding each block.
                restrictions = {key: w for key, w in zip(keys, machine)}
                client._send_to_scheduler({'op': 'update-graph', 'tasks': {},
                                           'dependencies': [], 'keys': [],
                                           'restrictions': restrictions,
                                           'loose_restrictions': list(restrictions),
                                           'client': client.id})
    if sample is not True:
        nbytes = sample
    else:
        nbytes = 10000
    if sample:
        # myopen = OpenFileCreator(urlpath, compression)
        # Read a small header sample from the first file only.
        with myopen(paths[0], 'rb') as f:
            sample = read_block(f, 0, nbytes, delimiter)
    return sample, out
from dials.algorithms.scaling.scaler_factory import TargetScalerFactory
def scale_against_target(
    reflection_table,
    experiment,
    target_reflection_table,
    target_experiment,
    params=None,
    model="KB",
):
    """Scale one dataset against a target dataset treated as already scaled.

    ``reflection_table``/``experiment`` describe the dataset to be scaled and
    ``target_reflection_table``/``target_experiment`` the reference dataset.
    When ``params`` is None the default scaling configuration is assembled
    from the standard phil scopes, with the scaling model selectable through
    ``model``. Returns the scaled reflection table, which gains the columns
    'inverse_scale_factor' and 'inverse_scale_factor_variance'."""
    if not params:
        # Build the default scaling configuration from the phil scopes.
        master_phil = phil.parse(
            """
      include scope dials.algorithms.scaling.scaling_options.phil_scope
      include scope dials.algorithms.scaling.model.model.model_phil_scope
      include scope dials.algorithms.scaling.scaling_refiner.scaling_refinery_phil_scope
    """,
            process_includes=True,
        )
        parser = OptionParser(phil=master_phil, check_format=False)
        params, _ = parser.parse_args(args=[], quick_parse=True)
        params.model = model

    # The target dataset goes last and is flagged as already scaled so that
    # only the first dataset is refined against it.
    all_tables = [reflection_table, target_reflection_table]
    experiment.append(target_experiment[0])
    experiments = create_scaling_model(params, experiment, all_tables)
    experiments[-1].scaling_model.set_scaling_model_as_scaled()

    scaler = TargetScalerFactory.create(params, experiments, all_tables)
    scaler.perform_scaling()
    scaler.expand_scales_to_all_reflections(calc_cov=True)
    return scaler.unscaled_scalers[0].reflection_table
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import DBSCAN
def billing_pattern(data, params, hitlist):
    """
    In this model, outliers from the general billing pattern (see below) are flagged,
    based on how far away from the nearest cluster they are.
    The pattern is defined as the fraction of money billed for different procedures.
    This multi-dimensional space is reduced to three dimensions. In that 3D space, a
    DBSCAN cluster finder is used on the standardized locations, so the same density
    threshold for clusters can be used, no matter the values of input parameters.
    No monetary loss is defined, as it is not at all clear how this would be defined.

    Args:
        data: claim-level DataFrame with at least 'Provider_ID',
            'Procedure_code' and 'Paid_amt' columns.
        params: model parameters; only params['Weight'] is used here.
        hitlist: DataFrame of previously flagged providers to append to.

    Returns:
        The hitlist with one row appended per flagged provider.
    """
    # Parameters are passed for consistency, in this version it runs without any user-defined parameters.
    # Create a pivot table with amounts per procedure code for all providers, then normalize
    piv_proc = pd.pivot_table(data, values='Paid_amt', index='Provider_ID', columns='Procedure_code', aggfunc='sum')
    piv_proc.replace(np.nan, 0, inplace=True)
    fractional_proc = piv_proc.div(piv_proc.sum(axis=1), axis=0)
    # Use PCA to be able to select three main axes.
    # (The unused specialism lookup that used to be built here was removed.)
    pca = PCA()
    pcas = pca.fit_transform(fractional_proc)
    # Scale all axes to zero mean, unit stdev and do a density scan.
    X = StandardScaler().fit_transform(pcas[:, :3])
    scanner = DBSCAN(eps=0.5)
    results = scanner.fit(X)
    # Select outliers and compute scores: for every outlier, the distance to
    # the nearest cluster center, normalized by that cluster's pooled stdev.
    labels = results.labels_
    nclusters = len(np.unique(labels)) - (1 if -1 in labels else 0)
    # Calculate cluster centers (columns 0-2) and sizes (column 3).
    center = np.zeros([nclusters, 4])
    for iclus in range(nclusters):
        coords = np.array(pcas[labels == iclus])
        center[iclus, :3] = np.array([np.mean(coords[:, 0]), np.mean(coords[:, 1]), np.mean(coords[:, 2])])
        center[iclus, 3] = np.sqrt(np.std(coords[:, 0])**2 + np.std(coords[:, 1])**2 + np.std(coords[:, 2])**2)
    # DBSCAN labels noise points as -1; those are the flagged providers.
    out_pcas = pcas[labels == -1][:, :3]
    ids = piv_proc.index[labels == -1]
    outliers = list(ids)
    score = []
    money = []
    # Renamed the loop variable from 'pca' so it no longer shadows the fitted
    # PCA instance above.
    for point in out_pcas:
        distsq = np.zeros(nclusters)
        for iclus in range(nclusters):
            distsq[iclus] = np.sum((np.array(point) - np.array(center[iclus, :3]))**2)
        score.append(np.sqrt(np.min(distsq)) / (3 * center[np.argmin(distsq), 3]))
        money.append(0)  # no monetary loss defined for this model
    hl_add = pd.DataFrame({'Provider_ID': outliers, 'Score': score, 'Monetary': money,
                           'Model': ['Billing pattern'] * len(score), 'Weight': [params['Weight']] * len(score)})
    hitlist = hitlist.append(hl_add, sort=True)
    return hitlist
def partition_annots_into_singleton_multiton(ibs, aid_list):
    """Split name-grouped annotations into single-member and multi-member groups.

    Returns a ``(singletons, multitons)`` pair of lists of aid groups; groups
    with exactly one aid go to the first list, groups with more than one to
    the second (empty groups are dropped).
    """
    name_groups = ibs.group_annots_by_name(aid_list)[0]
    singletons, multitons = [], []
    for group in name_groups:
        if len(group) == 1:
            singletons.append(group)
        elif len(group) > 1:
            multitons.append(group)
    return singletons, multitons
def teaser_block(parser, token):
    """
    Template tag that renders a model instance as a template-formatted block.

    Two objects are inserted into the context:

        ``instance`` - The model instance
        ``fields``   - A list of (name, label, value) tuples representing the
                       instance's fields

    Use the tag (rather than the filter) when you want to override the
    default template for a particular model. For example, in the example
    project Pepulators have Jambs, so a Pepulator detail block shows a list
    of Jambs; to use a custom Jamb detail template instead:

        {% with pepulator_factory_jamb_teaser_template="pepulator_factory/jamb_detail.html" %}
        {% teaser_block pepulator %}
        {% endwith %}

    The override variable is named by the app name (``pepulator_factory``),
    the lowercase model name (``jamb``) and the suffix ``_template``.
    """
    bits = token.split_contents()
    # split_contents() honors quoting; exactly two pieces means "tag + arg".
    if len(bits) != 2:
        raise TemplateSyntaxError("%r tag requires exactly two arguments" %
                                  token.contents.split()[0])
    instance_name = bits[1]
    return ModelTeaserNode(instance_name, resolved=False)
def galeshapley(suitor_pref_dict, reviewer_pref_dict, max_iteration):
    """Run the Gale-Shapley deferred-acceptance algorithm.

    Produces the unique stable, suitor-optimal matching: every unmatched
    suitor proposes to the top reviewer left on their list; a free reviewer
    accepts, while a matched reviewer trades up only if they prefer the new
    suitor, otherwise the rejected suitor strikes that reviewer off their
    list. Proposal rounds repeat until everyone is matched or
    ``max_iteration`` rounds have run.

    Note: ``suitor_pref_dict`` is mutated in place (rejected reviewers are
    removed from suitors' preference lists).

    Parameters
    ----------
    suitor_pref_dict : dict
        Suitor -> ordered list of reviewers, most preferred first.
    reviewer_pref_dict : dict
        Reviewer -> ordered list of suitors, most preferred first.
    max_iteration : int
        Maximum number of proposal rounds.

    Returns
    -------
    dict
        Suitor -> matched reviewer (suitor-optimal stable matching).
    """
    matching = {}
    partner_of_reviewer = {}
    unmatched = list(suitor_pref_dict.keys())
    for _ in range(max_iteration):
        if not unmatched:
            break
        for suitor in unmatched:
            reviewer = suitor_pref_dict[suitor][0]
            if reviewer not in matching.values():
                # Reviewer is free: accept the proposal outright.
                matching[suitor] = reviewer
                partner_of_reviewer[reviewer] = suitor
                continue
            current = partner_of_reviewer.get(reviewer)
            prefs = reviewer_pref_dict[reviewer]
            if prefs.index(suitor) < prefs.index(current):
                # Reviewer prefers the newcomer: displace the current partner.
                del matching[current]
                matching[suitor] = reviewer
                partner_of_reviewer[reviewer] = suitor
            else:
                # Rejected: this suitor never proposes to that reviewer again.
                suitor_pref_dict[suitor].remove(reviewer)
        unmatched = list(set(suitor_pref_dict.keys()) - set(matching.keys()))
    return matching
def geom_bar(mapping=None, *, data=None, stat=None, position=None, show_legend=None, sampling=None, tooltips=None,
             **other_args):
    """
    Display a bar chart which makes the height of the bar proportional to the
    number of observed variable values, mapped to x axis.
    Parameters
    ----------
    mapping : `FeatureSpec`
        Set of aesthetic mappings created by `aes()` function.
        Aesthetic mappings describe the way that variables in the data are
        mapped to plot "aesthetics".
    data : dict or `DataFrame`
        The data to be displayed in this layer. If None, the default, the data
        is inherited from the plot data as specified in the call to ggplot.
    stat : str, default='count'
        The statistical transformation to use on the data for this layer, as a string.
        Supported transformations: 'identity' (leaves the data unchanged),
        'count' (counts number of points with same x-axis coordinate),
        'bin' (counts number of points with x-axis coordinate in the same bin),
        'smooth' (performs smoothing - linear default),
        'density' (computes and draws kernel density estimate).
    position : str or `FeatureSpec`
        Position adjustment, either as a string ('identity', 'stack', 'dodge', ...),
        or the result of a call to a position adjustment function.
    show_legend : bool, default=True
        False - do not show legend for this layer.
    sampling : `FeatureSpec`
        Result of the call to the `sampling_xxx()` function.
        Value None (or 'none') will disable sampling for this layer.
    tooltips : `layer_tooltips`
        Result of the call to the `layer_tooltips()` function.
        Specifies appearance, style and content.
    other_args
        Other arguments passed on to the layer.
        These are often aesthetics settings used to set an aesthetic to a fixed value,
        like color='red', fill='blue', size=3 or shape=21.
        They may also be parameters to the paired geom/stat.
    Returns
    -------
    `LayerSpec`
        Geom object specification.
    Notes
    -----
    `geom_bar()` makes the height of the bar proportional to the number
    of observed variable values, mapped to x axis. Is intended to use for discrete data.
    If used for continuous data with stat='bin' produces histogram for binned data.
    `geom_bar()` handles no group aesthetics.
    Computed variables:
    - ..count.. : number of points with same x-axis coordinate.
    `geom_bar()` understands the following aesthetics mappings:
    - x : x-axis value (this values will produce cases or bins for bars).
    - y : y-axis value (this value will be used to multiply the case's or bin's counts).
    - alpha : transparency level of a layer. Understands numbers between 0 and 1.
    - color (colour) : color of a geometry lines. Can be continuous or discrete. For continuous value this will be a color gradient between two colors.
    - fill : color of geometry filling.
    - size : lines width. Defines bar line width.
    Examples
    --------
    .. jupyter-execute::
        :linenos:
        :emphasize-lines: 6
        import numpy as np
        from lets_plot import *
        LetsPlot.setup_html()
        np.random.seed(42)
        data = {'x': np.random.randint(10, size=100)}
        ggplot(data, aes(x='x')) + geom_bar()
    |
    .. jupyter-execute::
        :linenos:
        :emphasize-lines: 9-10
        import numpy as np
        from lets_plot import *
        LetsPlot.setup_html()
        np.random.seed(42)
        n = 10
        x = np.arange(n)
        y = 1 + np.random.randint(5, size=n)
        ggplot() + \\
            geom_bar(aes(x='x', y='y', fill='x'), data={'x': x, 'y': y}, \\
                     stat='identity', show_legend=False) + \\
            scale_fill_discrete()
    |
    .. jupyter-execute::
        :linenos:
        :emphasize-lines: 9-12
        import numpy as np
        from lets_plot import *
        LetsPlot.setup_html()
        np.random.seed(42)
        n = 5000
        x = np.random.normal(size=n)
        c = np.random.choice(list('abcde'), size=n)
        ggplot({'x': x, 'class': c}, aes(x='x')) + \\
            geom_bar(aes(group='class', fill='class', color='class'), \\
                     stat='bin', sampling=sampling_pick(n=500), alpha=.3, \\
                     tooltips=layer_tooltips().line('@|@class')
                                              .line('count|@..count..'))
    """
    # Thin wrapper: every geom_xxx helper funnels through the shared _geom()
    # layer builder; the first argument selects the 'bar' geometry.
    return _geom('bar',
                 mapping=mapping,
                 data=data,
                 stat=stat,
                 position=position,
                 show_legend=show_legend,
                 sampling=sampling,
                 tooltips=tooltips,
                 **other_args) | 818e8ba7cd30913168acf11a8267a630dbd2d8d3 | 3,629,365
def diff_string(old, new):
    """Return a string representing the difference between two numeric values.

    The sign marker is looked up in the module-level ``CMPS`` table (indexed
    by the three-way comparison result -1/0/1) and the magnitude is appended
    with two decimals, or omitted entirely when the values are equal.
    """
    diff = abs(old - new)
    # (old > new) - (old < new) reproduces Python 2's cmp(); the cmp()
    # builtin was removed in Python 3, and this form works on both.
    sign = (old > new) - (old < new)
    diff_str = "%s%s" % (CMPS[sign], diff and ('%.2f' % diff) or '')
    return diff_str
def _tile_grid_to_cesium1x_source_options(tile_grid: TileGrid, url: str):
    """
    Convert a TileGrid into the options dict expected by
    Cesium.UrlTemplateImageryProvider(options) of Cesium 1.45+.
    See
    * https://cesiumjs.org/Cesium/Build/Documentation/UrlTemplateImageryProvider.html?classFilter=UrlTemplateImageryProvider
    :param tile_grid: tile grid
    :param url: source url
    :return: Cesium imagery-provider options dictionary
    """
    extent = tile_grid.geo_extent
    rectangle = dict(west=extent.west, south=extent.south,
                     east=extent.east, north=extent.north)
    tiling_scheme = dict(rectangle=rectangle,
                         numberOfLevelZeroTilesX=tile_grid.num_level_zero_tiles_x,
                         numberOfLevelZeroTilesY=tile_grid.num_level_zero_tiles_y)
    return dict(url=url,
                rectangle=rectangle,
                minimumLevel=0,
                maximumLevel=tile_grid.num_levels - 1,
                tileWidth=tile_grid.tile_size[0],
                tileHeight=tile_grid.tile_size[1],
                tilingScheme=tiling_scheme)
def update_code(code_id: int, code: schemas.CodeUpdate, db: Session = Depends(get_db)):
    """
    Update an existing code record.

    Raises 404 when the code id is unknown, 400 when the referenced device
    does not exist or when a different code already uses the same code string.
    """
    existing = crud.get_code(db=db, code_id=code_id)
    if existing is None:
        raise HTTPException(status_code=404, detail="Code not found")

    if crud.get_device(db=db, device_id=code.device_id) is None:
        raise HTTPException(status_code=400, detail="Device does NOT exist")

    # A matching code string is only a conflict when it belongs to a
    # different record than the one being updated.
    duplicate = crud.get_code_by_code_str(db=db, code_str=code.code)
    if duplicate and duplicate.id != code_id:
        raise HTTPException(status_code=400, detail="Code already registered")

    return crud.update_code(db=db, code_id=code_id, code=code)
def vgg19_lstar():
    """
    Build the VGG19 network, protect it with Gu and Rigazio's L* noise
    defense, run the defended model on the bundled sample image and return
    the aggregated ('count' then 'mean') prediction.
    """
    defended = dms.GuRigazio(
        keras_model=networks.VGG19().wrapped_model(),
        noise_stddev=4.71e-4,
        how='L*',
        interpretation='weights'
    )
    votes = defended.predict_n(sample_image(SAMPLE_IMAGE_PATH))
    return dms.aggregate_predict_n_by(['count', 'mean'], votes)
def find_setting(group, key, site=None):
    """Get a setting or longsetting by group and key, cache and return it.

    Looks the value up in the cache first; on a miss it queries the
    ``Setting`` model and falls back to ``LongSetting``, caching whatever was
    found (including None, so repeated misses stay cheap). When the site is
    configured to bypass the database, the value is taken from the overrides
    mapping instead. Raises ``SettingNotSet`` when nothing was found.
    """
    siteid = _safe_get_siteid(site)
    setting = None
    use_db, overrides = get_overrides(siteid)
    ck = cache_key('Setting', siteid, group, key)
    if use_db:
        # Database-backed settings: consult the cache first.
        try:
            setting = cache_get(ck)
        except NotCachedError as nce:
            # Cache miss: only hit the ORM once the Django app registry is
            # ready. 'apps.ready' vs 'apps.app_cache_ready()' covers both
            # newer and older Django versions.
            if hasattr(apps, 'ready'):
                app_cache_ready = apps.ready
            else:
                app_cache_ready = apps.app_cache_ready()
            if app_cache_ready:
                try:
                    setting = Setting.objects.get(site__id__exact=siteid, key__exact=key, group__exact=group)
                except Setting.DoesNotExist:
                    # maybe it is a "long setting"
                    try:
                        setting = LongSetting.objects.get(site__id__exact=siteid, key__exact=key, group__exact=group)
                    except LongSetting.DoesNotExist:
                        pass
                # Cache the result even when it is None so the next lookup
                # does not query the database again.
                cache_set(ck, value=setting)
    else:
        # DB bypass: the setting comes from the in-memory overrides mapping
        # and is wrapped in a read-only ImmutableSetting.
        grp = overrides.get(group, None)
        if grp and key in grp:
            val = grp[key]
            setting = ImmutableSetting(key=key, group=group, value=val)
            log.debug('Returning overridden: %s', setting)
    if not setting:
        # NOTE(review): 'setting' is presumably either None or a (truthy)
        # model instance here; a falsy setting object would also raise.
        raise SettingNotSet(key, cachekey=ck)
    return setting | 0088610e08d78316d036551fd58984b48bc49b6a | 3,629,370
def isoslice(var,prop,isoval, grd, Cpos='rho', masking=True, vert=False):
"""
isoslice, lon, lat = isoslice(variable,property, isoval, grd)
optional switch:
- Cpos='rho', 'u' or 'v' specify the C-grid position where
the variable rely
- masking=True mask the output if True
- vert=True/False If True, return the position of
the verticies
- mode='linear' or 'spline' specify the type of interpolation
result is a projection of variable at property == isoval in the first
nonsingleton dimension. In the case when there is more than one zero
crossing, the results are averaged.
lon, and lat contain the C-grid position of the slice for plotting.
If vert=True, lon and lat and z contain contain the position of the
verticies (to be used with pcolor)
EXAMPLE:
s_at_m5 = isoslice(s,z,-5); # s at z == -5
h_at_s30 = isoslice(z,s,30); # z at s == 30
"""
if (len(var.squeeze().shape)<=2):
raise ValueError, 'variable must have at least two dimensions'
if not prop.shape == var.shape:
raise ValueError, 'dimension of var and prop must be identical'
# compute the depth on Arakawa-C grid position
if Cpos is 'u':
# average z_r at Arakawa-C u points
z = 0.5 * (grd.vgrid.z_r[0,:,:,:-1] + grd.vgrid.z_r[0,:,:,1:])
if vert == True:
lon = 0.5 * (grd.hgrid.lon_vert[:,:-1] + grd.hgrid.lon_vert[:,1:])
lat = 0.5 * (grd.hgrid.lat_vert[:,:-1] + grd.hgrid.lat_vert[:,1:])
else:
lon = grd.hgrid.lon_u[:]
lat = grd.hgrid.lat_u[:]
mask = grd.hgrid.mask_u[:]
elif Cpos is 'v':
# average z_r at Arakawa-C v points
z = 0.5 * (grd.vgrid.z_r[0,:,:-1,:] + grd.vgrid.z_r[0,:,1:,:])
if vert == True:
lon = 0.5 * (grd.hgrid.lon_vert[:-1,:] + grd.hgrid.lon_vert[1:,:])
lat = 0.5 * (grd.hgrid.lat_vert[:-1,:] + grd.hgrid.lat_vert[1:,:])
else:
lon = grd.hgrid.lon_v[:]
lat = grd.hgrid.lat_v[:]
mask = grd.hgrid.mask_v[:]
elif Cpos is 'rho':
# for temp, salt, rho, w
z = grd.vgrid.z_r[0,:]
if vert == True:
lon = grd.hgrid.lon_vert[:]
lat = grd.hgrid.lat_vert[:]
else:
lon = grd.hgrid.lon_rho[:]
lat = grd.hgrid.lat_rho[:]
mask = grd.hgrid.mask_rho[:]
else:
raise Warning, '%s bad position. Valid Arakawa-C are \
rho, u or v.' % Cpos
prop = prop-isoval
sz = np.shape(var)
var = var.reshape(sz[0],-1)
prop = prop.reshape(sz[0],-1)
#find zero-crossings (zc == 1)
zc = np.where( (prop[:-1,:] * prop[1:,:])<0 ,1., 0.)
varl = var[:-1,:] * zc
varh = var[1:,:] * zc
propl = prop[:-1,:] * zc
proph = prop[1:,:] * zc
isoslice = varl - propl * (varh - varl) / (proph - propl)
isoslice = np.where(zc==1., isoslice, 0.)
szc = zc.sum(axis=0)
szc = np.where(szc==0., 1, szc)
isoslice = isoslice.sum(axis=0)/szc
if masking:
isoslice = np.ma.masked_where(zc.sum(axis=0)==0, isoslice)
if all(isoslice.mask):
raise Warning, 'property==%f out of range (%f, %f)' % \
(isoval, (prop+isoval).min(), (prop+isoval).max())
isoslice = isoslice.reshape(sz[1:])
# mask land
isoslice = np.ma.masked_where(mask == 0, isoslice)
return isoslice, lon, lat | 50bbac87cfadcd3aa1a280b17551721788456723 | 3,629,371 |
def get_solution(x):
    """
    Flip a binary string: every 0 becomes 1 and every 1 becomes 0.

    Args:
        x (numpy.ndarray) : binary string as numpy array.

    Returns:
        numpy.ndarray: graph solution as binary numpy array.
    """
    flipped = 1 - x
    return flipped
from typing import Any
async def async_get_config_entry_diagnostics(
    hass: HomeAssistant, config_entry: ConfigEntry
) -> dict[str, Any]:
    """Return diagnostics for a config entry, with sensitive keys redacted."""
    controller = hass.data[DOMAIN][config_entry.entry_id]
    return {
        "entry": {
            "data": async_redact_data(config_entry.data, TO_REDACT),
            "options": async_redact_data(config_entry.options, TO_REDACT),
        },
        "data": async_redact_data(controller.data, TO_REDACT),
    }
from pathlib import Path
def create_upload_file(tmp_path):
    """Create a temporary text file for upload and return its path."""
    file_path = Path(tmp_path) / "test_upload_1.txt"
    file_path.write_text("Hello World")
    return file_path
def makeFigure():
    """Get a list of the axis objects and create a figure.

    Builds the gating-overview figure: loads the 2019-11-08 monomer IL-2 Fc
    flow data and the "4-23" IL2R panels, applies compensation ('compMatrix')
    and tlog transforms, draws the CD4/Treg/Thelper, CD8 and NK(T) gates on
    the individual panels, relabels every axis, and appends the receptor
    summary panels.
    """
    # 3x4 axis grid; multz appears to widen panels 8 and 10 (see getSetup).
    ax, f = getSetup((10, 8), (3, 4), multz={8: 1, 10: 1})
    subplotLabel(ax)
    # Load the 2019-11-08 monomer IL-2 Fc data (row "A"), pool wells,
    # subsample 20% of the events and compensate.
    Tcell_pathname = path_here + "/data/flow/2019-11-08 monomer IL-2 Fc signaling/CD4 T cells - IL2-060 mono, IL2-060 dimeric"
    NK_CD8_pathname = path_here + "/data/flow/2019-11-08 monomer IL-2 Fc signaling/NK CD8 T cells - IL2-060 mono, IL2-060 dimeric"
    Tcell_sample, _ = importF2(Tcell_pathname, "A")
    NK_CD8_sample, _ = importF2(NK_CD8_pathname, "A")
    Tcell_sample = combineWells(Tcell_sample).subsample(0.2)
    NK_CD8_sample = combineWells(NK_CD8_sample).subsample(0.2)
    Tcell_sample = applyMatrix(Tcell_sample, compMatrix('2019-11-08', '1', 'A'))
    NK_CD8_sample = applyMatrix(NK_CD8_sample, compMatrix('2019-11-08', '1', 'B'))
    Tcell_sample = Tcell_sample.transform("tlog", channels=['VL1-H', 'VL4-H', 'BL1-H', 'BL3-H'])  # Tlog transformations
    NK_CD8_sample = NK_CD8_sample.transform("tlog", channels=['RL1-H', 'VL4-H', 'BL1-H', 'BL2-H'])  # Tlog transformations
    # Panel 0: CD4 threshold gate (band 6500-8000 on VL4-H).
    cd4_gate = ThresholdGate(6500.0, ['VL4-H'], region='above') & ThresholdGate(8000.0, ['VL4-H'], region='below')
    ax[0] = Tcell_sample.plot(['VL4-H'], gates=cd4_gate, ax=ax[0])  # CD4
    plt.title("Singlet Lymphocytes")
    #ax.set(xlabel= "CD4", ylabel="Events")
    plt.grid()
    # Panel 1: Treg vs T helper polygon gates on the CD4+ subset.
    sampleCD4 = Tcell_sample.gate(cd4_gate)
    Treg_gate = PolyGate([(4.2e3, 7.2e3), (6.5e03, 7.2e03), (6.5e03, 5.3e03), (4.9e03, 5.3e03), (4.2e03, 5.7e03)], ('VL1-H', 'BL1-H'), region='in', name='treg')
    Thelp_gate = PolyGate([(1.8e03, 3.1e03), (1.8e03, 4.9e03), (6.0e03, 4.9e03), (6.0e03, 3.1e03)], ('VL1-H', 'BL1-H'), region='in', name='thelper')
    _ = sampleCD4.plot(['VL1-H', 'BL1-H'], gates=[Treg_gate, Thelp_gate], gate_colors=['red', 'red'], cmap=cm.jet, ax=ax[1])  # CD4
    plt.title("CD4+ Cells")
    plt.xlabel("CD25")
    plt.ylabel("FOXP3")
    plt.grid()
    #CD8+ Cells
    CD3CD8gate = PolyGate([(7.5e3, 8.4e3), (4.7e3, 8.4e3), (4.7e03, 6.5e03), (7.5e03, 6.5e03)], ('VL4-H', 'RL1-H'), region='in', name='treg')
    _ = NK_CD8_sample.plot(['VL4-H', 'RL1-H'], gates=CD3CD8gate, gate_colors='red', cmap=cm.jet, ax=ax[2])  # CD3, CD8
    plt.title("Singlet Lymphocytes")
    plt.xlabel("CD3")
    plt.ylabel("CD8")
    plt.grid()
    # NK Cells
    NKgate = PolyGate([(4.8e3, 5.1e3), (5.9e3, 5.1e3), (5.9e03, 6.1e03), (4.8e03, 6.1e03)], ('VL4-H', 'BL1-H'), region='in', name='treg')
    CD56brightgate = PolyGate([(4.8e3, 6.3e3), (5.9e3, 6.3e3), (5.9e03, 7.3e03), (4.8e03, 7.3e03)], ('VL4-H', 'BL1-H'), region='in', name='treg')
    _ = NK_CD8_sample.plot(['VL4-H', 'BL1-H'], gates=[NKgate, CD56brightgate], gate_colors=['red', 'red'], cmap=cm.jet, ax=ax[3])  # CD3, CD56
    plt.title("Singlet Lymphocytes")
    plt.xlabel("CD3")
    plt.ylabel("CD56")
    plt.grid()
    # Gating for live cells
    sample1A, unstained, isotype = importF("4-23", "1", "A", 1, "IL2R", None)
    sample2B, unstained, isotype = importF("4-23", "1", "B", 2, "IL2R", None)
    sample3C, unstained, isotype = importF("4-23", "1", "C", 3, "IL2R", None)
    panel1 = sample1A.transform("tlog", channels=['VL6-H', 'VL4-H', 'BL1-H', 'VL1-H', 'BL3-H']).subsample(0.2)
    panel2 = sample2B.transform("tlog", channels=['VL4-H', 'BL3-H']).subsample(0.2)
    panel3 = sample3C.transform("tlog", channels=['VL6-H', 'VL4-H', 'BL3-H']).subsample(0.2)
    # Panels 4-5: CD3+CD4+ selection, then Treg / T helper on CD25 vs CD127.
    cd3cd4_gate = PolyGate([(5.0e03, 7.3e03), (5.3e03, 5.6e03), (8.0e03, 5.6e03), (8.0e03, 7.3e03)], ('VL4-H', 'VL6-H'), region='in', name='cd3cd4')
    _ = panel1.plot(['VL4-H', 'VL6-H'], gates=cd3cd4_gate, gate_colors=['red'], cmap=cm.jet, ax=ax[4])  # CD3, CD4
    plt.title("Singlet Lymphocytes")
    plt.xlabel("CD3")
    plt.ylabel("CD4")
    plt.grid()
    samplecd3cd4 = panel1.gate(cd3cd4_gate)
    thelp_gate = PolyGate([(0.2e03, 6.8e03), (0.2e03, 4.4e03), (3.7e03, 4.4e03), (5.7e03, 5.9e03), (5.7e03, 6.8e03)], ('VL1-H', 'BL1-H'), region='in', name='thelp')
    treg_gate = PolyGate([(3.8e03, 4.4e03), (3.8e03, 3.0e03), (6.5e03, 2.9e03), (6.5e03, 5.0e03), (5.7e03, 5.8e03)], ('VL1-H', 'BL1-H'), region='in', name='treg')
    _ = samplecd3cd4.plot(['VL1-H', 'BL1-H'], gates=[thelp_gate, treg_gate], gate_colors=['red', 'red'], cmap=cm.jet, ax=ax[5])  # CD3, CD4
    plt.title("CD3+CD4+ cells")
    plt.xlabel("CD25")
    plt.ylabel("CD127")
    plt.grid()
    # Panels 6-7: NK / NKT gates on CD3 vs CD56, then CD3+CD8+ gate.
    nk_gate = PolyGate([(3.3e3, 5.4e3), (5.3e3, 5.4e3), (5.3e3, 7.3e3), (3.3e3, 7.3e3)], ('VL4-H', 'BL3-H'), region='in', name='nk')
    nkt_gate = PolyGate([(5.6e3, 5.1e3), (7.6e3, 5.1e3), (7.6e3, 7.1e3), (5.6e3, 7.1e3)], ('VL4-H', 'BL3-H'), region='in', name='nkt')
    _ = panel2.plot(['VL4-H', 'BL3-H'], gates=[nk_gate, nkt_gate], gate_colors=['red', 'red'], cmap=cm.jet, ax=ax[6])  # CD56 vs. CD3
    # NOTE(review): samplenk/samplenkt are computed but not used below.
    samplenk = panel2.gate(nk_gate)
    samplenkt = panel2.gate(nkt_gate)
    plt.title("Singlet Lymphocytes")
    plt.xlabel("CD3")
    plt.ylabel("CD56")
    plt.grid()
    cd8_gate = PolyGate([(4.2e3, 5.7e3), (8.1e3, 5.7e3), (8.1e3, 8.0e3), (4.2e3, 8.0e3)], ('VL4-H', 'VL6-H'), region='in', name='cd8')
    _ = panel3.plot(['VL4-H', 'VL6-H'], gates=cd8_gate, gate_colors=['red'], cmap=cm.jet, ax=ax[7])  # CD8 vs. CD3
    plt.title("Singlet Lymphocytes")
    plt.xlabel("CD3")
    plt.ylabel("CD8")
    # Re-title and re-label every panel after plotting.
    for i, axs in enumerate(ax):
        if i == 0:
            print(" ")
            # weird error replace later, axs is not correct object type
            # axs.set(xlabel='CD4',ylabel='Events')
        elif i == 1:
            axs.set_title('T Cell Gating')
            axs.set(xlabel='CD25', ylabel='FOXP3')
        elif i == 2:
            axs.set_title('CD8+ Cells Gating')
            axs.set(xlabel='CD3', ylabel='CD8')
        elif i == 3:
            axs.set_title('NK Cells Gating')
            axs.set(xlabel='CD3', ylabel='CD56')
        elif i == 4:
            axs.set_title('CD3+CD4+ Gating')
            axs.set(xlabel='CD3', ylabel='CD4')
        elif i == 5:
            axs.set_title('T reg and T Helper Gating')
            axs.set(xlabel='CD25', ylabel='CD127')
        elif i == 6:
            axs.set_title('NK and NKT Gating')
            axs.set(xlabel='CD3', ylabel='CD56')
        elif i == 7:
            axs.set_title('CD3+CD8+ Gating')
            axs.set(xlabel='CD3', ylabel='CD8')
        if i != 0:
            axs.grid()
    # Wide panels 8-9: receptor summary plots.
    receptorPlot(ax[8])
    IL2RahiLoPlot(ax[9])
    return f | 6849598c09e9367c2702b0ee47232e9f2c69d1a3 | 3,629,375
def KimKim2011(medium="Water", p_steam=120, deltaT_sub=5, Theta=90, CAH=10,
               Theta_a=None, Theta_r=None, k_coat=15, delta_coat=0, h_i=None,
               c=1, N_s=250, print_properties=False, **kwargs):
    """ main function, calculates dropwise condensation heat flux as described in:
    Kim, S., & Kim, K. J. (2011). Dropwise Condensation Modeling Suitable for Superhydrophobic Surfaces. Journal of
    Heat Transfer, 133(8), 081502–081502. https://doi.org/10.1115/1.4003742
    Parameters
    ----------
    medium: str
        defines the condensing fluid to calculate fluid properties using CoolProp, list of viable fluids:
        http://www.coolprop.org/fluid_properties/PurePseudoPure.html#list-of-fluids
    p_steam: float
        pressure in mbar = 100 Pa
    deltaT_sub: float
        temperature difference to the cooled wall in K
    Theta: float
        static contact angle in deg
    CAH: float
        contact angle hysteresis in deg, only used as a fallback if no values for Theta_a and Theta_r are given
    Theta_a: float
        advancing contact angle in deg
    Theta_r: float
        receding contact angle in deg
    k_coat: float
        thermal conductivity of the coating in W/(mK)
    delta_coat: float
        thickness of the coating in m
    h_i: float
        interfacial heat transfer coefficient in MW/m²K, if no value is given, h_i is calculated
    c: float
        numerical constant, "depends on the shape of the drop and on the steepness of the substrate surface"
    N_s: float
        number of Nucleation sites in 10^9 1/m²
    print_properties: bool
        if set to true, calculated fluid properties are printed
    r_lower: float, optional
        sets a lower boundary for the heat flux calculation, only droplets with a larger radii are considered
    r_upper: float, optional
        sets an upper boundary for the heat flux calculation, only droplets with a smaller radii are considered
    Returns
    ----------
    q: float
        heat flux density in W/m²
    q_n: float
        heat flux density through small droplets in W/m²
    q_N: float
        heat flux density through large droplets in W/m²
    r_min: float
        minimum droplet radius in m
    r_e: float
        effective drop radius in m
    r_max: float
        effective maximum drop radius in m
    Q_drop: partial function
        rate of heat flow in W depending on drop radius in m
    n: partial function
        drop size distribution for small drops depending on drop radius r in m
    N: partial function
        drop size distribuion for large drops depending on drop radius r in m
    """
    # get kwargs (False doubles as "not supplied")
    r_lower = kwargs.get("r_lower", False)
    r_upper = kwargs.get("r_upper", False)
    # prepare input parameters
    Theta, Theta_a, Theta_r, h_i, N_s, T_sat, sigma, k_c, h_fg, rho, g, R_s, rho_g \
        = init_parameters(Theta_a=Theta_a, Theta_r=Theta_r, Theta=Theta,
                          CAH=CAH, p_steam=p_steam, h_i=h_i, medium=medium, N_s=N_s)
    # calculate interfacial heat transfer coefficient h_i
    # (always computed so it can be reported; only used when h_i was not given)
    h_i_calc = h_i_Schrage(R_s=R_s, T_sat=T_sat, h_fg=h_fg, rho_g=rho_g, sigma_c=1)
    if not h_i:
        h_i = h_i_calc
    # calculate drop radii: r_min (nucleation), r_e (coalescence), r_max (departure)
    r_min = r_min_KimKim(T_sat=T_sat, sigma=sigma, h_fg=h_fg,
                         rho=rho, deltaT_sub=deltaT_sub)
    r_e = r_e_KimKim(N_s)
    r_max = r_max_KimKim(c=c, Theta_r=Theta_r, Theta_a=Theta_a,
                         Theta=Theta, sigma=sigma, rho=rho, g=g)
    # define functions for rate of heat flow through a single droplet and drop size distribution
    Q_drop = partial(Q_drop_KimKim, deltaT_sub=deltaT_sub, r_min=r_min, delta_coat=delta_coat, k_coat=k_coat, k_c=k_c,
                     Theta=Theta, h_i=h_i)
    Q_drop.__doc__ = "rate of heat flow in W depending on drop radius r in m"
    n = partial(n_KimKim, deltaT_sub=deltaT_sub, r_min=r_min, delta_coat=delta_coat, k_coat=k_coat, k_c=k_c,
                Theta=Theta, h_i=h_i, rho=rho, h_fg=h_fg, r_e=r_e, r_max=r_max)
    n.__doc__ = "drop size distribution for small drops depending on drop radius r in m"
    N = partial(N_LeFevre, r_max=r_max)
    N.__doc__ = "drop size distribution for large drops depending on drop radius r in m"
    # integrate and calculate heat flux density
    def Q_drop_n(r):
        """small drops"""
        Q_drop_n = Q_drop(r) * n(r)
        return Q_drop_n
    def Q_drop_N(r):
        """large drops"""
        Q_drop_N = Q_drop(r) * N(r)
        return Q_drop_N
    # optional boundaries for integration, clamped to the physical range [r_min, r_max]
    if (not r_lower or r_lower < r_min):
        r_lower = r_min
    if (not r_upper or r_upper > r_max):
        r_upper = r_max
    # The small-drop distribution n(r) applies below r_e and the large-drop
    # distribution N(r) above it; integrate each part only over the portion
    # of [r_lower, r_upper] it covers.
    if r_lower < r_e:
        if r_upper > r_e:
            q_n, q_n_interr = integrate.quad(Q_drop_n, r_lower, r_e)
        else:
            q_n, q_n_interr = integrate.quad(Q_drop_n, r_lower, r_upper)
    else:
        q_n = 0
    if r_upper > r_e:
        if r_lower < r_e:
            q_N, q_N_interr = integrate.quad(Q_drop_N, r_e, r_upper)
        else:
            q_N, q_N_interr = integrate.quad(Q_drop_N, r_lower, r_upper)
    else:
        q_N = 0
    q = q_n + q_N
    # calculate additional values
    misc = {}
    misc["Bo"] = bond_number(r_max, sigma, rho, g)
    # optional output of calculated fluid properties
    if print_properties:
        print("\nfluid properties:")
        print("fluid: \t", medium)
        print("T_sat: \t", T_sat-273.15, "°C")
        print("sigma: \t", sigma*1000, "mN/m")
        print("h_fg: \t", h_fg/1000, "kJ/kg")
        print("rho_l:\t", rho, "kg/m³")
        print("rho_g:\t", rho_g, "kg/m³")
        print("R_s:\t", R_s, "J/(kg*K)")
        print("\ninterfacial heat transfer coefficient:")
        print("h_i: \t ", round(h_i, 1), "W/m²K")
        print("h_i_calc:", round(h_i_calc, 1), "W/m²K")
    return q, q_n, q_N, r_min, r_e, r_max, Q_drop, n, N, misc | b2723d279ef8eee3a51fabde4462cbcad2ccd521 | 3,629,376
def cumall(series):
    """
    Cumulative "all" of values: entry i is True iff every value up to and
    including position i is truthy. Equivalent to
    `series.expanding().apply(np.all).astype(bool)`.

    Args:
        series: column to compute cumulative all for.
    """
    expanded = series.expanding().apply(np.all)
    return expanded.astype(bool)
def manhattan_distance(a: Point, b: Point) -> int:
    """Return the Manhattan (L1) distance between two 2-D points."""
    dx = abs(a[0] - b[0])
    dy = abs(a[1] - b[1])
    return dx + dy
import json
def gen_new_dns_json():
    """
    Generate json file with IPs and their respective domains.
    :return: Stats about domains
    """
    entries = [{'ip': ip, 'domain': DATA.get(ip)} for ip in DATA.keys()]
    payload = {"domains": entries}
    with open(f'{PATH}{DOMAIN_SUFIX}', 'w') as out_file:
        out_file.write(json.dumps(payload, indent=2, sort_keys=True))
    return f'{len(payload["domains"])} IPs were processed. '
def recorderLocations(year = 2014):
    """
    Return all survey locations and recorder abbreviations for a given year.
    Only valid for years after 2009; earlier years print a message and
    return None.
    """
    if year <= 2009:
        print('Recorder locations can only be returned for years after 2009.')
        return None
    stryear = str(year)
    groups = loadTable('groups')
    recorderids = loadTable('recorderinstall')
    reclocs = groups.merge(recorderids, left_on='GroupID', right_on='GROUP_ID')
    # Recorder IDs share a three-letter site prefix.
    reclocs['recorder_abrv'] = reclocs['RECORDER_ID'].apply(lambda rid: rid[:3])
    yearlocs = reclocs.loc[reclocs['Year'] == stryear,
                           ['GroupID', 'LocName', 'recorder_abrv']].drop_duplicates()
    return yearlocs.sort_values('LocName')
def ensure_rng(rng=None):
    """Simple version of the ``kwarray.ensure_rng``

    Coerce the input into a ``numpy.random.RandomState``.

    Args:
        rng (int | numpy.random.RandomState | None):
            if None, then defaults to the global rng. Otherwise this can be an
            integer or a RandomState class

    Returns:
        (numpy.random.RandomState) : rng -
            a numpy random number generator

    References:
        https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270
    """
    if rng is None:
        # Use numpy's module-level global RandomState singleton.
        return np.random.mtrand._rand
    if isinstance(rng, int):
        # Seed a fresh RandomState with the given integer.
        return np.random.RandomState(rng)
    # Already a RandomState (or compatible object); pass through unchanged.
    return rng
def def_grad_surf(surf, u, v):
    """
    Compute the 2x2 deformation gradient F = I + grad(displacement) from a
    NURBS surface that interpolates control point displacements, evaluated
    at the (u, v) parametric coordinates.

    :param surf: NURBS surface interpolating control point displacements
    :type surf: NURBS surface object
    :param u: u parametric location
    :type u: float
    :param v: v parametric location
    :type v: float
    :returns: computed 2x2 deformation gradient
    :rtype: ndarray
    """
    F = np.zeros((2, 2))
    # tangent() is assumed to return (point, u-tangent, v-tangent);
    # normalize=False keeps raw derivative magnitudes -- TODO confirm
    # against the surface library's API.
    tangents = surf.tangent((u, v), normalize=False)
    F[0, 0] = 1 + tangents[1][0]  # d delta x1 dx1
    F[0, 1] = tangents[1][1]  # d delta x1 dx2
    # NOTE(review): F[1, 0] uses tangents[2][1], the SAME component as
    # F[1, 1] below, yet its comment says "d delta x2 dx1" -- this looks
    # like it may have been intended as tangents[2][0]; verify the math.
    F[1, 0] = tangents[2][1]  # d delta x2 dx1
    F[1, 1] = 1 + tangents[2][1]  # d delta x2 dx2
    return F
def assign(tensor, val):
    """
    Compatibility assignment operation

    Args:
        tensor: Tensor, to be assigned value of T2.
        val: Tensor or python value, which will be assigned to T1.
    Returns:
        Assigned Tensor
    """
    # Pick the assignment op that matches the TF major version in use.
    assign_op = tf.assign if _VERSION == 1 else tf.compat.v1.assign
    assign_op(tensor, val)
    return tensor
from django.contrib.auth import get_user_model
def get_v3_users_from_v2_user_ids(v2_user_ids):
    """
    Get v3 users with last_login==null and has a matching v2 user id
    """
    # Chained .filter() calls on direct (non-relational) fields are
    # equivalent to a single combined filter.
    user_model = get_user_model()
    return user_model.objects.filter(
        last_login__isnull=True,
        id__in=v2_user_ids,
    )
def filter_intersection(data, hoax_pages, print_results=False):
    """returns the dataset filtered with only the users who liked at least
    one post belonging to a hoax page and one post belonging to a non-hoax page
    print_results: if True, prints the filtering effect
    output: sparse like_matrix and page/hoax label columns
    """
    # split_likes presumably partitions likers into those of hoax-page posts
    # and those of non-hoax-page posts -- TODO confirm against its
    # definition (not visible here).
    likes_hoax, likes_nhoax = split_likes(data, hoax_pages)
    hoax_likers = set(likes_hoax)
    nonhoax_likers = set(likes_nhoax)
    # Users who liked at least one post of EACH kind.
    both_likers = nonhoax_likers.intersection(hoax_likers)
    if print_results:
        print('Total users: %d, Intersection users: %d' % (len(set(hoax_likers.union(nonhoax_likers))),len(both_likers)))
    #user filter
    # Keep only posts that retain at least one "both" liker, and within each
    # post keep only those likers.
    data_users_f = {post: [user for user in users if user in both_likers]
                    for post, users in data.items()
                    if len([user for user in users if user in both_likers])>0}
    #postid, userid conversion
    # Dense integer indices: users become columns, posts become rows.
    uid2n = dict((user_id, user_n) for (user_n, user_id) in enumerate(both_likers))
    pid2n = dict((post_id, post_n) for (post_n, post_id) in enumerate(data_users_f.keys()))
    #matrix bulding
    like_matrix = dok_matrix((len(pid2n), len(uid2n)), dtype=np.int8)
    pages = []
    hoaxes = []
    for post, users in data_users_f.items():
        # Post ids look like "<pageid>_<postid>"; the prefix identifies the
        # page -- presumably Facebook-style ids, verify against the data.
        page = post.split('_')[0]
        pages.append(page)
        if page in hoax_pages:
            hoaxes.append(1)
        else:
            hoaxes.append(0)
        for user in users:
            like_matrix[pid2n[post], uid2n[user]] = True
    if print_results:
        # NOTE(review): len() on a dok_matrix relies on its dict-like
        # behavior (number of stored entries); newer scipy versions reject
        # len() on sparse matrices -- verify the pinned scipy version.
        nlikes_filtered = len(like_matrix)
        nposts_filtered = len(pid2n)
        n_users_filtered = len(uid2n)
        lf = nlikes_filtered / (nposts_filtered*n_users_filtered)
        print("%d non-zero values out of %d (loading factor: %.2f%%)" %
              (nlikes_filtered, len(pid2n)*len(uid2n), lf*100))
    return like_matrix, pages, hoaxes
def get_path_up_down(path_source, path_target):
    """Split two root-anchored paths into an up-leg, shared peak, down-leg.

    NOTE: both lists always show the LOWER level element: path_up lists the
    source-side nodes below the peak (deepest first), path_down lists the
    target-side nodes below the peak (shallowest first).

    Args:
        path_source(list)
        path_target(list)
    """
    # Length of the common prefix (the root is always shared).
    shared_len = 0
    while (shared_len < len(path_source)
           and shared_len < len(path_target)
           and path_source[shared_len] == path_target[shared_len]):
        shared_len += 1
    peak = path_source[:shared_len][-1]
    path_down = path_target[shared_len:]
    path_up = path_source[shared_len:][::-1]
    return path_up, peak, path_down
def hsitogramEqualize(imgOrig: np.ndarray) -> (np.ndarray, np.ndarray, np.ndarray):
    """
    Equalizes the histogram of an image.

    Grayscale (2-D) images are equalized directly. RGB images are converted
    to YIQ, only the Y (luma) channel is equalized so chroma is preserved,
    and the result is converted back to RGB.

    :param imgOrig: Original image; rescaled internally via normalize_image
        (assumed to yield values in [0, 1] -- TODO confirm)
    :return: (equalized image, original histogram, equalized histogram)
    """
    num_of_pixels = imgOrig.shape[0] * imgOrig.shape[1]
    if len(imgOrig.shape) == 2:
        # Grayscale path: rescale to 0..255 and build the lookup table
        # (hsitogram_norm is defined elsewhere in this module).
        norm_255 = normalize_image(imgOrig) * NUM_OF_PIXELS_255
        lut, hist_origin = hsitogram_norm(norm_255, num_of_pixels)
        # img_eq aliases norm_255; each cell is read before it is
        # overwritten, so the in-place LUT mapping is safe.
        img_eq = norm_255
        for i in range(imgOrig.shape[0]):
            for j in range(imgOrig.shape[1]):
                img_eq[i][j] = lut[int(norm_255[i][j])]
        hist_eq, edges = np.histogram(img_eq, NUM_OF_PIXELS_256, [0, NUM_OF_PIXELS_256])
        return normalize_image(img_eq), hist_origin, hist_eq
    else:
        # Color path: equalize the luma channel in YIQ space.
        yiq = transformRGB2YIQ(imgOrig)
        norm_255 = normalize_image(yiq[:, :, 0]) * NUM_OF_PIXELS_255
        lut, hist_origin = hsitogram_norm(norm_255, num_of_pixels)
        # norm_255_new aliases norm_255 (same in-place mapping as above).
        norm_255_new = norm_255
        for i in range(imgOrig.shape[0]):
            for j in range(imgOrig.shape[1]):
                norm_255_new[i][j] = lut[int(norm_255[i][j])]
        hist_eq, edges = np.histogram(norm_255_new, NUM_OF_PIXELS_256, [0, NUM_OF_PIXELS_256])
        yiq[:, :, 0] = normalize_image(norm_255_new)
        img_eq = normalize_image(transformYIQ2RGB(yiq))
        return img_eq, hist_origin, hist_eq
import os
def topic_arn():
    """
    Get the SNS topic ARN from the SNS_TOPIC_ARN environment variable.

    :return: The SNS topic ARN
    :raises KeyError: if SNS_TOPIC_ARN is not set
    """
    arn = os.environ["SNS_TOPIC_ARN"]
    return arn
def findCentroid(points):
    """
    Compute the centroid for the vectors of a group of Active Site instances.

    Input: n ActiveSite instances (each exposing a 3-component ``.vector``)
    Output: the centroid vector as a list [x, y, z] of floats

    Raises:
        ZeroDivisionError: if ``points`` is empty (unchanged from before).
    """
    # Accumulate per-axis sums, then divide once by the count; this avoids
    # rebuilding the whole list on every iteration.
    totals = [0.0, 0.0, 0.0]
    for item in points:
        for axis in range(3):
            totals[axis] += item.vector[axis]
    count = len(points)
    return [coord / count for coord in totals]
def setup_view(f):
    """Decorator for setup views.

    Wrapped views raise Http404 unless the site is running in DEBUG mode
    and has not completed the setup wizard yet.
    """
    @wraps(f)
    def new_function(*args, **kwargs):
        request = args[0]
        if not settings.DEBUG:
            raise Http404("Site is not in DEBUG mode.")
        if request.kbsite and request.kbsite.is_setup:
            raise Http404("Site is already setup, wizard disabled.")
        return f(*args, **kwargs)
    return new_function
def weth_instance(web3_eth):  # pylint: disable=redefined-outer-name
    """Get an instance of the WrapperEther contract."""
    # Look up the Ganache test-network WETH token address, then bind the
    # WETH9 ABI to it.
    token_address = NETWORK_TO_ADDRESSES[NetworkId.GANACHE].ether_token
    return web3_eth.contract(
        address=to_checksum_address(token_address),
        abi=abi_by_name("WETH9"),
    )
def get_editops_stats(alignment, gap_char):
    """Get stats for character level edit operations that need to be done to
    transform the source string to the target string. Inputs must not be empty
    and must be the result of calling the runing the align function.

    Args:
        alignment (tuple(str, str)): the results from the string alignment biopy function
        gap_char (str): gap character used in alignment

    Raises:
        ValueError: If any of the string in the alignment are empty

    Returns:
        tuple(dict, dict): per-category edit counts, and a mapping of
        position -> action, where an action is 'D' (delete), ('I', char)
        (insert), or ('R', char) (replace).
    """
    src, target = alignment
    if src == "" or target == "":
        raise ValueError("one of the input strings is empty")
    stats = {
        "edit_insert": 0,
        "edit_delete": 0,
        "edit_replace": 0,
        "edit_insert_spacing": 0,
        "edit_delete_spacing": 0,
    }
    actions = {}
    for pos, (src_char, tgt_char) in enumerate(zip(src, target)):
        if LOG_LEVEL > 1:
            _log(src_char, tgt_char)
        if src_char == gap_char:
            # Gap in the source: character was inserted in the target.
            key = "edit_insert_spacing" if tgt_char == " " else "edit_insert"
            stats[key] += 1
            actions[pos] = ("I", tgt_char)
        elif tgt_char == gap_char:
            # Gap in the target: source character was deleted.
            key = "edit_delete_spacing" if src_char == " " else "edit_delete"
            stats[key] += 1
            actions[pos] = "D"
        elif tgt_char != src_char:
            # Both present but different: a replacement.
            stats["edit_replace"] += 1
            actions[pos] = ("R", tgt_char)
    return stats, actions
def conv(inputs, out_filters, ksize=(3, 3), strides=(1, 1), dilation=(1, 1),
         use_bias=True):
    """
    Convolution layer with 'same' padding applied to *inputs*.

    Parameters
    ----------
    inputs: Input tensor
    out_filters: Number of output filters
    ksize: Kernel size. One integer of tuple of two integers
    strides: Strides for moving kernel. One integer of tuple of two integers
    dilation: Dilation of kernel. One integer of tuple of two integers
    use_bias: Whether to use bias
    """
    # Build the layer first, then apply it -- same behavior, clearer shape.
    layer = Conv2D(
        filters=out_filters,
        kernel_size=ksize,
        strides=strides,
        padding='same',
        dilation_rate=dilation,
        use_bias=use_bias,
    )
    return layer(inputs)
def is_active(host, port):
    """Check if server is active.

    Send HTTP GET for a fake /style.css which server will respond to if it's
    alive.

    Args:
        host: server ip address
        port: server port

    Returns:
        Boolean for server state.
    """
    try:
        url = 'http://{}:{}/style.css'.format(host, port)
        req = urllib2.Request(url, headers={'User-Agent': UA})
        f = urllib2.urlopen(req)
        try:
            # True iff the server answered 200 OK.
            return f.code == 200
        finally:
            # Close the connection; the original leaked the response object.
            f.close()
    except urllib2.URLError:
        pass
    return False
def norm_lrelu_conv(feat_in, feat_out):
    """InstanceNorm3D + LeakyReLU + Conv3D block (channels feat_in -> feat_out)."""
    layers = [
        nn.InstanceNorm3d(feat_in),
        nn.LeakyReLU(),
        # 3x3x3 conv, stride 1, padding 1: spatial size is preserved.
        nn.Conv3d(feat_in, feat_out, kernel_size=3, stride=1, padding=1,
                  bias=False),
    ]
    return nn.Sequential(*layers)
import pandas
from typing import Optional
def dataframe_divisions(
    request: Request,
    reader=Depends(reader),
    format: Optional[str] = None,
    serialization_registry=Depends(get_serialization_registry),
):
    """
    Fetch the Apache Arrow serialization of the index values at the partition edges.
    """
    request.state.endpoint = "data"
    # Only dataframe readers expose partition divisions; 404 otherwise.
    # NOTE(review): the 'format' parameter is accepted but unused here --
    # possibly consumed elsewhere in the request pipeline; confirm.
    if reader.structure_family != "dataframe":
        raise HTTPException(
            status_code=404,
            detail=f"Cannot read {reader.structure_family} structure with /dataframe/division route.",
        )
    divisions = reader.microstructure().divisions
    # divisions is a tuple. Wrap it in a DataFrame so
    # that we can easily serialize it with Arrow in the normal way.
    divisions_wrapped_in_df = pandas.DataFrame({"divisions": list(divisions)})
    with record_timing(request.state.metrics, "pack"):
        content = serialization_registry(
            "dataframe", APACHE_ARROW_FILE_MIME_TYPE, divisions_wrapped_in_df, {}
        )
    # ETag of the serialized payload lets clients cache / revalidate.
    headers = {"ETag": md5(content).hexdigest()}
    return PatchedResponse(
        content, media_type=APACHE_ARROW_FILE_MIME_TYPE, headers=headers
    )
import requests
import sys
def get_key_volumes(
        svm_name: str,
        volume_name: str,
        cluster: str,
        headers_inc: str):
    """ get volume keys

    Query the ONTAP REST API for volumes matching the given name and SVM,
    print every matching volume's UUID, and return the UUID of the last
    record.

    NOTE(review): if the API returns zero records, the final
    ``return volume['uuid']`` raises UnboundLocalError; and when several
    records match, only the LAST one's UUID is returned -- confirm this is
    intended by callers.
    """
    print()
    url = "https://{}/api/storage/volumes?name={}&svm.name={}".format(
        cluster, volume_name, svm_name)
    try:
        # verify=False deliberately skips TLS certificate validation
        # (common for lab clusters with self-signed certificates).
        response = requests.get(url, headers=headers_inc, verify=False)
    except requests.exceptions.HTTPError as err:
        print(err)
        sys.exit(1)
    except requests.exceptions.RequestException as err:
        print(err)
        sys.exit(1)
    url_text = response.json()
    # The API reports failures in-band via an 'error' key.
    if 'error' in url_text:
        print(url_text)
        sys.exit(1)
    respdict = dict(response.json())
    volumes = respdict['records']
    print("The UUID of the Volume is ")
    for volume in volumes:
        print(volume['uuid'])
    return volume['uuid']
def get_integer_array(obj, name):
    """
    Retrieves the value of a resource that uses a one-dimensional integer array.

    iarr = Ngl.get_integer_array(plotid, resource_name)

    plotid -- The identifier returned from Ngl.open_wks, or any PyNGL
              function that returns a PlotId.

    resource_name -- The name of the resource whose value you want to
                     retrieve.
    """
    plot_id = _int_id(obj)
    return NhlGetIntegerArray(plot_id, name)
import os
def test(model, dataloader, use_cuda, criterion, full_return=False, log_path=None):
    """
    Evaluates an age-regression model on a dataloader.

    Despite the original wording, this computes the mean absolute error
    (MAE) between true and predicted ages, not a balanced accuracy.

    :param model: the network (subclass of nn.Module)
    :param dataloader: a DataLoader wrapping a dataset; its dataset must
        expose ``normalization``, ``age_std`` and ``age_mean``
    :param use_cuda: if True a gpu is used
    :param criterion: loss function applied to (outputs, labels)
    :param full_return: if True also returns the per-subject results DataFrame
    :param log_path: optional file or directory path; results are written as
        TSV under it
    :return: (total loss, MAE) or (total loss, MAE, results DataFrame)
    """
    # NOTE(review): no torch.no_grad() here, so gradients are tracked during
    # evaluation -- confirm whether that is intentional.
    model.eval()
    columns = ["participant_id", "true_age", "predicted_age"]
    results_df = pd.DataFrame(columns=columns)
    total_loss = 0
    for i, data in enumerate(dataloader, 0):
        if use_cuda:
            inputs, labels = data['image'].cuda(), data['label'].cuda()
            data['covars'] = data['covars'].cuda()
        else:
            inputs, labels = data['image'], data['label']
            data['covars'] = data['covars'].cpu()
        age = data['age']
        outputs = model(inputs, covars=data['covars'])
        loss = criterion(outputs, labels.unsqueeze(1))
        total_loss += loss.item()
        predicted = outputs.data.squeeze(1)
        # Generate detailed DataFrame
        for idx, sub in enumerate(data['subject_ID']):
            prediction = predicted[idx]
            # Undo the dataset's age normalization: 'v' = variance scaling,
            # 'm' = mean shift (flags stored in dataset.normalization).
            if 'v' in dataloader.dataset.normalization:
                prediction *= dataloader.dataset.age_std
            if 'm' in dataloader.dataset.normalization:
                prediction += dataloader.dataset.age_mean
            row = [sub, age[idx].item(), prediction.item()]
            row_df = pd.DataFrame(np.array(row).reshape(1, -1), columns=columns)
            results_df = pd.concat([results_df, row_df])
        # Free references so GPU memory can be reclaimed between batches.
        del inputs, outputs, labels
    results_df.reset_index(inplace=True, drop=True)
    mae = np.mean(np.abs(results_df.predicted_age.astype(float) - results_df.true_age.astype(float)))
    # Restore training mode before returning.
    model.train()
    if log_path is not None:
        # NOTE(review): when log_path names an existing FILE, result.tsv is
        # still joined onto it below -- verify callers only pass directories
        # or nonexistent paths.
        if os.path.isfile(log_path) and not os.path.exists(os.path.dirname(log_path)):
            # if file is given
            os.makedirs(os.path.dirname(log_path))
        elif os.path.isdir(log_path) and not os.path.exists(log_path):
            # if directory is given
            os.makedirs(log_path)
        log_path = os.path.join(log_path, "result.tsv")
        results_df.to_csv(log_path, sep='\t', index=False)
    if full_return:
        return total_loss, mae, results_df
    return total_loss, mae
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.