| content (string, lengths 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M) |
|---|---|---|
import re
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
def tokenize(text):
"""
The function to tokenize and lemmatize the text.
Inputs:
text: the text which needs to be tokenized
Outputs:
tokens: tokens which can be used in machine learning
"""
stop_words = stopwords.words("english")
lemmatizer = WordNetLemmatizer()
text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
tokens = word_tokenize(text)
tokens = [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]
return tokens
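# A minimal usage sketch, assuming the NLTK resources "stopwords", "wordnet" and
# "punkt" have already been downloaded (e.g. via nltk.download("punkt")):
if __name__ == "__main__":
    print(tokenize("The cats are running faster than the dogs!"))
    # -> roughly ['cat', 'running', 'faster', 'dog']; exact lemmas depend on the NLTK version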
|
b41e66c4a065d898b2c3cf05fa261f6100d0f413
| 3,649,500
|
def remove_task(name: str):
"""
Delete a task based on information "name":
    - **name**: each task must have a name
"""
name_idx = _db_has_name(name)
    if name_idx is None:
        raise HTTPException(status_code=400, detail={"message": "name doesn't exist"})
else:
del db["tasks"][name_idx]
_write_json()
return name
|
4190e3e6a0ac55defe5ba6dcac3036f7c7df290b
| 3,649,501
|
def set_dj_definition(cls, type_map: dict = None) -> None:
"""Set the definition property of a class by inspecting its attributes.
Params:
cls: The class whose definition attribute should be set
type_map: Optional additional type mappings
"""
# A mapping between python types and DataJoint types
_type_map = {
"int": "int",
"str": "varchar(256)",
"float": "float",
"Quantity": "float",
"datetime": "datetime",
"datetime.datetime": "datetime",
"bool": "tinyint",
"list": "longblob",
}
# A list of python types which have no DataJoint
# equivalent and so are unsupported
unsupported = [dict]
if type_map:
_type_map.update(type_map)
dj_def = "%s_id: int auto_increment\n---\n" % cls.__name__.lower()
cls_items = cls.__annotations__.items()
for attr, type_hint in cls_items:
if type_hint in unsupported:
continue
name = getattr(type_hint, "__name__", type_hint)
default = getattr(cls, attr)
if isinstance(default, str):
default = '"%s"' % default
elif isinstance(default, bool):
default = int(default)
else:
default = "NULL"
if getattr(type_hint, '_name', "") == 'Dict':
cls = handle_dict(cls, _type_map, attr, type_hint)
continue
elif name in _type_map:
dj_def += "%s = %s : %s\n" % (attr, default, _type_map[name])
else:
dj_def += "-> %s\n" % name
cls.definition = dj_def
return cls
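# A hedged illustration of the generated DataJoint definition string; the Probe
# class below is hypothetical, and handle_dict (referenced above) is only needed
# when a Dict-typed attribute is present, which is not the case here:
if __name__ == "__main__":
    class Probe:
        name: str = "probe0"
        depth: float = 0.0
        active: bool = True
    print(set_dj_definition(Probe).definition)
    # probe_id: int auto_increment
    # ---
    # name = "probe0" : varchar(256)
    # depth = NULL : float
    # active = 1 : tinyint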
|
9335e1b4413ce03f98ca885bcf4a888af9d014a1
| 3,649,502
|
def mmap_zeros(shape, dtype):
"""
Create an empty shared memory array.
"""
new = anonymousmemmap(shape, dtype)
new[:] = 0.0
return new
|
5f78b5e227ab7f5115bc04af2e3f7ea62a769cd8
| 3,649,503
|
from collections import defaultdict
from typing import Iterable
def edge_disjoint_paths(g: Graph, source: Node, sink: Node) -> Iterable:
""" Given directed graph G, and two nodes s and t, find k paths from
s to t such that no two paths share an edge.
Menger’s Theorem: Given a directed graph G with nodes s,t the maximum number of
edge-disjoint s-t paths equals the minimum number of edges whose
removal separates s from t.
Suppose you want to send k large files from s to t but never have two files use
the same network link (to avoid congestion on the links).
"""
for u in g:
for v in g[u]:
g[u][v].cap = 1
fifo_push_relabel(g, source, sink)
# use dfs to find the paths
S, paths = [source], []
visited = defaultdict(lambda: False)
pred = defaultdict(lambda: None)
while S:
u = S.pop()
if u == sink:
path = [sink]
current = pred[sink]
while current is not None:
path.append(current)
current = pred[current]
paths.append(tuple(reversed(path)))
continue
if visited[u]:
continue
visited[u] = True
for v in adjacency(g, u):
            if not visited[v] and g[u][v].flow:
S.append(v)
pred[v] = u
return iter(paths)
|
d638923e9091eefcee0c0e2623adba095e33cc0c
| 3,649,504
|
def listToMLlibVectorUDF(col):
""" Map struct column from list to MLlib vector """
return Column(default().listToMLlibVectorUDF(col._jc))
|
9b785839234bc2dfcb44c193cc12930e4f26f1a8
| 3,649,505
|
def change_file_paths_to_showcase(df, showcase_dir="/showcase_data/raw_data"):
    """Changes file paths to use the showcase directory"""
    path_columns = [
        "file_path",
        "file_path_feature_values",
        "cluster_id_path",
        "thumbnail_path",
        "file_path_small",
    ]
    output = df.copy()
    for column in path_columns:
        if column in df.columns:
            output.loc[:, column] = df[column].apply(
                lambda x: add_path(x, showcase_dir)
            )
    return output
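# A small hedged demo; add_path (referenced above but not defined in this snippet)
# is stubbed out as a simple prefix join:
if __name__ == "__main__":
    import pandas as pd
    def add_path(path, prefix):  # hypothetical stand-in for the project's helper
        return f"{prefix}/{path}"
    df = pd.DataFrame({"file_path": ["img/001.png"], "other": ["untouched"]})
    print(change_file_paths_to_showcase(df).file_path[0])
    # -> /showcase_data/raw_data/img/001.png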
|
6789ff426794b620eebb1d80ae96b1a18c9a2dc8
| 3,649,506
|
def match_conftest_error(line):
"""
Extract `ConftestImportFailure` error message from a string.
:param line: A string to pattern match against.
:returns: A dictionary where the key `file_path` holds the file path and the
key `error` the error description. If not matched, the dictionary is
empty.
"""
return match_pattern(
r"^E\s+.*ConftestImportFailure: "
"\(local\('(?P<file_path>.*)'\), \((?P<error>.*)\)\)$",
line,
)
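# A hedged sanity check of the pattern itself, using re directly because
# match_pattern is project-specific (it is assumed to return the named groups):
if __name__ == "__main__":
    import re
    sample = ("E   _pytest.config.ConftestImportFailure: "
              "(local('/repo/tests/conftest.py'), (ImportError('No module named foo'),))")
    m = re.match(
        r"^E\s+.*ConftestImportFailure: "
        r"\(local\('(?P<file_path>.*)'\), \((?P<error>.*)\)\)$",
        sample,
    )
    print(m.groupdict() if m else {})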
|
90b10831b672bb053cc46e5adbefaea5597607e3
| 3,649,507
|
import string
def modifyModlist(
old_entry,new_entry,ignore_attr_types=None,ignore_oldexistent=0
):
"""
Build differential modify list for calling LDAPObject.modify()/modify_s()
old_entry
Dictionary holding the old entry
new_entry
Dictionary holding what the new entry should be
ignore_attr_types
List of attribute type names to be ignored completely
ignore_oldexistent
If non-zero attribute type names which are in old_entry
but are not found in new_entry at all are not deleted.
This is handy for situations where your application
sets attribute value to '' for deleting an attribute.
In most cases leave zero.
"""
ignore_attr_types = list_dict(map(string.lower,(ignore_attr_types or [])))
modlist = []
attrtype_lower_map = {}
for a in old_entry.keys():
attrtype_lower_map[string.lower(a)]=a
for attrtype in new_entry.keys():
attrtype_lower = string.lower(attrtype)
if ignore_attr_types.has_key(attrtype_lower):
# This attribute type is ignored
continue
# Filter away null-strings
new_value = filter(lambda x:x!=None,new_entry[attrtype])
if attrtype_lower_map.has_key(attrtype_lower):
old_value = old_entry.get(attrtype_lower_map[attrtype_lower],[])
old_value = filter(lambda x:x!=None,old_value)
del attrtype_lower_map[attrtype_lower]
else:
old_value = []
if not old_value and new_value:
# Add a new attribute to entry
modlist.append((ldap.MOD_ADD,attrtype,new_value))
elif old_value and new_value:
# Replace existing attribute
replace_attr_value = len(old_value)!=len(new_value)
if not replace_attr_value:
old_value_dict=list_dict(old_value)
new_value_dict=list_dict(new_value)
delete_values = []
for v in old_value:
if not new_value_dict.has_key(v):
replace_attr_value = 1
break
add_values = []
if not replace_attr_value:
for v in new_value:
if not old_value_dict.has_key(v):
replace_attr_value = 1
break
if replace_attr_value:
modlist.append((ldap.MOD_DELETE,attrtype,None))
modlist.append((ldap.MOD_ADD,attrtype,new_value))
elif old_value and not new_value:
# Completely delete an existing attribute
modlist.append((ldap.MOD_DELETE,attrtype,None))
if not ignore_oldexistent:
# Remove all attributes of old_entry which are not present
# in new_entry at all
for a in attrtype_lower_map.keys():
if ignore_attr_types.has_key(a):
# This attribute type is ignored
continue
attrtype = attrtype_lower_map[a]
modlist.append((ldap.MOD_DELETE,attrtype,None))
return modlist
|
f28814b4659ccc8e9e27eec0dee5da8700a732ac
| 3,649,508
|
def regnety_3200m(**kwargs):
"""
Constructs a RegNet-Y model under 3200M FLOPs.
"""
model = RegNet(regnetY_3200M_config, **kwargs)
return model
|
6799f96df72769e6f8bd0819e47353e141acefd0
| 3,649,509
|
import operator
import numpy
def _get_exposure(fname, stop=None):
"""
:param fname:
path of the XML file containing the exposure
:param stop:
node at which to stop parsing (or None)
:returns:
a pair (Exposure instance, list of asset nodes)
"""
[exposure] = nrml.read(fname, stop=stop)
if not exposure.tag.endswith('exposureModel'):
raise InvalidFile('%s: expected exposureModel, got %s' %
(fname, exposure.tag))
description = exposure.description
try:
conversions = exposure.conversions
except AttributeError:
conversions = Node('conversions', nodes=[Node('costTypes', [])])
try:
inslimit = conversions.insuranceLimit
except AttributeError:
inslimit = Node('insuranceLimit', text=True)
try:
deductible = conversions.deductible
except AttributeError:
deductible = Node('deductible', text=True)
try:
area = conversions.area
except AttributeError:
# NB: the area type cannot be an empty string because when sending
# around the CostCalculator object we would run into this numpy bug
# about pickling dictionaries with empty strings:
# https://github.com/numpy/numpy/pull/5475
area = Node('area', dict(type='?'))
try:
occupancy_periods = exposure.occupancyPeriods.text or ''
except AttributeError:
occupancy_periods = ''
try:
tagNames = exposure.tagNames
except AttributeError:
tagNames = Node('tagNames', text='')
tagnames = ~tagNames or []
tagnames.insert(0, 'taxonomy')
# read the cost types and make some check
cost_types = []
retrofitted = False
for ct in conversions.costTypes:
with context(fname, ct):
ctname = ct['name']
if ctname == 'structural' and 'retrofittedType' in ct.attrib:
if ct['retrofittedType'] != ct['type']:
raise ValueError(
'The retrofittedType %s is different from the type'
'%s' % (ct['retrofittedType'], ct['type']))
if ct['retrofittedUnit'] != ct['unit']:
raise ValueError(
'The retrofittedUnit %s is different from the unit'
'%s' % (ct['retrofittedUnit'], ct['unit']))
retrofitted = True
cost_types.append(
(ctname, valid.cost_type_type(ct['type']), ct['unit']))
if 'occupants' in cost_types:
cost_types.append(('occupants', 'per_area', 'people'))
cost_types.sort(key=operator.itemgetter(0))
cost_types = numpy.array(cost_types, cost_type_dt)
insurance_limit_is_absolute = il = inslimit.get('isAbsolute')
deductible_is_absolute = de = deductible.get('isAbsolute')
cc = CostCalculator(
{}, {}, {},
True if de is None else de,
True if il is None else il,
{name: i for i, name in enumerate(tagnames)},
)
for ct in cost_types:
name = ct['name'] # structural, nonstructural, ...
cc.cost_types[name] = ct['type'] # aggregated, per_asset, per_area
cc.area_types[name] = area['type']
cc.units[name] = ct['unit']
assets = []
asset_refs = []
exp = Exposure(
exposure['id'], exposure['category'],
description.text, cost_types, occupancy_periods,
insurance_limit_is_absolute, deductible_is_absolute, retrofitted,
area.attrib, assets, asset_refs, cc, TagCollection(tagnames))
return exp, exposure.assets
|
d782826c427fb30e5561908f4342b2f6983ab46b
| 3,649,510
|
def dcos_api_session(dcos_api_session_factory):
""" Overrides the dcos_api_session fixture to use
exhibitor settings currently used in the cluster
"""
args = dcos_api_session_factory.get_args_from_env()
exhibitor_admin_password = None
expanded_config = get_expanded_config()
if expanded_config['exhibitor_admin_password_enabled'] == 'true':
exhibitor_admin_password = expanded_config['exhibitor_admin_password']
api = dcos_api_session_factory(
exhibitor_admin_password=exhibitor_admin_password,
**args)
api.wait_for_dcos()
return api
|
ab677802d1228b3af4bfc8ecc2cbf6040edbc6b0
| 3,649,511
|
import numpy as np
def BarycentricInterpolation(bins, pnts):
"""
    Barycentric interpolation for given points:
    returns the barycentric coordinates for points within the grids.
    INPUT
    bins - grids for discretization,
    an m-length array where bins[i] indicates the mesh along dimension i
    pnts - an array of points, each point an m-length array of Cartesian coordinates;
    there can be n points in total
    RETURN
    indices - an n-length list of indices, each d-length (d=m+1), for the interpolating points involved
    coeffs - an n-length list of coefficients, each d-length, for reconstructing point n
    A pythonic version of barycentricinterpolation from Russ' Drake utility function;
    does not support dcoefs currently...
"""
#note here the layout of input and output is different from the C++ version of drake
m = pnts.shape[1]
n = pnts.shape[0]
d = m+1
    if len(bins) != m:
        print('The number of bins must equal the dimension of the points.')  # validation
        return None, None
binsize = [len(bins[i]) for i in range(m)]
nskip = np.concatenate([[1], np.cumprod([binsize[i] for i in range(m-1)])])
#a list of bary points for future sorting...
b = [{'dim':0, 'fracway':0.0, 'dfracway':0.0} for i in range(d)]
indices = np.zeros((n, d))
coeffs = np.zeros((n, d))
for j in range(n):
sidx = 0 # 0-index in our case...
for i in range(m):
pt = pnts[j, i]
curr_bin = bins[i]
curr_bin_size = binsize[i]
b[i]['dim'] = i
if curr_bin_size == 1: #singleton dimensions
#sidx is unchanged
b[i]['fracway'] = 1.0
elif pt > curr_bin[curr_bin_size-1]:
#larger than max bound of bin
sidx += nskip[i] * (curr_bin_size-1)
b[i]['fracway'] = 1.0
b[i]['dfracway'] = 0.0
elif pt < curr_bin[0]:
#less than min bound of bin
sidx += nskip[i]
b[i]['fracway'] = 0.0
b[i]['dfracway'] = 0.0
else:
#Russ commented that smarter search can be done here...
#i guess we can do it in a pythonic way...
next_bin_index = np.argmax(curr_bin>pt)
sidx += nskip[i]*next_bin_index
b[i]['fracway'] = (pt - curr_bin[next_bin_index-1])/(curr_bin[next_bin_index]- curr_bin[next_bin_index-1])
b[i]['dfracway'] = 1./(curr_bin[next_bin_index]- curr_bin[next_bin_index-1])
#sort dimension based on fracway (lowest to highest)
b_sorted = sorted(b[:-1], key=lambda b_elem: b_elem['fracway'])
# final element of b_sorted,
b_sorted.append({'dim':m-1,'fracway':1.0, 'dfracway':0.0})
# top right corner
indices[j, 0] = sidx
coeffs[j, 0] = b_sorted[0]['fracway']
for i in range(m):
if binsize[b_sorted[i]['dim']] > 1:
                #support singleton dimensions
sidx -= nskip[b_sorted[i]['dim']]
indices[j, i+1] = sidx
coeffs[j, i+1] = b_sorted[i+1]['fracway'] - b_sorted[i]['fracway']
return indices, coeffs
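# A brief hedged usage sketch on a 2-D grid; d = m + 1 = 3 indices and weights are
# returned per query point, and the weights sum to 1 for in-range points:
if __name__ == "__main__":
    bins = [np.linspace(0.0, 1.0, 5), np.linspace(0.0, 1.0, 5)]
    pnts = np.array([[0.30, 0.62]])
    idx, coef = BarycentricInterpolation(bins, pnts)
    print(idx, coef, coef.sum(axis=1))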
|
aa12be78a581cae154887da4546b0a9e94297e00
| 3,649,512
|
from pathlib import Path
import shutil
def submit_rgi_job(sample_instance: AnalysisSample) -> RGIResult:
"""
Given an input AnalysisSample instance, runs RGI and stores result in the database
:param sample_instance: Instance of AnalysisSample object
:return: Populated RGIResult object generated by the method
"""
logger.info(f"Received RGI job request for {sample_instance}")
assembly_instance = SampleAssemblyData.objects.get(sample_id=sample_instance.sample_id)
rgi_dir_name = f'RGI_{sample_instance.user}_{sample_instance.pk}'
root_sample_instance = Sample.objects.get(sample_id=sample_instance.sample_id)
outdir = MEDIA_ROOT / Path(str(sample_instance.sample_id.fwd_reads)).parent / rgi_dir_name
if not assembly_instance.assembly_exists():
logger.warning(f"Could not find assembly for {assembly_instance} - cannot proceed with job")
return
else:
assembly_path = assembly_instance.get_assembly_path()
# Remove previous analysis if it exists
if outdir.exists():
shutil.rmtree(outdir, ignore_errors=True)
outdir.mkdir(parents=True)
# Call RGI
rgi_text_results, rgi_json_results = call_rgi_main(fasta=assembly_path, outdir=outdir,
sample_id=root_sample_instance.sample_id)
# Populate database with results
rgi_result_object = RGIResult.objects.create(analysis_sample=sample_instance)
rgi_result_object.rgi_main_text_results = upload_analysis_file(instance=root_sample_instance,
filename=rgi_text_results.name,
analysis_folder=rgi_dir_name)
rgi_result_object.rgi_main_json_results = upload_analysis_file(instance=root_sample_instance,
filename=rgi_json_results.name,
analysis_folder=rgi_dir_name)
rgi_result_object.save()
logger.info(f"Completed running RGI on {sample_instance}")
return rgi_result_object
|
3610f59fe62c01c211fcbf93658bc0c70eb25b12
| 3,649,513
|
def forwardslash2shift(args=None):
"""
Make forward slash shift when pressed with another key
"""
run_mapper(premade.ForwardSlash2Shift)
return 0
|
cb9cbbe3272fbfd2cdf16fc4d5fce90b378b4b32
| 3,649,514
|
def plotly_figure(figure, id: str):
"""
:param figure: plotly graph object or px figure
    :param id: unique id string of format 'id_xxx' with x representing a number
:return: html style string containing a plotly figure
"""
json_figure = figure.to_json()
html = """
<div id="""+id+"""></div>
<script>
var plotly_data = {}
Plotly.react("""+id+""", plotly_data.data, plotly_data.layout);
</script>
"""
local_text = html.format(json_figure)
return local_text
|
949415c70d467c48ee3aa1f028c9e3539099febf
| 3,649,515
|
def _add_resources_to_vault_obj(obj, data, columns):
"""Add associated resources to column and data tuples
"""
i = 0
for s in obj.resources:
if obj.resources[i].id:
name = 'resource_id_' + str(i + 1)
data += (obj.resources[i].id,)
columns = columns + (name,)
name = 'resource_type_' + str(i + 1)
data += (obj.resources[i].type,)
columns = columns + (name,)
i += 1
return data, columns
|
3a6dd7541ac853a7c62b638abf4d0eeb21bb6cb2
| 3,649,516
|
def classify_helmet_belt_worn(x):
"""
    This function returns a string representation of the int value of the field which specifies whether the
    person was wearing a seatbelt or a helmet. This specification is from the Road Crash Statistics Victoria, 2013 Edition
document.
:param x: int value representing the classify helmet belt worn field
:return: string representation of the integer value
"""
if x == 1:
return 'Seatbelt Worn'
elif x == 2:
return 'Seatbelt Not Worn'
elif x == 3:
return 'Child Restraint Worn'
elif x == 4:
return 'Child Restraint Not Worn'
elif x == 5:
return 'Seatbelt/restraint Not fitted'
elif x == 6:
return 'Crash Helmet Worn'
elif x == 7:
return 'Crash Helmet Not Worn'
elif x == 8:
return 'Not Appropriate'
else:
return 'Not Known'
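# The same mapping expressed as a dict lookup, an equivalent and more compact
# alternative (a sketch, not the original implementation):
HELMET_BELT_WORN = {
    1: 'Seatbelt Worn', 2: 'Seatbelt Not Worn',
    3: 'Child Restraint Worn', 4: 'Child Restraint Not Worn',
    5: 'Seatbelt/restraint Not fitted', 6: 'Crash Helmet Worn',
    7: 'Crash Helmet Not Worn', 8: 'Not Appropriate',
}
def classify_helmet_belt_worn_dict(x):
    return HELMET_BELT_WORN.get(x, 'Not Known')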
|
cba05be8d03c933e767a75400032d07e296e0ec3
| 3,649,517
|
def history_kernels ( estimated_stimulus_kernel, estimated_response_kernel, ci_kernels, ax=None, presentation="left/right", ground_truth=None ):
"""plot history kernels
:Parameters:
*estimated_stimulus_kernel*
stimulus kernel estimated from the data
*estimated_response_kernel*
response kernel estimated from the data
*ci_kernels*
a sequence of confidence regions for the kernels as returned by
statistics.history_kernel_ci()
*ax*
pylab.axes where the plot should go
*presentation*
how should the kernels be presented? Selection of either 'left/right'
or 'correct/incorrect'
:Example:
>>> skernel = [1.2,.5,.3,.1]
>>> rkernel = [.1,.1,0,0]
>>> ci_kernels = [ [[1.3,.6,.4,.2],[.8,.3,.1,-.05]],[[.2,.2,.1,.1],[-.05,0.,-.1,-.1]],[[1.5,.8,.5,.3],[.7,.3,0.,-.2]],[[1.2,.5,.5,.2],[.9,.2,0.,-.05]] ]
>>> history_kernels ( skernel, rkernel, ci_kernels )
>>> pl.savefig ( 'test/history_kernels.png' ); pl.close()
"""
if presentation=="left/right":
kernels = (estimated_stimulus_kernel,estimated_response_kernel)
colors = (stimulus_color,response_color)
labels = ("stimulus","response")
if not ci_kernels is None:
CI = np.array(ci_kernels[:2])
else:
CI = None
if not ground_truth is None:
true_kernels = ground_truth['stimulus_kernel'],\
ground_truth['response_kernel']
elif presentation=="correct/incorrect":
kernels = (estimated_stimulus_kernel+estimated_response_kernel,-estimated_stimulus_kernel+estimated_response_kernel)
colors = (correct_color,incorrect_color)
labels = ("correct","incorrect")
if not ci_kernels is None:
CI = np.array(ci_kernels[2:])
else:
CI = None
if not ground_truth is None:
true_kernels = ground_truth['stimulus_kernel']+\
ground_truth['response_kernel'],\
-ground_truth['stimulus_kernel']+\
ground_truth['response_kernel']
else:
raise ValueError("presentation should be either 'left/right' or 'correct/incorrect'")
if CI is None:
CI = np.array([[kernels[0],kernels[0]],[kernels[1],kernels[1]]])
if ax is None:
ax = pl.gca()
ax = prepare_axes ( ax )
# Plot confidence regions
lags = np.arange ( len(estimated_stimulus_kernel) ) + 1
for i in [0,1]:
fc = 0.5*np.array(colors[i])+0.5*np.ones(3)
ax.fill ( np.concatenate ( (lags,lags[::-1]) ), np.concatenate ( (CI[i,0,:],CI[i,1,::-1]) ),
facecolor=fc, edgecolor=0.5*colors[i], alpha=0.7 )
kernellines = []
for i in [0,1]:
if not ground_truth is None:
ax.plot ( lags, true_kernels[i], color=0.5*colors[i] )
kernellines += ax.plot ( lags, kernels[i], 'o',
markerfacecolor=colors[i], markeredgecolor=0.5*colors[i], label=labels[i] )
ax.set_xlim ( 1-0.01*len(estimated_stimulus_kernel),len(estimated_stimulus_kernel)+0.01*len(estimated_stimulus_kernel) )
ax.set_xticks ( lags )
# label_axes ( title="history kernels", xlabel="lag", ylabel="equivalent stimulus strength", legend='best', ax=ax )
return kernellines
|
c25751759079dbf11b7b63e9ed66b73d3552c040
| 3,649,518
|
def init_application():
"""Main entry point for initializing the Deckhand API service.
Create routes for the v1.0 API and sets up logging.
"""
config_files = _get_config_files()
paste_file = config_files[-1]
CONF([], project='deckhand', default_config_files=config_files)
setup_logging(CONF)
policy.Enforcer(CONF)
LOG.debug('Starting WSGI application using %s configuration file.',
paste_file)
db_api.drop_db()
db_api.setup_db(CONF.database.connection)
app = deploy.loadapp('config:%s' % paste_file, name='deckhand_api')
return app
|
3bc5b9d8f0378c4e19ce61f9ab4ef66a9a6733d6
| 3,649,519
|
def specs_url(self):
"""
    The Swagger specifications url (ie. `swagger.json`), relative to the application root
:rtype: str
"""
return url_for(self.endpoint('specs'), _external=False)
|
1620c8eb4d0b8e61c9a67aadca677b2acae5074f
| 3,649,520
|
import sqlite3
def construct_db(db: str) -> sqlite3.Connection:
"""Build empty database 'db'."""
conn = sqlite3.connect(db)
c = conn.cursor()
c.executescript('''
CREATE TABLE files (
ID INTEGER PRIMARY KEY,
Name TEXT,
Path TEXT,
FullPath TEXT,
isDir INTEGER,
Size INTEGER,
Mtime INTEGER,
Atime INTEGER,
Ctime INTEGER,
Btime INTEGER,
UID INTEGER,
GID INTEGER,
iNode INTEGER,
DevID INTEGER,
DP INTEGER,
XCount INTEGER,
MIME INTEGER,
Type INTEGER,
Offset INTEGER
);
CREATE TABLE xattrs (
ID INTEGER PRIMARY KEY AUTOINCREMENT,
FileID INTEGER,
Key TEXT,
Value TEXT,
Raw BLOB
);
CREATE TABLE mtypes(
ID INTEGER PRIMARY KEY AUTOINCREMENT,
MIME TEXT
);
CREATE TABLE ftypes (
ID INTEGER PRIMARY KEY,
Type TEXT
);
CREATE VIEW localtime as
select
files.ID,
Name,
Path,
FullPath,
isDir,
Size,
datetime(mtime, 'unixepoch', 'localtime') as Mtime,
datetime(atime, 'unixepoch', 'localtime') as Atime,
datetime(ctime, 'unixepoch', 'localtime') as Ctime,
datetime(btime, 'unixepoch', 'localtime') as Btime,
UID,
GID,
iNode,
DevID as DeviceID,
mtypes.MIME,
ftypes.Type,
Xcount as ExtraAttrs,
'Offset' as ZipOffset,
Key as XattrKey,
Value as XattrValue,
Raw
from files
left join xattrs on files.ID = xattrs.FileID
left join mtypes on files.MIME = mtypes.ID
left join ftypes on files.Type = ftypes.ID;
CREATE VIEW utc as
select
files.ID,
Name,
Path,
FullPath,
isDir,
Size,
datetime(mtime, 'unixepoch') as Mtime,
datetime(atime, 'unixepoch') as Atime,
datetime(ctime, 'unixepoch') as Ctime,
datetime(btime, 'unixepoch') as Btime,
UID,
GID,
iNode,
DevID as DeviceID,
mtypes.MIME,
ftypes.Type,
Xcount as ExtraAttrs,
'Offset' as ZipOffset,
Key as XattrKey,
Value as XattrValue,
Raw
from files
left join xattrs on files.ID = xattrs.FileID
left join mtypes on files.MIME = mtypes.ID
left join ftypes on files.Type = ftypes.ID;
''')
conn.commit()
return conn
|
22759b9b8e68e7c39f8fdd6fc33124c0ecea3a24
| 3,649,521
|
def class_javadoc(ns, stmt):
""" Generate javadoc for class (string without '/**' and '*/' but with * on new line) """
description = ''
desc_stmt = search_one(stmt, 'description')
if desc_stmt is not None:
description += ''.join([str(desc_stmt.arg).replace('\n', '\n * ')])
description += ''.join(['\n * <br/>\n * Namespace: ', ns])
return description
|
9bfc093362bdb573ba8b41ca17b037b57da3891e
| 3,649,522
|
import numpy as np
def r_precision(r):
"""Score is precision after all relevant documents have been retrieved
Relevance is binary (nonzero is relevant).
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
R Precision
"""
r = np.asarray(r) != 0
z = r.nonzero()[0]
if not z.size:
return 0.
return np.mean(r[:z[-1] + 1])
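# A short worked example: relevance [0, 1, 1, 0, 1] has its last relevant item at
# rank 5, so R-Precision is the precision over the first 5 items, 3/5 = 0.6:
if __name__ == "__main__":
    print(r_precision([0, 1, 1, 0, 1]))  # 0.6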
|
998ff6750ce51455fa09ae5970a94934a4c3f383
| 3,649,523
|
import sklearn.metrics
import sklearn.utils.multiclass
from rasa.test import get_evaluation_metrics
from typing import List
from typing import Optional
from typing import Dict
import os
def evaluate_response_selections(
response_selection_results: List[ResponseSelectionEvaluationResult],
output_directory: Optional[Text],
successes: bool,
errors: bool,
disable_plotting: bool,
) -> Dict: # pragma: no cover
"""Creates summary statistics for response selection.
Only considers those examples with a set response.
    Others are filtered out. Returns a dictionary containing the
evaluation result.
Args:
response_selection_results: response selection evaluation results
output_directory: directory to store files to
successes: if True success are written down to disk
errors: if True errors are written down to disk
disable_plotting: if True no plots are created
Returns: dictionary with evaluation results
"""
# remove empty response targets
num_examples = len(response_selection_results)
response_selection_results = remove_empty_response_examples(
response_selection_results
)
logger.info(
f"Response Selection Evaluation: Only considering those "
f"{len(response_selection_results)} examples that have a defined response out "
f"of {num_examples} examples."
)
response_to_intent_target = {}
for result in response_selection_results:
response_to_intent_target[result.response_target] = result.intent_target
target_responses, predicted_responses = _targets_predictions_from(
response_selection_results, "response_target", "response_prediction"
)
confusion_matrix = sklearn.metrics.confusion_matrix(
target_responses, predicted_responses
)
labels = sklearn.utils.multiclass.unique_labels(
target_responses, predicted_responses
)
if output_directory:
report, precision, f1, accuracy = get_evaluation_metrics(
target_responses, predicted_responses, output_dict=True
)
report = _add_confused_labels_to_report(report, confusion_matrix, labels)
report_filename = os.path.join(
output_directory, "response_selection_report.json"
)
io_utils.dump_obj_as_json_to_file(report_filename, report)
logger.info(f"Classification report saved to {report_filename}.")
else:
report, precision, f1, accuracy = get_evaluation_metrics(
target_responses, predicted_responses
)
if isinstance(report, str):
log_evaluation_table(report, precision, f1, accuracy)
if successes:
successes_filename = "response_selection_successes.json"
if output_directory:
successes_filename = os.path.join(output_directory, successes_filename)
# save classified samples to file for debugging
write_response_successes(response_selection_results, successes_filename)
if errors:
errors_filename = "response_selection_errors.json"
if output_directory:
errors_filename = os.path.join(output_directory, errors_filename)
# log and save misclassified samples to file for debugging
write_response_errors(response_selection_results, errors_filename)
if not disable_plotting:
confusion_matrix_filename = "response_selection_confusion_matrix.png"
if output_directory:
confusion_matrix_filename = os.path.join(
output_directory, confusion_matrix_filename
)
_labels = [
response_to_intent_target[label]
if label in response_to_intent_target
else f"'{label[:20]}...' (response not present in test data)"
for label in labels
]
plot_utils.plot_confusion_matrix(
confusion_matrix,
classes=_labels,
title="Response Selection Confusion Matrix",
output_file=confusion_matrix_filename,
)
histogram_filename = "response_selection_histogram.png"
if output_directory:
histogram_filename = os.path.join(output_directory, histogram_filename)
plot_attribute_confidences(
response_selection_results,
histogram_filename,
"response_target",
"response_prediction",
title="Response Selection Prediction Confidence Distribution",
)
predictions = [
{
"text": res.message,
"intent_target": res.intent_target,
"response_target": res.response_target,
"response_predicted": res.response_prediction,
"confidence": res.confidence,
}
for res in response_selection_results
]
return {
"predictions": predictions,
"report": report,
"precision": precision,
"f1_score": f1,
"accuracy": accuracy,
}
|
cf108c5db93785a2ee8ab963cd6dc318c6a871fe
| 3,649,524
|
from typing import Callable
from typing import Coroutine
from typing import Any
def _async_climate_updater(
lookin_protocol: LookInHttpProtocol,
uuid: str,
) -> Callable[[], Coroutine[None, Any, Climate]]:
"""Create a function to capture the cell variable."""
async def _async_update() -> Climate:
return await lookin_protocol.get_conditioner(uuid)
return _async_update
|
b4160385fe7f304096de6bb9196822d3230c342f
| 3,649,525
|
def load_natural_movies(cpd=1.00):
"""load natural movies dataset
Parameters
----------
- cpd: float of cycles per degree, should be 1.00 or 1.33
"""
if cpd not in {1.00, 1.33}:
raise Exception('cpd must be in {1.00, 1.33}')
if cpd == 1.00:
cpd = '1.00'
elif cpd == 1.33:
cpd = '1.33'
else:
raise Exception('cpd must be in {1.00, 1.33}')
# load X
X_path = '/auto/k6/nbilenko/preproc_data/movie/dir{cpd}cpd_{dataset}stim.npy'
Xtrain = np.load(X_path.format(cpd=cpd, dataset='t'))
Xtest = np.load(X_path.format(cpd=cpd, dataset='v'))
# load Y
Y_path = 'auto/k8/anunez/proj/snmovies/datasets/snmovies_braindata_AH3T.hdf'
cci = glabtools.io.get_cc_interface('anunez_raid', verbose=False)
Y_data = cci.cloud2dict(Y_path, verbose=False)
Ytrain = Y_data['Ytrain']
Ytest = Y_data['Yval']
return {
'natural_movies_gabor_pyramid': {
'Xtrain': Xtrain,
'Ytrain': Ytrain,
'Xtest': Xtest,
'Ytest': Ytest,
},
'natural_movies_mean_gabor': {
'Xtrain': Xtrain.mean(1, keepdims=True),
'Ytrain': Ytrain,
'Xtest': Xtest.mean(1, keepdims=True),
'Ytest': Ytest,
},
}
|
bff8dd14cc2afac89aceb9407651f5cb91509a9a
| 3,649,526
|
import collections
def sort_dataset_by_len(dataset):
"""
returns a dict mapping length -> list of items of that length
    an OrderedDict is used so that the mapping is sorted from smallest to largest
"""
sorted_dataset = collections.OrderedDict()
lengths = sorted(list(set(len(x[1]) for x in dataset)))
for l in lengths:
sorted_dataset[l] = []
for item in dataset:
sorted_dataset[len(item[1])].append(item)
return sorted_dataset
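# A tiny hedged example; each dataset item is assumed to be a (label, sequence)
# pair, since the helper keys on len(item[1]):
if __name__ == "__main__":
    data = [("a", [1, 2]), ("b", [7]), ("c", [3, 4])]
    print(sort_dataset_by_len(data))
    # -> {1: [('b', [7])], 2: [('a', [1, 2]), ('c', [3, 4])]} as an OrderedDict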
|
1e67da963c6d968fba39730cc33e100242fcafca
| 3,649,527
|
def rule_VisibleTo_if_in_same_visible_container(x, actor, world) :
"""Anything in the same visible container to the actor is visible
if the visible container is lit. We treat doors specially: if x
is in the get_room_doors of the visible container, then the door
is visible, too."""
actor_vis_cont = world[VisibleContainer(world[Location(actor)])]
if x in world.activity.get_room_doors(actor_vis_cont) :
return True
if actor_vis_cont == x :
# otherwise we'd be looking too many levels high
x_vis_cont = x
else :
loc = world[Location(x)]
if not loc : raise NotHandled()
x_vis_cont = world[VisibleContainer(loc)]
if actor_vis_cont == x_vis_cont and world[ContainsLight(actor_vis_cont)] :
return True
raise NotHandled()
|
8e4465d7684c95a9890e4271b2dbb75b665d2efd
| 3,649,528
|
import copy
import random
def select_random_user_goals(user_goals_no_req_slots, user_goals_with_req_slots, cardinality_no_req, cardinality_req):
"""
Helper method to randomly select user goals
"""
random_user_goals = {}
random_user_goals['all'] = []
# select randomly user goals without request slots
random_user_goals['all'].extend(copy.deepcopy(random.sample(user_goals_no_req_slots, cardinality_no_req)))
# select randomly user goals with request slots
random_user_goals['all'].extend(copy.deepcopy(random.sample(user_goals_with_req_slots, cardinality_req)))
return random_user_goals
|
ff51361d45cdbd62cc9ee9e8263d47870435b326
| 3,649,529
|
import copy
def dict_items_recursive_apply(config_dict, apply_method, **apply_method_parameters):
"""Recursive apply method to dict elements
>>> dict_items_recursive_apply(
... {"foo": {"bar":"baz"}, "qux": ["a","b"]},
... lambda k,v,x: v.upper()+x, **{"x":"!"}
... ) == {'foo': {'bar': 'BAZ!'}, 'qux': ['A!', 'B!']}
True
    :param config_dict: input nested dictionary
:type config_dict: dict
:param apply_method: method to be applied to dict elements
:type apply_method: :func:`apply_method`
:param apply_method_parameters: optional parameters passed to the method
:type apply_method_parameters: dict
:returns: updated dict
:rtype: dict
"""
result_dict = copy.deepcopy(config_dict)
for dict_k, dict_v in result_dict.items():
if isinstance(dict_v, dict):
result_dict[dict_k] = dict_items_recursive_apply(
dict_v, apply_method, **apply_method_parameters
)
elif any(isinstance(dict_v, t) for t in (list, tuple)):
result_dict[dict_k] = list_items_recursive_apply(
dict_v, apply_method, **apply_method_parameters
)
else:
result_dict[dict_k] = apply_method(
dict_k, dict_v, **apply_method_parameters
)
return result_dict
|
760e3de8e414dcd5300aa79cc703b0941a5852fd
| 3,649,530
|
def d_B_nu_d_T_d_nu_dimensionless(x):
"""
Calculates d^2(B_nu) / d (T) / d (nu),
as a function of dimensionless units, x = (h nu / k_B T)
Parameters
----------
x : float
Returns
-------
d_B_nu_d_T_d_nu_dimensionless : float
Not normalized to anything meaningful
"""
return - np.exp(x)*x**3 * (np.exp(x)*(x-4)+x+4) / (np.exp(x)-1)**3
|
eb1e42d48e15cbc5ea17877868cca27422d89346
| 3,649,531
|
def node_to_get_batch_value(shape_node: Node):
"""
The function returns a node that produces the batch value which is usually the element of the shape with index 0
:param shape_node: the node of 1D output shape to get batch from
:return: the node producing batch value
"""
return node_to_get_shape_value_of_range(shape_node, [0])
|
126570b69895cd34bb6821f179076d6d005c36db
| 3,649,532
|
def re2_full_match(input, pattern): # pylint: disable=redefined-builtin
"""Extract regex groups
Args:
input: A `tf.string` tensor
pattern: A pattern string.
"""
return core_ops.io_re2_full_match(input, pattern)
|
d9ca2606eae8faf21bf2cf7ec1730c69f609d4c5
| 3,649,533
|
import os
import socket
import requests
def user_agent():
"""
Return a User-Agent that identifies this client.
Example:
python-requests/2.9.1 edx-rest-api-client/1.7.2 ecommerce
The last item in the list will be the application name, taken from the
OS environment variable EDX_REST_API_CLIENT_NAME. If that environment
variable is not set, it will default to the hostname.
"""
client_name = 'unknown_client_name'
try:
client_name = os.environ.get("EDX_REST_API_CLIENT_NAME") or socket.gethostbyname(socket.gethostname())
except: # pylint: disable=bare-except
pass # using 'unknown_client_name' is good enough. no need to log.
return "{} edx-rest-api-client/{} {}".format(
requests.utils.default_user_agent(), # e.g. "python-requests/2.9.1"
__version__, # version of this client
client_name
)
|
b0e65ba2ddb3af1f0fc5b0ae1a561b330426d9de
| 3,649,534
|
import click
def optional_tools_or_packages_arg(multiple=False):
""" Decorate click method as optionally taking in the path to a tool
or directory of tools or a Conda package. If no such argument is given
the current working directory will be treated as a directory of tools.
"""
name = "paths" if multiple else "path"
nargs = -1 if multiple else 1
return click.argument(
name,
metavar="TARGET",
nargs=nargs,
)
|
4a34da51b4a644df70c5ce3ea8afb8b86ae2281d
| 3,649,535
|
import numpy
def linear_interpolate_cdf(base_cdf):
"""Linear interpolate regions of straight lines in the CDF.
Parameters:
base_cdf (list): n elements of non-decreasing order.
Returns:
        list of the same length as base_cdf, where runs of equal consecutive
        elements are linearly interpolated between their left and right endpoints.
"""
target_cdf = list(base_cdf)
index = 0
left_val = 0
while index < len(base_cdf)-1:
if base_cdf[index] == base_cdf[index+1]:
# search for where it ends
offset = index+1
while (offset < len(base_cdf)-1 and
base_cdf[offset] == base_cdf[offset+1]):
offset += 1
# linearly interpolate between index and offset
right_val = base_cdf[offset]
interp_val = numpy.interp(
list(range(index, offset+1, 1)),
[index-1, offset],
[float(left_val), float(right_val)])
target_cdf[index:offset+1] = interp_val
left_val = right_val
index = offset+1
else:
left_val = base_cdf[index]
index += 1
return target_cdf
|
8f119d1698a44e90253920decf1b3253db9171be
| 3,649,536
|
def hash_str(string: str) -> int:
"""
Create the hash for a string (poorly).
"""
hashed = 0
results = map(ord, string)
for result in results:
hashed += result
return hashed
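# Worked example: ord('a') + ord('b') = 97 + 98 = 195:
if __name__ == "__main__":
    print(hash_str("ab"))  # 195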
|
b80c177974437966361e4117ba235c1563fee5c4
| 3,649,537
|
import plotly.graph_objects as go
import plotly.io as pio
def graph(g: nx.Graph, s: Optional[list] = None, plot_size: Tuple = (500, 500)): # pragma: no cover
"""Creates a plot of the input graph.
This function can plot the input graph only, or the graph with a specified subgraph highlighted.
Graphs are plotted using the Kamada-Kawai layout with an aspect ratio of 1:1.
**Example usage:**
>>> graph = nx.complete_graph(10)
>>> fig = plot.graph(graph, [0, 1, 2, 3])
>>> fig.show()
.. image:: ../../_static/complete_graph.png
:width: 40%
:align: center
:target: javascript:void(0);
Args:
g (nx.Graph): input graph
s (list): optional list of nodes comprising the subgraph to highlight
plot_size (int): size of the plot in pixels, given as a pair of integers ``(x_size,
y_size)``
Returns:
Figure: figure for graph and optionally highlighted subgraph
"""
    try:
        import plotly.graph_objects as go
        import plotly.io as pio
    except ImportError:
        raise ImportError(plotly_error)
try:
in_notebook = get_ipython().__class__.__name__ == "ZMQInteractiveShell"
except NameError:
in_notebook = False
if not in_notebook:
pio.renderers.default = "browser"
l = nx.kamada_kawai_layout(g)
g_nodes = go.Scatter(
**_node_coords(g, l),
mode="markers",
hoverinfo="text",
marker=dict(color=graph_node_colour, size=graph_node_size, line_width=2),
)
g_edges = go.Scatter(
**_edge_coords(g, l),
line=dict(width=1, color=graph_edge_colour),
hoverinfo="none",
mode="lines",
)
g_nodes.text = [str(i) for i in g.nodes()]
layout = go.Layout(
showlegend=False,
hovermode="closest",
xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
yaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
margin=dict(b=0, l=0, r=0, t=25),
height=plot_size[1],
width=plot_size[0],
plot_bgcolor="#ffffff",
)
if s is not None:
s = g.subgraph(s)
s_edges = go.Scatter(
**_edge_coords(s, l),
line=dict(width=2, color=subgraph_edge_colour),
hoverinfo="none",
mode="lines",
)
s_nodes = go.Scatter(
**_node_coords(s, l),
mode="markers",
hoverinfo="text",
marker=dict(color=subgraph_node_colour, size=subgraph_node_size, line_width=2),
)
s_nodes.text = [str(i) for i in s.nodes()]
f = go.Figure(data=[g_edges, s_edges, g_nodes, s_nodes], layout=layout)
else:
f = go.Figure(data=[g_edges, g_nodes], layout=layout)
return f
|
9830ef44f3a85234002c11d0da1913a89c332491
| 3,649,538
|
import numpy as np
def intersect(p1x, p1y, p2x, p2y, x0, y0):
    """Intersect the segment defined by p1 and p2 with the horizontal ray leaving
    (x0, y0) in the +x direction: y = y0, x = x0 + dx with dx > 0.
Args:
p1x (float): x coordinate of point 1 of segment
p1y (float): y coordinate of point 1 of segment
p2x (float): x coordinate of point 2 of segment
p2y (float): y coordinate of point 2 of segment
x0 (float): x coordinate anchoring the intersection ray
y0 (float): y coordinate anchoring the intersection ray
Returns:
boolean int: (1) if intersecting, (0) if not intersecting
"""
if p1x != p2x and p1y != p2y:
m = (p2y - p1y) / (p2x - p1x)
x_inter = (y0 - p1y) / m + p1x
if x_inter >= x0 and np.min([p1y, p2y]) <= y0 <= np.max([p1y, p2y]):
ans = 1
else:
ans = 0
else:
if p1x == p2x: # vertical segment
if x0 <= p1x and np.min([p1y, p2y]) <= y0 <= np.max([p1y, p2y]):
ans = 1
else:
ans = 0
if p1y == p2y: # horizontal segment
if y0 == p1y:
ans = 1
else:
ans = 0
return ans
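# A quick hedged check: the segment from (0, 0) to (2, 2) crosses the horizontal
# ray leaving (0, 1) toward +x at (1, 1), so the result should be 1:
if __name__ == "__main__":
    print(intersect(0.0, 0.0, 2.0, 2.0, 0.0, 1.0))  # 1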
|
b58ae51cf179183689a7ed4b0854eefaeb28b895
| 3,649,539
|
import numpy as np
from scipy import linalg
def impulse_matrix(params, dt, reduced=False):
"""Calculate the matrix exponential for integration of MAT model"""
a1, a2, b, w, R, tm, t1, t2, tv, tref = params
if not reduced:
A = - np.matrix([[1 / tm, -1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 1 / t1, 0, 0, 0],
[0, 0, 0, 1 / t2, 0, 0],
[0, 0, 0, 0, 1 / tv, -1],
[b / tm, -b, 0, 0, 0, 1 / tv]])
else:
A = - np.matrix([[1 / tm, -1, 0, 0],
[0, 0, 0, 0],
[0, 0, 1 / tv, -1],
[b / tm, -b, 0, 1 / tv]])
return linalg.expm(A * dt)
|
4305d588680dd5de91765e79b170d26e43f82a01
| 3,649,540
|
def get_vimg(request):
"""
    Get a captcha image.
:param request:
:return:
"""
text, image = vcode.gen_captcha_text_and_image()
v_key = request.GET.get('vk')
ex_key = request.GET.get('ex')
if ex_key:
try:
redis_conn.delete(ex_key)
except Exception as e:
logger.error(e)
redis_conn.set(v_key, text, 60*3)
return HttpResponse(image.getvalue(), content_type='image/jpg')
|
f3398236cb4d69f21a04519de472d85dca885a2c
| 3,649,541
|
def generate_fcm_token():
"""Generate an FCM token
nLAUJTr5RIJ:MNmSQ8O52FoJSvfWEPF4KvWopcNScNFRPHHbXdepwzuXJJMfadpEfb2JlHoqEhWanFz7-N0sfPg-pW4gNubNdxyikiI0lrvGeWGTp86fn9-NA3sZ-Eizv9QE7YKHCOIa70fR38N1ZYsb
"""
return '{}:{}-{}-{}-{}-{}'.format(random_all(11),
random_all(68),
random_all(6),
random_all(30),
random_all(5),
random_all(27))
|
e535584bf630e1353a8f7458ff45cf2f0c1433fb
| 3,649,542
|
from typing import Optional
import grp
def _create_group(username: str, gid: Optional[int] = None, system: bool = False) -> Result[Group]:
"""
Create a new group.
"""
try:
get_group(username)
except KeyError:
pass
else:
raise ValueError("Username {!r} is already in use".format(username))
args = ["/usr/sbin/addgroup", username]
if gid:
try:
group = grp.getgrgid(gid)
except KeyError:
args[-1:-1] = ["--gid", str(gid)]
else:
raise ValueError("GID {} is already in use by {!r}".format(gid, group.gr_name))
if system:
args[-1:-1] = ["--system"]
command(args)
group = get_group(username)
LOG.debug("Created UNIX group: %r", group)
return Result(State.created, group)
|
117e25e91c9f3e37bc58a005d3e036dc8cc9f4d3
| 3,649,543
|
def evaluate(data_loader):
"""Evaluate given the data loader
Parameters
----------
data_loader : DataLoader
Returns
-------
avg_loss : float
Average loss
real_translation_out : list of list of str
The translation output
"""
translation_out = []
all_inst_ids = []
avg_loss_denom = 0
avg_loss = 0.0
for _, (src_seq, tgt_seq, src_valid_length, tgt_valid_length, inst_ids) \
in enumerate(data_loader):
src_seq = src_seq.as_in_context(ctx)
tgt_seq = tgt_seq.as_in_context(ctx)
src_valid_length = src_valid_length.as_in_context(ctx)
tgt_valid_length = tgt_valid_length.as_in_context(ctx)
# Calculating Loss
out, _ = model(src_seq, tgt_seq[:, :-1], src_valid_length, tgt_valid_length - 1)
loss = loss_function(out, tgt_seq[:, 1:], tgt_valid_length - 1).mean().asscalar()
all_inst_ids.extend(inst_ids.asnumpy().astype(np.int32).tolist())
avg_loss += loss * (tgt_seq.shape[1] - 1)
avg_loss_denom += (tgt_seq.shape[1] - 1)
# Translate
samples, _, sample_valid_length =\
translator.translate(src_seq=src_seq, src_valid_length=src_valid_length)
max_score_sample = samples[:, 0, :].asnumpy()
sample_valid_length = sample_valid_length[:, 0].asnumpy()
for i in range(max_score_sample.shape[0]):
translation_out.append(
[tgt_vocab.idx_to_token[ele] for ele in
max_score_sample[i][1:(sample_valid_length[i] - 1)]])
avg_loss = avg_loss / avg_loss_denom
real_translation_out = [None for _ in range(len(all_inst_ids))]
for ind, sentence in zip(all_inst_ids, translation_out):
real_translation_out[ind] = sentence
return avg_loss, real_translation_out
|
f7697e9f22e5bda3af6b0892b3cc5c3b047771f0
| 3,649,544
|
def adding_equation(thetas, eta0, eta1, eta2, eta3, kappa3 = 0.0, polarized=False, tau1=0.0, tau2=0.0):
""" Return the reflectance of a 4 layers material (3 interfaces)
with all inter-reflections, using adding equation """
zeros = [np.zeros_like(thetas),np.zeros_like(thetas)] if polarized else np.zeros_like(thetas)
R01 = fresnel(np.cos(thetas), eta1/eta0, polarized=polarized) if eta1 != eta0 else zeros
ones = np.ones_like(R01)
T01 = ones - R01
thetas_t1 = clamp(np.arcsin(eta0 / eta1 * np.sin(thetas)))
    thetas_t1 = np.where(np.isfinite(thetas_t1), thetas_t1, 0.0)
R10 = fresnel(np.cos(thetas_t1), eta0/eta1, polarized=polarized) if eta1 != eta0 else zeros
R12 = fresnel(np.cos(thetas_t1), eta2/eta1, polarized=polarized) if eta1 != eta2 else zeros
T12 = ones - R12
thetas_t2 = clamp(np.arcsin(eta1/eta2 * np.sin(thetas_t1)))
    thetas_t2 = np.where(np.isfinite(thetas_t2), thetas_t2, 0.0)
R21 = fresnel(np.cos(thetas_t2), eta1/eta2, polarized=polarized) if eta1 != eta2 else zeros
k = 0.0 if kappa3 == 0.0 else kappa3/eta2
R23 = fresnel(np.cos(thetas_t2), eta3/eta2, k, polarized=polarized)
if polarized:
res = []
for i in range(2):
R13 = add_with_absorption(R12[i], R23[i], tau2, thetas_t2)
R03 = add_with_absorption(R01[i], R13, tau1, thetas_t1)
#R13 = add(R12[i], T12[i], R21[i], R23[i])
#R03 = add(R01[i], T01[i], R10[i], R13)
res.append(np.where(np.isfinite(R03), R03, ones[0]))
return res
#R13 = add(R12, T12, R21, R23)
#R03 = add(R01, T01, R10, R13)
R13 = add_with_absorption(R12, R23, tau2, thetas_t2)
R03 = add_with_absorption(R01, R13, tau1, thetas_t1)
return np.where(np.isfinite(R03), R03, 1.0)
|
1e1f7e56096d712f04354cca52987b2010fd322f
| 3,649,545
|
import logging
def text_expand(context):
"""
    Given a context, pick out the bible indexes, turn them into normalized scripture, and put the scripture back into the context
"""
output = []
end = 0
for m in candidate_filter(context):
output.append(m.group('out'))
try:
bucket = get_bucket(m)
formated = format_bucket(bucket)
output.extend(['《',':'.join(list(formated)), '》'])
except KeyError:
output.append(m.group(0))
except AttributeError:
output.append(m.group(0))
except:
            logging.warning(context)
end = m.end()
output.append(context[end:])
return ''.join(output)
|
146cd85a1007215cc8bed53341418a7b3c23b532
| 3,649,546
|
from typing import Iterable
import datetime
def json_custom_parser(obj):
"""
A custom json parser to handle json.dumps calls properly for Decimal and
Datetime data types.
"""
if not isinstance(obj, string_types) and isinstance(obj, Iterable):
return list(obj)
elif isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date):
dot_ix = 19 # 'YYYY-MM-DDTHH:MM:SS.mmmmmm+HH:MM'.find('.')
return obj.isoformat()[:dot_ix]
else:
raise TypeError(obj)
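# A hedged usage example; string_types is assumed to be six.string_types in the
# original module, so a plain-Python stand-in is defined here to keep the demo
# self-contained. Sets become lists and datetimes are truncated to seconds:
if __name__ == "__main__":
    import json
    string_types = (str,)  # stand-in for six.string_types
    payload = {"when": datetime.datetime(2021, 5, 4, 12, 30, 15, 123456), "tags": {"a", "b"}}
    print(json.dumps(payload, default=json_custom_parser))
    # e.g. {"when": "2021-05-04T12:30:15", "tags": ["a", "b"]}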
|
dbee1501376d2b1fc235b5351236e857fc9c5750
| 3,649,547
|
from time import sleep
def like():
""" Function to automatically like a picture
:return: 0 or 1 where 1 = one picture liked
:rtype: int
"""
like_icons = driver.find_elements_by_xpath("//*[contains(@aria-label, 'Like')]")
unlike_icons = driver.find_elements_by_xpath("//*[contains(@aria-label, 'Unlike')]")
for icon in unlike_icons or like_icons:
height = icon.get_attribute("height")
fill_color = icon.get_attribute("fill")
# Ensuring it is the correct icon and that it has not been liked before
if height == "24" and fill_color == "#ed4956":
# Fill color of a post already liked is #ed4956
print("Picture already liked.")
return 0
elif height == "24" and fill_color == "#262626":
# Fill color of post NOT liked is #262626
# ('..') is used here to fetch the parent of icon using xpath
like_button = icon.find_element_by_xpath('..')
like_button.click()
print("Picture liked :)")
sleep(2)
return 1
else: # pragma: no cover
pass
|
5db4a43c4b29a3cb49d62bddf755a9e374f0ac4e
| 3,649,548
|
import os
def entity_tsv(args):
""" Get list of entities in TSV format. Download files for which the
encoding is undetected (e.g. ZIP archives). """
r = fapi.get_entities_tsv(args.project, args.workspace,
args.entity_type, args.attrs, args.model)
fapi._check_response_code(r, 200)
if r.apparent_encoding is not None:
return r.content.decode(r.apparent_encoding)
else:
content = r.headers['Content-Disposition'].split('; ')[-1].split('=')
if len(content) == 2 and content[0] == 'filename':
filename = content[1]
if os.path.exists(filename) and (args.yes or not _confirm_prompt(
'This will overwrite {}'.format(filename))):
return
with open(filename, 'wb') as outfile:
for chunk in r:
outfile.write(chunk)
print('Downloaded {}.'.format(filename))
return
else:
eprint("Unable to determine name of file to download.")
return 1
|
14f966144e21d91801f9e4b5966e0499699da83e
| 3,649,549
|
def compare_files(file_name1, file_name2):
"""
Compare two files, line by line, for equality.
Arguments:
file_name1 (str or unicode): file name.
file_name2 (str or unicode): file name.
Returns:
bool: True if files are equal, False otherwise.
"""
    with open(file_name1) as file1, open(file_name2) as file2:
        for line1, line2 in zip(file1, file2):
            if line1 != line2:
                return False
    return True
3f77cf177ba60ddd121b95648379fff845d9877b
| 3,649,550
|
def like(request, pk):
"""Add a user to those who liked the post.
Only authenticated users are able to like a post.
"""
if request.method == 'POST':
# query the post in question
try:
post = Post.objects.get(pk=pk)
except Post.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
# add a user to the list of those who liked this post
# won't duplicate the relationship
post.users_who_liked.add(request.user)
return Response({'message': f'Liked the post {pk}.'})
|
d3ef1d9728592872a73b900c60e4021078d2ef2e
| 3,649,551
|
import os
import sys
import subprocess
def calc_power_VARIANT(input_data, working_dir, t_pt=0):
"""Calculate the power distributions from VARIANT
Parameters
----------
data : dict
DASSH input data dictionary
working_dir : str
Path to current working directory
Returns
-------
dict
DASSH Power objects for each type of assembly in the problem;
different objects are required because different assemblies
can have different unrodded region specifications
"""
cwd = os.getcwd()
if working_dir != '':
os.chdir(working_dir)
# Identify VARPOW keys for fuel and coolant
fuel_type = input_data['Power']['ARC']['fuel_material'].lower()
fuel_id = _FUELS[fuel_type]
if type(fuel_id) == dict:
alloy_type = input_data['Power']['ARC']['fuel_alloy'].lower()
fuel_id = fuel_id[alloy_type]
coolant_heating = input_data['Power']['ARC']['coolant_heating']
if coolant_heating is None:
coolant_heating = input_data['Core']['coolant_material']
if coolant_heating.lower() not in _COOLANTS.keys():
module_logger.error('Unknown coolant specification for '
'heating calculation; must choose '
'from options: Na, NaK, Pb, Pb-Bi')
else:
cool_id = _COOLANTS[coolant_heating.lower()]
# Run VARPOW, rename output files
path2varpow = os.path.dirname(os.path.abspath(__file__))
if sys.platform == 'darwin':
path2varpow = os.path.join(path2varpow, 'varpow_osx.x')
elif 'linux' in sys.platform:
path2varpow = os.path.join(path2varpow, 'varpow_linux.x')
else:
raise SystemError('DASSH currently supports only Linux and OSX')
with open('varpow_stdout.txt', 'w') as f:
subprocess.call([path2varpow,
str(fuel_id),
str(cool_id),
input_data['Power']['ARC']['pmatrx'][t_pt],
input_data['Power']['ARC']['geodst'][t_pt],
input_data['Power']['ARC']['ndxsrf'][t_pt],
input_data['Power']['ARC']['znatdn'][t_pt],
input_data['Power']['ARC']['nhflux'][t_pt],
input_data['Power']['ARC']['ghflux'][t_pt]],
stdout=f)
subprocess.call(['mv', 'MaterialPower.out',
'varpow_MatPower.out'])
subprocess.call(['mv', 'VariantMonoExponents.out',
'varpow_MonoExp.out'])
subprocess.call(['mv', 'Output.VARPOW', 'VARPOW.out'])
os.chdir(cwd)
return import_power_VARIANT(input_data, working_dir, t_pt)
|
acd58904ae1f67ef1f27de5e5399d21175c0ef9e
| 3,649,552
|
import numpy as np
def from_float32(buffer):
"""Interprets an arbitrary string or NumPy array as Vax single-precision
floating-point binary values, and returns the equivalent array in IEEE
values."""
# Convert the buffer to 2-byte elements
if isinstance(buffer, (str, np.str_, bytes, bytearray)):
pairs = np.fromstring(buffer, dtype='uint16')
pairs = pairs.reshape(pairs.size//2, 2)
newshape = (pairs.size//2,)
else:
buffer = np.asarray(buffer)
pairs = buffer.view('uint16')
assert pairs.shape[-1] % 2 == 0, \
'buffer shape is incompatible with 4-byte elements'
if buffer.itemsize == 1:
            newshape = buffer.shape[:-1] + (buffer.shape[-1]//4,)
elif buffer.itemsize == 2:
            newshape = buffer.shape[:-1] + (buffer.shape[-1]//2,)
elif buffer.itemsize == 4:
newshape = buffer.shape[:-1] + (1,)
else:
newshape = buffer.shape + (buffer.itemsize//4,)
if newshape[-1] == 1: newshape = newshape[:-1]
# Perform a pairwise swap of the two-byte elements
swapped = np.empty(pairs.shape, dtype='uint16')
swapped[...,:] = pairs[...,::-1]
# The results are in LSB IEEE format aside from a scale factor of four
ieee = swapped.view('<f4') / 4.
return ieee.reshape(newshape)
|
2ab310b2d5cc6fcd7f9f094d97de319a1643dc7e
| 3,649,553
|
def get_file_stats(file_name, entity_type='file', lineno=None, cursorpos=None,
plugin=None, language=None, local_file=None):
"""Returns a hash of information about the entity."""
language = standardize_language(language, plugin)
stats = {
'language': language,
'dependencies': [],
'lines': None,
'lineno': lineno,
'cursorpos': cursorpos,
}
if entity_type == 'file':
lexer = get_lexer(language)
if not language:
language, lexer = guess_language(file_name, local_file)
parser = DependencyParser(local_file or file_name, lexer)
stats.update({
'language': use_root_language(language, lexer),
'dependencies': parser.parse(),
'lines': number_lines_in_file(local_file or file_name),
})
return stats
|
b1ccf3d0eb2af676fce690e5f81182d89d50596b
| 3,649,554
|
def setup(app: sphinx.application.Sphinx) -> dict[str, object]:
"""Called by Sphinx to set up the extension."""
app.add_config_value("gaphor_models", {}, "env", [dict])
app.add_directive("diagram", DiagramDirective)
app.connect("config-inited", config_inited)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
|
992d7436d31cd18b7cd50b02b013d9c56179eacb
| 3,649,555
|
from nipype.interfaces.afni import utils as afni_utils
def create_vmhc(use_ants, flirt_only=False, name='vmhc_workflow', ants_threads=1):
"""
Compute the map of brain functional homotopy, the high degree of synchrony in spontaneous activity between geometrically corresponding interhemispheric (i.e., homotopic) regions.
Parameters
----------
None
Returns
-------
vmhc_workflow : workflow
Voxel Mirrored Homotopic Connectivity Analysis Workflow
Notes
-----
`Source <https://github.com/FCP-INDI/C-PAC/blob/master/CPAC/vmhc/vmhc.py>`_
Workflow Inputs::
inputspec.brain : string (existing nifti file)
Anatomical image(without skull)
inputspec.symmetric_brain : string (existing nifti file)
MNI152_T1_2mm_symmetric_brain.nii.gz
inputspec.rest_res_filt : string (existing nifti file)
Band passed Image with nuisance signal regressed out(and optionally scrubbed). Recommended bandpass filter (0.001,0.1) )
inputspec.reorient : string (existing nifti file)
RPI oriented anatomical data
inputspec.example_func2highres_mat : string (existing affine transformation .mat file)
Specifies an affine transform that should be applied to the example_func before non linear warping
inputspec.standard_for_func: string (existing nifti file)
MNI152_T1_standard_resolution_brain.nii.gz
inputspec.symmetric_skull : string (existing nifti file)
MNI152_T1_2mm_symmetric.nii.gz
inputspec.twomm_brain_mask_dil : string (existing nifti file)
MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz
inputspec.config_file_twomm_symmetric : string (existing .cnf file)
T1_2_MNI152_2mm_symmetric.cnf
inputspec.rest_mask : string (existing nifti file)
A mask functional volume(derived by dilation from motion corrected functional volume)
fwhm_input.fwhm : list (float)
For spatial smoothing the Z-transformed correlations in MNI space.
Generally the value of this parameter is 1.5 or 2 times the voxel size of the input Image.
inputspec.mean_functional : string (existing nifti file)
The mean functional image for use in the func-to-anat registration matrix conversion
to ITK (ANTS) format, if the user selects to use ANTS.
Workflow Outputs::
outputspec.highres2symmstandard : string (nifti file)
Linear registration of T1 image to symmetric standard image
outputspec.highres2symmstandard_mat : string (affine transformation .mat file)
An affine transformation .mat file from linear registration and used in non linear registration
outputspec.highres2symmstandard_warp : string (nifti file)
warp file from Non Linear registration of T1 to symmetrical standard brain
outputspec.fnirt_highres2symmstandard : string (nifti file)
Non Linear registration of T1 to symmetrical standard brain
outputspec.highres2symmstandard_jac : string (nifti file)
jacobian determinant image from Non Linear registration of T1 to symmetrical standard brain
outputspec.rest_res_2symmstandard : string (nifti file)
nonlinear registration (func to standard) image
outputspec.VMHC_FWHM_img : string (nifti file)
pearson correlation between res2standard and flipped res2standard
outputspec.VMHC_Z_FWHM_img : string (nifti file)
Fisher Z transform map
outputspec.VMHC_Z_stat_FWHM_img : string (nifti file)
Z statistic map
Order of commands:
- Perform linear registration of Anatomical brain in T1 space to symmetric standard space. For details see `flirt <http://www.fmrib.ox.ac.uk/fsl/flirt/index.html>`_::
flirt
-ref MNI152_T1_2mm_symmetric_brain.nii.gz
-in mprage_brain.nii.gz
-out highres2symmstandard.nii.gz
-omat highres2symmstandard.mat
-cost corratio
-searchcost corratio
-dof 12
-interp trilinear
- Perform nonlinear registration (higres to standard) to symmetric standard brain. For details see `fnirt <http://fsl.fmrib.ox.ac.uk/fsl/fnirt/>`_::
fnirt
--in=head.nii.gz
--aff=highres2symmstandard.mat
--cout=highres2symmstandard_warp.nii.gz
--iout=fnirt_highres2symmstandard.nii.gz
--jout=highres2symmstandard_jac.nii.gz
--config=T1_2_MNI152_2mm_symmetric.cnf
--ref=MNI152_T1_2mm_symmetric.nii.gz
--refmask=MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz
--warpres=10,10,10
- Perform spatial smoothing on the input functional image(inputspec.rest_res_filt). For details see `PrinciplesSmoothing <http://imaging.mrc-cbu.cam.ac.uk/imaging/PrinciplesSmoothing>`_ `fslmaths <http://www.fmrib.ox.ac.uk/fslcourse/lectures/practicals/intro/index.htm>`_::
fslmaths rest_res_filt.nii.gz
-kernel gauss FWHM/ sqrt(8-ln(2))
-fmean -mas rest_mask.nii.gz
rest_res_filt_FWHM.nii.gz
- Apply nonlinear registration (func to standard). For details see `applywarp <http://www.fmrib.ox.ac.uk/fsl/fnirt/warp_utils.html#applywarp>`_::
applywarp
--ref=MNI152_T1_2mm_symmetric.nii.gz
--in=rest_res_filt_FWHM.nii.gz
--out=rest_res_2symmstandard.nii.gz
--warp=highres2symmstandard_warp.nii.gz
--premat=example_func2highres.mat
- Copy and L/R swap the output of applywarp command (rest_res_2symmstandard.nii.gz). For details see `fslswapdim <http://fsl.fmrib.ox.ac.uk/fsl/fsl4.0/avwutils/index.html>`_::
fslswapdim
rest_res_2symmstandard.nii.gz
-x y z
tmp_LRflipped.nii.gz
- Calculate pearson correlation between rest_res_2symmstandard.nii.gz and flipped rest_res_2symmstandard.nii.gz(tmp_LRflipped.nii.gz). For details see `3dTcorrelate <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dTcorrelate.html>`_::
3dTcorrelate
-pearson
-polort -1
-prefix VMHC_FWHM.nii.gz
rest_res_2symmstandard.nii.gz
tmp_LRflipped.nii.gz
- Fisher Z Transform the correlation. For details see `3dcalc <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dcalc.html>`_::
3dcalc
-a VMHC_FWHM.nii.gz
-expr 'log((a+1)/(1-a))/2'
-prefix VMHC_FWHM_Z.nii.gz
- Calculate the number of volumes(nvols) in flipped rest_res_2symmstandard.nii.gz(tmp_LRflipped.nii.gz) ::
-Use Nibabel to do this
- Compute the Z statistic map ::
3dcalc
-a VMHC_FWHM_Z.nii.gz
-expr 'a*sqrt('${nvols}'-3)'
-prefix VMHC_FWHM_Z_stat.nii.gz
Workflow:
.. image:: ../images/vmhc_graph.dot.png
:width: 500
Workflow Detailed:
.. image:: ../images/vmhc_detailed_graph.dot.png
:width: 500
References
----------
.. [1] Zuo, X.-N., Kelly, C., Di Martino, A., Mennes, M., Margulies, D. S., Bangaru, S., Grzadzinski, R., et al. (2010). Growing together and growing apart: regional and sex differences in the lifespan developmental trajectories of functional homotopy. The Journal of neuroscience : the official journal of the Society for Neuroscience, 30(45), 15034-43. doi:10.1523/JNEUROSCI.2612-10.2010
Examples
--------
>>> vmhc_w = create_vmhc()
>>> vmhc_w.inputs.inputspec.symmetric_brain = 'MNI152_T1_2mm_symmetric_brain.nii.gz'
>>> vmhc_w.inputs.inputspec.symmetric_skull = 'MNI152_T1_2mm_symmetric.nii.gz'
>>> vmhc_w.inputs.inputspec.twomm_brain_mask_dil = 'MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz'
>>> vmhc_w.inputs.inputspec.config_file_twomm = 'T1_2_MNI152_2mm_symmetric.cnf'
>>> vmhc_w.inputs.inputspec.standard_for_func= 'MNI152_T1_2mm.nii.gz'
>>> vmhc_w.inputs.fwhm_input.fwhm = [4.5, 6]
>>> vmhc_w.get_node('fwhm_input').iterables = ('fwhm', [4.5, 6])
>>> vmhc_w.inputs.inputspec.rest_res = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/func/original/rest_res_filt.nii.gz')
>>> vmhc_w.inputs.inputspec.reorient = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/anat/mprage_RPI.nii.gz')
>>> vmhc_w.inputs.inputspec.brain = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/anat/mprage_brain.nii.gz')
>>> vmhc_w.inputs.inputspec.example_func2highres_mat = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/func/original/reg/example_func2highres.mat')
>>> vmhc_w.inputs.inputspec.rest_mask = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/func/original/rest_mask.nii.gz')
>>> vmhc_w.run() # doctest: +SKIP
"""
vmhc = pe.Workflow(name=name)
inputNode = pe.Node(util.IdentityInterface(fields=['rest_res',
'example_func2highres_mat',
'rest_mask',
'standard_for_func',
'mean_functional',
'brain',
'flirt_linear_aff',
'fnirt_nonlinear_warp',
'ants_symm_initial_xfm',
'ants_symm_rigid_xfm',
'ants_symm_affine_xfm',
'ants_symm_warp_field']),
name='inputspec')
outputNode = pe.Node(util.IdentityInterface(fields=['rest_res_2symmstandard',
'VMHC_FWHM_img',
'VMHC_Z_FWHM_img',
'VMHC_Z_stat_FWHM_img']),
name='outputspec')
inputnode_fwhm = pe.Node(util.IdentityInterface(fields=['fwhm']),
name='fwhm_input')
if use_ants == False:
# Apply nonlinear registration (func to standard)
func_to_standard = pe.Node(interface=fsl.ApplyWarp(),
name='func_to_standard')
elif use_ants == True:
# ANTS warp image etc.
fsl_to_itk_vmhc = create_wf_c3d_fsl_to_itk(0, name='fsl_to_itk_vmhc')
collect_transforms_vmhc = create_wf_collect_transforms(0, name='collect_transforms_vmhc')
apply_ants_xfm_vmhc = create_wf_apply_ants_warp(map_node=False, name='apply_ants_xfm_vmhc',
ants_threads=ants_threads)
# this has to be 3 instead of default 0 because it is a 4D file
apply_ants_xfm_vmhc.inputs.inputspec.input_image_type = 3
# copy and L/R swap file
copy_and_L_R_swap = pe.Node(interface=fsl.SwapDimensions(),
name='copy_and_L_R_swap')
copy_and_L_R_swap.inputs.new_dims = ('-x', 'y', 'z')
# calculate vmhc
pearson_correlation = pe.Node(interface=preprocess.TCorrelate(),
name='pearson_correlation')
pearson_correlation.inputs.pearson = True
pearson_correlation.inputs.polort = -1
pearson_correlation.inputs.outputtype = 'NIFTI_GZ'
try:
z_trans = pe.Node(interface=preprocess.Calc(), name='z_trans')
z_stat = pe.Node(interface=preprocess.Calc(), name='z_stat')
except AttributeError:
z_trans = pe.Node(interface=afni_utils.Calc(), name='z_trans')
z_stat = pe.Node(interface=afni_utils.Calc(), name='z_stat')
z_trans.inputs.expr = 'log((1+a)/(1-a))/2'
z_trans.inputs.outputtype = 'NIFTI_GZ'
z_stat.inputs.outputtype = 'NIFTI_GZ'
NVOLS = pe.Node(util.Function(input_names=['in_files'],
output_names=['nvols'],
function=get_img_nvols),
name='NVOLS')
generateEXP = pe.Node(util.Function(input_names=['nvols'],
output_names=['expr'],
function=get_operand_expression),
name='generateEXP')
smooth = pe.Node(interface=fsl.MultiImageMaths(),
name='smooth')
if use_ants == False:
vmhc.connect(inputNode, 'rest_res',
smooth, 'in_file')
vmhc.connect(inputnode_fwhm, ('fwhm', set_gauss),
smooth, 'op_string')
vmhc.connect(inputNode, 'rest_mask',
smooth, 'operand_files')
vmhc.connect(inputNode, 'standard_for_func',
func_to_standard, 'ref_file')
if not flirt_only:
vmhc.connect(inputNode, 'fnirt_nonlinear_warp',
func_to_standard, 'field_file')
vmhc.connect(smooth, 'out_file',
func_to_standard, 'in_file')
vmhc.connect(inputNode, 'example_func2highres_mat',
func_to_standard, 'premat')
else:
func_to_anat = pe.Node(interface=fsl.ApplyWarp(),
name='func_to_anat')
vmhc.connect(smooth, 'out_file', func_to_anat, 'in_file')
vmhc.connect(inputNode, 'brain', func_to_anat, 'ref_file')
vmhc.connect(inputNode, 'example_func2highres_mat',
func_to_anat, 'premat')
vmhc.connect(func_to_anat, 'out_file', func_to_standard, 'in_file')
vmhc.connect(inputNode, 'flirt_linear_aff',
func_to_standard, 'premat')
vmhc.connect(func_to_standard, 'out_file',
copy_and_L_R_swap, 'in_file')
vmhc.connect(func_to_standard, 'out_file',
pearson_correlation, 'xset')
elif use_ants == True:
# connections for ANTS stuff
# functional apply warp stuff
vmhc.connect(inputNode, 'rest_res',
smooth, 'in_file')
vmhc.connect(inputnode_fwhm, ('fwhm', set_gauss),
smooth, 'op_string')
vmhc.connect(inputNode, 'rest_mask',
smooth, 'operand_files')
vmhc.connect(smooth, 'out_file',
apply_ants_xfm_vmhc, 'inputspec.input_image')
vmhc.connect(inputNode, 'ants_symm_initial_xfm',
collect_transforms_vmhc, 'inputspec.linear_initial')
vmhc.connect(inputNode, 'ants_symm_rigid_xfm',
collect_transforms_vmhc, 'inputspec.linear_rigid')
vmhc.connect(inputNode, 'ants_symm_affine_xfm',
collect_transforms_vmhc, 'inputspec.linear_affine')
vmhc.connect(inputNode, 'ants_symm_warp_field',
collect_transforms_vmhc, 'inputspec.warp_file')
# func->anat matrix (bbreg)
vmhc.connect(inputNode, 'example_func2highres_mat',
fsl_to_itk_vmhc, 'inputspec.affine_file')
vmhc.connect(inputNode, 'brain', fsl_to_itk_vmhc,
'inputspec.reference_file')
vmhc.connect(inputNode, 'mean_functional', fsl_to_itk_vmhc,
'inputspec.source_file')
vmhc.connect(fsl_to_itk_vmhc, 'outputspec.itk_transform',
collect_transforms_vmhc, 'inputspec.fsl_to_itk_affine')
vmhc.connect(inputNode, 'standard_for_func',
apply_ants_xfm_vmhc, 'inputspec.reference_image')
vmhc.connect(collect_transforms_vmhc,
'outputspec.transformation_series',
apply_ants_xfm_vmhc, 'inputspec.transforms')
vmhc.connect(apply_ants_xfm_vmhc, 'outputspec.output_image',
copy_and_L_R_swap, 'in_file')
vmhc.connect(apply_ants_xfm_vmhc, 'outputspec.output_image',
pearson_correlation, 'xset')
vmhc.connect(copy_and_L_R_swap, 'out_file',
pearson_correlation, 'yset')
vmhc.connect(pearson_correlation, 'out_file',
z_trans, 'in_file_a')
vmhc.connect(copy_and_L_R_swap, 'out_file',
NVOLS, 'in_files')
vmhc.connect(NVOLS, 'nvols',
generateEXP, 'nvols')
vmhc.connect(z_trans, 'out_file',
z_stat, 'in_file_a')
vmhc.connect(generateEXP, 'expr',
z_stat, 'expr')
if use_ants == False:
vmhc.connect(func_to_standard, 'out_file',
outputNode, 'rest_res_2symmstandard')
elif use_ants == True:
# ANTS warp outputs to outputnode
vmhc.connect(apply_ants_xfm_vmhc, 'outputspec.output_image',
outputNode, 'rest_res_2symmstandard')
vmhc.connect(pearson_correlation, 'out_file',
outputNode, 'VMHC_FWHM_img')
vmhc.connect(z_trans, 'out_file',
outputNode, 'VMHC_Z_FWHM_img')
vmhc.connect(z_stat, 'out_file',
outputNode, 'VMHC_Z_stat_FWHM_img')
return vmhc
|
4c71974d962d86385de8de9d6752dc59b1e205d0
| 3,649,556
|
def get_chunk_index(connection, db, # pylint: disable=too-many-arguments
tbl, chunk,
ch_db='percona', ch_tbl='checksums'):
"""
Get index that was used to cut the chunk
:param connection: MySQLDb connection
:param db: database of the chunk
:param tbl: table of the chunk
:param chunk: chunk id
:param ch_db: Database where checksums are stored. Default percona.
:param ch_tbl: Table where checksums are stored. Default checksums.
:return: index name or None if no index was used
"""
cur = connection.cursor()
query = "SELECT chunk_index FROM `%s`.`%s` " \
"WHERE db='%s' AND tbl='%s' AND chunk = %s"
LOG.info('Executing %s', query % (ch_db, ch_tbl, db, tbl, chunk))
cur.execute(query % (ch_db, ch_tbl, db, tbl, chunk))
return cur.fetchone()[0]
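
# Illustrative usage sketch (assumes a live MySQLdb connection and a pt-table-checksum
# style `percona.checksums` table; LOG is the module-level logger):
#
#   import MySQLdb
#   conn = MySQLdb.connect(host="localhost", user="root", passwd="secret", db="percona")
#   index_name = get_chunk_index(conn, "mydb", "mytable", chunk=42)
#   print(index_name)   # e.g. "PRIMARY", or None if no index was recorded for that chunk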
|
6cf59174d766d68dc635e9c578c9d4d12dba55bf
| 3,649,557
|
import subprocess
import json
def _get_tree(code):
"""Return an AST tree of the JS passed in `code`."""
if not code:
return
# Acceptable unicode characters still need to be stripped. Just remove the
# slash: a character is necessary to prevent bad identifier errors.
code = JS_ESCAPE.sub("u", unicodehelper.decode(code))
shell_obj = subprocess.Popen(
["node", "./acorn.js"], shell=False, stdin=subprocess.PIPE,
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
data, stderr = shell_obj.communicate(code.encode('utf-8'))
if stderr:
raise RuntimeError('Error calling acorn: %s' % stderr)
if not data:
raise JSReflectException("Reflection failed")
parsed = json.loads(unicodehelper.decode(data), strict=False)
if parsed.get("error"):
raise JSReflectException(
parsed["error_message"]).line_num(parsed["line_number"])
return parsed
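
# Illustrative usage sketch (requires `node` on the PATH and an `acorn.js` script in the
# working directory; JS_ESCAPE, unicodehelper and JSReflectException are defined elsewhere
# in this module):
#
#   tree = _get_tree("var answer = 42;")
#   tree["type"]   # the acorn AST root node is a "Program"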
|
7705ca63765a4408651b310f604141fc1ebc1d6e
| 3,649,558
|
from typing import List
def initial_assignment_alpha_MSS(agents: List[AdditiveAgent], items: List[str], alpha: float)->Allocation:
"""
Initial division for allocting agents according to their alpha-MMS.
:param agents: valuations of agents, normalized such that MMS=1 for all agents,
and valuation are ordered in ascending order
:param items: items names sorted from the highest valued to the lowest
:param alpha: parameter for how much to approximate MMS allocation.
    :return Allocation: what has been allocated so far (in this function); items and agents are updated during the function
>>> ### allocation for 1 agent, 1 object (this pass!)
>>> a = AdditiveAgent({"x": 1}, name="Alice")
>>> agents=[a]
>>> a1 = initial_assignment_alpha_MSS(agents,['x'],0.75)
>>> print(a1, agents)
Alice gets {x} with value nan.
[]
>>> ### allocation for 1 agent, 2 object
>>> b = AdditiveAgent({"x": 0.5, "y": 0.4}, name="Blice")
>>> agents=[b]
>>> a1 = initial_assignment_alpha_MSS(agents,['x','y'],0.6)
>>> print(a1, agents)
Blice gets {x,y} with value nan.
[]
>>> ### allocation for 2 agent, 2 object
>>> a = AdditiveAgent({"x": 0.8, "y": 0.7}, name="Alice")
>>> b = AdditiveAgent({"x": 0.7, "y": 0.7}, name="Blice")
>>> agents=[a,b]
>>> a1= initial_assignment_alpha_MSS(agents,['x','y'],0.6)
>>> print(a1, agents)
Alice gets {x} with value nan.
Blice gets {y} with value nan.
[]
>>> ### allocation for 2 agent, 8 object
>>> a = AdditiveAgent({"x1": 0.647059, "x2": 0.588235, "x3": 0.470588, "x4": 0.411765, "x5": 0.352941, "x6": 0.294118, "x7": 0.176471, "x8": 0.117647}, name="A")
>>> b = AdditiveAgent({"x1": 1.298701, "x2": 0.714286, "x3": 0.649351, "x4": 0.428571, "x5": 0.155844, "x6": 0.064935, "x7": 0.051948, "x8": 0.012987}, name="B")
>>> c = AdditiveAgent({"x1": 0.6, "x2": 0.6, "x3": 0.48, "x4": 0.36, "x5": 0.32, "x6": 0.32, "x7": 0.28, "x8": 0.04}, name="C")
>>> agents=[a,b,c]
>>> a1 = initial_assignment_alpha_MSS(agents,['x1','x2','x3','x4','x5','x6','x7','x8'],0.75)
>>> print(a1, agents) # x6, x7, x8 weren't divided
A gets {x3,x4} with value nan.
B gets {x1} with value nan.
C gets {x2,x5} with value nan.
[]
"""
ag_alloc = {}
n = len(agents)-1
    # if there are fewer objects than agents, the MMS is 0 for everyone
    if n + 1 > len(items):
        return Allocation(ag_alloc, agents)
    names_agents = agent_names_from(agents)
    while True:  # for each agent, check whether s1/s2/s3/s4 is valued at least alpha
num_items=len(items)
#fill si bundles
s1_bundle,s2_bundle,s3_bundle,s4_bundle=[],[],[],[]
#check index not out of bound
if num_items>0:
s1_bundle=[items[0]]
if num_items>n+1:
s2_bundle=[items[n] , items[n+1]]
if num_items>2*(n+1):
if 2*(n+1)-2>0:
s3_bundle=[items[(2*(n+1))-2], items[2*(n+1)-1] , items[2*(n+1)]]
s4_bundle=[items[0], items[2*(n+1)]]
s=[s1_bundle,s2_bundle,s3_bundle, s4_bundle]
for si in s:
willing_agent_index=willing_agent(agents,si,alpha)
if willing_agent_index!=None:
# give bundle to agent
ag_alloc[agents[willing_agent_index]._name] = si
# remove given items agent
for item in si:
items.remove(item)
agents.pop(willing_agent_index)
# update number of agents
n = n - 1
# go to begining of outside loop and redefine the si bundles
break
elif si==s4_bundle:
# no agent is satisfied by any of the si bundles
                return Allocation(names_agents, ag_alloc)
|
5ac7fa947ee555dfd963c679696224e58e2c343a
| 3,649,559
|
import os
def find_furious_yaml(config_file=__file__):
"""
Traverse directory trees to find a furious.yaml file
Begins with the location of this file then checks the
working directory if not found
Args:
config_file: location of this file, override for
testing
Returns:
the path of furious.yaml or None if not found
"""
checked = set()
result = _find_furious_yaml(os.path.dirname(config_file), checked)
if not result:
result = _find_furious_yaml(os.getcwd(), checked)
return result
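
# Illustrative usage sketch (relies on the module-level _find_furious_yaml helper):
#
#   path = find_furious_yaml()                      # search from this file's directory, then the CWD
#   path = find_furious_yaml("/srv/app/module.py")  # hypothetical start location override (useful in tests)
#   # returns the path to furious.yaml, or None if the file is not found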
|
b2b3354fc411123f9360d01d0123946c9cf9fe24
| 3,649,560
|
def comp_axes(
self,
axes_list,
machine=None,
axes_dict_in=None,
is_periodicity_a=None,
is_periodicity_t=None,
per_a=None,
is_antiper_a=None,
per_t=None,
is_antiper_t=None,
):
"""Compute simulation axes such as time / angle / phase axes, with or without periodicities
and including normalizations
Parameters
----------
self : Input
an Input object
machine : Machine
a Machine object
axes_list: list
List of axes name to return in axes dict
    axes_dict_in : {Data}
        dict of axes containing time and angle axes (with or without (anti-)periodicity)
is_periodicity_a: bool
True if spatial periodicity is requested
is_periodicity_t: bool
True if time periodicity is requested
per_a : int
angle periodicity
is_antiper_a : bool
if the angle axis is antiperiodic
per_t : int
time periodicity
is_antiper_t : bool
if the time axis is antiperiodic
Returns
-------
axes_dict: {Data}
dict of axes containing requested axes
"""
if len(axes_list) == 0:
raise Exception("axes_list should not be empty")
if self.parent is not None:
simu = self.parent
else:
simu = None
if hasattr(simu, "parent") and simu.parent is not None:
output = simu.parent
else:
output = None
if (axes_list is None or len(axes_list) == 0) and (
axes_dict_in is None or len(axes_dict_in) == 0
):
raise Exception(
"Cannot calculate axes if both axes list and axes dict are None"
)
if machine is None:
# Fetch machine from input
if hasattr(simu, "machine") and simu.machine is not None:
machine = simu.machine
else:
raise Exception("Cannot calculate axes if simu.machine is None")
# Get machine pole pair number
p = machine.get_pole_pair_number()
# Fill periodicity parameters that are None
if per_a is None or is_antiper_a is None or per_t is None or is_antiper_t is None:
if output is not None:
# Get time and space (anti-)periodicities from the output
(
per_a_0,
is_antiper_a_0,
per_t_0,
is_antiper_t_0,
) = output.get_machine_periodicity()
else:
# Compute time and space (anti-)periodicities from the machine
per_a_0, is_antiper_a_0 = machine.comp_periodicity_spatial()
per_t_0, is_antiper_t_0, _, _ = machine.comp_periodicity_time()
if is_periodicity_t is None or is_periodicity_t:
# Enforce None values to machine time periodicity
per_t = per_t_0 if per_t is None else per_t
is_antiper_t = is_antiper_t_0 if is_antiper_t is None else is_antiper_t
if is_periodicity_t is None:
# Check time periodicity is included
is_periodicity_t = per_t > 1 or is_antiper_t
elif not is_periodicity_t:
# Remove time periodicity
per_t = 1
is_antiper_t = False
if is_periodicity_a is None or is_periodicity_a:
# Enforce None values to machine periodicity
per_a = per_a_0 if per_a is None else per_a
is_antiper_a = is_antiper_a_0 if is_antiper_a is None else is_antiper_a
if is_periodicity_a is None:
# Enforce requested angle periodicity
is_periodicity_a = per_a > 1 or is_antiper_a
elif not is_periodicity_a:
# Remove angle periodicity
per_a = 1
is_antiper_a = False
# Init axes_dict
axes_dict = dict()
# Get time axis
if "time" in axes_list:
# Check if Time is already in input dict of axes
if axes_dict_in is not None and "time" in axes_dict_in:
Time_in = axes_dict_in["time"]
else:
Time_in = None
# Calculate time axis
Time = self.comp_axis_time(p, per_t, is_antiper_t, Time_in)
# Store time axis in dict
axes_dict["time"] = Time
# Get angle axis
if "angle" in axes_list:
# Airgap radius
Rag = machine.comp_Rgap_mec()
# Check if Angle is already in input dict of axes
if axes_dict_in is not None and "angle" in axes_dict_in:
Angle_in = axes_dict_in["angle"]
else:
Angle_in = None
# Calculate angle axis
Angle = self.comp_axis_angle(p, Rag, per_a, is_antiper_a, Angle_in)
# Store angle axis in dict
axes_dict["angle"] = Angle
if "phase_S" in axes_list:
# Check if Phase is already in input dict of axes
stator_label = "phase_" + machine.stator.get_label()
if axes_dict_in is not None and stator_label in axes_dict_in:
Phase_in = axes_dict_in[stator_label]
else:
Phase_in = None
# Calculate stator phase axis
Phase = self.comp_axis_phase(machine.stator, Phase_in)
if Phase is not None:
# Store phase axis in dict
axes_dict[stator_label] = Phase
if "phase_R" in axes_list:
# Check if Phase is already in input dict of axes
rotor_label = "phase_" + machine.rotor.get_label()
if axes_dict_in is not None and rotor_label in axes_dict_in:
Phase_in = axes_dict_in[rotor_label]
else:
Phase_in = None
# Calculate rotor phase axis
per_a_phase = 2 * per_a if is_antiper_a else per_a
Phase = self.comp_axis_phase(machine.rotor, per_a_phase, Phase_in)
if Phase is not None:
# Store phase axis in dict
axes_dict[rotor_label] = Phase
return axes_dict
|
d01cb4efb2b1676cc2548e3e57324733708e4764
| 3,649,561
|
def clean_vehicles(country):
"""Delete all vehicles from given country."""
with elastic() as client:
search = Vehicle.search(using=client).filter("term", country=country)
count = search.count()
search.delete()
return count
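
# Illustrative usage sketch (assumes the module-level `elastic` context manager and the
# `Vehicle` elasticsearch-dsl document used above):
#
#   deleted = clean_vehicles("DE")   # hypothetical country code
#   print(f"removed {deleted} vehicle documents")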
|
021189a68ec6035af2b5140f79a29ce71caa12fd
| 3,649,562
|
from pathlib import Path
from typing import Any, Text, Union
from jinja2 import Undefined
from jinja2.nativetypes import NativeEnvironment
def render_template(
env: NativeEnvironment,
template: Union[Text, Path],
context: Any,
) -> Any:
"""Utility function for rendering Jinja2 text or file templates.
Args:
env: The Jinja2 environment to use for rendering
template: The template string or file to render
context: The context variables to use for rendering
Returns:
The rendered template string or data structure
"""
# convert strings to template
if isinstance(template, Path):
_template = env.get_template(str(template))
else:
_template = env.from_string(template)
value = _template.render(**context)
if isinstance(value, Undefined):
value._fail_with_undefined_error()
return value
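
# Minimal usage sketch with a string template; file templates additionally require the
# environment to be constructed with a loader so env.get_template() can resolve the Path.
#
#   env = NativeEnvironment()
#   render_template(env, "{{ x + 1 }}", {"x": 1})                  # -> 2 (a native int, not "2")
#   render_template(env, "[{{ a }}, {{ b }}]", {"a": 1, "b": 2})   # -> [1, 2]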
|
61585cf76896afd70be8b3a620cb4dbe8620c567
| 3,649,563
|
import os
def _get_template_dirs(type="plugin"):
"""Return a list of directories where templates may be located.
"""
template_dirs = [
os.path.expanduser(os.path.join("~", ".rapport", "templates", type)),
os.path.join("rapport", "templates", type) # Local dev tree
]
return template_dirs
|
b0c6351d0e346310f5d36c6d010332cc1e1b54ea
| 3,649,564
|
def aspect_ToCString(*args):
"""
* Translates an ExtendedString to a CString depending of the local format.
:param aString:
:type aString: TCollection_ExtendedString &
:rtype: char *
"""
return _Aspect.aspect_ToCString(*args)
|
e5f5b352b60610f3a18e0757a98b8e58f31c84ff
| 3,649,565
|
def suites_list(request):
"""List suites."""
return TemplateResponse(
request,
"manage/suite/suites.html",
{
"suites": model.Suite.objects.select_related().annotate(
case_count=NotDeletedCount("cases", distinct=True)),
}
)
|
55e1cd86a0d82bc6fd2a6b75248a1a4b06264bb5
| 3,649,566
|
def push_gitlab_event_dict():
"""
Cleared version of the push gitlab webhook content.
"""
return {
"object_kind": "push",
"event_name": "push",
"before": "0e27f070efa4bef2a7c0168f07a0ac36ef90d8cb",
"after": "cb2859505e101785097e082529dced35bbee0c8f",
"ref": "refs/heads/build-branch",
"checkout_sha": "cb2859505e101785097e082529dced35bbee0c8f",
"user_id": 5647360,
"user_name": "Shreyas Papinwar",
"user_username": "shreyaspapi",
"user_email": "",
"user_avatar": "https://assets.gitlab-static.net/uploads/-"
"/system/user/avatar/5647360/avatar.png",
"project_id": 18032222,
"project": {
"id": 18032222,
"name": "Hello there",
"description": "Hehehehe",
"web_url": "https://gitlab.com/the-namespace/repo-name",
"git_ssh_url": "git@gitlab.com:the-namespace/repo-name.git",
"git_http_url": "https://gitlab.com/the-namespace/repo-name.git",
"namespace": "Testing packit",
"visibility_level": 20,
"path_with_namespace": "the-namespace/repo-name",
"default_branch": "master",
"homepage": "https://gitlab.com/the-namespace/repo-name",
"url": "git@gitlab.com:the-namespace/repo-name.git",
"ssh_url": "git@gitlab.com:the-namespace/repo-name.git",
"http_url": "https://gitlab.com/the-namespace/repo-name.git",
},
"commits": [
{
"id": "cb2859505e101785097e082529dced35bbee0c8f",
"message": "Update README.md",
"title": "Update README.md",
"timestamp": "2020-06-04T23:14:57+00:00",
"url": "https://gitlab.com/the-namespace/repo-name/-/commit/"
"cb2859505e101785097e082529dced35bbee0c8f",
"author": {"name": "Shreyas Papinwar", "email": "spapinwar@gmail.com"},
"added": [],
"modified": ["README.md"],
"removed": [],
}
],
"total_commits_count": 1,
"push_options": {},
"repository": {
"name": "Hello there",
"url": "git@gitlab.com:the-namespace/repo-name.git",
"description": "Hehehehe",
"homepage": "https://gitlab.com/the-namespace/repo-name",
"git_http_url": "https://gitlab.com/the-namespace/repo-name.git",
"git_ssh_url": "git@gitlab.com:the-namespace/repo-name.git",
"visibility_level": 20,
},
}
|
3a0134774f828e233c8b1e3fd2d6b94d6fae699f
| 3,649,567
|
import numpy as np

def compute_rotation_effects(VD, settings, EW_small, GAMMA, len_mach, X, CHORD, XLE, XBAR,
rhs, COSINP, SINALF, PITCH, ROLL, YAW, STB, RNMAX):
""" This computes the effects of the freestream and aircraft rotation rate on
CLE, the induced flow at the leading edge
Assumptions:
Several of the values needed in this calculation have been computed earlier and stored in VD
Normally, VORLAX skips the calculation implemented in this function for linear
chordwise spacing (the if statement below). However, since the trends are correct,
albeit underestimated, this calculation is being forced here.
"""
LE_ind = VD.leading_edge_indices
RNMAX = VD.panels_per_strip
##spacing = settings.spanwise_cosine_spacing
##if spacing == False: # linear spacing is LAX==1 in VORLAX
## return 0 #CLE not calculated till later for linear spacing
    # Compute rotational effects (pitch, roll, yaw rates) on LE suction
# pick leading edge strip values for EW and reshape GAMMA -> gamma accordingly
EW = EW_small[: ,LE_ind, :]
n_tot_strips = EW.shape[1]
gamma = np.array(np.split(np.repeat(GAMMA, n_tot_strips, axis=0), len_mach))
CLE = (EW*gamma).sum(axis=2)
# Up till EFFINC, some of the following values were computed in compute_RHS_matrix().
# EFFINC and ALOC are calculated the exact same way, except for the XGIRO term.
# LOCATE VORTEX LATTICE CONTROL POINT WITH RESPECT TO THE
# ROTATION CENTER (XBAR, 0, ZBAR). THE RELATIVE COORDINATES
# ARE XGIRO, YGIRO, AND ZGIRO.
XGIRO = X - CHORD*XLE - np.repeat(XBAR, RNMAX[LE_ind])
YGIRO = rhs.YGIRO
ZGIRO = rhs.ZGIRO
# VX, VY, VZ ARE THE FLOW ONSET VELOCITY COMPONENTS AT THE LEADING
# EDGE (STRIP MIDPOINT). VX, VY, VZ AND THE ROTATION RATES ARE
# REFERENCED TO THE FREE STREAM VELOCITY.
VX = rhs.VX
VY = (COSINP - YAW *XGIRO + ROLL *ZGIRO)
VZ = (SINALF - ROLL *YGIRO + PITCH*XGIRO)
# CCNTL, SCNTL, SID, and COD were computed in compute_RHS_matrix()
# EFFINC = COMPONENT OF ONSET FLOW ALONG NORMAL TO CAMBERLINE AT
# LEADING EDGE.
EFFINC = VX *rhs.SCNTL + VY *rhs.CCNTL *rhs.SID - VZ *rhs.CCNTL *rhs.COD
CLE = CLE - EFFINC[:,LE_ind]
CLE = np.where(STB > 0, CLE /RNMAX[LE_ind] /STB, CLE)
return CLE
|
6184f0732c4da4726a5f17e99bd7329bd55c4907
| 3,649,568
|
def introduction(course):
"""This method represents route to 'courses/<course>/intro.html' where the character introduction is rendered.
This method handles both GET and POST requests.
Args:
course (string): Name of the course.
Returns:
render_template: Returns rendered 'courses/<course>/intro.html' template.
"""
courseClass = class_for_name("project.models", course.capitalize())
introLevel = courseClass.query.filter_by(email=current_user.email).first().get_introLevel()
letters = introduceLetters(course, introLevel)
return render_template('courses/introduction.html', letters=letters, course=course)
|
7c569f98afbced4a0e1c45b0956d3ba15147384f
| 3,649,569
|
import pesummary.core.file.formats
import pesummary.gw.file.formats
import pkgutil
import importlib
def available_formats():
"""Return the available formats for reading and writing
Returns
-------
tuple: tuple of sets. First set are the available formats for reading.
Second set are the available sets for writing.
"""
read_formats, write_formats = [], []
modules = {
"gw": pesummary.gw.file.formats, "core": pesummary.core.file.formats
}
for package in ["core", "gw"]:
formats = [
a for _, a, _ in pkgutil.walk_packages(path=modules[package].__path__)
]
for _format in formats:
_submodule = importlib.import_module(
"pesummary.{}.file.formats.{}".format(package, _format)
)
if hasattr(_submodule, "write_{}".format(_format)):
write_formats.append(_format)
if hasattr(_submodule, "read_{}".format(_format)):
read_formats.append(_format)
return set(read_formats), set(write_formats)
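
# Illustrative usage sketch:
#
#   read_formats, write_formats = available_formats()
#   "json" in read_formats     # True when a read_json handler ships with pesummary
#   sorted(write_formats)      # every format exposing a write_<format> function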
|
1419092210d0cf5dfa116d43c0354c309afc831e
| 3,649,570
|
import json
import numpy as np
def bbox_from_openpose(openpose_file, rescale=1.2, detection_thresh=0.2):
"""Get center and scale for bounding box from openpose detections."""
with open(openpose_file, 'r') as f:
keypoints = json.load(f)['people'][0]['pose_keypoints_2d']
keypoints = np.reshape(np.array(keypoints), (-1, 3))
valid = keypoints[:, -1] > detection_thresh
valid_keypoints = keypoints[valid][:, :-1]
center = valid_keypoints.mean(axis=0)
bbox_size = (valid_keypoints.max(axis=0) - valid_keypoints.min(axis=0)).max()
# adjust bounding box tightness
scale = bbox_size / 200.0
scale *= rescale
return center, scale
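
# Illustrative usage sketch ("keypoints.json" is a hypothetical OpenPose output file):
#
#   center, scale = bbox_from_openpose("keypoints.json")
#   # center: (x, y) mean of the keypoints above `detection_thresh`
#   # scale: largest bbox side / 200, inflated by `rescale`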
|
c91397fbe42a15d8bce1f1018303e6ff7328c467
| 3,649,571
|
import numpy as np

def calc_KPs(TempC, Sal, P=None):
"""
Calculate equilibrium constants for P species.
KP1 = H3PO4
KP2 = H2PO4
KP3 = HPO4
Chapter 5, Section 7.2.5 of Dickson, Sabine and Christian
(2007, http://cdiac.ornl.gov/oceans/Handbook_2007.html)
**WITHOUT APPROX PH SCALE CONVERSION IN CONSTANT**
(See footnote 5 in 'Best Practices' Guide)
This produces constants on SWS pH Scale.
Must be converted to Total scale before use.
Parameters
----------
TempC : array-like
        Temperature in Celsius.
Sal : array-like
Salinity in PSU
P : array-like
Pressure in bar
Returns
-------
dict of KPs
"""
TempK = TempC + 273.15
lnTempK = np.log(TempK)
a0, a1, a2, a3, a4, a5, a6 = (
-4576.752,
115.54,
-18.453,
-106.736,
0.69171,
-0.65643,
-0.01844,
)
b0, b1, b2, b3, b4, b5, b6 = (
-8814.715,
172.1033,
-27.927,
-160.340,
1.3566,
0.37335,
-0.05778,
)
c0, c1, c3, c4, c5, c6 = (-3070.75, -18.126, 17.27039, 2.81197, -44.99486, -0.09984)
KP1 = np.exp(
a0 / TempK
+ a1
+ a2 * lnTempK
+ (a3 / TempK + a4) * Sal ** 0.5
+ (a5 / TempK + a6) * Sal
)
KP2 = np.exp(
b0 / TempK
+ b1
+ b2 * lnTempK
+ (b3 / TempK + b4) * Sal ** 0.5
+ (b5 / TempK + b6) * Sal
)
KP3 = np.exp(
c0 / TempK + c1 + (c3 / TempK + c4) * Sal ** 0.5 + (c5 / TempK + c6) * Sal
)
# parameters from Table 5 of Millero 2007 (doi:10.1021/cr0503557)
# Checked against CO2SYS
if P is not None:
ppar = {
"KP1": [-14.51, 0.1211, -0.000321, -2.67, 0.0427],
"KP2": [-23.12, 0.1758, -2.647e-3, -5.15, 0.09],
"KP3": [-26.57, 0.2020, -3.042e-3, -4.08, 0.0714],
}
KP1 *= prescorr(P, TempC, *ppar["KP1"])
KP2 *= prescorr(P, TempC, *ppar["KP2"])
KP3 *= prescorr(P, TempC, *ppar["KP3"])
return {"KP1": KP1, "KP2": KP2, "KP3": KP3}
|
a850fb9a85946d4fc9607f8b6744291157b980d1
| 3,649,572
|
import numpy as np
from torch import LongTensor
from sklearn.metrics import classification_report

def evaluate_model_sector_prediction(
model,
test_data_x,
test_data_y,
test_data_industry,
test_data_size,
mode_classifier=True,
max_seq_length=512,
batch_size=8,
):
"""This is a function to predict the sector given the input text ids"""
model = model.eval()
pred_label_test = []
answer_label_test = []
pred_industry_test = []
answer_indesutry_test = []
pred_label_prob_list = []
pred_industry_prob_list = []
for data_index in range(0, len(test_data_x), batch_size):
data_batch = test_data_x[data_index : data_index + batch_size]
doc_batch = [doc[0] for doc in data_batch]
logits = 0
industry_logits_all = 0
"""formatting the input data"""
input_array_doc = []
for doc_batch_index, input_ids in enumerate(doc_batch):
            input_array = np.zeros(max_seq_length, dtype=int)
            # copy the token ids, truncating to the maximum sequence length
            seq_len = min(max_seq_length, len(input_ids))
            input_array[:seq_len] = input_ids[:seq_len]
input_array_doc.append(input_array)
input_ids = LongTensor(np.array(input_array_doc).astype(np.int32))
"""getting the model's output"""
label_logits, industry_logits = model(input_ids)
"""getting the values of the predicted probabilities"""
logits += label_logits
industry_logits_all += industry_logits
pred_label = np.argmax(logits.detach().to("cpu").numpy(), axis=1)
pred_industry = np.argmax(
industry_logits_all.detach().to("cpu").numpy(), axis=1
)
"""creating the output lists for the predicted values"""
pred_label_test += list(pred_label)
pred_industry_test += list(pred_industry)
answer_label_test += list(test_data_y[data_index : data_index + batch_size])
answer_indesutry_test += list(
test_data_industry[data_index : data_index + batch_size]
)
"""printing classification metrics of the sectors"""
target_sectors = [0, 1, 2, 3, 4, 5, 6, 7, 8]
    print(classification_report(answer_label_test, pred_label_test, labels=target_sectors))
return (
pred_label_test,
answer_label_test,
pred_industry_test,
answer_indesutry_test,
)
|
4b0d97c647f9e49600a149a0f5144744ea78f8bc
| 3,649,573
|
def is_no_op(module: Module) -> bool:
"""Return whether the module does no operation in graph.
Args:
module: module
Returns:
whether module is no operation
"""
no_op_modules = (Sequential, _Branch, Parallel, ReduceTuple, GraphModule)
return isinstance(module, no_op_modules)
|
6b5a765be41353596a500e6827800868daa16386
| 3,649,574
|
import os
def has_ext(path_name, *, multiple=None, if_all_ext=False):
"""
Determine if the given path name has an extension
"""
base = os.path.basename(path_name)
count = base.count(EXT)
if not if_all_ext and base[0] == EXT and count != 0:
count -= 1
if multiple is None:
return count >= 1
elif multiple:
return count > 1
else:
return count == 1
|
53fc639b44b2a1791220ec24f2b1fc6922152482
| 3,649,575
|
import os
import multiprocessing.pool

import numpy as np
def index_directory(directory,
labels,
formats,
class_names=None,
shuffle=True,
seed=None,
follow_links=False):
"""Make list of all files in the subdirs of `directory`, with their labels.
Args:
directory: The target directory (string).
labels: Either "inferred"
(labels are generated from the directory structure),
None (no labels),
or a list/tuple of integer labels of the same size as the number of
valid files found in the directory. Labels should be sorted according
to the alphanumeric order of the image file paths
(obtained via `os.walk(directory)` in Python).
formats: Allowlist of file extensions to index (e.g. ".jpg", ".txt").
class_names: Only valid if "labels" is "inferred". This is the explict
list of class names (must match names of subdirectories). Used
to control the order of the classes
(otherwise alphanumerical order is used).
shuffle: Whether to shuffle the data. Default: True.
If set to False, sorts the data in alphanumeric order.
seed: Optional random seed for shuffling.
follow_links: Whether to visits subdirectories pointed to by symlinks.
Returns:
tuple (file_paths, labels, class_names).
file_paths: list of file paths (strings).
labels: list of matching integer labels (same length as file_paths)
class_names: names of the classes corresponding to these labels, in order.
"""
if labels is None:
# in the no-label case, index from the parent directory down.
subdirs = ['']
class_names = subdirs
else:
subdirs = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
subdirs.append(subdir)
if not class_names:
class_names = subdirs
else:
if set(class_names) != set(subdirs):
raise ValueError(
'The `class_names` passed did not match the '
'names of the subdirectories of the target directory. '
'Expected: %s, but received: %s' %
(subdirs, class_names))
class_indices = dict(zip(class_names, range(len(class_names))))
# Build an index of the files
# in the different class subfolders.
pool = multiprocessing.pool.ThreadPool()
results = []
filenames = []
for dirpath in (os.path.join(directory, subdir) for subdir in subdirs):
results.append(
pool.apply_async(index_subdirectory,
(dirpath, class_indices, follow_links, formats)))
labels_list = []
for res in results:
partial_filenames, partial_labels = res.get()
labels_list.append(partial_labels)
filenames += partial_filenames
if labels not in ('inferred', None):
if len(labels) != len(filenames):
raise ValueError('Expected the lengths of `labels` to match the number '
'of files in the target directory. len(labels) is %s '
'while we found %s files in %s.' % (
len(labels), len(filenames), directory))
else:
i = 0
labels = np.zeros((len(filenames),), dtype='int32')
for partial_labels in labels_list:
labels[i:i + len(partial_labels)] = partial_labels
i += len(partial_labels)
if labels is None:
print('Found %d files.' % (len(filenames),))
else:
print('Found %d files belonging to %d classes.' %
(len(filenames), len(class_names)))
pool.close()
pool.join()
file_paths = [os.path.join(directory, fname) for fname in filenames]
if shuffle:
# Shuffle globally to erase macro-structure
if seed is None:
seed = np.random.randint(1e6)
rng = np.random.RandomState(seed)
rng.shuffle(file_paths)
rng = np.random.RandomState(seed)
rng.shuffle(labels)
return file_paths, labels, class_names
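
# Illustrative usage sketch ("datasets/flowers" is a hypothetical directory whose
# subdirectories are the class names):
#
#   file_paths, labels, class_names = index_directory(
#       "datasets/flowers", labels="inferred", formats=(".jpg", ".png"), seed=123)
#   len(file_paths) == len(labels)   # one integer label per indexed file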
|
474886eecf2515e53d2120357a8c5405e89a45fb
| 3,649,576
|
import numpy as np
import matplotlib.pyplot as plt

def colors_from_cmap(length=50, cmap=None, start=None, stop=None):
"""Return color cycle from a given colormap.
Parameters
----------
length : int
The number of colors in the cycle. When `length` is large (> ~10), it
is difficult to distinguish between successive lines because successive
colors are very similar.
cmap : str
Name of a matplotlib colormap (see matplotlib.pyplot.cm).
start, stop: 0 <= float <= 1
Limit colormap to this range (start < stop 1). You should limit the
range of colormaps with light values (assuming a white background).
Some colors have default start/stop values (see `CMAP_RANGE`).
Returns
-------
colors : list
List of RGBA colors.
See Also
--------
cycle_cmap
"""
if cmap is None:
cmap = config['color']['cmap']
    if isinstance(cmap, str):
        cmap = getattr(plt.cm, cmap)
    # copy into a list so a user-supplied start/stop can override the stored range
    crange = list(CMAP_RANGE.get(cmap.name, (0, 1)))
    if start is not None:
        crange[0] = start
    if stop is not None:
        crange[1] = stop
assert 0 <= crange[0] <= 1
assert 0 <= crange[1] <= 1
idx = np.linspace(crange[0], crange[1], num=length)
return cmap(idx)
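
# Illustrative usage sketch (`config` and `CMAP_RANGE` are module-level settings):
#
#   colors = colors_from_cmap(length=5, cmap='viridis')   # 5 RGBA tuples spanning the map
#   for y, c in zip(curves, colors):                      # `curves` is hypothetical data
#       plt.plot(y, color=c)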
|
e2c7c117ab3d463ee20548c15d3e7deee3a1879a
| 3,649,577
|
import cv2
import numpy as np

def mag_thresh(img, sobel_kernel=3, mag_thresh=(30, 100)):
"""
Return the magnitude of the gradient
for a given sobel kernel size and threshold values
"""
# Apply the following steps to img
# 1) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the gradient in x and y separately
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# 3) Calculate the gradient magnitude
mag_sobel = np.sqrt(sobelx**2 + sobely**2)
# 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8
scaled_sobel = np.uint8(255*mag_sobel/np.max(mag_sobel))
# 5) Create a binary mask where mag thresholds are met, zeros otherwise
binary_output = np.zeros_like(scaled_sobel)
binary_output[(scaled_sobel >= mag_thresh[0] ) & (scaled_sobel <= mag_thresh[1])] = 1
# 6) Return this mask as your binary_output image
return binary_output
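
# Illustrative usage sketch ("road.jpg" is a hypothetical RGB image; note that
# cv2.imread returns BGR, so convert first if loading with OpenCV):
#
#   img = cv2.cvtColor(cv2.imread("road.jpg"), cv2.COLOR_BGR2RGB)
#   binary = mag_thresh(img, sobel_kernel=9, mag_thresh=(30, 100))
#   # `binary` is 1 where the gradient magnitude falls inside the threshold window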
|
c079ca591c4e35e69821d871d7f451aaaf867ef9
| 3,649,578
|
import numpy as np

def mean_absolute_percentage_error(predictions, targets):
"""Calculate mean absolute percentage error"""
mask = (targets != 0.0)
return (np.fabs(targets - predictions)/targets)[mask].mean()*100.0
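
# Worked example (zero targets are excluded by the mask):
#
#   >>> mean_absolute_percentage_error(np.array([110.0, 190.0]), np.array([100.0, 200.0]))
#   7.5    # (|100 - 110| / 100 + |200 - 190| / 200) / 2 * 100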
|
1f37da29c47035a3656d3d07b34ec26f862a80ac
| 3,649,579
|
import numpy as np

def make_net_xds_list(data_xds_list, coords_per_xds):
"""Construct a list of dicts of xarray.Datasets to house the net gains.
Args:
data_xds_list: A List of xarray.Dataset objects containing MS data.
coords_per_xds: A List of Dicts containing dataset coords.
Returns:
net_gain_xds_list: A List of xarray.Dataset objects to house
the net gains.
"""
net_gain_xds_list = []
for data_xds, xds_coords in zip(data_xds_list, coords_per_xds):
net_t_chunks = np.tile(data_xds.UTIME_CHUNKS, 2).reshape(2, -1)
net_f_chunks = np.tile(data_xds.chunks["chan"], 2).reshape(2, -1)
# Create a default config object, consistent with the net gain.
# NOTE: If we have a direction-dependent model, assume the net gain
# is also direction dependent.
config = Gain(direction_dependent=bool(data_xds.dims["dir"]))
net_obj = TERM_TYPES["complex"]("NET",
config,
data_xds,
xds_coords,
net_t_chunks,
net_f_chunks)
net_gain_xds_list.append(net_obj.make_xds())
return net_gain_xds_list
|
1546555e76d0f6bab4abc7985707f8be9fc19558
| 3,649,580
|
import collections
def sort_dict(d, key=None, reverse=False):
"""
Sorts a dict by value.
Args:
d: Input dictionary
key: Function which takes an tuple (key, object) and returns a value to
compare and sort by. By default, the function compares the values
of the dict i.e. key = lambda t : t[1]
reverse: Allows to reverse sort order.
Returns:
OrderedDict object whose keys are ordered according to their value.
"""
kv_items = list(d.items())
# Sort kv_items according to key.
if key is None:
kv_items.sort(key=lambda t: t[1], reverse=reverse)
else:
kv_items.sort(key=key, reverse=reverse)
# Build ordered dict.
return collections.OrderedDict(kv_items)
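
# Worked examples:
#
#   >>> sort_dict({"b": 3, "a": 1, "c": 2})
#   OrderedDict([('a', 1), ('c', 2), ('b', 3)])
#   >>> sort_dict({"b": 3, "a": 1, "c": 2}, reverse=True)
#   OrderedDict([('b', 3), ('c', 2), ('a', 1)])
#   >>> sort_dict({"b": 3, "a": 1, "c": 2}, key=lambda t: t[0])   # sort by key instead of value
#   OrderedDict([('a', 1), ('b', 3), ('c', 2)])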
|
9ca904a5e0df3e3c50b29967adfe9061e778dfc9
| 3,649,581
|
import requests
import pandas as pd
from urllib.parse import urljoin
def check_builds():
"""Base task"""
response = requests.get(
url=urljoin(Config.SISENSE_URL, "v2/builds"), headers=Config.SISENSE_HEADERS
)
builds = pd.DataFrame(data=response.json())
failed_builds = builds.loc[(builds.status == "failed")]
# for each failed cube:
for build in failed_builds.to_dict(orient="records"):
# check if failed cube is already recorded (oid), if not record
recorded_failure = (
session.query(FailedBuilds).filter(FailedBuilds.oid == build["oid"]).first()
)
if recorded_failure is None:
# record
record_failure(
build["oid"],
build["datamodelId"],
build["datamodelTitle"],
build["instanceId"],
)
# save log and get elements for log card
error_dict = get_logs(build["datamodelId"], build["datamodelTitle"])
# prepare card (so look into log)
card = make_teams_card(
build["datamodelTitle"],
error_dict["timestamp"],
error_dict["error_message"],
error_dict["file_link"],
)
# send card
send_teams_card(card)
return error_dict
|
38820f314ff8a57cbf5b7242a52b557905b0f1eb
| 3,649,582
|
import fitsio
import numpy as np

def comp_neworig(tileid,dirn='/global/cfs/cdirs/desi/survey/catalogs/testfiberassign/SV3rerun/orig/'):
"""
check that new matches the original
"""
ts = str(tileid).zfill(6)
fa = fitsio.read('/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz')
fn = fitsio.read(dirn+'fba-'+ts+'.fits')
w = fn['DEVICE_TYPE'] == 'POS'
fn = fn[w]
wn = fn['TARGETID'] >= 0
fn = fn[wn]
print(len(fn))
wa = fa['TARGETID'] >= 0
fa = fa[wa]
print(len(fa))
ws = np.isin(fn['TARGETID'],fa['TARGETID'])
print(np.sum(ws))
if np.sum(ws) == len(fa) and len(fa) == len(fn):
return True
else:
return False
|
e7d1d4202b024508712e14de86341d3597c85314
| 3,649,583
|
from tornado import web

def _get_widget_handler(webmanager):
"""
Returns a handler to get the widgets
:param WebManager webmanager:
:return tornado.web.RequestHandler:
"""
class WidgetHandler(web.RequestHandler):
"""
Handler for all communications over WebSockets
"""
        def get(self):
            """
            Handle a GET request by returning the available widgets
            """
webmanager.on_get_widgets(self)
return WidgetHandler
|
734b081e3b92180356e88ca21418785d45662b64
| 3,649,584
|
def get_model_field_type(model, field_label):
"""
Returns model's field type.
"""
return FIELD_TYPES_MAPPING.get(type(get_model_field(model, field_label)), 'STRING')
|
aeba374954b25f0383015f56be41cdc5f9917ae3
| 3,649,585
|
import numpy as np

def Normalize_Column_Scores(df, columns, norm_type = 'divide_by_max'):
"""Normalizes scores for specified columns in a pandas dataframe
Parameters
----------
df : a pandas DataFrame object that contains the specified columns
columns: a list object that includes the columns to normalize
norm_type : a string specifying the type of normalization to perform
- 'divide_by_max' divides all values by the maximum value
- 'range_norm' divides all values (+ the min) by the range of values in the column
- 'z_norm' computes a z-score based on the mean and standard deviation of values
- 'divide_by_sum' divides all values by the sum of the values
- 'vector' dives all values by the square root of the sum of the squares of all values
Yields
------
temp_df: a copy of the passed dataframe with the normalizations performed
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> import mcdm_functions as mcfunc
>>> data_dict = {'Product': ['A', 'B', 'C', 'D'],
'Product Advantage': [13.1,13.2,12.2,13.2],
'Strategic Alignment': [9.8,8.2,10.0,9.6],
'Technical Feasibility': [20.0,18.7,18.5,17.1],
'Market Attractiveness': [15.5,12.3,13.1,13.1]}
>>> score_data = pd.DataFrame(data_dict)
>>> score_data = score_data.set_index('Product')
>>> print(score_data)
Market Attractiveness Product Advantage Strategic Alignment \
Product
A 15.5 13.1 9.8
B 12.3 13.2 8.2
C 13.1 12.2 10.0
D 13.1 13.2 9.6
Technical Feasibility
Product
A 20.0
B 18.7
C 18.5
D 17.1
>>> columns = ['Market Attractiveness','Product Advantage']
>>> temp = mcfunc.Normalize_Column_Scores(score_data,columns)
>>> print(temp)
Market Attractiveness Product Advantage Strategic Alignment \
Product
A 1.000000 13.1 9.8
B 0.793548 13.2 8.2
C 0.845161 12.2 10.0
D 0.845161 13.2 9.6
Technical Feasibility
Product
A 20.0
B 18.7
C 18.5
D 17.1
"""
temp_df = df.copy()
for column in columns:
        # compare with == ('is' checks identity and is unreliable for strings)
        if norm_type == 'divide_by_max':
            max_entry = temp_df[column].max()
            temp_df[column] = temp_df[column]/max_entry
        elif norm_type == 'range_norm':
            min_entry = temp_df[column].min()
            max_entry = temp_df[column].max()
            temp_df[column] = (temp_df[column]-min_entry)/(max_entry - min_entry)
        elif norm_type == 'z_norm':
            mean = temp_df[column].mean()
            sd = temp_df[column].std()
            temp_df[column] = (temp_df[column]-mean)/sd
        elif norm_type == 'divide_by_sum':
            temp_df[column] = temp_df[column]/temp_df[column].sum()
        elif norm_type == 'vector':
values = temp_df[column].values
values_squared = values**2
vector_norm = values/np.sqrt(np.sum(values_squared))
temp_df[column] = vector_norm
else:
print('You did not enter a valid type, so no changes were made')
return temp_df
|
906fcf944b676e04120eb915e7ead24c97900f56
| 3,649,586
|
import json
import os

import PyPDF2
import frappe
from frappe import _
def custom_pdf_merge(doctype,docid,attach_to_og_doc=False,doc_to_merge={}):
"""
doc_to_merge = {
"dt_to_merge": "", ##doctype on which merge is to be performed
"dt_to_merge_id": "", ##docid on which merge is to be performed
"attach_fieldname": "", ##fieldname of the attach field through which CAD doc is uploaded (Ex:assembly_drawing)
"print_format": "", ##preferred print format of docid
"attach_to_doc": True/False, ##should the merged pdf be attached to dt_to_merge_id
"other_attachments_to_merge": [list of file names] ##list of pdfs attached to dt_to_merge_id that need to be merged along with attach_fieldname
}
"""
doc_to_merge=json.loads(doc_to_merge)
file_path = frappe.utils.get_url()
dir_path_idx = file_path.find('/')+2
dir_path =file_path[dir_path_idx:]
mergeFile = PyPDF2.PdfFileMerger()
final_merged_file = _("/private/files/Merged_{}.pdf").format(doc_to_merge['dt_to_merge_id'])
# Generate pdf of original record
org_pdf = doc_to_merge['dt_to_merge_id'] + ".pdf"
doc_pdf = frappe.attach_print(doc_to_merge['dt_to_merge'], doc_to_merge['dt_to_merge_id'],
str(doc_to_merge['dt_to_merge_id']), print_format=doc_to_merge['print_format'])
docfile = open(org_pdf,"wb")
docfile.write(doc_pdf["fcontent"])
# Append pdf of original record
og_doc_to_merge = PyPDF2.PdfFileReader(org_pdf,'rb')
mergeFile.append(og_doc_to_merge,'rb')
attachment_filename = frappe.get_value(doc_to_merge['dt_to_merge'],
doc_to_merge['dt_to_merge_id'],
doc_to_merge['attach_fieldname'])
idx = attachment_filename.rfind('/')+1
attachment_filename = attachment_filename[idx:]
# Fetch attachment details
attached_doc = frappe.get_all("File",
fields=["name", "file_name", "file_url"] ,
filters = {
"attached_to_name": doc_to_merge['dt_to_merge_id'],
"attached_to_doctype": doc_to_merge['dt_to_merge'],
"file_name":attachment_filename})
if 'other_attachments_to_merge' in doc_to_merge:
other_attachments_str = ",".join(doc_to_merge['other_attachments_to_merge'])
else:
other_attachments_str = ''
other_attached_docs = frappe.get_all("File",
fields=['name','file_name','file_url'],
filters={
"attached_to_name": doc_to_merge['dt_to_merge_id'],
"attached_to_doctype": doc_to_merge['dt_to_merge'],
"file_name":['in',other_attachments_str]
})
old_merged_doc = frappe.get_all("File",
fields=['name','file_name','file_url'],
filters={
"attached_to_name": ['in',(docid,doc_to_merge['dt_to_merge_id'])],
"attached_to_doctype": ['in',(doctype,doc_to_merge['dt_to_merge'])],
"file_name":['like','Merged_'+doc_to_merge['dt_to_merge_id']+'.pdf']
})
# Delete old Merged file
for doc in old_merged_doc:
frappe.delete_doc("File",doc.name)
# Append main attachment to merge file
if attached_doc:
if not attached_doc[0].file_url.startswith('/private'):
url = '/public' + attached_doc[0].file_url
to_merge =PyPDF2.PdfFileReader(dir_path + url)
mergeFile.append(to_merge,'rb')
# Append other attachments to final pdf
for pdfs in other_attached_docs:
if not pdfs.file_url.startswith('/private'):
url = '/public' + pdfs.file_url
to_merge =PyPDF2.PdfFileReader(dir_path + url)
mergeFile.append(to_merge,'rb')
if mergeFile:
mergeFile.write(dir_path + final_merged_file)
mergeFile.close()
file_stats = os.stat(dir_path + final_merged_file)
file_size = file_stats.st_size
if attach_to_og_doc == 1:
merged_file = frappe.get_doc({
"doctype": "File",
"file_name": "Merged_"+doc_to_merge['dt_to_merge_id']+".pdf",
"file_url": final_merged_file,
"attached_to_doctype": doctype,
"attached_to_name": docid,
"file_size":file_size,
"is_private": 1
})
merged_file.insert()
merged_file = frappe.get_doc({
"doctype": "File",
"file_name": "Merged_"+doc_to_merge['dt_to_merge_id']+".pdf",
"file_url":final_merged_file,
"attached_to_doctype": 'BOM',
"attached_to_name": doc_to_merge['dt_to_merge_id'],
"file_size":file_size,
"is_private": 1
})
merged_file.insert()
return {'file_url' : merged_file.file_url,
'attached_to' : merged_file.attached_to_name}
|
070caf97c60139841bf0471f75442ea0a6397336
| 3,649,587
|
def find_most_common_word(sentence):
"""Return the most common word in the sentence."""
# Change to lowercase and strip out punctuation
sentence = clean_sentence(sentence)
list_of_words = sentence.split()
word_to_count = dict()
# Create a histogram of the occurrence of all words
for word in list_of_words:
if word not in word_to_count:
word_to_count[word] = 1
else:
word_to_count[word] += 1
most_common_word = ''
highest_count = 0
# Find highest count in the histogram
for word, count in word_to_count.items():
if count > highest_count:
most_common_word, highest_count = word, count
return most_common_word
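
# Illustrative example (assumes the module-level clean_sentence helper lowercases the
# text and strips punctuation, as the comment above describes):
#
#   find_most_common_word("The cat chased the dog.")   # -> "the"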
|
0c9e03fb4324999e73e2d036ab3dec53f1857fe8
| 3,649,588
|
def fast_rcnn_inference(boxes, scores, image_shapes, predictions, score_thresh, nms_thresh, topk_per_image):
"""
Call `fast_rcnn_inference_single_image` for all images.
Args:
boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic
boxes for each image. Element i has shape (Ri, K * 4) if doing
class-specific regression, or (Ri, 4) if doing class-agnostic
regression, where Ri is the number of predicted objects for image i.
This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`.
scores (list[Tensor]): A list of Tensors of predicted class scores for each image.
Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`.
image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch.
score_thresh (float): Only return detections with a confidence score exceeding this
threshold.
nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].
topk_per_image (int): The number of top scoring detections to return. Set < 0 to return
all detections.
Returns:
instances: (list[Instances]): A list of N instances, one for each image in the batch,
that stores the topk most confidence detections.
kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates
the corresponding boxes/scores index in [0, Ri) from the input, for image i.
"""
result_per_image = [
fast_rcnn_inference_single_image(
boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image, prediction
)
for scores_per_image, boxes_per_image, image_shape, prediction in zip(scores, boxes, image_shapes, predictions)
]
return [x[0] for x in result_per_image], [x[1] for x in result_per_image]
|
24fec22cdd285d50b4512115b638f3b7499f47be
| 3,649,589
|
def GetVar(doc:NexDoc, varNumber, varType) -> NexVar:
"""Returns the reference to the specified variable."""
return NexRun("GetVar", locals())
|
355f51f3ffa9b0d5c5f835546ee38bc3e0784328
| 3,649,590
|
def putversenotes():
"""Serves AJAX call for json data to save notes.
See also [M:NOTESAVE.putVerseNotes][notesave.NOTESAVE.putVerseNotes].
Client code: [{noteverse.sendnotes}][noteversesendnotes]
"""
session.forget(response)
Books = BOOKS()
Note = NOTE(Books)
NoteSave = NOTESAVE(Note)
return NoteSave.putVerseNotes()
|
b1fece391a6e47c2f500a540d57e12c1c0c11279
| 3,649,591
|
def _valid_multiview_args(cfg_user, logger):
"""
Validates the "multiview" parameters of a json configuration file used for training.
The function returns False if an error has occurred and True if all settings have passed the check.
:param cfg_user: EasyDict, json configuration file imported as dictionary
:param logger: logger instance
:return: boolean, True if no errors have been detected, False otherwise
"""
error = False
# Extract the input channel configuration (use the default setting if not specified by the user)
if 'model' in cfg_user and 'input_channels' in cfg_user.model:
# Use user setting
input_config = cfg_user.model.input_channels
else:
# Use default setting
input_config = cfg_default.model.input_channels
if input_config != 'geom-multiview' and 'multiview' in cfg_user:
logger.warning(f"The argument 'model': 'input_channels' is set to '{input_config}'. Hence, the multiview "
"settings will be ignored.\n")
elif input_config == 'geom-multiview' and 'multiview' in cfg_user:
if not all_keys_known(cfg_user.multiview, arguments.MULTIVIEW_KEYS, logger):
error = True
if 'config' in cfg_user.multiview and cfg_user.multiview.config not in arguments.MULTIVIEW_CONFIG:
logger.error(f"Unknown multiview configuration: '{cfg_user.multiview.config}'. Choose among "
f"{arguments.MULTIVIEW_CONFIG} to specify 'config'.\n")
error = True
if error:
logger.info('\n')
else:
logger.info('Settings check: ok.\n\n')
return not error
|
1a60afbf956b5b7096ec21a52669b1aa85f54c7d
| 3,649,592
|
import os
def generate_extension(project=None, outdir=None, *,
_project_from_raw=info.Project.from_raw,
_abspath=os.path.abspath,
_projfiles=None,
_gen=None,
):
"""Produce all needed files to build an extension from the given root."""
project = _project_from_raw(project)
# No need to validate.
if outdir:
outdir = _abspath(outdir)
else:
outdir = os.path.join(project.root, OUT_DIR)
if _projfiles is None:
_projfiles = _get_project_files(project.root)
return (_gen or _generate)(
project.root,
project.cfg,
_projfiles,
outdir,
)
|
966b1539c5b90a4ae53d13c8fdc71f88fb76ea99
| 3,649,593
|
import tensorflow as tf  # TF 1.x API (tf.train.GradientDescentOptimizer)

def apply_gradient_descent(var_list, obj, learning_rate = 0.01):
"""
Sets up the gradient descent optimizer
Args:
        var_list: List of variables to optimize over.
obj: Node of the objective to minimize
Notes:
learning_rate: What learning rate to run with. (Default = ``0.01``) Set with ``LR``
"""
back_prop = tf.train.GradientDescentOptimizer(
learning_rate = learning_rate,
name = 'gradient_descent' ).minimize(loss = obj, \
var_list = var_list )
return back_prop
|
97ed8db3e02412f2dfbe4e44b6835ed8fe754c57
| 3,649,594
|
import inspect
from inspect import Parameter
from typing import Callable, TypeVar, get_type_hints

from pydantic.typing import is_classvar  # ClassVar check helper (pydantic v1), as in fastapi-utils

T = TypeVar("T")
def make_cls_accept_cls_annotated_deps(cls: type[T]) -> type[T]:
"""
Make class `cls` accept class-annotated dependencies, performing following modifications:
- Update `__init__` function to set any class-annotated dependencies as instance attributes
- Update `__signature__` attribute to indicate to FastAPI what arguments should be passed to the initializer
"""
old_init: Callable[..., None] = cls.__init__
old_signature = inspect.signature(old_init)
old_params = list(old_signature.parameters.values())[1:] # drop `self` param
new_params = [
param
for param in old_params
if param.kind not in {Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD}
]
dep_names: list[str] = []
for name, hint in get_type_hints(cls).items():
if is_classvar(hint):
continue
dep_names.append(name)
new_params.append(
Parameter(
name=name,
kind=Parameter.KEYWORD_ONLY,
annotation=hint,
default=getattr(cls, name, Ellipsis),
)
)
new_signature = old_signature.replace(parameters=new_params)
def new_init(self: T, *args, **kwargs) -> None:
for dep_name in dep_names:
dep_value = kwargs.pop(dep_name)
setattr(self, dep_name, dep_value)
old_init(self, *args, **kwargs)
setattr(cls, "__init__", new_init)
setattr(cls, "__signature__", new_signature)
return cls
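
# Illustrative FastAPI-style sketch (UserRepo, get_user_repo and Depends are
# hypothetical names from the caller's code, not part of this helper):
#
#   class UserService:
#       repo: UserRepo = Depends(get_user_repo)   # class-annotated dependency
#
#       def list_users(self):
#           return self.repo.all()
#
#   UserService = make_cls_accept_cls_annotated_deps(UserService)
#   # __init__ now takes `repo` as a keyword-only argument, and __signature__ exposes
#   # it so FastAPI can inject the dependency when instantiating the class.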
|
529b73cd76adde9868bebec2a91b323c679fcdd0
| 3,649,595
|
def getReviewRedirect(entity, params):
"""Returns the redirect to review the specified entity.
"""
return '/%s/review/%s' % (
params['url_name'], entity.key().id_or_name())
|
959ff6d0297ec54248ee725e93a79702512d00d7
| 3,649,596
|
from jax import tree_util  # utils and IntegratorState come from the surrounding package

def leapfrog_step(state, target_log_prob_fn, kinetic_energy_fn, step_size, rng=None):
"""Single step of leapfrog.
Notes
=====
    The canonical distribution is related to the energy of the system by

        p(theta, p) = (1/Z) * exp(-H(theta, p) / T)

    For now, we assume that the kinetic energy takes the form

        K(p) = sum_i p_i^2 / (2 m_i)
"""
del rng
p, q, q_grad = state.momentum, state.state, state.state_grads
p_half = tree_util.tree_multimap(lambda p, qg: p + 0.5 * step_size * qg, p, q_grad)
_, grad_p_half = utils.call_fn_value_and_grad(kinetic_energy_fn, p_half)
q_full = tree_util.tree_multimap(lambda q, ph: q + step_size * ph, q, grad_p_half)
logprob, q_full_grad = utils.call_fn_value_and_grad(target_log_prob_fn, q_full)
p_full = tree_util.tree_multimap(lambda ph, qg: ph + 0.5 * step_size * qg, p_half,
q_full_grad)
return IntegratorState(q_full, q_full_grad, logprob, p_full)
|
78a98c9edaabefd0d6d13b8b83ee080b4a11e941
| 3,649,597
|
import requests
import repobee_plug as plug
def get_plugins_json(url: str = "https://repobee.org/plugins.json") -> dict:
"""Fetch and parse the plugins.json file.
Args:
url: URL to the plugins.json file.
Returns:
A dictionary with the contents of the plugins.json file.
"""
resp = requests.get(url)
if resp.status_code != 200:
plug.log.error(resp.content.decode("utf8"))
raise plug.PlugError(f"could not fetch plugins.json from '{url}'")
return resp.json()
|
f6d795d88d124d8cb68e2dad4d8a354af88525c1
| 3,649,598
|
from netaddr import IPSet  # Prefix is the IPAM model from the surrounding application

def add_available_prefixes(parent, prefix_list):
"""
Create fake Prefix objects for all unallocated space within a prefix.
"""
# Find all unallocated space
available_prefixes = IPSet(parent) ^ IPSet([p.prefix for p in prefix_list])
available_prefixes = [Prefix(prefix=p) for p in available_prefixes.iter_cidrs()]
# Concatenate and sort complete list of children
prefix_list = list(prefix_list) + available_prefixes
prefix_list.sort(key=lambda p: p.prefix)
return prefix_list
|
1df9f991f33e1a77b81b43de08c5f86f6acc7a20
| 3,649,599
|