| content (string, lengths 22-815k) | id (int64, 0-4.91M) |
|---|---|
def calc_power_VARIANT(input_data, working_dir, t_pt=0):
"""Calculate the power distributions from VARIANT
Parameters
----------
input_data : dict
DASSH input data dictionary
working_dir : str
Path to current working directory
t_pt : int, optional
Time point index used to select the ARC binary files (default 0)
Returns
-------
dict
DASSH Power objects for each type of assembly in the problem;
different objects are required because different assemblies
can have different unrodded region specifications
"""
cwd = os.getcwd()
if working_dir != '':
os.chdir(working_dir)
# Identify VARPOW keys for fuel and coolant
fuel_type = input_data['Power']['ARC']['fuel_material'].lower()
fuel_id = _FUELS[fuel_type]
if isinstance(fuel_id, dict):
alloy_type = input_data['Power']['ARC']['fuel_alloy'].lower()
fuel_id = fuel_id[alloy_type]
coolant_heating = input_data['Power']['ARC']['coolant_heating']
if coolant_heating is None:
coolant_heating = input_data['Core']['coolant_material']
if coolant_heating.lower() not in _COOLANTS.keys():
module_logger.error('Unknown coolant specification for '
'heating calculation; must choose '
'from options: Na, NaK, Pb, Pb-Bi')
else:
cool_id = _COOLANTS[coolant_heating.lower()]
# Run VARPOW, rename output files
path2varpow = os.path.dirname(os.path.abspath(__file__))
if sys.platform == 'darwin':
path2varpow = os.path.join(path2varpow, 'varpow_osx.x')
elif 'linux' in sys.platform:
path2varpow = os.path.join(path2varpow, 'varpow_linux.x')
else:
raise SystemError('DASSH currently supports only Linux and OSX')
with open('varpow_stdout.txt', 'w') as f:
subprocess.call([path2varpow,
str(fuel_id),
str(cool_id),
input_data['Power']['ARC']['pmatrx'][t_pt],
input_data['Power']['ARC']['geodst'][t_pt],
input_data['Power']['ARC']['ndxsrf'][t_pt],
input_data['Power']['ARC']['znatdn'][t_pt],
input_data['Power']['ARC']['nhflux'][t_pt],
input_data['Power']['ARC']['ghflux'][t_pt]],
stdout=f)
subprocess.call(['mv', 'MaterialPower.out',
'varpow_MatPower.out'])
subprocess.call(['mv', 'VariantMonoExponents.out',
'varpow_MonoExp.out'])
subprocess.call(['mv', 'Output.VARPOW', 'VARPOW.out'])
os.chdir(cwd)
return import_power_VARIANT(input_data, working_dir, t_pt)
| 5,344,400
|
def from_float32(buffer):
"""Interprets an arbitrary string or NumPy array as Vax single-precision
floating-point binary values, and returns the equivalent array in IEEE
values."""
# Convert the buffer to 2-byte elements
if isinstance(buffer, (str, np.str_, bytes, bytearray)):
# np.frombuffer replaces the deprecated np.fromstring for binary data
pairs = np.frombuffer(buffer, dtype='uint16')
pairs = pairs.reshape(pairs.size//2, 2)
newshape = (pairs.size//2,)
else:
buffer = np.asarray(buffer)
pairs = buffer.view('uint16')
assert pairs.shape[-1] % 2 == 0, \
'buffer shape is incompatible with 4-byte elements'
if buffer.itemsize == 1:
newshape = buffer.shape[:-1] + (buffer.shape[-1]//4,)
elif buffer.itemsize == 2:
newshape = buffer.shape[:-1] + (buffer.shape[-1]//2,)
elif buffer.itemsize == 4:
newshape = buffer.shape[:-1] + (1,)
else:
newshape = buffer.shape + (buffer.itemsize//4,)
if newshape[-1] == 1: newshape = newshape[:-1]
# Perform a pairwise swap of the two-byte elements
swapped = np.empty(pairs.shape, dtype='uint16')
swapped[...,::2] = pairs[...,1::2]
swapped[...,1::2] = pairs[...,::2]
# The results are in LSB IEEE format aside from a scale factor of four
ieee = swapped.view('<f4') / 4.
return ieee.reshape(newshape)
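# A minimal sanity check, assuming from_float32 above is in scope and the host
# is little-endian (as the uint16/float32 views require): the VAX F-format
# encoding of 1.0 is the 16-bit word pair 0x4080, 0x0000, which the word swap
# plus divide-by-four maps onto IEEE 1.0.
import numpy as np
vax_one = np.array([0x4080, 0x0000], dtype='uint16').tobytes()
ieee = from_float32(vax_one)
assert float(ieee[0]) == 1.0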
| 5,344,401
|
def p_cmdexpr_xmlunescape(p):
"""cmdexpr : XMLUNESCAPE
| XMLUNESCAPE arglist
| XMLUNESCAPE MACRO"""
| 5,344,402
|
def draw_bound_box_on_image(image, xmin, ymin, xmax, ymax, vis=True):
"""
:param image:
:param xmin, ymin, xmax, ymax: 归一化后的边角坐标
:param vis:
:return:
"""
pil_image = PIL.Image.fromarray(image)
draw = ImageDraw(pil_image)
xmin *= pil_image.width
xmax *= pil_image.width
ymin *= pil_image.height
ymax *= pil_image.height
xmin, ymin, xmax, ymax = [int(it) for it in [xmin, ymin, xmax, ymax]]
draw.line([(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax), (xmin, ymin)], width=4, fill='blue')
np.copyto(image, np.array(pil_image))
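# A small usage sketch, assuming the function above plus PIL and numpy are
# importable: draw a box covering the central half of a blank RGB image and
# check that blue pixels were written back into the array.
import numpy as np
import PIL.Image
import PIL.ImageDraw
canvas = np.zeros((100, 100, 3), dtype=np.uint8)
draw_bound_box_on_image(canvas, 0.25, 0.25, 0.75, 0.75)
assert canvas[:, :, 2].max() == 255  # the 'blue' outline reached the array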
| 5,344,403
|
def get_file_stats(file_name, entity_type='file', lineno=None, cursorpos=None,
plugin=None, language=None, local_file=None):
"""Returns a hash of information about the entity."""
language = standardize_language(language, plugin)
stats = {
'language': language,
'dependencies': [],
'lines': None,
'lineno': lineno,
'cursorpos': cursorpos,
}
if entity_type == 'file':
lexer = get_lexer(language)
if not language:
language, lexer = guess_language(file_name, local_file)
parser = DependencyParser(local_file or file_name, lexer)
stats.update({
'language': use_root_language(language, lexer),
'dependencies': parser.parse(),
'lines': number_lines_in_file(local_file or file_name),
})
return stats
| 5,344,404
|
def setup(app: sphinx.application.Sphinx) -> dict[str, object]:
"""Called by Sphinx to set up the extension."""
app.add_config_value("gaphor_models", {}, "env", [dict])
app.add_directive("diagram", DiagramDirective)
app.connect("config-inited", config_inited)
return {
"version": "0.1",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
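# A hypothetical conf.py fragment showing how an extension registered this way
# is typically enabled. The module path "gaphor.extensions.sphinx" and the
# model name/path are assumptions for illustration, not taken from the snippet;
# only the "gaphor_models" config value and the "diagram" directive come from
# the setup() above.
extensions = ["gaphor.extensions.sphinx"]
gaphor_models = {"example": "models/example.gaphor"}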
| 5,344,405
|
def create_vmhc(use_ants, flirt_only=False, name='vmhc_workflow', ants_threads=1):
"""
Compute the map of brain functional homotopy, the high degree of synchrony in spontaneous activity between geometrically corresponding interhemispheric (i.e., homotopic) regions.
Parameters
----------
use_ants : boolean
Whether to apply the functional-to-standard transform with ANTS (True) or FSL (False)
flirt_only : boolean, optional
If True (FSL path only), apply only the linear FLIRT transform and skip the FNIRT warp
name : string, optional
Name of the workflow (default 'vmhc_workflow')
ants_threads : int, optional
Number of threads to use when applying the ANTS transform
Returns
-------
vmhc_workflow : workflow
Voxel Mirrored Homotopic Connectivity Analysis Workflow
Notes
-----
`Source <https://github.com/FCP-INDI/C-PAC/blob/master/CPAC/vmhc/vmhc.py>`_
Workflow Inputs::
inputspec.brain : string (existing nifti file)
Anatomical image(without skull)
inputspec.symmetric_brain : string (existing nifti file)
MNI152_T1_2mm_symmetric_brain.nii.gz
inputspec.rest_res_filt : string (existing nifti file)
Band passed Image with nuisance signal regressed out(and optionally scrubbed). Recommended bandpass filter (0.001,0.1) )
inputspec.reorient : string (existing nifti file)
RPI oriented anatomical data
inputspec.example_func2highres_mat : string (existing affine transformation .mat file)
Specifies an affine transform that should be applied to the example_func before non linear warping
inputspec.standard_for_func: string (existing nifti file)
MNI152_T1_standard_resolution_brain.nii.gz
inputspec.symmetric_skull : string (existing nifti file)
MNI152_T1_2mm_symmetric.nii.gz
inputspec.twomm_brain_mask_dil : string (existing nifti file)
MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz
inputspec.config_file_twomm_symmetric : string (existing .cnf file)
T1_2_MNI152_2mm_symmetric.cnf
inputspec.rest_mask : string (existing nifti file)
A mask functional volume(derived by dilation from motion corrected functional volume)
fwhm_input.fwhm : list (float)
For spatial smoothing the Z-transformed correlations in MNI space.
Generally the value of this parameter is 1.5 or 2 times the voxel size of the input Image.
inputspec.mean_functional : string (existing nifti file)
The mean functional image for use in the func-to-anat registration matrix conversion
to ITK (ANTS) format, if the user selects to use ANTS.
Workflow Outputs::
outputspec.highres2symmstandard : string (nifti file)
Linear registration of T1 image to symmetric standard image
outputspec.highres2symmstandard_mat : string (affine transformation .mat file)
An affine transformation .mat file from linear registration and used in non linear registration
outputspec.highres2symmstandard_warp : string (nifti file)
warp file from Non Linear registration of T1 to symmetrical standard brain
outputspec.fnirt_highres2symmstandard : string (nifti file)
Non Linear registration of T1 to symmetrical standard brain
outputspec.highres2symmstandard_jac : string (nifti file)
jacobian determinant image from Non Linear registration of T1 to symmetrical standard brain
outputspec.rest_res_2symmstandard : string (nifti file)
nonlinear registration (func to standard) image
outputspec.VMHC_FWHM_img : string (nifti file)
pearson correlation between res2standard and flipped res2standard
outputspec.VMHC_Z_FWHM_img : string (nifti file)
Fisher Z transform map
outputspec.VMHC_Z_stat_FWHM_img : string (nifti file)
Z statistic map
Order of commands:
- Perform linear registration of Anatomical brain in T1 space to symmetric standard space. For details see `flirt <http://www.fmrib.ox.ac.uk/fsl/flirt/index.html>`_::
flirt
-ref MNI152_T1_2mm_symmetric_brain.nii.gz
-in mprage_brain.nii.gz
-out highres2symmstandard.nii.gz
-omat highres2symmstandard.mat
-cost corratio
-searchcost corratio
-dof 12
-interp trilinear
- Perform nonlinear registration (higres to standard) to symmetric standard brain. For details see `fnirt <http://fsl.fmrib.ox.ac.uk/fsl/fnirt/>`_::
fnirt
--in=head.nii.gz
--aff=highres2symmstandard.mat
--cout=highres2symmstandard_warp.nii.gz
--iout=fnirt_highres2symmstandard.nii.gz
--jout=highres2symmstandard_jac.nii.gz
--config=T1_2_MNI152_2mm_symmetric.cnf
--ref=MNI152_T1_2mm_symmetric.nii.gz
--refmask=MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz
--warpres=10,10,10
- Perform spatial smoothing on the input functional image(inputspec.rest_res_filt). For details see `PrinciplesSmoothing <http://imaging.mrc-cbu.cam.ac.uk/imaging/PrinciplesSmoothing>`_ `fslmaths <http://www.fmrib.ox.ac.uk/fslcourse/lectures/practicals/intro/index.htm>`_::
fslmaths rest_res_filt.nii.gz
-kernel gauss FWHM/ sqrt(8-ln(2))
-fmean -mas rest_mask.nii.gz
rest_res_filt_FWHM.nii.gz
- Apply nonlinear registration (func to standard). For details see `applywarp <http://www.fmrib.ox.ac.uk/fsl/fnirt/warp_utils.html#applywarp>`_::
applywarp
--ref=MNI152_T1_2mm_symmetric.nii.gz
--in=rest_res_filt_FWHM.nii.gz
--out=rest_res_2symmstandard.nii.gz
--warp=highres2symmstandard_warp.nii.gz
--premat=example_func2highres.mat
- Copy and L/R swap the output of applywarp command (rest_res_2symmstandard.nii.gz). For details see `fslswapdim <http://fsl.fmrib.ox.ac.uk/fsl/fsl4.0/avwutils/index.html>`_::
fslswapdim
rest_res_2symmstandard.nii.gz
-x y z
tmp_LRflipped.nii.gz
- Calculate pearson correlation between rest_res_2symmstandard.nii.gz and flipped rest_res_2symmstandard.nii.gz(tmp_LRflipped.nii.gz). For details see `3dTcorrelate <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dTcorrelate.html>`_::
3dTcorrelate
-pearson
-polort -1
-prefix VMHC_FWHM.nii.gz
rest_res_2symmstandard.nii.gz
tmp_LRflipped.nii.gz
- Fisher Z Transform the correlation. For details see `3dcalc <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dcalc.html>`_::
3dcalc
-a VMHC_FWHM.nii.gz
-expr 'log((a+1)/(1-a))/2'
-prefix VMHC_FWHM_Z.nii.gz
- Calculate the number of volumes(nvols) in flipped rest_res_2symmstandard.nii.gz(tmp_LRflipped.nii.gz) ::
-Use Nibabel to do this
- Compute the Z statistic map ::
3dcalc
-a VMHC_FWHM_Z.nii.gz
-expr 'a*sqrt('${nvols}'-3)'
-prefix VMHC_FWHM_Z_stat.nii.gz
Workflow:
.. image:: ../images/vmhc_graph.dot.png
:width: 500
Workflow Detailed:
.. image:: ../images/vmhc_detailed_graph.dot.png
:width: 500
References
----------
.. [1] Zuo, X.-N., Kelly, C., Di Martino, A., Mennes, M., Margulies, D. S., Bangaru, S., Grzadzinski, R., et al. (2010). Growing together and growing apart: regional and sex differences in the lifespan developmental trajectories of functional homotopy. The Journal of neuroscience : the official journal of the Society for Neuroscience, 30(45), 15034-43. doi:10.1523/JNEUROSCI.2612-10.2010
Examples
--------
>>> vmhc_w = create_vmhc()
>>> vmhc_w.inputs.inputspec.symmetric_brain = 'MNI152_T1_2mm_symmetric_brain.nii.gz'
>>> vmhc_w.inputs.inputspec.symmetric_skull = 'MNI152_T1_2mm_symmetric.nii.gz'
>>> vmhc_w.inputs.inputspec.twomm_brain_mask_dil = 'MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz'
>>> vmhc_w.inputs.inputspec.config_file_twomm = 'T1_2_MNI152_2mm_symmetric.cnf'
>>> vmhc_w.inputs.inputspec.standard_for_func= 'MNI152_T1_2mm.nii.gz'
>>> vmhc_w.inputs.fwhm_input.fwhm = [4.5, 6]
>>> vmhc_w.get_node('fwhm_input').iterables = ('fwhm', [4.5, 6])
>>> vmhc_w.inputs.inputspec.rest_res = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/func/original/rest_res_filt.nii.gz')
>>> vmhc_w.inputs.inputspec.reorient = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/anat/mprage_RPI.nii.gz')
>>> vmhc_w.inputs.inputspec.brain = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/anat/mprage_brain.nii.gz')
>>> vmhc_w.inputs.inputspec.example_func2highres_mat = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/func/original/reg/example_func2highres.mat')
>>> vmhc_w.inputs.inputspec.rest_mask = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/func/original/rest_mask.nii.gz')
>>> vmhc_w.run() # doctest: +SKIP
"""
vmhc = pe.Workflow(name=name)
inputNode = pe.Node(util.IdentityInterface(fields=['rest_res',
'example_func2highres_mat',
'rest_mask',
'standard_for_func',
'mean_functional',
'brain',
'flirt_linear_aff',
'fnirt_nonlinear_warp',
'ants_symm_initial_xfm',
'ants_symm_rigid_xfm',
'ants_symm_affine_xfm',
'ants_symm_warp_field']),
name='inputspec')
outputNode = pe.Node(util.IdentityInterface(fields=['rest_res_2symmstandard',
'VMHC_FWHM_img',
'VMHC_Z_FWHM_img',
'VMHC_Z_stat_FWHM_img']),
name='outputspec')
inputnode_fwhm = pe.Node(util.IdentityInterface(fields=['fwhm']),
name='fwhm_input')
if use_ants == False:
# Apply nonlinear registration (func to standard)
func_to_standard = pe.Node(interface=fsl.ApplyWarp(),
name='func_to_standard')
elif use_ants == True:
# ANTS warp image etc.
fsl_to_itk_vmhc = create_wf_c3d_fsl_to_itk(0, name='fsl_to_itk_vmhc')
collect_transforms_vmhc = create_wf_collect_transforms(0, name='collect_transforms_vmhc')
apply_ants_xfm_vmhc = create_wf_apply_ants_warp(map_node=False, name='apply_ants_xfm_vmhc',
ants_threads=ants_threads)
# this has to be 3 instead of default 0 because it is a 4D file
apply_ants_xfm_vmhc.inputs.inputspec.input_image_type = 3
# copy and L/R swap file
copy_and_L_R_swap = pe.Node(interface=fsl.SwapDimensions(),
name='copy_and_L_R_swap')
copy_and_L_R_swap.inputs.new_dims = ('-x', 'y', 'z')
# calculate vmhc
pearson_correlation = pe.Node(interface=preprocess.TCorrelate(),
name='pearson_correlation')
pearson_correlation.inputs.pearson = True
pearson_correlation.inputs.polort = -1
pearson_correlation.inputs.outputtype = 'NIFTI_GZ'
try:
z_trans = pe.Node(interface=preprocess.Calc(), name='z_trans')
z_stat = pe.Node(interface=preprocess.Calc(), name='z_stat')
except AttributeError:
from nipype.interfaces.afni import utils as afni_utils
z_trans = pe.Node(interface=afni_utils.Calc(), name='z_trans')
z_stat = pe.Node(interface=afni_utils.Calc(), name='z_stat')
z_trans.inputs.expr = 'log((1+a)/(1-a))/2'
z_trans.inputs.outputtype = 'NIFTI_GZ'
z_stat.inputs.outputtype = 'NIFTI_GZ'
NVOLS = pe.Node(util.Function(input_names=['in_files'],
output_names=['nvols'],
function=get_img_nvols),
name='NVOLS')
generateEXP = pe.Node(util.Function(input_names=['nvols'],
output_names=['expr'],
function=get_operand_expression),
name='generateEXP')
smooth = pe.Node(interface=fsl.MultiImageMaths(),
name='smooth')
if use_ants == False:
vmhc.connect(inputNode, 'rest_res',
smooth, 'in_file')
vmhc.connect(inputnode_fwhm, ('fwhm', set_gauss),
smooth, 'op_string')
vmhc.connect(inputNode, 'rest_mask',
smooth, 'operand_files')
vmhc.connect(inputNode, 'standard_for_func',
func_to_standard, 'ref_file')
if not flirt_only:
vmhc.connect(inputNode, 'fnirt_nonlinear_warp',
func_to_standard, 'field_file')
vmhc.connect(smooth, 'out_file',
func_to_standard, 'in_file')
vmhc.connect(inputNode, 'example_func2highres_mat',
func_to_standard, 'premat')
else:
func_to_anat = pe.Node(interface=fsl.ApplyWarp(),
name='func_to_anat')
vmhc.connect(smooth, 'out_file', func_to_anat, 'in_file')
vmhc.connect(inputNode, 'brain', func_to_anat, 'ref_file')
vmhc.connect(inputNode, 'example_func2highres_mat',
func_to_anat, 'premat')
vmhc.connect(func_to_anat, 'out_file', func_to_standard, 'in_file')
vmhc.connect(inputNode, 'flirt_linear_aff',
func_to_standard, 'premat')
vmhc.connect(func_to_standard, 'out_file',
copy_and_L_R_swap, 'in_file')
vmhc.connect(func_to_standard, 'out_file',
pearson_correlation, 'xset')
elif use_ants == True:
# connections for ANTS stuff
# functional apply warp stuff
vmhc.connect(inputNode, 'rest_res',
smooth, 'in_file')
vmhc.connect(inputnode_fwhm, ('fwhm', set_gauss),
smooth, 'op_string')
vmhc.connect(inputNode, 'rest_mask',
smooth, 'operand_files')
vmhc.connect(smooth, 'out_file',
apply_ants_xfm_vmhc, 'inputspec.input_image')
vmhc.connect(inputNode, 'ants_symm_initial_xfm',
collect_transforms_vmhc, 'inputspec.linear_initial')
vmhc.connect(inputNode, 'ants_symm_rigid_xfm',
collect_transforms_vmhc, 'inputspec.linear_rigid')
vmhc.connect(inputNode, 'ants_symm_affine_xfm',
collect_transforms_vmhc, 'inputspec.linear_affine')
vmhc.connect(inputNode, 'ants_symm_warp_field',
collect_transforms_vmhc, 'inputspec.warp_file')
# func->anat matrix (bbreg)
vmhc.connect(inputNode, 'example_func2highres_mat',
fsl_to_itk_vmhc, 'inputspec.affine_file')
vmhc.connect(inputNode, 'brain', fsl_to_itk_vmhc,
'inputspec.reference_file')
vmhc.connect(inputNode, 'mean_functional', fsl_to_itk_vmhc,
'inputspec.source_file')
vmhc.connect(fsl_to_itk_vmhc, 'outputspec.itk_transform',
collect_transforms_vmhc, 'inputspec.fsl_to_itk_affine')
vmhc.connect(inputNode, 'standard_for_func',
apply_ants_xfm_vmhc, 'inputspec.reference_image')
vmhc.connect(collect_transforms_vmhc,
'outputspec.transformation_series',
apply_ants_xfm_vmhc, 'inputspec.transforms')
vmhc.connect(apply_ants_xfm_vmhc, 'outputspec.output_image',
copy_and_L_R_swap, 'in_file')
vmhc.connect(apply_ants_xfm_vmhc, 'outputspec.output_image',
pearson_correlation, 'xset')
vmhc.connect(copy_and_L_R_swap, 'out_file',
pearson_correlation, 'yset')
vmhc.connect(pearson_correlation, 'out_file',
z_trans, 'in_file_a')
vmhc.connect(copy_and_L_R_swap, 'out_file',
NVOLS, 'in_files')
vmhc.connect(NVOLS, 'nvols',
generateEXP, 'nvols')
vmhc.connect(z_trans, 'out_file',
z_stat, 'in_file_a')
vmhc.connect(generateEXP, 'expr',
z_stat, 'expr')
if use_ants == False:
vmhc.connect(func_to_standard, 'out_file',
outputNode, 'rest_res_2symmstandard')
elif use_ants == True:
# ANTS warp outputs to outputnode
vmhc.connect(apply_ants_xfm_vmhc, 'outputspec.output_image',
outputNode, 'rest_res_2symmstandard')
vmhc.connect(pearson_correlation, 'out_file',
outputNode, 'VMHC_FWHM_img')
vmhc.connect(z_trans, 'out_file',
outputNode, 'VMHC_Z_FWHM_img')
vmhc.connect(z_stat, 'out_file',
outputNode, 'VMHC_Z_stat_FWHM_img')
return vmhc
| 5,344,406
|
def get_chunk_index(connection, db, # pylint: disable=too-many-arguments
tbl, chunk,
ch_db='percona', ch_tbl='checksums'):
"""
Get index that was used to cut the chunk
:param connection: MySQLDb connection
:param db: database of the chunk
:param tbl: table of the chunk
:param chunk: chunk id
:param ch_db: Database where checksums are stored. Default percona.
:param ch_tbl: Table where checksums are stored. Default checksums.
:return: index name or None if no index was used
"""
cur = connection.cursor()
query = "SELECT chunk_index FROM `%s`.`%s` " \
"WHERE db='%s' AND tbl='%s' AND chunk = %s"
LOG.info('Executing %s', query % (ch_db, ch_tbl, db, tbl, chunk))
cur.execute(query % (ch_db, ch_tbl, db, tbl, chunk))
return cur.fetchone()[0]
| 5,344,407
|
def _get_tree(code):
"""Return an AST tree of the JS passed in `code`."""
if not code:
return
# Acceptable unicode characters still need to be stripped. Just remove the
# slash: a character is necessary to prevent bad identifier errors.
code = JS_ESCAPE.sub("u", unicodehelper.decode(code))
shell_obj = subprocess.Popen(
["node", "./acorn.js"], shell=False, stdin=subprocess.PIPE,
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
data, stderr = shell_obj.communicate(code.encode('utf-8'))
if stderr:
raise RuntimeError('Error calling acorn: %s' % stderr)
if not data:
raise JSReflectException("Reflection failed")
parsed = json.loads(unicodehelper.decode(data), strict=False)
if parsed.get("error"):
raise JSReflectException(
parsed["error_message"]).line_num(parsed["line_number"])
return parsed
| 5,344,408
|
def initial_assignment_alpha_MSS(agents: List[AdditiveAgent], items: List[str], alpha: float)->Allocation:
"""
Initial division for allocating agents according to their alpha-MMS.
:param agents: valuations of agents, normalized such that MMS=1 for all agents,
and valuation are ordered in ascending order
:param items: items names sorted from the highest valued to the lowest
:param alpha: parameter for how much to approximate MMS allocation.
:return Allocation: what has been allocated so far (in this function); items and agents are updated during the function
>>> ### allocation for 1 agent, 1 object (this pass!)
>>> a = AdditiveAgent({"x": 1}, name="Alice")
>>> agents=[a]
>>> a1 = initial_assignment_alpha_MSS(agents,['x'],0.75)
>>> print(a1, agents)
Alice gets {x} with value nan.
[]
>>> ### allocation for 1 agent, 2 object
>>> b = AdditiveAgent({"x": 0.5, "y": 0.4}, name="Blice")
>>> agents=[b]
>>> a1 = initial_assignment_alpha_MSS(agents,['x','y'],0.6)
>>> print(a1, agents)
Blice gets {x,y} with value nan.
[]
>>> ### allocation for 2 agent, 2 object
>>> a = AdditiveAgent({"x": 0.8, "y": 0.7}, name="Alice")
>>> b = AdditiveAgent({"x": 0.7, "y": 0.7}, name="Blice")
>>> agents=[a,b]
>>> a1= initial_assignment_alpha_MSS(agents,['x','y'],0.6)
>>> print(a1, agents)
Alice gets {x} with value nan.
Blice gets {y} with value nan.
[]
>>> ### allocation for 2 agent, 8 object
>>> a = AdditiveAgent({"x1": 0.647059, "x2": 0.588235, "x3": 0.470588, "x4": 0.411765, "x5": 0.352941, "x6": 0.294118, "x7": 0.176471, "x8": 0.117647}, name="A")
>>> b = AdditiveAgent({"x1": 1.298701, "x2": 0.714286, "x3": 0.649351, "x4": 0.428571, "x5": 0.155844, "x6": 0.064935, "x7": 0.051948, "x8": 0.012987}, name="B")
>>> c = AdditiveAgent({"x1": 0.6, "x2": 0.6, "x3": 0.48, "x4": 0.36, "x5": 0.32, "x6": 0.32, "x7": 0.28, "x8": 0.04}, name="C")
>>> agents=[a,b,c]
>>> a1 = initial_assignment_alpha_MSS(agents,['x1','x2','x3','x4','x5','x6','x7','x8'],0.75)
>>> print(a1, agents) # x6, x7, x8 weren't divided
A gets {x3,x4} with value nan.
B gets {x1} with value nan.
C gets {x2,x5} with value nan.
[]
"""
ag_alloc = {}
n = len(agents)-1
#if there are fewer objects than agents, the MMS is 0 for everyone.
if(n+1>len(items)):
return Allocation(ag_alloc,agents)
#return None
names_agents=agent_names_from(agents)
while(True): # for every agent, check if s1/s2/s3/s4 >= alpha
num_items=len(items)
#fill si bundles
s1_bundle,s2_bundle,s3_bundle,s4_bundle=[],[],[],[]
#check index not out of bound
if num_items>0:
s1_bundle=[items[0]]
if num_items>n+1:
s2_bundle=[items[n] , items[n+1]]
if num_items>2*(n+1):
if 2*(n+1)-2>0:
s3_bundle=[items[(2*(n+1))-2], items[2*(n+1)-1] , items[2*(n+1)]]
s4_bundle=[items[0], items[2*(n+1)]]
s=[s1_bundle,s2_bundle,s3_bundle, s4_bundle]
for si in s:
willing_agent_index=willing_agent(agents,si,alpha)
if willing_agent_index!=None:
# give bundle to agent
ag_alloc[agents[willing_agent_index]._name] = si
# remove the allocated items from the items list
for item in si:
items.remove(item)
agents.pop(willing_agent_index)
# update number of agents
n = n - 1
# go to beginning of the outer loop and redefine the si bundles
break
elif si==s4_bundle:
# no agent is satisfied by any of the si bundles
return Allocation(names_agents, ag_alloc)
| 5,344,409
|
def start_interactive_mode(result):
"""starts an interactive shell so that the user can inspect errors
"""
debuggers = result.debuggers
descrs = result.error_descrs + result.fail_descrs
if len(debuggers) == 1:
# don't ask for test name if there's only one failure
debuggers[0].start()
else:
while True:
testindex = 0
print "Choose a test to debug:"
# order debuggers in the same way than errors were printed
print "\n".join(['\t%s : %s' % (i, descr) for i, (_, descr)
in enumerate(descrs)])
print "Type 'exit' (or ^D) to quit"
print
try:
todebug = raw_input('Enter a test name: ')
if todebug.strip().lower() == 'exit':
print
break
else:
try:
testindex = int(todebug)
debugger = debuggers[descrs[testindex][0]]
except (ValueError, IndexError):
print "ERROR: invalid test number %r" % (todebug, )
else:
debugger.start()
except (EOFError, KeyboardInterrupt):
print
break
| 5,344,410
|
def find_furious_yaml(config_file=__file__):
"""
Traverse directory trees to find a furious.yaml file
Begins with the location of this file then checks the
working directory if not found
Args:
config_file: location of this file, override for
testing
Returns:
the path of furious.yaml or None if not found
"""
checked = set()
result = _find_furious_yaml(os.path.dirname(config_file), checked)
if not result:
result = _find_furious_yaml(os.getcwd(), checked)
return result
| 5,344,411
|
def comp_axes(
self,
axes_list,
machine=None,
axes_dict_in=None,
is_periodicity_a=None,
is_periodicity_t=None,
per_a=None,
is_antiper_a=None,
per_t=None,
is_antiper_t=None,
):
"""Compute simulation axes such as time / angle / phase axes, with or without periodicities
and including normalizations
Parameters
----------
self : Input
an Input object
machine : Machine
a Machine object
axes_list: list
List of axes name to return in axes dict
axes_dict_in: {Data}
dict of axes containing time and angle axes (with or without (anti-)periodicity)
is_periodicity_a: bool
True if spatial periodicity is requested
is_periodicity_t: bool
True if time periodicity is requested
per_a : int
angle periodicity
is_antiper_a : bool
if the angle axis is antiperiodic
per_t : int
time periodicity
is_antiper_t : bool
if the time axis is antiperiodic
Returns
-------
axes_dict: {Data}
dict of axes containing requested axes
"""
if len(axes_list) == 0:
raise Exception("axes_list should not be empty")
if self.parent is not None:
simu = self.parent
else:
simu = None
if hasattr(simu, "parent") and simu.parent is not None:
output = simu.parent
else:
output = None
if (axes_list is None or len(axes_list) == 0) and (
axes_dict_in is None or len(axes_dict_in) == 0
):
raise Exception(
"Cannot calculate axes if both axes list and axes dict are None"
)
if machine is None:
# Fetch machine from input
if hasattr(simu, "machine") and simu.machine is not None:
machine = simu.machine
else:
raise Exception("Cannot calculate axes if simu.machine is None")
# Get machine pole pair number
p = machine.get_pole_pair_number()
# Fill periodicity parameters that are None
if per_a is None or is_antiper_a is None or per_t is None or is_antiper_t is None:
if output is not None:
# Get time and space (anti-)periodicities from the output
(
per_a_0,
is_antiper_a_0,
per_t_0,
is_antiper_t_0,
) = output.get_machine_periodicity()
else:
# Compute time and space (anti-)periodicities from the machine
per_a_0, is_antiper_a_0 = machine.comp_periodicity_spatial()
per_t_0, is_antiper_t_0, _, _ = machine.comp_periodicity_time()
if is_periodicity_t is None or is_periodicity_t:
# Enforce None values to machine time periodicity
per_t = per_t_0 if per_t is None else per_t
is_antiper_t = is_antiper_t_0 if is_antiper_t is None else is_antiper_t
if is_periodicity_t is None:
# Check time periodicity is included
is_periodicity_t = per_t > 1 or is_antiper_t
elif not is_periodicity_t:
# Remove time periodicity
per_t = 1
is_antiper_t = False
if is_periodicity_a is None or is_periodicity_a:
# Enforce None values to machine periodicity
per_a = per_a_0 if per_a is None else per_a
is_antiper_a = is_antiper_a_0 if is_antiper_a is None else is_antiper_a
if is_periodicity_a is None:
# Enforce requested angle periodicity
is_periodicity_a = per_a > 1 or is_antiper_a
elif not is_periodicity_a:
# Remove angle periodicity
per_a = 1
is_antiper_a = False
# Init axes_dict
axes_dict = dict()
# Get time axis
if "time" in axes_list:
# Check if Time is already in input dict of axes
if axes_dict_in is not None and "time" in axes_dict_in:
Time_in = axes_dict_in["time"]
else:
Time_in = None
# Calculate time axis
Time = self.comp_axis_time(p, per_t, is_antiper_t, Time_in)
# Store time axis in dict
axes_dict["time"] = Time
# Get angle axis
if "angle" in axes_list:
# Airgap radius
Rag = machine.comp_Rgap_mec()
# Check if Angle is already in input dict of axes
if axes_dict_in is not None and "angle" in axes_dict_in:
Angle_in = axes_dict_in["angle"]
else:
Angle_in = None
# Calculate angle axis
Angle = self.comp_axis_angle(p, Rag, per_a, is_antiper_a, Angle_in)
# Store angle axis in dict
axes_dict["angle"] = Angle
if "phase_S" in axes_list:
# Check if Phase is already in input dict of axes
stator_label = "phase_" + machine.stator.get_label()
if axes_dict_in is not None and stator_label in axes_dict_in:
Phase_in = axes_dict_in[stator_label]
else:
Phase_in = None
# Calculate stator phase axis
Phase = self.comp_axis_phase(machine.stator, Phase_in)
if Phase is not None:
# Store phase axis in dict
axes_dict[stator_label] = Phase
if "phase_R" in axes_list:
# Check if Phase is already in input dict of axes
rotor_label = "phase_" + machine.rotor.get_label()
if axes_dict_in is not None and rotor_label in axes_dict_in:
Phase_in = axes_dict_in[rotor_label]
else:
Phase_in = None
# Calculate rotor phase axis
per_a_phase = 2 * per_a if is_antiper_a else per_a
Phase = self.comp_axis_phase(machine.rotor, per_a_phase, Phase_in)
if Phase is not None:
# Store phase axis in dict
axes_dict[rotor_label] = Phase
return axes_dict
| 5,344,412
|
def update_credentials():
"""
Update the credentials zip file
"""
from base.application import create_app
app = create_app()
app.app_context().push()
click.secho("Zipping env_config", fg='green')
zipdir('env_config/', 'env_config.zip')
zip_creds = get_item('credential', 'travis-ci-cred')
click.secho("Encrypting credentials", fg='green')
if os.path.exists("env_config.zip.enc"):
os.remove("env_config.zip.enc")
comm = ['travis',
'encrypt-file',
'env_config.zip',
"--org",
'--key',
zip_creds['key'],
'--iv',
zip_creds['iv']]
print(' '.join(comm))
out, err = Popen(comm, stdout=PIPE, stderr=PIPE).communicate()
secho(str(out, 'utf-8'), fg='green')
if err:
exit(secho(str(err, 'utf-8'), fg='red'))
os.remove("env_config.zip")
| 5,344,413
|
def update_person(s, I_ID, firstname, lastname, dob, phonenum=None):
"""This function will update a person's records.
"""
pers = s.query(Person).filter(Person.id == I_ID).one()
pers.first_name = firstname
pers.last_name = lastname
pers.DOB = dob
pers.phone = phonenum
pers.age = age(dob)
s.commit()
| 5,344,414
|
def clean_vehicles(country):
"""Delete all vehicles from given country."""
with elastic() as client:
search = Vehicle.search(using=client).filter("term", country=country)
count = search.count()
search.delete()
return count
| 5,344,415
|
def render_template(
env: NativeEnvironment,
template: Union[Text, Path],
context: Any,
) -> Any:
"""Utility function for rendering Jinja2 text or file templates.
Args:
env: The Jinja2 environment to use for rendering
template: The template string or file to render
context: The context variables to use for rendering
Returns:
The rendered template string or data structure
"""
# convert strings to template
if isinstance(template, Path):
_template = env.get_template(str(template))
else:
_template = env.from_string(template)
value = _template.render(**context)
if isinstance(value, Undefined):
value._fail_with_undefined_error()
return value
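# A quick illustration, assuming render_template above is importable and
# Jinja2 is installed: with a NativeEnvironment the rendered result keeps its
# native Python type instead of being coerced to a string.
from jinja2.nativetypes import NativeEnvironment
env = NativeEnvironment()
result = render_template(env, "{{ x + 1 }}", {"x": 41})
assert result == 42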
| 5,344,416
|
def _get_template_dirs(type="plugin"):
"""Return a list of directories where templates may be located.
"""
template_dirs = [
os.path.expanduser(os.path.join("~", ".rapport", "templates", type)),
os.path.join("rapport", "templates", type) # Local dev tree
]
return template_dirs
| 5,344,417
|
def aspect_ToCString(*args):
"""
* Translates an ExtendedString to a CString depending of the local format.
:param aString:
:type aString: TCollection_ExtendedString &
:rtype: char *
"""
return _Aspect.aspect_ToCString(*args)
| 5,344,418
|
def execute(args, parser):
"""Executes the :ref:`authorise <authorise-cli>` command."""
print('To generate a personal access token do the following:')
print(' 1. Login to your GitHub account')
print(' 2. Go to Settings -> Developer settings -> Personal access tokens')
print(' 3. Click "Generate new token"')
print(' 4. In the Note field enter a description for the token (e.g., msl-package-manager)')
print(' 5. Optional: Select the scopes that you want to associate with the token')
print(' 6. Click "Generate token"')
print(' 7. Copy the token to your clipboard and paste it in the terminal when asked')
print()
print('For more detailed instructions see')
print('https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token')
print()
authorise()
| 5,344,419
|
def suites_list(request):
"""List suites."""
return TemplateResponse(
request,
"manage/suite/suites.html",
{
"suites": model.Suite.objects.select_related().annotate(
case_count=NotDeletedCount("cases", distinct=True)),
}
)
| 5,344,420
|
def main():
"""Runs your solution -- no need to update (except to maybe try out different databases)."""
# Sample implementation of the autocomplete API
database = ["abracadara", "al", "alice", "alicia", "allen", "alter", "altercation", "bob", "element", "ello", "eve", "evening", "event", "eventually", "mallory"]
database = ["abracadara", "al", "alice", "alicia", "allen", "alter", "altercation", "bob", "element", "ello", "eve", "evening", "event", "eventually", "mallory",
"z", "za", "zb", "zc", "zd", "ze", "zf", "zh", "zj", "zz", "zzz", "zzzz", "zzzzz", "zzzzzzzz", "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz",
"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz", "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzza", "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzb"]
query = lambda prefix: [d for d in database if d.startswith(prefix)][:5]
assert extract(query) == database
| 5,344,421
|
def push_gitlab_event_dict():
"""
Cleared version of the push gitlab webhook content.
"""
return {
"object_kind": "push",
"event_name": "push",
"before": "0e27f070efa4bef2a7c0168f07a0ac36ef90d8cb",
"after": "cb2859505e101785097e082529dced35bbee0c8f",
"ref": "refs/heads/build-branch",
"checkout_sha": "cb2859505e101785097e082529dced35bbee0c8f",
"user_id": 5647360,
"user_name": "Shreyas Papinwar",
"user_username": "shreyaspapi",
"user_email": "",
"user_avatar": "https://assets.gitlab-static.net/uploads/-"
"/system/user/avatar/5647360/avatar.png",
"project_id": 18032222,
"project": {
"id": 18032222,
"name": "Hello there",
"description": "Hehehehe",
"web_url": "https://gitlab.com/the-namespace/repo-name",
"git_ssh_url": "git@gitlab.com:the-namespace/repo-name.git",
"git_http_url": "https://gitlab.com/the-namespace/repo-name.git",
"namespace": "Testing packit",
"visibility_level": 20,
"path_with_namespace": "the-namespace/repo-name",
"default_branch": "master",
"homepage": "https://gitlab.com/the-namespace/repo-name",
"url": "git@gitlab.com:the-namespace/repo-name.git",
"ssh_url": "git@gitlab.com:the-namespace/repo-name.git",
"http_url": "https://gitlab.com/the-namespace/repo-name.git",
},
"commits": [
{
"id": "cb2859505e101785097e082529dced35bbee0c8f",
"message": "Update README.md",
"title": "Update README.md",
"timestamp": "2020-06-04T23:14:57+00:00",
"url": "https://gitlab.com/the-namespace/repo-name/-/commit/"
"cb2859505e101785097e082529dced35bbee0c8f",
"author": {"name": "Shreyas Papinwar", "email": "spapinwar@gmail.com"},
"added": [],
"modified": ["README.md"],
"removed": [],
}
],
"total_commits_count": 1,
"push_options": {},
"repository": {
"name": "Hello there",
"url": "git@gitlab.com:the-namespace/repo-name.git",
"description": "Hehehehe",
"homepage": "https://gitlab.com/the-namespace/repo-name",
"git_http_url": "https://gitlab.com/the-namespace/repo-name.git",
"git_ssh_url": "git@gitlab.com:the-namespace/repo-name.git",
"visibility_level": 20,
},
}
| 5,344,422
|
def compute_rotation_effects(VD, settings, EW_small, GAMMA, len_mach, X, CHORD, XLE, XBAR,
rhs, COSINP, SINALF, PITCH, ROLL, YAW, STB, RNMAX):
""" This computes the effects of the freestream and aircraft rotation rate on
CLE, the induced flow at the leading edge
Assumptions:
Several of the values needed in this calculation have been computed earlier and stored in VD
Normally, VORLAX skips the calculation implemented in this function for linear
chordwise spacing (the if statement below). However, since the trends are correct,
albeit underestimated, this calculation is being forced here.
"""
LE_ind = VD.leading_edge_indices
RNMAX = VD.panels_per_strip
##spacing = settings.spanwise_cosine_spacing
##if spacing == False: # linear spacing is LAX==1 in VORLAX
## return 0 #CLE not calculated till later for linear spacing
# Compute rotational effects (pitch, roll, yaw rates) on LE suction
# pick leading edge strip values for EW and reshape GAMMA -> gamma accordingly
EW = EW_small[: ,LE_ind, :]
n_tot_strips = EW.shape[1]
gamma = np.array(np.split(np.repeat(GAMMA, n_tot_strips, axis=0), len_mach))
CLE = (EW*gamma).sum(axis=2)
# Up till EFFINC, some of the following values were computed in compute_RHS_matrix().
# EFFINC and ALOC are calculated the exact same way, except for the XGIRO term.
# LOCATE VORTEX LATTICE CONTROL POINT WITH RESPECT TO THE
# ROTATION CENTER (XBAR, 0, ZBAR). THE RELATIVE COORDINATES
# ARE XGIRO, YGIRO, AND ZGIRO.
XGIRO = X - CHORD*XLE - np.repeat(XBAR, RNMAX[LE_ind])
YGIRO = rhs.YGIRO
ZGIRO = rhs.ZGIRO
# VX, VY, VZ ARE THE FLOW ONSET VELOCITY COMPONENTS AT THE LEADING
# EDGE (STRIP MIDPOINT). VX, VY, VZ AND THE ROTATION RATES ARE
# REFERENCED TO THE FREE STREAM VELOCITY.
VX = rhs.VX
VY = (COSINP - YAW *XGIRO + ROLL *ZGIRO)
VZ = (SINALF - ROLL *YGIRO + PITCH*XGIRO)
# CCNTL, SCNTL, SID, and COD were computed in compute_RHS_matrix()
# EFFINC = COMPONENT OF ONSET FLOW ALONG NORMAL TO CAMBERLINE AT
# LEADING EDGE.
EFFINC = VX *rhs.SCNTL + VY *rhs.CCNTL *rhs.SID - VZ *rhs.CCNTL *rhs.COD
CLE = CLE - EFFINC[:,LE_ind]
CLE = np.where(STB > 0, CLE /RNMAX[LE_ind] /STB, CLE)
return CLE
| 5,344,423
|
def setup():
"""
Sets up the database session
"""
global connection
connection = MySQLdb.connect(host=config.get('mysql.host'),
user=config.get('mysql.user'),
passwd=config.get('mysql.password'),
db=config.get('mysql.db'),
ssl={'ca' : config.get('mysql.cert')})
init_model(connection)
| 5,344,424
|
def introduction(course):
"""This method represents route to 'courses/<course>/intro.html' where the character introduction is rendered.
This method handles both GET and POST requests.
Args:
course (string): Name of the course.
Returns:
render_template: Returns rendered 'courses/<course>/intro.html' template.
"""
courseClass = class_for_name("project.models", course.capitalize())
introLevel = courseClass.query.filter_by(email=current_user.email).first().get_introLevel()
letters = introduceLetters(course, introLevel)
return render_template('courses/introduction.html', letters=letters, course=course)
| 5,344,425
|
def available_formats():
"""Return the available formats for reading and writing
Returns
-------
tuple: tuple of sets. The first set contains the available formats for reading.
The second set contains the available formats for writing.
"""
import pesummary.core.file.formats
import pesummary.gw.file.formats
import pkgutil
import importlib
read_formats, write_formats = [], []
modules = {
"gw": pesummary.gw.file.formats, "core": pesummary.core.file.formats
}
for package in ["core", "gw"]:
formats = [
a for _, a, _ in pkgutil.walk_packages(path=modules[package].__path__)
]
for _format in formats:
_submodule = importlib.import_module(
"pesummary.{}.file.formats.{}".format(package, _format)
)
if hasattr(_submodule, "write_{}".format(_format)):
write_formats.append(_format)
if hasattr(_submodule, "read_{}".format(_format)):
read_formats.append(_format)
return set(read_formats), set(write_formats)
| 5,344,426
|
def bbox_from_openpose(openpose_file, rescale=1.2, detection_thresh=0.2):
"""Get center and scale for bounding box from openpose detections."""
with open(openpose_file, 'r') as f:
keypoints = json.load(f)['people'][0]['pose_keypoints_2d']
keypoints = np.reshape(np.array(keypoints), (-1, 3))
valid = keypoints[:, -1] > detection_thresh
valid_keypoints = keypoints[valid][:, :-1]
center = valid_keypoints.mean(axis=0)
bbox_size = (valid_keypoints.max(axis=0) - valid_keypoints.min(axis=0)).max()
# adjust bounding box tightness
scale = bbox_size / 200.0
scale *= rescale
return center, scale
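# A worked example, assuming bbox_from_openpose above is in scope: three fake
# keypoints (x, y, confidence), one of them below the 0.2 threshold, written
# to a temporary OpenPose-style JSON file.
import json
import tempfile
import numpy as np
keypoints = [100, 100, 0.9, 200, 150, 0.8, 150, 300, 0.1]
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump({'people': [{'pose_keypoints_2d': keypoints}]}, f)
center, scale = bbox_from_openpose(f.name)
assert np.allclose(center, [150.0, 125.0])   # mean of the two confident points
assert np.isclose(scale, 100 / 200.0 * 1.2)  # bbox size 100 px, rescaled by 1.2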
| 5,344,427
|
def test_g_year_month_enumeration004_1194_g_year_month_enumeration004_1194_v(mode, save_output, output_format):
"""
TEST :Facet Schemas for string : facet=enumeration and value=2001-03
2000-10 2001-12 and document value=2001-03
"""
assert_bindings(
schema="msData/datatypes/Facets/gYearMonth/gYearMonth_enumeration004.xsd",
instance="msData/datatypes/Facets/gYearMonth/gYearMonth_enumeration004.xml",
class_name="Test",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 5,344,428
|
def datasets(tablefmt: str):
"""List datasets."""
click.echo(_help_datasets(tablefmt))
| 5,344,429
|
def test_main_fails_on_requests_error(
runner: CliRunner, mock_requests_get: Mock
) -> None:
"""It exits with exit code 1 if wikipedia throws exception."""
mock_requests_get.side_effect = Exception("Boom")
result = runner.invoke(console.main)
assert result.exit_code == 1
| 5,344,430
|
def calc_KPs(TempC, Sal, P=None):
"""
Calculate equilibrium constants for P species.
KP1 = H3PO4
KP2 = H2PO4
KP3 = HPO4
Chapter 5, Section 7.2.5 of Dickson, Sabine and Christian
(2007, http://cdiac.ornl.gov/oceans/Handbook_2007.html)
**WITHOUT APPROX PH SCALE CONVERSION IN CONSTANT**
(See footnote 5 in 'Best Practices' Guide)
This produces constants on SWS pH Scale.
Must be converted to Total scale before use.
Parameters
----------
TempC : array-like
Temperature in Celsius.
Sal : array-like
Salinity in PSU
P : array-like
Pressure in bar
Returns
-------
dict of KPs
"""
TempK = TempC + 273.15
lnTempK = np.log(TempK)
a0, a1, a2, a3, a4, a5, a6 = (
-4576.752,
115.54,
-18.453,
-106.736,
0.69171,
-0.65643,
-0.01844,
)
b0, b1, b2, b3, b4, b5, b6 = (
-8814.715,
172.1033,
-27.927,
-160.340,
1.3566,
0.37335,
-0.05778,
)
c0, c1, c3, c4, c5, c6 = (-3070.75, -18.126, 17.27039, 2.81197, -44.99486, -0.09984)
KP1 = np.exp(
a0 / TempK
+ a1
+ a2 * lnTempK
+ (a3 / TempK + a4) * Sal ** 0.5
+ (a5 / TempK + a6) * Sal
)
KP2 = np.exp(
b0 / TempK
+ b1
+ b2 * lnTempK
+ (b3 / TempK + b4) * Sal ** 0.5
+ (b5 / TempK + b6) * Sal
)
KP3 = np.exp(
c0 / TempK + c1 + (c3 / TempK + c4) * Sal ** 0.5 + (c5 / TempK + c6) * Sal
)
# parameters from Table 5 of Millero 2007 (doi:10.1021/cr0503557)
# Checked against CO2SYS
if P is not None:
ppar = {
"KP1": [-14.51, 0.1211, -0.000321, -2.67, 0.0427],
"KP2": [-23.12, 0.1758, -2.647e-3, -5.15, 0.09],
"KP3": [-26.57, 0.2020, -3.042e-3, -4.08, 0.0714],
}
KP1 *= prescorr(P, TempC, *ppar["KP1"])
KP2 *= prescorr(P, TempC, *ppar["KP2"])
KP3 *= prescorr(P, TempC, *ppar["KP3"])
return {"KP1": KP1, "KP2": KP2, "KP3": KP3}
| 5,344,431
|
def evaluate_model_sector_prediction(
model,
test_data_x,
test_data_y,
test_data_industry,
test_data_size,
mode_classifier=True,
max_seq_length=512,
batch_size=8,
):
"""This is a function to predict the sector given the input text ids"""
model = model.eval()
pred_label_test = []
answer_label_test = []
pred_industry_test = []
answer_indesutry_test = []
pred_label_prob_list = []
pred_industry_prob_list = []
for data_index in range(0, len(test_data_x), batch_size):
data_batch = test_data_x[data_index : data_index + batch_size]
doc_batch = [doc[0] for doc in data_batch]
logits = 0
industry_logits_all = 0
"""formatting the input data"""
input_array_doc = []
for doc_batch_index, input_ids in enumerate(doc_batch):
input_array = np.zeros(max_seq_length, dtype=int)
# truncate to max_seq_length or pad with zeros up to it
input_array[: min(max_seq_length, len(input_ids))] = input_ids[: min(max_seq_length, len(input_ids))]
input_array_doc.append(input_array)
input_ids = LongTensor(np.array(input_array_doc).astype(np.int32))
"""getting the model's output"""
label_logits, industry_logits = model(input_ids)
"""getting the values of the predicted probabilities"""
logits += label_logits
industry_logits_all += industry_logits
pred_label = np.argmax(logits.detach().to("cpu").numpy(), axis=1)
pred_industry = np.argmax(
industry_logits_all.detach().to("cpu").numpy(), axis=1
)
"""creating the output lists for the predicted values"""
pred_label_test += list(pred_label)
pred_industry_test += list(pred_industry)
answer_label_test += list(test_data_y[data_index : data_index + batch_size])
answer_indesutry_test += list(
test_data_industry[data_index : data_index + batch_size]
)
"""printing classification metrics of the sectors"""
target_sectors = [0, 1, 2, 3, 4, 5, 6, 7, 8]
print(classification_report(answer_label_test, pred_label_test, target_sectors))
return (
pred_label_test,
answer_label_test,
pred_industry_test,
answer_indesutry_test,
)
| 5,344,432
|
def is_no_op(module: Module) -> bool:
"""Return whether the module does no operation in graph.
Args:
module: module
Returns:
whether module is no operation
"""
no_op_modules = (Sequential, _Branch, Parallel, ReduceTuple, GraphModule)
return isinstance(module, no_op_modules)
| 5,344,433
|
def check_dirs_make(dir_fullpaths:list):
"""
Check if a directory exists. If not, create it.
@type dir_fullpaths: list
@param dir_fullpaths: Paths to directories to check
"""
for dir_fullpath in dir_fullpaths:
if not os.path.isdir(dir_fullpath):
os.mkdir(dir_fullpath)
| 5,344,434
|
def process_json_file(filename):
"""The entry point - loops through data files and loads each in"""
assert isinstance(filename, str)
if os.path.isdir(filename):
LOGGER.info("Filename points to a directory")
return
else:
LOGGER.info('Processing {}'.format(filename))
with open(filename) as f:
model_specs = json.load(f)
assert isinstance(model_specs, list)
assert len(model_specs) > 0
for model_spec in model_specs:
try:
_process_model_spec(model_spec)
except Exception as ex: # Broad catch to allow debug messages
import traceback
traceback.print_exc()
LOGGER.error(
'{} when processing {:.1000}'.format(ex, model_spec))
| 5,344,435
|
def has_ext(path_name, *, multiple=None, if_all_ext=False):
"""
Determine if the given path name has an extension
"""
base = os.path.basename(path_name)
count = base.count(EXT)
if not if_all_ext and base[0] == EXT and count != 0:
count -= 1
if multiple is None:
return count >= 1
elif multiple:
return count > 1
else:
return count == 1
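# Illustrative calls, assuming the module-level constant EXT is the extension
# separator '.' (an assumption; its definition is not shown in the snippet).
import os  # used by has_ext via os.path.basename
EXT = '.'  # assumed value of the constant referenced above
print(has_ext('archive.tar.gz'))                  # True: at least one extension
print(has_ext('archive.tar.gz', multiple=True))   # True: more than one extension
print(has_ext('.bashrc'))                         # False: the leading dot is ignored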
| 5,344,436
|
def index_directory(directory,
labels,
formats,
class_names=None,
shuffle=True,
seed=None,
follow_links=False):
"""Make list of all files in the subdirs of `directory`, with their labels.
Args:
directory: The target directory (string).
labels: Either "inferred"
(labels are generated from the directory structure),
None (no labels),
or a list/tuple of integer labels of the same size as the number of
valid files found in the directory. Labels should be sorted according
to the alphanumeric order of the image file paths
(obtained via `os.walk(directory)` in Python).
formats: Allowlist of file extensions to index (e.g. ".jpg", ".txt").
class_names: Only valid if "labels" is "inferred". This is the explicit
list of class names (must match names of subdirectories). Used
to control the order of the classes
(otherwise alphanumerical order is used).
shuffle: Whether to shuffle the data. Default: True.
If set to False, sorts the data in alphanumeric order.
seed: Optional random seed for shuffling.
follow_links: Whether to visit subdirectories pointed to by symlinks.
Returns:
tuple (file_paths, labels, class_names).
file_paths: list of file paths (strings).
labels: list of matching integer labels (same length as file_paths)
class_names: names of the classes corresponding to these labels, in order.
"""
if labels is None:
# in the no-label case, index from the parent directory down.
subdirs = ['']
class_names = subdirs
else:
subdirs = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
subdirs.append(subdir)
if not class_names:
class_names = subdirs
else:
if set(class_names) != set(subdirs):
raise ValueError(
'The `class_names` passed did not match the '
'names of the subdirectories of the target directory. '
'Expected: %s, but received: %s' %
(subdirs, class_names))
class_indices = dict(zip(class_names, range(len(class_names))))
# Build an index of the files
# in the different class subfolders.
pool = multiprocessing.pool.ThreadPool()
results = []
filenames = []
for dirpath in (os.path.join(directory, subdir) for subdir in subdirs):
results.append(
pool.apply_async(index_subdirectory,
(dirpath, class_indices, follow_links, formats)))
labels_list = []
for res in results:
partial_filenames, partial_labels = res.get()
labels_list.append(partial_labels)
filenames += partial_filenames
if labels not in ('inferred', None):
if len(labels) != len(filenames):
raise ValueError('Expected the lengths of `labels` to match the number '
'of files in the target directory. len(labels) is %s '
'while we found %s files in %s.' % (
len(labels), len(filenames), directory))
else:
i = 0
labels = np.zeros((len(filenames),), dtype='int32')
for partial_labels in labels_list:
labels[i:i + len(partial_labels)] = partial_labels
i += len(partial_labels)
if labels is None:
print('Found %d files.' % (len(filenames),))
else:
print('Found %d files belonging to %d classes.' %
(len(filenames), len(class_names)))
pool.close()
pool.join()
file_paths = [os.path.join(directory, fname) for fname in filenames]
if shuffle:
# Shuffle globally to erase macro-structure
if seed is None:
seed = np.random.randint(1e6)
rng = np.random.RandomState(seed)
rng.shuffle(file_paths)
rng = np.random.RandomState(seed)
rng.shuffle(labels)
return file_paths, labels, class_names
| 5,344,437
|
def execute_actions(protocol, device, actions):
"""
iterate through the actions and call the execute method at the Protocol class
:param protocol:
:param device:
:param actions:
:return:
"""
for action in actions:
protocol.execute(device, action)
| 5,344,438
|
def test_cray_config_unset(cli_runner):
""" Test `cray init` for creating the default configuration """
runner, cli, opts = cli_runner
config = opts['default']
configname = config['configname']
filep = '.config/cray/configurations/{}'.format(configname)
with open(filep, encoding='utf-8') as f:
original_data = toml.load(f)
result = runner.invoke(cli, ['config', 'unset', 'auth.login.username'])
assert result.exit_code == 0
assert os.path.isfile(filep)
with open(filep, encoding='utf-8') as f:
data = toml.load(f)
assert data != original_data
assert data['auth']['login'].get('username') is None
| 5,344,439
|
def bugprint(content: object):
"""
Only prints the message if in debug mode
:type content: object
:param content: the object to print
"""
if LOG is not None and LOG == 1:
print(content)
| 5,344,440
|
def colors_from_cmap(length=50, cmap=None, start=None, stop=None):
"""Return color cycle from a given colormap.
Parameters
----------
length : int
The number of colors in the cycle. When `length` is large (> ~10), it
is difficult to distinguish between successive lines because successive
colors are very similar.
cmap : str
Name of a matplotlib colormap (see matplotlib.pyplot.cm).
start, stop: 0 <= float <= 1
Limit colormap to this range (0 <= start < stop <= 1). You should limit the
range of colormaps with light values (assuming a white background).
Some colors have default start/stop values (see `CMAP_RANGE`).
Returns
-------
colors : list
List of RGBA colors.
See Also
--------
cycle_cmap
"""
if cmap is None:
cmap = config['color']['cmap']
if isinstance(cmap, str):
cmap = getattr(plt.cm, cmap)
# copy to a list so start/stop can be assigned without mutating CMAP_RANGE
crange = list(CMAP_RANGE.get(cmap.name, (0, 1)))
if start is not None:
crange[0] = start
if stop is not None:
crange[1] = stop
assert 0 <= crange[0] <= 1
assert 0 <= crange[1] <= 1
idx = np.linspace(crange[0], crange[1], num=length)
return cmap(idx)
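# The core idea shown directly with matplotlib, independent of the module's
# config and CMAP_RANGE globals that the function above relies on: sample N
# evenly spaced RGBA colors from a named colormap.
import numpy as np
import matplotlib.pyplot as plt
colors = plt.cm.viridis(np.linspace(0.0, 1.0, 10))
print(colors.shape)  # (10, 4): ten RGBA rows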
| 5,344,441
|
def mag_thresh(img, sobel_kernel=3, mag_thresh=(30, 100)):
"""
Return the magnitude of the gradient
for a given sobel kernel size and threshold values
"""
# Apply the following steps to img
# 1) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the gradient in x and y separately
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# 3) Calculate the gradient magnitude
mag_sobel = np.sqrt(sobelx**2 + sobely**2)
# 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8
scaled_sobel = np.uint8(255*mag_sobel/np.max(mag_sobel))
# 5) Create a binary mask where mag thresholds are met, zeros otherwise
binary_output = np.zeros_like(scaled_sobel)
binary_output[(scaled_sobel >= mag_thresh[0] ) & (scaled_sobel <= mag_thresh[1])] = 1
# 6) Return this mask as your binary_output image
return binary_output
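# A small sanity check, assuming mag_thresh plus the cv2 and numpy imports from
# the snippet are available: an image with one weak and one strong vertical
# edge. Only the weak edge's scaled gradient magnitude falls inside (30, 100).
import numpy as np
img = np.zeros((64, 64, 3), dtype=np.uint8)
img[:, 16:32] = 40   # weak step at column 16
img[:, 32:] = 255    # strong step at column 32
mask = mag_thresh(img, sobel_kernel=3, mag_thresh=(30, 100))
assert mask[5, 16] == 1 and mask[5, 32] == 0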
| 5,344,442
|
def mean_absolute_percentage_error(predictions, targets):
"""Calculate mean absolute percentage error"""
mask = (targets != 0.0)
# mask before dividing so zero targets never hit the division
return (np.fabs(targets[mask] - predictions[mask])/targets[mask]).mean()*100.0
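# Worked example, assuming the function and numpy are in scope: both targets
# are off by exactly 10%, so the MAPE is 10.0.
import numpy as np
targets = np.array([100.0, 200.0])
predictions = np.array([110.0, 180.0])
assert np.isclose(mean_absolute_percentage_error(predictions, targets), 10.0)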
| 5,344,443
|
def make_net_xds_list(data_xds_list, coords_per_xds):
"""Construct a list of dicts of xarray.Datasets to house the net gains.
Args:
data_xds_list: A List of xarray.Dataset objects containing MS data.
coords_per_xds: A List of Dicts containing dataset coords.
Returns:
net_gain_xds_list: A List of xarray.Dataset objects to house
the net gains.
"""
net_gain_xds_list = []
for data_xds, xds_coords in zip(data_xds_list, coords_per_xds):
net_t_chunks = np.tile(data_xds.UTIME_CHUNKS, 2).reshape(2, -1)
net_f_chunks = np.tile(data_xds.chunks["chan"], 2).reshape(2, -1)
# Create a default config object, consistent with the net gain.
# NOTE: If we have a direction-dependent model, assume the net gain
# is also direction dependent.
config = Gain(direction_dependent=bool(data_xds.dims["dir"]))
net_obj = TERM_TYPES["complex"]("NET",
config,
data_xds,
xds_coords,
net_t_chunks,
net_f_chunks)
net_gain_xds_list.append(net_obj.make_xds())
return net_gain_xds_list
| 5,344,444
|
def sort_dict(d, key=None, reverse=False):
"""
Sorts a dict by value.
Args:
d: Input dictionary
key: Function which takes a tuple (key, object) and returns a value to
compare and sort by. By default, the function compares the values
of the dict i.e. key = lambda t : t[1]
reverse: Allows to reverse sort order.
Returns:
OrderedDict object whose keys are ordered according to their value.
"""
kv_items = list(d.items())
# Sort kv_items according to key.
if key is None:
kv_items.sort(key=lambda t: t[1], reverse=reverse)
else:
kv_items.sort(key=key, reverse=reverse)
# Build ordered dict.
return collections.OrderedDict(kv_items)
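# Example, assuming sort_dict above is importable: order word counts from most
# to least frequent.
counts = {"pear": 2, "apple": 5, "plum": 1}
ordered = sort_dict(counts, reverse=True)
assert list(ordered.items()) == [("apple", 5), ("pear", 2), ("plum", 1)]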
| 5,344,445
|
def check_builds():
"""Base task"""
response = requests.get(
url=urljoin(Config.SISENSE_URL, "v2/builds"), headers=Config.SISENSE_HEADERS
)
builds = pd.DataFrame(data=response.json())
failed_builds = builds.loc[(builds.status == "failed")]
# for each failed cube:
for build in failed_builds.to_dict(orient="records"):
# check if failed cube is already recorded (oid), if not record
recorded_failure = (
session.query(FailedBuilds).filter(FailedBuilds.oid == build["oid"]).first()
)
if recorded_failure is None:
# record
record_failure(
build["oid"],
build["datamodelId"],
build["datamodelTitle"],
build["instanceId"],
)
# save log and get elements for log card
error_dict = get_logs(build["datamodelId"], build["datamodelTitle"])
# prepare card (so look into log)
card = make_teams_card(
build["datamodelTitle"],
error_dict["timestamp"],
error_dict["error_message"],
error_dict["file_link"],
)
# send card
send_teams_card(card)
return error_dict
| 5,344,446
|
def comp_neworig(tileid,dirn='/global/cfs/cdirs/desi/survey/catalogs/testfiberassign/SV3rerun/orig/'):
"""
check that new matches the original
"""
ts = str(tileid).zfill(6)
fa = fitsio.read('/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz')
fn = fitsio.read(dirn+'fba-'+ts+'.fits')
w = fn['DEVICE_TYPE'] == 'POS'
fn = fn[w]
wn = fn['TARGETID'] >= 0
fn = fn[wn]
print(len(fn))
wa = fa['TARGETID'] >= 0
fa = fa[wa]
print(len(fa))
ws = np.isin(fn['TARGETID'],fa['TARGETID'])
print(np.sum(ws))
if np.sum(ws) == len(fa) and len(fa) == len(fn):
return True
else:
return False
| 5,344,447
|
def create_cluster(config):
"""Create a PostgreSQL cluster for verification.
Once the cluster is initialized, access should be strictly controlled.
The postgresql.conf and pg_hba.conf should be set, and a random
password set for the postgres user.
"""
cluster_path = os.path.join(config['working_path'], 'pgsql_cluster')
initdb = os.path.join(config['postgresql']['bin_path'], 'initdb')
password_file = os.path.join(config['working_path'], '.password')
password = str(uuid.uuid4())
pathlib.Path(password_file).touch(mode=0o600, exist_ok=False)
with open(password_file, 'w') as pw_fh:
pw_fh.write('{}\n'.format(password))
args = '{} --auth-host=reject --auth-local=md5 --username=postgres --pwfile={} --pgdata={}'.format(
initdb, password_file, cluster_path
)
subprocess.run(args, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
with open(password_file, 'w') as pw_fh:
pw_fh.write('*:*:*:*:{}\n'.format(password))
# Set the UNIX socket directory to be used.
socket_path = os.path.join(config['working_path'], 'sockets')
os.makedirs(socket_path, exist_ok=True)
with open(os.path.join(cluster_path, 'postgresql.conf'), 'a') as conf:
conf.write('# Custom settings.\n')
conf.write("listen_addresses = ''\n")
conf.write("unix_socket_directories = '{}'\n".format(socket_path))
| 5,344,448
|
def _get_widget_handler(webmanager):
"""
Returns a handler to get the widgets
:param WebManager webmanager:
:return tornado.web.RequestHandler:
"""
class WidgetHandler(web.RequestHandler):
"""
Handler for all communications over WebSockets
"""
def get(self):
"""
Called when a client connection is closed
"""
webmanager.on_get_widgets(self)
return WidgetHandler
| 5,344,449
|
def get_model_field_type(model, field_label):
"""
Returns model's field type.
"""
return FIELD_TYPES_MAPPING.get(type(get_model_field(model, field_label)), 'STRING')
| 5,344,450
|
def test_yt32():
"""
    Testing permutations with Young tableaux without the shape [2,2].
    Expected answer: 25 mesh patterns (some classical).
    It suffices to look at permutations up to length 6.
"""
A = read_bisc_file(ppf + "yt_perm_avoids_32_good_len8")
B = read_bisc_file(ppf + "yt_perm_avoids_32_bad_len8")
# Too short
assert bisc(A, 4, 4) == {}
assert bisc(A, 5, 5) == {
5: {
Perm((0, 2, 1, 4, 3)): [set()],
Perm((0, 2, 4, 1, 3)): [set()],
Perm((0, 3, 1, 4, 2)): [set()],
Perm((0, 3, 4, 1, 2)): [set()],
Perm((1, 0, 2, 4, 3)): [set()],
Perm((1, 0, 3, 2, 4)): [set()],
Perm((1, 0, 3, 4, 2)): [set()],
Perm((1, 0, 4, 2, 3)): [set()],
Perm((1, 2, 0, 4, 3)): [set()],
Perm((1, 2, 4, 0, 3)): [set()],
Perm((1, 3, 0, 2, 4)): [set()],
Perm((1, 3, 0, 4, 2)): [set()],
Perm((1, 3, 4, 0, 2)): [set()],
Perm((1, 4, 0, 2, 3)): [set()],
Perm((2, 0, 1, 4, 3)): [set()],
Perm((2, 0, 3, 1, 4)): [set()],
Perm((2, 0, 3, 4, 1)): [set()],
Perm((2, 0, 4, 1, 3)): [set()],
Perm((2, 3, 0, 1, 4)): [set()],
Perm((2, 3, 0, 4, 1)): [set()],
Perm((2, 3, 4, 0, 1)): [set()],
Perm((2, 4, 0, 1, 3)): [set()],
Perm((3, 0, 1, 4, 2)): [set()],
Perm((3, 0, 4, 1, 2)): [set()],
Perm((3, 4, 0, 1, 2)): [set()],
}
}
# Should get expected answer
assert bisc(A, 5, 6) == {
5: {
Perm((0, 2, 1, 4, 3)): [set()],
Perm((0, 2, 4, 1, 3)): [{(3, 3)}],
Perm((0, 3, 1, 4, 2)): [{(3, 3)}],
Perm((0, 3, 4, 1, 2)): [set()],
Perm((1, 0, 2, 4, 3)): [set()],
Perm((1, 0, 3, 2, 4)): [set()],
Perm((1, 0, 3, 4, 2)): [set()],
Perm((1, 0, 4, 2, 3)): [set()],
Perm((1, 2, 0, 4, 3)): [set()],
Perm((1, 2, 4, 0, 3)): [{(3, 3)}],
Perm((1, 3, 0, 2, 4)): [{(2, 2)}],
Perm((1, 3, 0, 4, 2)): [set()],
Perm((1, 3, 4, 0, 2)): [set()],
Perm((1, 4, 0, 2, 3)): [{(2, 2)}],
Perm((2, 0, 1, 4, 3)): [set()],
Perm((2, 0, 3, 1, 4)): [{(2, 2)}],
Perm((2, 0, 3, 4, 1)): [{(2, 2)}],
Perm((2, 0, 4, 1, 3)): [set()],
Perm((2, 3, 0, 1, 4)): [set()],
Perm((2, 3, 0, 4, 1)): [set()],
Perm((2, 3, 4, 0, 1)): [set()],
Perm((2, 4, 0, 1, 3)): [set()],
Perm((3, 0, 1, 4, 2)): [{(3, 3)}],
Perm((3, 0, 4, 1, 2)): [set()],
Perm((3, 4, 0, 1, 2)): [set()],
}
}
assert bisc(A, 5, 7) == {
5: {
Perm((0, 2, 1, 4, 3)): [set()],
Perm((0, 2, 4, 1, 3)): [{(3, 3)}],
Perm((0, 3, 1, 4, 2)): [{(3, 3)}],
Perm((0, 3, 4, 1, 2)): [set()],
Perm((1, 0, 2, 4, 3)): [set()],
Perm((1, 0, 3, 2, 4)): [set()],
Perm((1, 0, 3, 4, 2)): [set()],
Perm((1, 0, 4, 2, 3)): [set()],
Perm((1, 2, 0, 4, 3)): [set()],
Perm((1, 2, 4, 0, 3)): [{(3, 3)}],
Perm((1, 3, 0, 2, 4)): [{(2, 2)}],
Perm((1, 3, 0, 4, 2)): [set()],
Perm((1, 3, 4, 0, 2)): [set()],
Perm((1, 4, 0, 2, 3)): [{(2, 2)}],
Perm((2, 0, 1, 4, 3)): [set()],
Perm((2, 0, 3, 1, 4)): [{(2, 2)}],
Perm((2, 0, 3, 4, 1)): [{(2, 2)}],
Perm((2, 0, 4, 1, 3)): [set()],
Perm((2, 3, 0, 1, 4)): [set()],
Perm((2, 3, 0, 4, 1)): [set()],
Perm((2, 3, 4, 0, 1)): [set()],
Perm((2, 4, 0, 1, 3)): [set()],
Perm((3, 0, 1, 4, 2)): [{(3, 3)}],
Perm((3, 0, 4, 1, 2)): [set()],
Perm((3, 4, 0, 1, 2)): [set()],
}
}
# Looking for longer patterns
assert bisc(A, 6, 7) == {
5: {
Perm((0, 2, 1, 4, 3)): [set()],
Perm((0, 2, 4, 1, 3)): [{(3, 3)}],
Perm((0, 3, 1, 4, 2)): [{(3, 3)}],
Perm((0, 3, 4, 1, 2)): [set()],
Perm((1, 0, 2, 4, 3)): [set()],
Perm((1, 0, 3, 2, 4)): [set()],
Perm((1, 0, 3, 4, 2)): [set()],
Perm((1, 0, 4, 2, 3)): [set()],
Perm((1, 2, 0, 4, 3)): [set()],
Perm((1, 2, 4, 0, 3)): [{(3, 3)}],
Perm((1, 3, 0, 2, 4)): [{(2, 2)}],
Perm((1, 3, 0, 4, 2)): [set()],
Perm((1, 3, 4, 0, 2)): [set()],
Perm((1, 4, 0, 2, 3)): [{(2, 2)}],
Perm((2, 0, 1, 4, 3)): [set()],
Perm((2, 0, 3, 1, 4)): [{(2, 2)}],
Perm((2, 0, 3, 4, 1)): [{(2, 2)}],
Perm((2, 0, 4, 1, 3)): [set()],
Perm((2, 3, 0, 1, 4)): [set()],
Perm((2, 3, 0, 4, 1)): [set()],
Perm((2, 3, 4, 0, 1)): [set()],
Perm((2, 4, 0, 1, 3)): [set()],
Perm((3, 0, 1, 4, 2)): [{(3, 3)}],
Perm((3, 0, 4, 1, 2)): [set()],
Perm((3, 4, 0, 1, 2)): [set()],
},
6: {Perm((2, 5, 0, 3, 4, 1)): [set()], Perm((4, 1, 2, 5, 0, 3)): [set()]},
}
SG = bisc(A, 6, 8)
assert SG == {
5: {
Perm((0, 2, 1, 4, 3)): [set()],
Perm((0, 2, 4, 1, 3)): [{(3, 3)}],
Perm((0, 3, 1, 4, 2)): [{(3, 3)}],
Perm((0, 3, 4, 1, 2)): [set()],
Perm((1, 0, 2, 4, 3)): [set()],
Perm((1, 0, 3, 2, 4)): [set()],
Perm((1, 0, 3, 4, 2)): [set()],
Perm((1, 0, 4, 2, 3)): [set()],
Perm((1, 2, 0, 4, 3)): [set()],
Perm((1, 2, 4, 0, 3)): [{(3, 3)}],
Perm((1, 3, 0, 2, 4)): [{(2, 2)}],
Perm((1, 3, 0, 4, 2)): [set()],
Perm((1, 3, 4, 0, 2)): [set()],
Perm((1, 4, 0, 2, 3)): [{(2, 2)}],
Perm((2, 0, 1, 4, 3)): [set()],
Perm((2, 0, 3, 1, 4)): [{(2, 2)}],
Perm((2, 0, 3, 4, 1)): [{(2, 2)}],
Perm((2, 0, 4, 1, 3)): [set()],
Perm((2, 3, 0, 1, 4)): [set()],
Perm((2, 3, 0, 4, 1)): [set()],
Perm((2, 3, 4, 0, 1)): [set()],
Perm((2, 4, 0, 1, 3)): [set()],
Perm((3, 0, 1, 4, 2)): [{(3, 3)}],
Perm((3, 0, 4, 1, 2)): [set()],
Perm((3, 4, 0, 1, 2)): [set()],
},
6: {Perm((2, 5, 0, 3, 4, 1)): [set()], Perm((4, 1, 2, 5, 0, 3)): [set()]},
}
assert patterns_suffice_for_bad(SG, 7, B) == (True, [])
assert run_clean_up(SG, B, limit_monitors=25) == (
[
[
(5, 0, 0),
(5, 1, 0),
(5, 2, 0),
(5, 3, 0),
(5, 4, 0),
(5, 5, 0),
(5, 6, 0),
(5, 7, 0),
(5, 8, 0),
(5, 9, 0),
(5, 10, 0),
(5, 11, 0),
(5, 12, 0),
(5, 13, 0),
(5, 14, 0),
(5, 15, 0),
(5, 16, 0),
(5, 17, 0),
(5, 18, 0),
(5, 19, 0),
(5, 20, 0),
(5, 21, 0),
(5, 22, 0),
(5, 23, 0),
(5, 24, 0),
]
],
{
(5, 0, 0): (Perm((0, 2, 1, 4, 3)), set()),
(5, 1, 0): (Perm((0, 2, 4, 1, 3)), {(3, 3)}),
(5, 2, 0): (Perm((0, 3, 1, 4, 2)), {(3, 3)}),
(5, 3, 0): (Perm((0, 3, 4, 1, 2)), set()),
(5, 4, 0): (Perm((1, 0, 2, 4, 3)), set()),
(5, 5, 0): (Perm((1, 0, 3, 2, 4)), set()),
(5, 6, 0): (Perm((1, 0, 3, 4, 2)), set()),
(5, 7, 0): (Perm((1, 0, 4, 2, 3)), set()),
(5, 8, 0): (Perm((1, 2, 0, 4, 3)), set()),
(5, 9, 0): (Perm((1, 2, 4, 0, 3)), {(3, 3)}),
(5, 10, 0): (Perm((1, 3, 0, 2, 4)), {(2, 2)}),
(5, 11, 0): (Perm((1, 3, 0, 4, 2)), set()),
(5, 12, 0): (Perm((1, 3, 4, 0, 2)), set()),
(5, 13, 0): (Perm((1, 4, 0, 2, 3)), {(2, 2)}),
(5, 14, 0): (Perm((2, 0, 1, 4, 3)), set()),
(5, 15, 0): (Perm((2, 0, 3, 1, 4)), {(2, 2)}),
(5, 16, 0): (Perm((2, 0, 3, 4, 1)), {(2, 2)}),
(5, 17, 0): (Perm((2, 0, 4, 1, 3)), set()),
(5, 18, 0): (Perm((2, 3, 0, 1, 4)), set()),
(5, 19, 0): (Perm((2, 3, 0, 4, 1)), set()),
(5, 20, 0): (Perm((2, 3, 4, 0, 1)), set()),
(5, 21, 0): (Perm((2, 4, 0, 1, 3)), set()),
(5, 22, 0): (Perm((3, 0, 1, 4, 2)), {(3, 3)}),
(5, 23, 0): (Perm((3, 0, 4, 1, 2)), set()),
(5, 24, 0): (Perm((3, 4, 0, 1, 2)), set()),
(6, 0, 0): (Perm((2, 5, 0, 3, 4, 1)), set()),
(6, 1, 0): (Perm((4, 1, 2, 5, 0, 3)), set()),
},
)
| 5,344,451
|
def touch(path):
"""Creates an empty file at the specified path."""
perms = (os.O_WRONLY | os.O_CREAT | os.O_NONBLOCK | os.O_NOCTTY)
fd = None
try:
fd = os.open(path, perms)
os.utime(path, None)
finally:
if fd is not None:
os.close(fd)
| 5,344,452
|
def prepare_elements_default_wallet(my_node):
"""this will collect the free coins we have created with -initialfreecoins=2100000000000000
and transfer them to the default-wallet
"""
rpc = my_node.rpcconn.get_rpc()
wallet = rpc.wallet("")
freehash = rpc.getblockhash(0)
freetxid = rpc.getblock(freehash)["tx"][1]
logger.debug(f"freetxid: {freetxid}")
if rpc.gettxout(freetxid, 0): # unspent!
tx = rpc.getrawtransaction(freetxid, 1)
fee = 1000e-8
value = round(tx["vout"][0]["value"] - fee, 8)
addr = wallet.getnewaddress()
unconfidential = wallet.getaddressinfo(addr)["unconfidential"]
rawtx = wallet.createrawtransaction(
[{"txid": freetxid, "vout": 0}], # inputs
[{unconfidential: value}, {"fee": fee}],
)
wallet.sendrawtransaction(rawtx)
rpc.generatetoaddress(101, unconfidential)
| 5,344,453
|
def test_set_nonexisting_working_dir(known_dirs):
"""Working dir not added to sys paths if not exist."""
p = '/arb/path'
assert sys.path == ['arb']
moduleloader.set_working_directory(p)
assert sys.path == ['arb']
assert moduleloader._known_dirs == {p}
| 5,344,454
|
def Normalize_Column_Scores(df, columns, norm_type = 'divide_by_max'):
"""Normalizes scores for specified columns in a pandas dataframe
Parameters
----------
df : a pandas DataFrame object that contains the specified columns
columns: a list object that includes the columns to normalize
    norm_type : a string specifying the type of normalization to perform
        - 'divide_by_max' divides all values by the maximum value
        - 'range_norm' subtracts the minimum and divides by the range of values in the column
        - 'z_norm' computes a z-score based on the mean and standard deviation of values
        - 'divide_by_sum' divides all values by the sum of the values
        - 'vector' divides all values by the square root of the sum of the squares of all values
    Returns
    -------
    temp_df: a copy of the passed dataframe with the normalizations performed
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> import mcdm_functions as mcfunc
>>> data_dict = {'Product': ['A', 'B', 'C', 'D'],
'Product Advantage': [13.1,13.2,12.2,13.2],
'Strategic Alignment': [9.8,8.2,10.0,9.6],
'Technical Feasibility': [20.0,18.7,18.5,17.1],
'Market Attractiveness': [15.5,12.3,13.1,13.1]}
>>> score_data = pd.DataFrame(data_dict)
>>> score_data = score_data.set_index('Product')
>>> print(score_data)
Market Attractiveness Product Advantage Strategic Alignment \
Product
A 15.5 13.1 9.8
B 12.3 13.2 8.2
C 13.1 12.2 10.0
D 13.1 13.2 9.6
Technical Feasibility
Product
A 20.0
B 18.7
C 18.5
D 17.1
>>> columns = ['Market Attractiveness','Product Advantage']
>>> temp = mcfunc.Normalize_Column_Scores(score_data,columns)
>>> print(temp)
Market Attractiveness Product Advantage Strategic Alignment \
Product
A 1.000000 13.1 9.8
B 0.793548 13.2 8.2
C 0.845161 12.2 10.0
D 0.845161 13.2 9.6
Technical Feasibility
Product
A 20.0
B 18.7
C 18.5
D 17.1
"""
temp_df = df.copy()
for column in columns:
        if norm_type == 'divide_by_max':
            max_entry = temp_df[column].max()
            temp_df[column] = temp_df[column]/max_entry
        elif norm_type == 'range_norm':
            min_entry = temp_df[column].min()
            max_entry = temp_df[column].max()
            temp_df[column] = (temp_df[column]-min_entry)/(max_entry - min_entry)
        elif norm_type == 'z_norm':
            mean = temp_df[column].mean()
            sd = temp_df[column].std()
            temp_df[column] = (temp_df[column]-mean)/sd
        elif norm_type == 'divide_by_sum':
            temp_df[column] = temp_df[column]/temp_df[column].sum()
        elif norm_type == 'vector':
values = temp_df[column].values
values_squared = values**2
vector_norm = values/np.sqrt(np.sum(values_squared))
temp_df[column] = vector_norm
else:
print('You did not enter a valid type, so no changes were made')
return temp_df
| 5,344,455
|
def custom_pdf_merge(doctype,docid,attach_to_og_doc=False,doc_to_merge={}):
"""
doc_to_merge = {
"dt_to_merge": "", ##doctype on which merge is to be performed
"dt_to_merge_id": "", ##docid on which merge is to be performed
"attach_fieldname": "", ##fieldname of the attach field through which CAD doc is uploaded (Ex:assembly_drawing)
"print_format": "", ##preferred print format of docid
"attach_to_doc": True/False, ##should the merged pdf be attached to dt_to_merge_id
"other_attachments_to_merge": [list of file names] ##list of pdfs attached to dt_to_merge_id that need to be merged along with attach_fieldname
}
"""
doc_to_merge=json.loads(doc_to_merge)
file_path = frappe.utils.get_url()
dir_path_idx = file_path.find('/')+2
dir_path =file_path[dir_path_idx:]
mergeFile = PyPDF2.PdfFileMerger()
final_merged_file = _("/private/files/Merged_{}.pdf").format(doc_to_merge['dt_to_merge_id'])
# Generate pdf of original record
org_pdf = doc_to_merge['dt_to_merge_id'] + ".pdf"
doc_pdf = frappe.attach_print(doc_to_merge['dt_to_merge'], doc_to_merge['dt_to_merge_id'],
str(doc_to_merge['dt_to_merge_id']), print_format=doc_to_merge['print_format'])
docfile = open(org_pdf,"wb")
docfile.write(doc_pdf["fcontent"])
# Append pdf of original record
og_doc_to_merge = PyPDF2.PdfFileReader(org_pdf,'rb')
mergeFile.append(og_doc_to_merge,'rb')
attachment_filename = frappe.get_value(doc_to_merge['dt_to_merge'],
doc_to_merge['dt_to_merge_id'],
doc_to_merge['attach_fieldname'])
idx = attachment_filename.rfind('/')+1
attachment_filename = attachment_filename[idx:]
# Fetch attachment details
attached_doc = frappe.get_all("File",
fields=["name", "file_name", "file_url"] ,
filters = {
"attached_to_name": doc_to_merge['dt_to_merge_id'],
"attached_to_doctype": doc_to_merge['dt_to_merge'],
"file_name":attachment_filename})
if 'other_attachments_to_merge' in doc_to_merge:
other_attachments_str = ",".join(doc_to_merge['other_attachments_to_merge'])
else:
other_attachments_str = ''
other_attached_docs = frappe.get_all("File",
fields=['name','file_name','file_url'],
filters={
"attached_to_name": doc_to_merge['dt_to_merge_id'],
"attached_to_doctype": doc_to_merge['dt_to_merge'],
"file_name":['in',other_attachments_str]
})
old_merged_doc = frappe.get_all("File",
fields=['name','file_name','file_url'],
filters={
"attached_to_name": ['in',(docid,doc_to_merge['dt_to_merge_id'])],
"attached_to_doctype": ['in',(doctype,doc_to_merge['dt_to_merge'])],
"file_name":['like','Merged_'+doc_to_merge['dt_to_merge_id']+'.pdf']
})
# Delete old Merged file
for doc in old_merged_doc:
frappe.delete_doc("File",doc.name)
# Append main attachment to merge file
if attached_doc:
        if not attached_doc[0].file_url.startswith('/private'):
            url = '/public' + attached_doc[0].file_url
        else:
            url = attached_doc[0].file_url
        to_merge = PyPDF2.PdfFileReader(dir_path + url)
        mergeFile.append(to_merge, 'rb')
# Append other attachments to final pdf
for pdfs in other_attached_docs:
        if not pdfs.file_url.startswith('/private'):
            url = '/public' + pdfs.file_url
        else:
            url = pdfs.file_url
        to_merge = PyPDF2.PdfFileReader(dir_path + url)
        mergeFile.append(to_merge, 'rb')
if mergeFile:
mergeFile.write(dir_path + final_merged_file)
mergeFile.close()
file_stats = os.stat(dir_path + final_merged_file)
file_size = file_stats.st_size
if attach_to_og_doc == 1:
merged_file = frappe.get_doc({
"doctype": "File",
"file_name": "Merged_"+doc_to_merge['dt_to_merge_id']+".pdf",
"file_url": final_merged_file,
"attached_to_doctype": doctype,
"attached_to_name": docid,
"file_size":file_size,
"is_private": 1
})
merged_file.insert()
merged_file = frappe.get_doc({
"doctype": "File",
"file_name": "Merged_"+doc_to_merge['dt_to_merge_id']+".pdf",
"file_url":final_merged_file,
"attached_to_doctype": 'BOM',
"attached_to_name": doc_to_merge['dt_to_merge_id'],
"file_size":file_size,
"is_private": 1
})
merged_file.insert()
return {'file_url' : merged_file.file_url,
'attached_to' : merged_file.attached_to_name}
| 5,344,456
|
def test_schema_from_parquet_with_namespace(filename):
"""Test the creation of a new model from a parquet file."""
origin = Path(__file__).parent / "data" / "original" / filename
target_dir = Path(__file__).parent / "data" / "schemas"
namespace = "pydf"
schema = Schema.from_parquet(
str(origin), str(target_dir), namespace=namespace
)
name = f"{filename.split('.')[0].title()}{namespace.title()}"
assert schema["name"] == name
| 5,344,457
|
def find_most_common_word(sentence):
"""Return the most common word in the sentence."""
# Change to lowercase and strip out punctuation
sentence = clean_sentence(sentence)
list_of_words = sentence.split()
word_to_count = dict()
# Create a histogram of the occurrence of all words
for word in list_of_words:
if word not in word_to_count:
word_to_count[word] = 1
else:
word_to_count[word] += 1
most_common_word = ''
highest_count = 0
# Find highest count in the histogram
for word, count in word_to_count.items():
if count > highest_count:
most_common_word, highest_count = word, count
return most_common_word
| 5,344,458
|
def fast_rcnn_inference(boxes, scores, image_shapes, predictions, score_thresh, nms_thresh, topk_per_image):
"""
Call `fast_rcnn_inference_single_image` for all images.
Args:
boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic
boxes for each image. Element i has shape (Ri, K * 4) if doing
class-specific regression, or (Ri, 4) if doing class-agnostic
regression, where Ri is the number of predicted objects for image i.
This is compatible with the output of :meth:`FastRCNNOutputLayers.predict_boxes`.
scores (list[Tensor]): A list of Tensors of predicted class scores for each image.
Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
for image i. Compatible with the output of :meth:`FastRCNNOutputLayers.predict_probs`.
image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch.
score_thresh (float): Only return detections with a confidence score exceeding this
threshold.
nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].
topk_per_image (int): The number of top scoring detections to return. Set < 0 to return
all detections.
Returns:
instances: (list[Instances]): A list of N instances, one for each image in the batch,
that stores the topk most confidence detections.
kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates
the corresponding boxes/scores index in [0, Ri) from the input, for image i.
"""
result_per_image = [
fast_rcnn_inference_single_image(
boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image, prediction
)
for scores_per_image, boxes_per_image, image_shape, prediction in zip(scores, boxes, image_shapes, predictions)
]
return [x[0] for x in result_per_image], [x[1] for x in result_per_image]
| 5,344,459
|
def GetVar(doc:NexDoc, varNumber, varType) -> NexVar:
"""Returns the reference to the specified variable."""
return NexRun("GetVar", locals())
| 5,344,460
|
def full_issuance(cfr_title, cfr_part, version):
"""Create a full regulation tree from a notice"""
process_version_if_needed(cfr_title, cfr_part, version)
process_tree_if_needed(cfr_title, cfr_part, version)
| 5,344,461
|
def putversenotes():
"""Serves AJAX call for json data to save notes.
See also [M:NOTESAVE.putVerseNotes][notesave.NOTESAVE.putVerseNotes].
Client code: [{noteverse.sendnotes}][noteversesendnotes]
"""
session.forget(response)
Books = BOOKS()
Note = NOTE(Books)
NoteSave = NOTESAVE(Note)
return NoteSave.putVerseNotes()
| 5,344,462
|
def prepend(list, item):
"""Adds the provided item to the front of the list."""
list.insert(0, item)
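# Trivial usage sketch: prepend mutates the given list in place.
items = [2, 3]
prepend(items, 1)
print(items)  # [1, 2, 3]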
| 5,344,463
|
def _valid_multiview_args(cfg_user, logger):
"""
Validates the "multiview" parameters of a json configuration file used for training.
The function returns False if an error has occurred and True if all settings have passed the check.
:param cfg_user: EasyDict, json configuration file imported as dictionary
:param logger: logger instance
:return: boolean, True if no errors have been detected, False otherwise
"""
error = False
# Extract the input channel configuration (use the default setting if not specified by the user)
if 'model' in cfg_user and 'input_channels' in cfg_user.model:
# Use user setting
input_config = cfg_user.model.input_channels
else:
# Use default setting
input_config = cfg_default.model.input_channels
if input_config != 'geom-multiview' and 'multiview' in cfg_user:
logger.warning(f"The argument 'model': 'input_channels' is set to '{input_config}'. Hence, the multiview "
"settings will be ignored.\n")
elif input_config == 'geom-multiview' and 'multiview' in cfg_user:
if not all_keys_known(cfg_user.multiview, arguments.MULTIVIEW_KEYS, logger):
error = True
if 'config' in cfg_user.multiview and cfg_user.multiview.config not in arguments.MULTIVIEW_CONFIG:
logger.error(f"Unknown multiview configuration: '{cfg_user.multiview.config}'. Choose among "
f"{arguments.MULTIVIEW_CONFIG} to specify 'config'.\n")
error = True
if error:
logger.info('\n')
else:
logger.info('Settings check: ok.\n\n')
return not error
| 5,344,464
|
def generate_extension(project=None, outdir=None, *,
_project_from_raw=info.Project.from_raw,
_abspath=os.path.abspath,
_projfiles=None,
_gen=None,
):
"""Produce all needed files to build an extension from the given root."""
project = _project_from_raw(project)
# No need to validate.
if outdir:
outdir = _abspath(outdir)
else:
outdir = os.path.join(project.root, OUT_DIR)
if _projfiles is None:
_projfiles = _get_project_files(project.root)
return (_gen or _generate)(
project.root,
project.cfg,
_projfiles,
outdir,
)
| 5,344,465
|
def apply_gradient_descent(var_list, obj, learning_rate = 0.01):
"""
Sets up the gradient descent optimizer
Args:
var_list: List of variables to optimizer over.
obj: Node of the objective to minimize
Notes:
learning_rate: What learning rate to run with. (Default = ``0.01``) Set with ``LR``
"""
    back_prop = tf.train.GradientDescentOptimizer(
        learning_rate=learning_rate,
        name='gradient_descent').minimize(loss=obj, var_list=var_list)
    return back_prop
| 5,344,466
|
def save_to_csv(dealtime,number,vercode, amount):
"""
写入内容到csv
"""
    dealtime = dealtime.split(' ')[0]  # keep only the date part (year-month-day)
number = number + '\t'
vercode = vercode + '\t'
amount = amount[0]
data = (dealtime, number, vercode, amount)
    file_path = strftime("jd-assistant_%Y_%m.csv")  # file name based on the current system time
    # check whether the file already exists
    if os.path.exists(file_path):
        # process the data and append it
with open(file_path, 'a+', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(data)
else:
        # create the file with a header row, then append the data
header = ('下单时间', '订单号', '验证码', '实付金额')
with open(file_path, 'a+', newline='') as f:
writer = csv.writer(f)
writer.writerow(header)
with open(file_path, 'a+', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(data)
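# A minimal usage sketch with made-up values; assumes the module-level imports
# (csv, os, strftime) that save_to_csv relies on are present. Note that the
# amount is passed as a list and only its first element is written.
save_to_csv('2024-01-15 10:30:00', '123456789', '8888', ['99.99'])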
| 5,344,467
|
def make_cls_accept_cls_annotated_deps(cls: type[T]) -> type[T]:
"""
Make class `cls` accept class-annotated dependencies, performing following modifications:
- Update `__init__` function to set any class-annotated dependencies as instance attributes
- Update `__signature__` attribute to indicate to FastAPI what arguments should be passed to the initializer
"""
old_init: Callable[..., None] = cls.__init__
old_signature = inspect.signature(old_init)
old_params = list(old_signature.parameters.values())[1:] # drop `self` param
new_params = [
param
for param in old_params
if param.kind not in {Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD}
]
dep_names: list[str] = []
for name, hint in get_type_hints(cls).items():
if is_classvar(hint):
continue
dep_names.append(name)
new_params.append(
Parameter(
name=name,
kind=Parameter.KEYWORD_ONLY,
annotation=hint,
default=getattr(cls, name, Ellipsis),
)
)
new_signature = old_signature.replace(parameters=new_params)
def new_init(self: T, *args, **kwargs) -> None:
for dep_name in dep_names:
dep_value = kwargs.pop(dep_name)
setattr(self, dep_name, dep_value)
old_init(self, *args, **kwargs)
setattr(cls, "__init__", new_init)
setattr(cls, "__signature__", new_signature)
return cls
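# A minimal usage sketch (hypothetical class and attribute names), assuming the
# helper above and the imports it relies on (inspect, typing, is_classvar) are
# available. A plain class-level annotation becomes a keyword-only constructor
# argument that is set as an instance attribute.
class Greeter:
    prefix: str  # class-annotated dependency

    def __init__(self) -> None:
        pass

    def greet(self, name: str) -> str:
        return f"{self.prefix} {name}"

Greeter = make_cls_accept_cls_annotated_deps(Greeter)
g = Greeter(prefix="Hello,")
print(g.greet("world"))  # -> "Hello, world"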
| 5,344,468
|
def getReviewRedirect(entity, params):
"""Returns the redirect to review the specified entity.
"""
return '/%s/review/%s' % (
params['url_name'], entity.key().id_or_name())
| 5,344,469
|
def leapfrog_step(state, target_log_prob_fn, kinetic_energy_fn, step_size, rng=None):
"""Single step of leapfrog.
Notes
=====
    The canonical distribution is related to the energy of the system by

        p(p, theta) = (1/Z) * exp(-H(theta, p) / T)

    For now, we assume that the kinetic energy takes the form

        K(p) = sum_i p_i**2 / (2 * m_i)
"""
del rng
p, q, q_grad = state.momentum, state.state, state.state_grads
p_half = tree_util.tree_multimap(lambda p, qg: p + 0.5 * step_size * qg, p, q_grad)
_, grad_p_half = utils.call_fn_value_and_grad(kinetic_energy_fn, p_half)
q_full = tree_util.tree_multimap(lambda q, ph: q + step_size * ph, q, grad_p_half)
logprob, q_full_grad = utils.call_fn_value_and_grad(target_log_prob_fn, q_full)
p_full = tree_util.tree_multimap(lambda ph, qg: ph + 0.5 * step_size * qg, p_half,
q_full_grad)
return IntegratorState(q_full, q_full_grad, logprob, p_full)
| 5,344,470
|
def test_atomic_string_max_length_1_nistxml_sv_iv_atomic_string_max_length_2_4(mode, save_output, output_format):
"""
Type atomic/string is restricted by facet maxLength with value 969.
"""
assert_bindings(
schema="nistData/atomic/string/Schema+Instance/NISTSchema-SV-IV-atomic-string-maxLength-2.xsd",
instance="nistData/atomic/string/Schema+Instance/NISTXML-SV-IV-atomic-string-maxLength-2-4.xml",
class_name="NistschemaSvIvAtomicStringMaxLength2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 5,344,471
|
def get_plugins_json(url: str = "https://repobee.org/plugins.json") -> dict:
"""Fetch and parse the plugins.json file.
Args:
url: URL to the plugins.json file.
Returns:
A dictionary with the contents of the plugins.json file.
"""
resp = requests.get(url)
if resp.status_code != 200:
plug.log.error(resp.content.decode("utf8"))
raise plug.PlugError(f"could not fetch plugins.json from '{url}'")
return resp.json()
| 5,344,472
|
def add_available_prefixes(parent, prefix_list):
"""
Create fake Prefix objects for all unallocated space within a prefix.
"""
# Find all unallocated space
available_prefixes = IPSet(parent) ^ IPSet([p.prefix for p in prefix_list])
available_prefixes = [Prefix(prefix=p) for p in available_prefixes.iter_cidrs()]
# Concatenate and sort complete list of children
prefix_list = list(prefix_list) + available_prefixes
prefix_list.sort(key=lambda p: p.prefix)
return prefix_list
| 5,344,473
|
def isready() -> bool:
"""Is the embedded R ready for use."""
INITIALIZED = RPY_R_Status.INITIALIZED
return bool(
rpy2_embeddedR_isinitialized == INITIALIZED.value
)
| 5,344,474
|
def yolo2_loss(args, anchors, num_classes, label_smoothing=0, use_crossentropy_loss=False, use_crossentropy_obj_loss=False, rescore_confidence=False):
"""YOLOv2 loss function.
Parameters
----------
yolo_output : tensor
Final convolutional layer features.
true_boxes : tensor
Ground truth boxes tensor with shape [batch, num_true_boxes, 5]
containing box x_center, y_center, width, height, and class.
y_true : array
output of preprocess_true_boxes, with shape [conv_height, conv_width, num_anchors, 6]
anchors : tensor
Anchor boxes for model.
num_classes : int
Number of object classes.
rescore_confidence : bool, default=False
If true then set confidence target to IOU of best predicted box with
the closest matching ground truth box.
Returns
-------
total_loss : float
total mean YOLOv2 loss across minibatch
"""
(yolo_output, true_boxes, y_true) = args
num_anchors = len(anchors)
yolo_output_shape = K.shape(yolo_output)
input_shape = yolo_output_shape[1:3] * 32
object_scale = 5
no_object_scale = 1
class_scale = 1
coordinates_scale = 1
object_mask = y_true[..., 4:5]
pred_xy, pred_wh, pred_confidence, pred_class_prob = yolo2_head(
yolo_output, anchors, num_classes, input_shape)
# Unadjusted box predictions for loss.
# TODO: Remove extra computation shared with yolo2_head.
batch_size = yolo_output_shape[0] # batch size, tensor
batch_size_f = K.cast(batch_size, K.dtype(yolo_output))
feats = K.reshape(yolo_output, [
-1, yolo_output_shape[1], yolo_output_shape[2], num_anchors,
num_classes + 5
])
pred_boxes = K.concatenate(
(K.sigmoid(feats[..., 0:2]), feats[..., 2:4]), axis=-1)
# TODO: Adjust predictions by image width/height for non-square images?
# IOUs may be off due to different aspect ratio.
# Expand pred x,y,w,h to allow comparison with ground truth.
# batch, conv_height, conv_width, num_anchors, num_true_boxes, box_params
pred_xy = K.expand_dims(pred_xy, 4)
pred_wh = K.expand_dims(pred_wh, 4)
pred_wh_half = pred_wh / 2.
pred_mins = pred_xy - pred_wh_half
pred_maxes = pred_xy + pred_wh_half
true_boxes_shape = K.shape(true_boxes)
# batch, conv_height, conv_width, num_anchors, num_true_boxes, box_params
true_boxes = K.reshape(true_boxes, [
true_boxes_shape[0], 1, 1, 1, true_boxes_shape[1], true_boxes_shape[2]
])
true_xy = true_boxes[..., 0:2]
true_wh = true_boxes[..., 2:4]
# Find IOU of each predicted box with each ground truth box.
true_wh_half = true_wh / 2.
true_mins = true_xy - true_wh_half
true_maxes = true_xy + true_wh_half
intersect_mins = K.maximum(pred_mins, true_mins)
intersect_maxes = K.minimum(pred_maxes, true_maxes)
intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]
pred_areas = pred_wh[..., 0] * pred_wh[..., 1]
true_areas = true_wh[..., 0] * true_wh[..., 1]
union_areas = pred_areas + true_areas - intersect_areas
iou_scores = intersect_areas / union_areas
# Best IOUs for each location.
best_ious = K.max(iou_scores, axis=4) # Best IOU scores.
best_ious = K.expand_dims(best_ious)
# A detector has found an object if IOU > thresh for some true box.
object_detections = K.cast(best_ious > 0.6, K.dtype(best_ious))
# TODO: Darknet region training includes extra coordinate loss for early
# training steps to encourage predictions to match anchor priors.
# Determine confidence weights from object and no_object weights.
# NOTE: YOLOv2 does not use binary cross-entropy. Here we try it.
no_object_weights = (no_object_scale * (1 - object_detections) *
(1 - object_mask))
if use_crossentropy_obj_loss:
no_objects_loss = no_object_weights * K.binary_crossentropy(K.zeros(K.shape(pred_confidence)), pred_confidence, from_logits=False)
if rescore_confidence:
objects_loss = (object_scale * object_mask *
K.binary_crossentropy(best_ious, pred_confidence, from_logits=False))
else:
objects_loss = (object_scale * object_mask *
K.binary_crossentropy(K.ones(K.shape(pred_confidence)), pred_confidence, from_logits=False))
else:
no_objects_loss = no_object_weights * K.square(-pred_confidence)
if rescore_confidence:
objects_loss = (object_scale * object_mask *
K.square(best_ious - pred_confidence))
else:
objects_loss = (object_scale * object_mask *
K.square(1 - pred_confidence))
confidence_loss = objects_loss + no_objects_loss
# Classification loss for matching detections.
# NOTE: YOLOv2 does not use categorical cross-entropy loss.
# Here we try it.
matching_classes = K.cast(y_true[..., 5], 'int32')
matching_classes = K.one_hot(matching_classes, num_classes)
if label_smoothing:
matching_classes = _smooth_labels(matching_classes, label_smoothing)
if use_crossentropy_loss:
classification_loss = (class_scale * object_mask *
K.expand_dims(K.categorical_crossentropy(matching_classes, pred_class_prob, from_logits=False), axis=-1))
else:
classification_loss = (class_scale * object_mask *
K.square(matching_classes - pred_class_prob))
# Coordinate loss for matching detection boxes.
matching_boxes = y_true[..., 0:4]
coordinates_loss = (coordinates_scale * object_mask *
K.square(matching_boxes - pred_boxes))
confidence_loss_sum = K.sum(confidence_loss) / batch_size_f
classification_loss_sum = K.sum(classification_loss) / batch_size_f
coordinates_loss_sum = K.sum(coordinates_loss) / batch_size_f
total_loss = 0.5 * (
confidence_loss_sum + classification_loss_sum + coordinates_loss_sum)
# Fit for tf 2.0.0 loss shape
total_loss = K.expand_dims(total_loss, axis=-1)
return total_loss, coordinates_loss_sum, confidence_loss_sum, classification_loss_sum
| 5,344,475
|
def create_compressed_model(model: tf.keras.Model,
config: NNCFConfig,
compression_state: Optional[Dict[str, Any]] = None) \
-> Tuple[CompressionAlgorithmController, tf.keras.Model]:
"""
The main function used to produce a model ready for compression fine-tuning
from an original TensorFlow Keras model and a configuration object.
:param model: The original model. Should have its parameters already loaded
from a checkpoint or another source.
:param config: A configuration object used to determine the exact compression
modifications to be applied to the model.
:param compression_state: compression state to unambiguously restore the compressed model.
Includes builder and controller states. If it is specified, trainable parameter initialization will be skipped
during building.
:return: A tuple (compression_ctrl, compressed_model) where
- compression_ctrl: The controller of the compression algorithm.
- compressed_model: The model with additional modifications
necessary to enable algorithm-specific compression during fine-tuning.
"""
model = get_built_model(model, config)
original_model_accuracy = None
if is_accuracy_aware_training(config, compression_config_passed=True):
if config.has_extra_struct(ModelEvaluationArgs):
evaluation_args = config.get_extra_struct(ModelEvaluationArgs)
original_model_accuracy = evaluation_args.eval_fn(model)
builder = create_compression_algorithm_builder(config, should_init=not compression_state)
if compression_state:
builder.load_state(compression_state[BaseController.BUILDER_STATE])
compressed_model = builder.apply_to(model)
compression_ctrl = builder.build_controller(compressed_model)
compressed_model.original_model_accuracy = original_model_accuracy
if isinstance(compressed_model, tf.keras.Model):
compressed_model.accuracy_aware_fit = types.MethodType(accuracy_aware_fit, compressed_model)
return compression_ctrl, compressed_model
| 5,344,476
|
def load_misc_config():
"""Load misc configuration.
Returns: Misc object for misc config.
"""
return Misc(config.load_config('misc.yaml'))
| 5,344,477
|
async def test_wrong_device_now_has_our_ip(hass: HomeAssistant) -> None:
"""Test setup is retried when the wrong device is found."""
bulb = _mocked_wizlight(None, None, FAKE_SOCKET)
bulb.mac = "dddddddddddd"
_, entry = await async_setup_integration(hass, wizlight=bulb)
assert entry.state is config_entries.ConfigEntryState.SETUP_RETRY
| 5,344,478
|
def filter_activations_remove_neurons(X, neurons_to_remove):
"""
Filter activations so that they do not contain specific neurons.
.. note::
The returned value is a view, so modifying it will modify the original
matrix.
Parameters
----------
X : numpy.ndarray
Numpy Matrix of size [``NUM_TOKENS`` x ``NUM_NEURONS``]. Usually the
output of ``interpretation.utils.create_tensors``
neurons_to_remove : list or numpy.ndarray
List of neurons to remove
Returns
-------
filtered_X : numpy.ndarray view
Numpy Matrix of size [``NUM_TOKENS`` x ``NUM_NEURONS - len(neurons_to_remove)``]
"""
neurons_to_keep = np.arange(X.shape[1])
neurons_to_keep[neurons_to_remove] = -1
neurons_to_keep = np.where(neurons_to_keep != -1)[0]
return X[:, neurons_to_keep]
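# A minimal usage sketch, assuming numpy is imported as np alongside the function above.
import numpy as np
X = np.random.rand(10, 768)                       # 10 tokens x 768 neurons
filtered = filter_activations_remove_neurons(X, [0, 5, 10])
print(filtered.shape)                             # (10, 765)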
| 5,344,479
|
def adaptive_threshold(im, block_size, constant, mode=cv2.THRESH_BINARY):
"""
Performs an adaptive threshold on an image
Uses cv2.ADAPTIVE_THRESH_GAUSSIAN_C:
threshold value is the weighted sum of neighbourhood values where
weights are a gaussian window.
Uses cv2.THRESH_BINARY:
Pixels below the threshold set to black
Pixels above the threshold set to white
Parameters
----------
    im: numpy array containing a single-channel (grayscale) image
    block_size: the size of the neighbourhood area (must be an odd integer > 1)
    constant: subtracted from the weighted sum
    mode: thresholding mode passed to cv2 (default cv2.THRESH_BINARY)
"""
out = cv2.adaptiveThreshold(
im,
255,
cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
mode,
block_size,
constant
)
return out
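# A minimal usage sketch with a synthetic grayscale image; assumes cv2 and numpy
# are installed and the function above is in scope.
import numpy as np
img = (np.random.rand(100, 100) * 255).astype(np.uint8)
binary = adaptive_threshold(img, block_size=11, constant=2)
print(binary.shape, binary.dtype)  # (100, 100) uint8, pixel values 0 or 255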
| 5,344,480
|
def get_loss(stochastic, variance_regularizer):
"""Get appropriate loss function for training.
Parameters
----------
stochastic : bool
determines if policy to be learned is deterministic or stochastic
variance_regularizer : float
regularization hyperparameter to penalize high variance policies
Returns
-------
Keras loss function to use for imitation learning.
"""
if stochastic:
return negative_log_likelihood_loss(variance_regularizer)
else:
return tf.keras.losses.mean_squared_error
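# A minimal sketch of the deterministic branch, which simply returns Keras MSE;
# the stochastic branch relies on the negative_log_likelihood_loss helper defined
# elsewhere in this module. Assumes TensorFlow is installed.
import tensorflow as tf
loss_fn = get_loss(stochastic=False, variance_regularizer=0.0)
y_true = tf.constant([[1.0], [2.0]])
y_pred = tf.constant([[1.5], [1.5]])
print(loss_fn(y_true, y_pred).numpy())  # per-sample squared errors: [0.25 0.25]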
| 5,344,481
|
def simul_growth_ho_amir(nbstart, run_time, params, name):
"""Simulate the Ho and Amir model (Front. in Microbiol. 2015) with inter-initiation per origin adder and
timer from initiation to division
Parameters
----------
nbstart : int
number of cells to simulate
run_time: int
number of iterations
params: dict
experimental parameters
name: str
name of runs
Returns
-------
cells : list of dict
Each element of the list is a cell cycle defined by a
dictionary of features (Lb, Ld etc.)
"""
#initialize birth length and growth rate
L0 = np.exp(np.random.normal(params['Lb_logn_mu'],params['Lb_logn_sigma'],size=nbstart))
tau = np.exp(np.random.normal(params['tau_logn_mu'], params['tau_logn_sigma'], size=nbstart))
#standard value of growth rate. Used to scale the noise appropriately
normval = np.exp(params['tau_logn_mu'])
#initialize the inter-initiation adder (exact procedure doesn't really matter here)
#as all cells start with n_ori = 1, there's no initiation to division adder running
DLi = np.random.normal(params['DLi_mu'], params['DLi_sigma'], size=nbstart)
#time from initiation to division
tid_mu = 90
tid_var = 5
Tid = np.random.normal(tid_mu, tid_var, size=nbstart)
#initialize cell infos as a list of dictionaries. All cells start with n_ori = 1
cells = {}
for x in range(nbstart):
dict1 = {'Lb': L0[x],'L':L0[x], 'gen': str(x), 'tau':tau[x], 'Lt': [[0,L0[x],1]], 'finish': False,
'born':0, 'DLi': [[0,DLi[x]]],'DLdLi': [],'Li':[],'Ti':[],
'numori':1,'Ld':np.nan, 'numori_born':1,'name': name,'mLi':np.nan,
'mLd':np.nan, 'rfact':0.5, 'Tid': [[0,Tid[x]]]}
cells[str(x)] = dict1
for t in range(run_time):
divide_cell = []
for x in cells:
if cells[x]['finish']==False:
#update cell size
cells[x]['L'] = cells[x]['L']*(2**(1/cells[x]['tau']))
cells[x]['Lt'].append([t,cells[x]['L'],cells[x]['numori']])
#increment the most recent inter-initiation adder
cells[x]['DLi'][-1][0] = cells[x]['DLi'][-1][0]+(cells[x]['Lt'][-1][1]-cells[x]['Lt'][-2][1])
#if at least one volume counter since RI is running, increment all of them
if len(cells[x]['DLdLi'])>0:
cells[x]['DLdLi'] = [[k[0]+(cells[x]['Lt'][-1][1]-cells[x]['Lt'][-2][1]),k[1]] for k in cells[x]['DLdLi']]
cells[x]['Tid'] = [[k[0]+1,k[1]] for k in cells[x]['Tid']]
#if a volume counter has reached its limit divide
if len(cells[x]['DLdLi'])>0:
if (cells[x]['numori']>1) and (cells[x]['Tid'][0][0]>cells[x]['Tid'][0][1]):
cells[x]['finish'] = True#tag cell as finished
cells[x]['Ld'] = cells[x]['L']
cells[x]['Td'] = len(cells[x]['Lt'])
cells[x]['Td_abs'] = t
cells[x]['d_Ld_Lb'] = cells[x]['L']-cells[x]['Lb']
#assign the correct adders (the oldest ones) to the cell that just divided
cells[x]['final_DLdLi'] = cells[x]['DLdLi'][0][0]
cells[x]['final_DLi'] = cells[x]['DLi'][0][1]
cells[x]['final_Li'] = cells[x]['Li'][0]
cells[x]['final_Tid'] = cells[x]['Tid'][0][1]
#for each accumulated variable suppress the oldest one
if len(cells[x]['DLdLi'])==1:
cells[x]['DLdLi'] = []
else:
cells[x]['DLdLi'].pop(0)
if len(cells[x]['Tid'])==1:
cells[x]['Tid'] = []
else:
cells[x]['Tid'].pop(0)
if len(cells[x]['DLi'])==1:
cells[x]['DLi'] = []
else:
cells[x]['DLi'].pop(0)
if len(cells[x]['Li'])==1:
cells[x]['Li'] = []
else:
cells[x]['Li'].pop(0)
divide_cell.append(x)
#if the added volume has reached its limit make new RI
if cells[x]['DLi'][-1][0]>cells[x]['DLi'][-1][1]:
#duplicate origin
cells[x]['numori'] = cells[x]['numori']*2
#Version where adder is noisy itself
newdli = cells[x]['numori']*np.random.normal(params['DLi_mu'], params['DLi_sigma'])
cells[x]['DLi'].append([0,newdli])
cells[x]['Li'].append(cells[x]['L'])
                #temporarily store TL_S as absolute time
cells[x]['Ti'].append(t)
#Version where adder itself is noisy
new_dv = cells[x]['numori']*np.exp(np.random.normal(params['DLdLi_logn_mu'], params['DLdLi_logn_sigma']))
cells[x]['DLdLi'].append([0,new_dv])
cells[x]['Tid'].append([0,np.random.normal(tid_mu, tid_var, size=1)])
for x in divide_cell:
#Draw division ratio
rfact = 1/(1+np.random.normal(1,params['div_ratio']))
#Create new cell using mother information
new_tau = np.exp(correlated_normal(np.log(cells[x]['tau']), params['tau_logn_mu'], params['tau_logn_sigma'], params['tau_corr']))
new_Lb = copy.deepcopy(rfact*cells[x]['L'])
new_L = copy.deepcopy(rfact*cells[x]['L'])
new_Lt = [[t,copy.deepcopy(rfact*cells[x]['L']),copy.deepcopy(cells[x]['numori'])/2]]
new_DLi = copy.deepcopy([[rfact*y[0],rfact*y[1]] for y in cells[x]['DLi']])
new_DLdLi = copy.deepcopy([[rfact*y[0],rfact*y[1]] for y in cells[x]['DLdLi']])
new_Tid = copy.deepcopy(cells[x]['Tid'])
new_Li = copy.deepcopy([rfact*y for y in cells[x]['Li']])
new_numori = copy.deepcopy(cells[x]['numori'])/2
mother_initL = copy.deepcopy(cells[x]['final_Li'])/2
mother_Ld = copy.deepcopy(cells[x]['Ld'])
dict1 = {'Lb': new_Lb,'L': new_L, 'gen': str(x)+'B', 'tau': new_tau,'Lt': new_Lt, 'finish': False,
'born':t, 'DLi': new_DLi,'DLdLi': new_DLdLi,'Tid': new_Tid, 'Li':new_Li,'Ti':[], 'numori':new_numori,
'numori_born':copy.deepcopy(new_numori),'Ld':np.nan, 'name': name,'mLi': mother_initL, 'mLd':mother_Ld,
'rfact':rfact}
cells[x+'B'] = copy.deepcopy(dict1)
        #keep oldest timer as final timer and give daughter remaining ones. Calculate initiation time based on cell birth.
TL_S_val = copy.deepcopy(cells[x]['Ti'].pop(0))
cells[x+'B']['Ti'] = copy.deepcopy(cells[x]['Ti'])
cells[x]['Ti'] = TL_S_val-copy.deepcopy(cells[x]['born'])
for x in cells:
if len(cells[x]['Li'])>0:
cells[x]['Li'] = np.nan
return cells
| 5,344,482
|
def variable_id(variable):
"""Return variable identification for .dot file"""
if isinstance(variable, FileAccess):
return "a_{}".format(variable.id)
act_id = variable.activation_id
act_id = "global" if act_id == -1 else act_id
return "v_{}_{}".format(act_id, variable.id)
| 5,344,483
|
def node_set_power_state(request, node_id, state, soft=False):
"""Set power state for a given node.
:param request: HTTP request.
:param node_id: The UUID or name of the node.
:param state: the power state to set ['on', 'off', 'reboot'].
:param soft: flag for graceful power 'off' or reboot
:return: node.
http://docs.openstack.org/developer/python-ironicclient/api/ironicclient.v1.node.html#ironicclient.v1.node.NodeManager.set_power_state
"""
return ironicclient(request).node.set_power_state(node_id,
state,
soft)
| 5,344,484
|
def __sympyToC_Grad(exprs: list, doOpts: bool = False) -> str:
""" creates C code from a list of sympy functions (somewhat optimized).
source: https://stackoverflow.com/questions/22665990/optimize-code-generated-by-sympy
and modified """
tmpsyms = sympy.numbered_symbols("tmp")
if doOpts:
symbols, simple = sympy.cse(exprs, symbols=tmpsyms, optimizations="basic", order='none')
else:
symbols, simple = sympy.cse(exprs, symbols=tmpsyms)
c_code = ""
for s in symbols:
c_code += " double " +sympy.ccode(s[0]) + " = " + sympy.ccode(s[1]) + ";\n"
for i,s in enumerate(simple):
c_code += f" out({i}) = " + sympy.ccode(s) + ";\n"
return c_code
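# A minimal sketch: emit C assignments for a small expression list. Assumes sympy is
# installed and that __sympyToC_Grad is called from the same module (the leading
# double underscore is only name-mangled inside class bodies).
import sympy
x, y = sympy.symbols("x y")
exprs = [2 * x + sympy.sin(y), x * y ** 2]
print(__sympyToC_Grad(exprs, doOpts=False))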
| 5,344,485
|
def startServer(mock=True, mockS3=False):
"""
Test cases that communicate with the server should call this
function in their setUpModule() function.
"""
# If the server starts, a database will exist and we can remove it later
dbName = cherrypy.config['database']['uri'].split('/')[-1]
usedDBs[dbName] = True
# By default, this passes "[]" to "plugins", disabling any installed plugins
server = setupServer(mode=ServerMode.TESTING, plugins=enabledPlugins)
if mock:
cherrypy.server.unsubscribe()
cherrypy.engine.start()
# Make server quiet (won't announce start/stop or requests)
cherrypy.config.update({'environment': 'embedded'})
# Log all requests if we asked to do so
if 'cherrypy' in os.environ.get('EXTRADEBUG', '').split():
cherrypy.config.update({'log.screen': True})
logHandler = logging.StreamHandler(sys.stdout)
logHandler.setLevel(logging.DEBUG)
cherrypy.log.error_log.addHandler(logHandler)
# Tell CherryPy to throw exceptions in request handling code
cherrypy.config.update({'request.throw_errors': True})
mockSmtp.start()
if mockS3:
global mockS3Server
mockS3Server = mock_s3.startMockS3Server()
return server
| 5,344,486
|
def get_loans(m='BTC_ETH'):
"""
"""
pass
| 5,344,487
|
def index():
"""User friendly index page at the root of the server
    guides the user to the reports
"""
return render_template('index.html')
| 5,344,488
|
def read_lookup(infile):
"""
-----------------------------------------------------------------------------
Read data from a lookup database.
Inputs:
    infile      [string] Input file containing the lookup database.
Outputs:
[tuple] each element of the tuple is a numpy array. The elements in order are
            x-coordinates, y-coordinates, data value at those coordinates. The
data values are real or complex depending on whether the lookup table
has an 'imag_value' column
-----------------------------------------------------------------------------
"""
if not isinstance(infile, str):
raise TypeError('Input parameter infile must be of string data type')
try:
cols = ascii.read(infile, data_start=1, comment='#')
except IOError:
raise IOError('Could not read the specified file: '+infile)
if 'imag_value' in cols.colnames:
return cols['x'].data, cols['y'].data, cols['real_value'].data+1j*cols['imag_value'].data
else:
return cols['x'].data, cols['y'].data, cols['real_value'].data
| 5,344,489
|
def sum_function(context, nodeset, string):
"""
The dyn:sum function calculates the sum for the nodes passed as the first
argument, where the value of each node is calculated dynamically using an
XPath expression passed as a string as the second argument.
http://www.exslt.org/dyn/functions/sum/index.html
"""
nodeset = nodeset.evaluate_as_nodeset(context)
string = string.evaluate_as_string(context)
try:
expr = parse_xpath(string)
except XPathError:
lines = traceback.format_exception(*sys.exc_info())
lines[:1] = [("Syntax error in XPath expression '%(expr)s', "
"lower-level traceback:\n") % {'expr': string}]
context.processor.warning(''.join(lines))
return datatypes.nodeset()
return sum(map(datatypes.number, _map(context, nodeset, expr)))
| 5,344,490
|
def event_detail(request, id):
""" Return a JSON dict mapping for event given id
"""
event = get_object_or_404(Event, pk=id)
event_dict = {
"success": 1,
"result": [{
"id": event.id,
"title": event.title,
"description": event.description,
"created_date": event.created_date.strftime('%Y/%m/%d'),
"location": event.location
}]
}
return HttpResponse(json.dumps(event_dict),
content_type="application/json")
| 5,344,491
|
def main(args):
"""
Main interface
"""
if args.outdir != '' and not os.path.isdir(args.outdir):
os.makedirs(args.outdir)
    # load metadata {seq_uuid : taxon}
idx, ntaxa = load_meta(args.metadata)
# load clusters
clst = {}
clst['rrna'] = load_uclust(args.uclust, idx)
clst['cds'] = load_membership(args.membership, idx)
# finding core clusters
core = {}
core['rrna'] = get_core_clusters(clst['rrna'], 'rrna', ntaxa,
args.perc_genomes_rrna,
args.max_clusters_rrna,
args.copies_per_genome_rrna)
core['cds'] = get_core_clusters(clst['cds'], 'cds', ntaxa,
args.perc_genomes_cds,
args.max_clusters_cds,
args.copies_per_genome_cds)
# getting seq ids per core cluster
core_ids = {}
core_ids['rrna'] = get_seq_ids_uclust(args.uclust, core['rrna'])
core_ids['cds'] = get_seq_ids(args.membership, core['cds'])
# parsing sequence
## indexing, then writing sequence files per cluster
genes = Fasta(args.nuc_fasta)
clust_idx = parse_seqs(genes, core_ids, args.outdir, ext='fna')
## indexing, then writing sequence files per cluster
genes = Fasta(args.aa_fasta)
clust_idx = parse_seqs(genes, core_ids, args.outdir, ext='faa', allow_missing=True)
# core gene cluster metadata with cluster info
clust2metadata(core_ids, args.metadata, args.membership, clust_idx, args.outdir)
| 5,344,492
|
def test_backup(dbsession, ini_settings):
"""Execute backup script with having our settings content."""
f = NamedTemporaryFile(delete=False)
temp_fname = f.name
f.close()
ini_settings["websauna.backup_script"] = "websauna.tests:backup_script.bash"
ini_settings["backup_test.filename"] = temp_fname
# We have some scoping issues with the dbsession here, make sure we close transaction at the end of the test
with transaction.manager:
init = get_init(dict(__file__=ini_settings["_ini_file"]), ini_settings)
init.run()
testing.setUp(registry=init.config.registry)
# Check we have faux AWS variable to export
secrets = get_secrets(get_current_registry())
assert "aws.access_key_id" in secrets
try:
# This will run the bash script above
backup_site()
# The result should be generated here
assert os.path.exists(temp_fname)
contents = io.open(temp_fname).read()
# test-secrets.ini, AWS access key
assert contents.strip() == "foo"
finally:
testing.tearDown()
| 5,344,493
|
def prepare_state(qubits: list[cirq.Qid], x: int) -> list[cirq.Gate]:
"""Prepare qubits into an initial state.
Args:
qubits: The qubits to prepare.
x: The initial state of the qubits. Must be non-negative.
Returns:
A list of gates to prepare the qubits.
Raises:
ValueError: If `x` is negative.
"""
gates = list()
if size_in_bits(x) > len(qubits):
logging.warning(f"prepare_state: `x` ({x}) cannot fit into {len(qubits)} qubits; some bits will be dropped.")
for q in qubits:
if x % 2:
gates.append(cirq.X(q))
x >>= 1
return gates
| 5,344,494
|
def manage(id):
"""Manage room request."""
room_request = RoomRequest.query.get(id)
if room_request is None:
return abort(404)
return render_template('room_request/manage.html', room_request=room_request)
| 5,344,495
|
def ger(self, y):
"""Computer an outer product between two vectors"""
assert self.dim() == 1 and y.dim() == 1, "Outer product must be on 1D tensors"
return self.view((-1, 1)).matmul(y.view((1, -1)))
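# ger() above is written as a method on a tensor-like wrapper, so this sketch just
# shows the equivalent computation with plain PyTorch tensors.
import torch
a = torch.tensor([1.0, 2.0, 3.0])
b = torch.tensor([4.0, 5.0])
outer = a.view((-1, 1)).matmul(b.view((1, -1)))   # shape (3, 2)
print(torch.allclose(outer, torch.outer(a, b)))   # True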
| 5,344,496
|
def test_resolution_norm():
"""Tests resolution gaussian
"""
p = np.array([0., 0., 1., 0., 0., 1.43, 23867.71, 22311.93, 20739.82])
a, b = np.linspace(-1, 1, 501), np.linspace(-1, 1, 501)
q = np.meshgrid(a, b, sparse=True)
y = functions.resolution(p, q)
integ = simps(simps(y, b), a)
assert (abs(integ - 1.) < 1e-5)
| 5,344,497
|
def interpret(input, debug=False):
"""Downloads data and runs commands as per the Arx file."""
if debug:
log.configure(level='debug')
data = yaml.load(input.read())
task = Task(data)
task.run()
| 5,344,498
|
def delete_request(user_id):  # delete a request
    """
    :param user_id: user_id
    :return: None; deletes the request addressed to the given user
"""
global connect
global cursor
cursor.execute("DELETE FROM Requests WHERE toUserID={0}".format(user_id))
connect.commit()
| 5,344,499
|