content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def wrapper_configuration_get():  # noqa: E501
    """Get configuration details on the current wrapper configuration.
    # noqa: E501
    :rtype: object
    """
    # Auto-generated server stub: replace with real configuration lookup.
    return 'do some magic!'
def controlMotorPID(controller, error, com):
    """send data to ESP32 with desired steps calculated by PID control."""
    # Feed the newest error sample into the PID controller, then push the
    # resulting control parameter out over the serial link.
    controller.updateData(error)
    control_value = controller.getControlParam()
    sendData(control_value, com)
def update_qgs():
    """Generate QGIS project files.

    Returns a dict with either a success message plus the writer log, or an
    'error' key carrying the exception text.
    """
    try:
        # Build the ConfigGenerator and let it write out the project files.
        writer_log = config_generator().write_qgs()
    except Exception as exc:
        return {
            'error': str(exc)
        }
    return {
        'message': "Finished writing QGIS project files",
        'log': writer_log
    }
def time_evolution_derivatives(
    hamiltonian: pyquil.paulis.PauliSum,
    time: float,
    method: str = "Trotter",
    trotter_order: int = 1,
) -> Tuple[List[circuits.Circuit], List[float]]:
    """Generates derivative circuits for the time evolution operator defined in
    function time_evolution
    Args:
        hamiltonian: The Hamiltonian to be evolved under. It should contain numeric
            coefficients, symbolic expressions aren't supported.
        time: time duration of the evolution.
        method: time evolution method. Currently the only option is 'Trotter'.
        trotter_order: order of Trotter evolution
    Returns:
        A tuple of (derivative circuits, multiplicative factors): one circuit
        and one factor per Hamiltonian term and per +/- shift direction.
    Raises:
        ValueError: if ``method`` is not 'Trotter' or a term coefficient is
            not numerical.
    """
    if method != "Trotter":
        raise ValueError(f"The method {method} is currently not supported.")
    single_trotter_derivatives = []
    # Each term yields two derivative circuits, one per shift sign
    # (parameter-shift-rule style +/- pi/(4r) offsets).
    factors = [1.0, -1.0]
    output_factors = []
    if isinstance(hamiltonian, QubitOperator):
        terms = list(hamiltonian.get_operators())
    elif isinstance(hamiltonian, pyquil.paulis.PauliSum):
        warnings.warn(
            "PauliSum as an input to time_evolution_derivatives will be depreciated, "
            "please change to QubitOperator instead.",
            DeprecationWarning,
        )
        terms = hamiltonian.terms
    # NOTE(review): if ``hamiltonian`` is neither a QubitOperator nor a
    # PauliSum, ``terms`` is never bound and the loop below raises
    # NameError — confirm whether other types should be rejected explicitly.
    for i, term_1 in enumerate(terms):
        for factor in factors:
            output = circuits.Circuit()
            try:
                # r is the (real) coefficient of the term, scaled per
                # Trotter step.
                if isinstance(term_1, QubitOperator):
                    r = list(term_1.terms.values())[0] / trotter_order
                else:
                    r = complex(term_1.coefficient).real / trotter_order
            except TypeError:
                raise ValueError(
                    "Term coefficients need to be numerical. "
                    f"Offending term: {term_1}"
                )
            output_factors.append(r * factor)
            shift = factor * (np.pi / (4.0 * r))
            # Build one full Trotter step, shifting only term i's duration.
            for j, term_2 in enumerate(terms):
                output += time_evolution_for_term(
                    term_2,
                    (time + shift) / trotter_order if i == j else time / trotter_order,
                )
            single_trotter_derivatives.append(output)
    if trotter_order > 1:
        # For higher Trotter orders, insert the shifted step at every
        # possible position within the repeated unshifted sequence.
        output_circuits = []
        final_factors = []
        repeated_circuit = time_evolution(
            hamiltonian, time, method="Trotter", trotter_order=1
        )
        for position in range(trotter_order):
            for factor, different_circuit in zip(
                output_factors, single_trotter_derivatives
            ):
                output_circuits.append(
                    _generate_circuit_sequence(
                        repeated_circuit, different_circuit, trotter_order, position
                    )
                )
                final_factors.append(factor)
        return output_circuits, final_factors
    else:
        return single_trotter_derivatives, output_factors
def had_cells_strength(strmfunc, min_plev=None, max_plev=None, lat_str=LAT_STR,
                       lev_str=LEV_STR):
    """Location and signed magnitude of both Hadley cell centers.

    Returns a DataArray named ``cell_strength`` over a ``cell`` dimension
    labeled ``had_cell_sh`` / ``had_cell_nh``, with the cell-center latitude
    and level attached as coordinates.
    """
    lat = strmfunc[lat_str]
    # Sometimes the winter Ferrel cell is stronger than the summer Hadley cell.
    # So find the global extremal negative and positive values as well as the
    # opposite-signed cell on either side.  The Hadley cells will be the two of
    # these whose centers are nearest the equator.
    cell_pos_max_strength = had_cell_strength(
        strmfunc, min_plev=min_plev, max_plev=max_plev, lev_str=lev_str,
    )
    lat_pos_max = cell_pos_max_strength.coords[lat_str]
    # Negating the streamfunction turns "find strongest negative cell" into a
    # maximum search; negate the result back to restore the sign.
    cell_south_of_pos_strength = -1*had_cell_strength(
        -1*strmfunc.where(lat < lat_pos_max),
        min_plev=min_plev, max_plev=max_plev, lev_str=lev_str,
    )
    cell_north_of_pos_strength = -1*had_cell_strength(
        -1*strmfunc.where(lat > lat_pos_max),
        min_plev=min_plev, max_plev=max_plev, lev_str=lev_str,
    )
    cell_neg_max_strength = had_cell_strength(
        -1*strmfunc,
        min_plev=min_plev, max_plev=max_plev, lev_str=lev_str,
    )
    lat_neg_max = cell_neg_max_strength.coords[lat_str]
    cell_south_of_neg_strength = had_cell_strength(
        strmfunc.where(lat < lat_neg_max),
        min_plev=min_plev, max_plev=max_plev, lev_str=lev_str,
    )
    cell_north_of_neg_strength = had_cell_strength(
        strmfunc.where(lat > lat_neg_max),
        min_plev=min_plev, max_plev=max_plev, lev_str=lev_str,
    )
    # The above procedure generates 6 cells, of which 2 are duplicates.  Now,
    # get rid of the duplicates.
    strengths = [
        cell_pos_max_strength,
        cell_south_of_pos_strength,
        cell_north_of_pos_strength,
        cell_neg_max_strength,
        cell_south_of_neg_strength,
        cell_north_of_neg_strength,
    ]
    cell_strengths = xr.concat(strengths, dim=lat_str, coords=[lev_str])
    # NOTE(review): this uses the module constant LAT_STR rather than the
    # ``lat_str`` parameter — confirm this is intentional for callers that
    # pass a non-default latitude name.
    dupes = cell_strengths.get_index(LAT_STR).duplicated()
    cell_strengths = cell_strengths[~dupes]
    # Pick the two cells closest to the equator.
    center_lats = cell_strengths[lat_str]
    hc_strengths = cell_strengths.sortby(np.abs(center_lats))[:2]
    # Order the cells from south to north.
    hc_strengths = hc_strengths.sortby(hc_strengths[lat_str])
    # Create DataArray with one label for each cell, the cell strengths
    # as the values, and the cell center latitudes and levels as coords.
    coords_out = {"cell": ["had_cell_sh", "had_cell_nh"]}
    ds_strengths = xr.Dataset(coords=coords_out)
    arr_lat_center = xr.DataArray(hc_strengths[lat_str].values,
                                  dims=["cell"], coords=coords_out)
    arr_lev_center = xr.DataArray(hc_strengths[lev_str].values,
                                  dims=["cell"], coords=coords_out)
    arr_strength = xr.DataArray(hc_strengths.values,
                                dims=["cell"], coords=coords_out)
    ds_strengths.coords[lat_str] = arr_lat_center
    ds_strengths.coords[lev_str] = arr_lev_center
    ds_strengths["cell_strength"] = arr_strength
    return ds_strengths["cell_strength"]
def d_enter_waste_cooler(W_mass, rho_waste, w_drift):
    """
    Calculates the tube's diameter of enter waste to waste cooler.
    Parameters
    ----------
    W_mass : float
        The mass flow rate of waste, [kg/s]
    rho_waste : float
        The density of liquid at boilling temperature, [kg/m**3]
    w_drift :float
        The speed of steam at the tube, [m/s]
    Returns
    -------
    d_enter_waste_cooler : float
        The tube's diameter of enter waste to waste cooler, [m]
    References
    ----------
    &&&
    """
    # BUG FIX: the original read ``(0,785*rho_waste*w_drift)`` — a
    # European-style decimal comma, which Python parses as a tuple and which
    # raised TypeError on division.  0.785 ~= pi/4 from the pipe
    # cross-section area formula A = (pi/4) * d**2.
    # NOTE(review): dimensionally this returns d**2 (m**2); confirm whether
    # a square root is expected for a true diameter.
    return W_mass/(0.785*rho_waste*w_drift)
def install_program():
    """
    Installs user data on the system.
    Creates the directory and asks for input for ground observer info.
    """
    # NOTE: Python 2 only (print statement, raw_input, urllib2).
    print """
    Would you like to allow issTracker to install on your computer?
    It will create on your computer:
    - a file to save TLE info for your satellites
    - a file to save longitude and latitude for your ground station
    - a directory within your home directory where these files will be saved
    """
    # Require explicit consent before touching the filesystem.
    decision = raw_input('Do you want to install issTracker? (y/n): ')
    if decision != 'y':
        print 'Not installing. Terminating issTracker.'
        exit(1)
    print '\nInstalling issTracker\n'
    # make the directory
    if not os.path.isdir(DATA_DIR):
        try:
            os.mkdir(DATA_DIR)
        except OSError:
            print 'There was an error installing the program.'
            exit(1)
    # Create each data file only if it does not already exist, so a re-run
    # never clobbers prior user data.
    if not os.path.exists(GRND_FILE):
        update_grnd()
    if not os.path.exists(TLE_FILE):
        try:
            update_tle()
        except (ValueError, urllib2.URLError):
            print 'Unable to download a TLE. Check your network connection'
            exit(1)
    if not os.path.exists(CURRENT_SAT_FILE):
        save_current(ISS_FULL_NAME, ISS_NICKNAME)
    return
def test_file__init__():
    """ Test with __init__.py from fire project:
    -------------------------------------------------------------------------------
    Language files blank comment code
    -------------------------------------------------------------------------------
    Python 1 4 14 6
    -------------------------------------------------------------------------------
    """
    # Run the extraction over the fire fixture and check the line tallies
    # against the cloc-style table above.
    project = Project('data/fire', 3000, 'test-output.docx')
    project.run()
    stats = project.info
    assert stats.lines_extracted == 6
    assert stats.lines_skipped_blank == 4
    assert stats.lines_skipped_comments == 14
def error(msg):
    """Write a log message at the ERROR level."""
    # Thin convenience wrapper around the module-wide exekall logger.
    EXEKALL_LOGGER.error(msg)
def test_bam_compare_arguments():
    """
    Test minimal command line args for bamCoverage. The ratio
    between the same file is taken, therefore, the expected value
    is 1.0 for all bins.
    """
    outfile = '/tmp/test_file.bg'
    expected = ['3R\t0\t200\t1\n']
    # Exercise both BAM and CRAM inputs against themselves.
    for fname in [BAMFILE_B, CRAMFILE_B]:
        args = "--bamfile1 {} --bamfile2 {} " \
               "-o {} -p 1 --outFileFormat bedgraph --operation ratio".format(fname, fname, outfile).split()
        bam_comp.main(args)
        with open(outfile, 'r') as bedgraph:
            resp = bedgraph.readlines()
        assert_equal(resp, expected)
        unlink(outfile)
async def finalize_round(request, persistence):
    """Finalize an owned round."""
    game_id = request.match_info['game_id']
    round_name = request.match_info['round_name']
    # Only the game's moderator may finalize a round.
    session = await get_session(request)
    if not client_owns_game(game_id, session, persistence):
        return json_response(
            {'error': 'The user is not the moderator of this game.'}, status=403)
    # Translate each persistence-layer failure into a distinct HTTP error.
    try:
        persistence.finalize_round(game_id, round_name)
    except NoSuchRound:
        return json_response({'error': 'Round does not exist.'}, status=404)
    except NoActivePoll:
        return json_response(
            {'error': 'There is no active poll in this round.'}, status=404)
    except RoundFinalized:
        return json_response(
            {'error': 'This round has already been finalized.'}, status=409)
    return json_response({'game': persistence.serialize_game(game_id)})
def update_cart_quantity(cart):
    """Update the total quantity in cart."""
    # Aggregate may return None when the cart has no lines; treat as zero.
    aggregated = cart.lines.aggregate(total_quantity=Sum('quantity'))
    cart.quantity = aggregated['total_quantity'] or 0
    cart.save(update_fields=['quantity'])
def create_terms_key_terms_page(page, portal_topic):
    """
    Parses the terms & definitions on a given key terms page,
    and creates new glossary term snippets out of them.
    """
    # Only 'full_width_text' stream blocks hold term content.
    for block in page.specific.content:
        if block.block_type != 'full_width_text':
            continue
        for child in block.value:
            if child.block_type == 'content':
                contents = bs(child.value.source, 'html.parser')
                # Each <h3> heading marks the start of a term.
                headings = contents.find_all('h3')
                for h3 in headings:
                    if h3.text:
                        # Re-parse the heading contents to pull out the
                        # term text from its inner <div>.
                        h3_contents = bs(h3.contents[0], 'html.parser')
                        div = h3_contents.find('div')
                        term = div.text.strip()
                        definition = ''
                        # The definition is everything between this <h3>
                        # and the next one.
                        sibling = h3.next_sibling
                        while sibling and sibling.name != 'h3':
                            if sibling.text:
                                # NOTE(review): bs4's encode() returns bytes
                                # on Python 3, which cannot be concatenated
                                # to a str — confirm this runs on Python 2
                                # or that decode_contents is intended.
                                definition += sibling.encode(formatter="html")
                            sibling = sibling.next_sibling
                        glossary_term = GlossaryTerm(
                            name_en=term,
                            definition_en=definition,
                            portal_topic=portal_topic,
                            anchor_en=slugify(term),
                        )
                        glossary_term.save()
def _arrs_to_ds(arrs, names=None):
    """Combine DataArrays into a single Dataset."""
    # Default the variable names to stringified positional indices.
    if names is None:
        names = map(str, range(len(arrs)))
    return xr.Dataset(data_vars=dict(zip(names, arrs)))
def formatTimeFromNow(secs=0):
    """ Properly Format Time that is `x` seconds in the future
    :param int secs: Seconds to go in the future (`x>0`) or the
        past (`x<0`)
    :return: Properly formated time for Graphene (`%Y-%m-%dT%H:%M:%S`)
    :rtype: str
    """
    # Offset the current UNIX time, then render it in UTC.
    target_timestamp = time.time() + int(secs)
    return datetime.utcfromtimestamp(target_timestamp).strftime(timeFormat)
def exitFlow(x, n_classes):
    """ Create the exit flow section
        x         : input to the exit flow section
        n_classes : number of output classes
    """
    def classifier(x, n_classes):
        """ The output classifier
            x         : input to the classifier
            n_classes : number of output classes
        """
        # Global Average Pooling will flatten the 10x10 feature maps into 1D
        # feature maps
        x = GlobalAveragePooling2D()(x)
        # Fully connected output layer (classification)
        x = Dense(n_classes, activation='softmax')(x)
        return x
    # Remember the input
    shortcut = x
    # Strided convolution to double number of filters in identity link to
    # match output of residual block for the add operation (projection shortcut)
    shortcut = Conv2D(1024, (1, 1), strides=(2, 2), padding='same')(shortcut)
    shortcut = BatchNormalization()(shortcut)
    # First Depthwise Separable Convolution
    # Dimensionality reduction - reduce number of filters
    x = SeparableConv2D(728, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    # Second Depthwise Separable Convolution
    # Dimensionality restoration
    x = SeparableConv2D(1024, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    # Create pooled feature maps, reduce size by 75%
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    # Add the projection shortcut to the output of the pooling layer
    x = Add()([x, shortcut])
    # Third Depthwise Separable Convolution
    # NOTE(review): 1556 filters is unusual — the Xception reference
    # architecture uses 1536 here; confirm this is intentional.
    x = SeparableConv2D(1556, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    # Fourth Depthwise Separable Convolution
    x = SeparableConv2D(2048, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    # Create classifier section
    x = classifier(x, n_classes)
    return x
def get_files_for_variable(cmake_path, variables, variable):
    """ Returns the path values associated with |variable| and relative to the
        |cmake_path| directory. """
    if variable not in variables:
        raise Exception('Variable %s does not exist' % variable)
    # Cmake file directory.
    cmake_dirname = os.path.dirname(cmake_path) + '/'
    # Return path values relative to the cmake file directory.
    # Example 1:
    #   cmake file   = "/path/to/libcef_dll/CMakeLists.txt"
    #   include path = "/path/to/libcef_dll/wrapper/cef_browser_info_map.h"
    #   return path  = "wrapper/cef_browser_info_map.h"
    # Example 2:
    #   cmake file   = "/path/to/libcef_dll/CMakeLists.txt"
    #   include path = "/path/to/include/internal/cef_export.h"
    #   return path  = "../include/internal/cef_export.h"
    relative_paths = []
    for value in variables[variable]:
        absolute = os.path.join(cef_dir, value)
        relative_paths.append(
            normalize_path(os.path.relpath(absolute, cmake_dirname)))
    return relative_paths
def SingleChannelDDR4_2400(size: Optional[str] = "1024MB") -> SingleChannel:
    """
    A single channel DDR4_2400.

    :param size: The size of the memory system. Default value of 1024MB.
    """
    # Docstring previously said "DDR3_2400", but the constructed device is
    # the DDR4_4Gb_x8_2400 memory interface.
    return SingleChannel("DDR4_4Gb_x8_2400", size)
def convert_Pa_to_dBSPL(pa):
    """ Converts units of Pa to dB re 20e-6 Pa (dB SPL) """
    # 20 µPa is the standard reference pressure for sound pressure level.
    reference_pressure = 20e-6
    return 20. * np.log10(pa / reference_pressure)
def isPalindromic(seq):
    """
    is a sequence palindromic?
    returns True or False
    """
    # A sequence is palindromic when it equals its own reverse complement
    # (case-insensitive comparison).
    lowered = seq.lower()
    return rc_expanded(lowered) == lowered
def get_corpus(data_dir):
    """Get list of words in the text.
    Args:
        data_dir: data directory.
    Returns:
        list of str words.
    """
    corpus = []
    # Walk the top level of the directory; subdirectories are ignored.
    for filename in os.listdir(data_dir):
        data_path = os.path.join(data_dir, filename)
        if not os.path.isfile(data_path):
            continue
        with open(data_path, 'r') as f:
            text = f.read().strip('\n')
        corpus.extend(del_useless_char(text))
    return corpus
def randomPairsMatch(n_records_A: int, n_records_B: int, sample_size: int) -> IndicesIterator:
    """
    Return random combinations of indices for record list A and B
    """
    total: int = n_records_A * n_records_B
    # Zero (or falsy) sample size yields no pairs at all.
    if not sample_size:
        return iter([])
    if sample_size >= total:
        # Sample exhausts the space: return every flat index in order.
        chosen = numpy.arange(total)
    else:
        chosen = numpy.array(random.sample(range(total), sample_size))
    # Convert flat indices into (row-in-A, row-in-B) coordinate pairs.
    rows, cols = numpy.unravel_index(chosen, (n_records_A, n_records_B))
    return zip(rows, cols)
def GetScaffoldLengths(genome_fna_fp):
    """ This function gets the lengths of the scaffolds, returns a dict
    Args:
        genome_fna_fp: (str) Path to genome fna file (FASTA)
    Returns:
        Scaffold_To_Length: (dict)
            scaffold_name: (str) -> length (int)
    """
    scaffold_lengths = {}
    fasta_fh = open(genome_fna_fp)
    current_name = ""
    line = fasta_fh.readline().strip()
    # Stop at the first empty (stripped) line, which also covers EOF —
    # mirrors the original reader's behavior.
    while line != "":
        if line[0] == ">":
            # A new header: flush the length of the previous scaffold.
            if current_name != "":
                scaffold_lengths[current_name] = running_length
            if " " in line:
                logging.warning(f"A space found in scaffold name: '{line}'."
                                " This might cause an error.")
                current_name = (line.split(' ')[0])[1:]
                logging.warning(f"Instead using scaffold name {current_name}")
            else:
                current_name = line[1:]
            # Current scaffold length is reset
            running_length = 0
        else:
            running_length += len(line)
        line = fasta_fh.readline().strip()
    fasta_fh.close()
    # Flush the final scaffold, if any.
    if current_name != "":
        scaffold_lengths[current_name] = running_length
    if len(scaffold_lengths.keys()) == 0:
        logging.warning("No Scaffolds found in " + genome_fna_fp)
    return scaffold_lengths
def define_properties(definitions, superprop=None):
    """
    Define BRICK properties

    Recursively adds each property in ``definitions`` to the graph ``G``,
    linking it to ``superprop`` (when given) via rdfs:subPropertyOf.
    """
    if len(definitions) == 0:
        return
    for prop, propdefn in definitions.items():
        # Allow plain string keys as shorthand for BRICK-namespaced terms.
        if isinstance(prop, str):
            prop = BRICK[prop]
        G.add((prop, A, OWL.ObjectProperty))
        if superprop is not None:
            G.add((prop, RDFS.subPropertyOf, superprop))
        # define property types
        prop_types = propdefn.get(A, [])
        assert isinstance(prop_types, list)
        for prop_type in prop_types:
            G.add((prop, A, prop_type))
        # define any subproperties
        subproperties_def = propdefn.get("subproperties", {})
        assert isinstance(subproperties_def, dict)
        define_properties(subproperties_def, prop)
        # All remaining key-value pairs in the definition are direct
        # property-object pairs on this Brick property.
        # BUG FIX: the original wrapped this in a redundant outer loop over
        # propdefn.items(), re-adding the identical triples once per entry
        # in the definition; a single pass adds the same graph content.
        expected_properties = ["subproperties", A]
        for propname in propdefn.keys():
            if propname in expected_properties:
                continue
            G.add((prop, propname, propdefn[propname]))
def insert_series(series):
    """insert series if missing"""
    # Look up the series row; create it with a zero counter when absent.
    existing = frappe.db.get_value('Series', series, 'name', order_by="name")
    if not existing:
        frappe.db.sql("insert into tabSeries (name, current) values (%s, 0)", (series))
def log(message):
    """Convenience function to log a message to the database."""
    # The entry is only staged on the session; the caller is responsible
    # for committing.
    entry = LogEntry(message=message)
    db.session.add(entry)
def approx_version_number():
    """
    In the event that git is unavailable and the VERSION file is not present
    this returns a "version number" in the following precedence:
    - version number from path
        downloads of viral-ngs from GitHub tagged releases
        are likely to be extracted into directories containing
        the version number. If they contain a version number
        in the form d.d.d, we can use it
    - modification time of this file (unix timestamp)
        file modification time for github releases corresponds to
        when the release archives were created, a rough way to ballpark
        the release date. If we can't get the version number from the path
        we can at least use the modification time of this file as a proxy
        for the true version number
    - the current time (unix timestamp)
        the current time is better than not having any version number
    """
    version_pattern = re.compile(r"(?:(\d+)\.)?(?:(\d+)\.)?(?:(\d+))")
    # Name of the directory two levels above this file (the project root).
    root_dir_name = os.path.basename(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    # for tagged releases, it is likely the version number is part of
    # the viral-ngs root directory name
    match = version_pattern.search(root_dir_name)
    if match and len([group for group in match.groups() if group]) == 3:
        return ".".join(map(str, match.groups()))
    try:
        mtime = os.path.getmtime(__file__)
    except OSError:
        mtime = 0
    if mtime > 0:
        # if we could get the modification time of the current file, use it
        return str(int(mtime))
    # just use the current time
    return str(int(time.time()))
def print_skew(
        dat: str,
        tsv_sam_sum: pd.Series) -> None:
    """Print a reads-per-sample histogram when its counts are skewed.

    Parameters
    ----------
    dat : str
        Dataset name
    tsv_sam_sum : pd.Series
        Sum of reads per sample
    """
    count, division = np.histogram(tsv_sam_sum)
    skw = skew(count)
    # Only report when the histogram-count skewness is meaningful.
    if abs(skw) <= 1:
        return
    print()
    print(' ==> Consider rarefying <==')
    print('[%s] Reads-per-sample distribution [skewness=%s] (>1!)' % (
        dat, round(abs(float(skw)), 3)))
    # Rescale bin counts onto a 0-20 range for the ASCII bar lengths.
    division_std = np.interp(
        count, (min(count), max(count)), (0, 20))
    print('\treadsbin\tsamples\thistogram')
    for ddx, div in enumerate(division_std):
        if div > 1:
            bar = '-' * int(div)
        elif div == 0:
            bar = ''
        else:
            bar = '-'
        print('\t%s\t%s\t%s' % (
            format(division[ddx], '6.3E'), count[ddx], bar))
def plot(io, key, plottype=None, seedid=None, day=None, prep_kw={},
         corrid=None, show=False,
         **kwargs):
    """
    Plot everything
    :param io: |io|
    :param key: key of objects to plot, or one of stations, data, prepdata
    :param plottype: plot type to use
        (non default values are ``'vs_dist'`` and ``'wiggle'`` for
        correlation plots, ``'velocity'`` for plots of stretching results)
    :param seedid: seed id of a channel (for data or prepdata)
    :param day: |UTC| object with day (for data or prepdata)
    :param dict prep_kw: options passed to preprocess (for prepdata only)
    :param corrid: correlation configuration (for prepdata only)
    :param show: show interactive plot
    :param \*\*kwargs: all other kwargs are passed to
        the corresponding plot function in `~yam.imaging` module
    """
    import yam.imaging
    path = io['plot']
    if not os.path.exists(path):
        os.mkdir(path)
    # Resolve the plot-type name 'pt' from the key and requested plottype.
    if key in ('stations', 'data', 'prepdata'):
        pt = key
    else:
        # Keys without a 't' component refer to correlations; keys with
        # one refer to stretching results.
        is_corr = 't' not in _analyze_key(key)
        if is_corr and plottype == 'vs_dist':
            pt = 'corr_vs_dist'
        elif is_corr and plottype == 'wiggle':
            pt = 'corr_vs_time_wiggle'
        elif is_corr and plottype is None:
            pt = 'corr_vs_time'
        elif not is_corr and plottype is None:
            pt = 'sim_mat'
        elif not is_corr and plottype == 'velocity':
            pt = 'velocity_change'
        else:
            raise ParseError('Combination of key and plottype not supported')
    # Per-plot-type options are overridden by generic 'plot_options'.
    kw = kwargs.get('plot_%s_options' % pt, {})
    kw.update(kwargs.get('plot_options', {}))
    bname = os.path.join(path, pt)
    if key == 'stations':
        yam.imaging.plot_stations(io['inventory'], bname, **kw)
    elif key in ('data', 'prepdata'):
        data = load(io, key, do='return', seedid=seedid, day=day,
                    prep_kw=prep_kw)
        fname = bname + '_%s_%s' % (seedid, day)
        if key == 'prepdata':
            fname = fname + '_c' + corrid
        yam.imaging.plot_data(data, fname, show=show, **kw)
    else:
        # Dispatch to the matching plot_* function in yam.imaging by name.
        plot_ = getattr(yam.imaging, 'plot_' + pt)
        if pt == 'corr_vs_dist':
            fname2 = _get_fname(io, key)
            stream = obspy.read(fname2, 'H5', group=key)
            fname = bname + '_' + key.replace('/', '_')
            plot_(stream, fname, **kw)
        elif pt == 'velocity_change':
            results = [res for task, res in _iter_h5(io, key)]
            fname = bname + '_' + key.replace('/', '_')
            plot_(results, fname, **kw)
        else:
            # One plot per HDF5 task under the key.
            for task, res in _iter_h5(io, key):
                fname = bname + task.replace('/', '_')
                plot_(res, fname, **kw)
    if show:
        from matplotlib.pyplot import show
        show()
def flip_mesh(mesh):
    """
    It flips the mesh of a shape.
    ----------------------------
    Args:
        mesh (obj: 'base.Trimesh'): The mesh of a shape
    Returns:
        mesh (obj: 'base.Trimesh'): The flipped mesh of the shape
    """
    # One column per face: the (x, y, z) centroid of each triangle.
    triangles = np.zeros((3, len(mesh.faces)))
    # BUG FIX: the original iterated over mesh.faces[1:], which skipped the
    # first face entirely and left the last centroid column at zero.
    for i, face in enumerate(mesh.faces):
        x, y, z = [], [], []
        for vertex_index in face:
            vertex = mesh.vertices[vertex_index]
            x.append(vertex[0])
            y.append(vertex[1])
            z.append(vertex[2])
        triangles[0][i] = np.sum(x)/3
        triangles[1][i] = np.sum(y)/3
        triangles[2][i] = np.sum(z)/3
    # Per-axis moment test decides whether each axis gets mirrored.
    f_x = calculate_f(triangles[0])
    f_y = calculate_f(triangles[1])
    f_z = calculate_f(triangles[2])
    # Diagonal sign matrix flips the vertex coordinates along each axis
    # whose f-value is negative.
    R = np.array([[np.sign(f_x), 0, 0], [0, np.sign(f_y), 0], [0, 0, np.sign(f_z)]])
    mesh.vertices = np.matmul(mesh.vertices, R)
    return mesh
def leaky_twice_relu6(x, alpha_low=0.2, alpha_high=0.2, name="leaky_relu6"):
    """:func:`leaky_twice_relu6` can be used through its shortcut: :func:`:func:`tl.act.ltrelu6`.
    This activation function is a modified version :func:`leaky_relu` introduced by the following paper:
    `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
    This activation function also follows the behaviour of the activation function :func:`tf.nn.relu6` introduced by the following paper:
    `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__
    This function push further the logic by adding `leaky` behaviour both below zero and above six.
    The function return the following results:
      - When x < 0: ``f(x) = alpha_low * x``.
      - When x in [0, 6]: ``f(x) = x``.
      - When x > 6: ``f(x) = 6 + (alpha_high * (x-6))``.
    Parameters
    ----------
    x : Tensor
        Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``.
    alpha_low : float
        Slope for x < 0: ``f(x) = alpha_low * x``.
    alpha_high : float
        Slope for x > 6: ``f(x) = 6 + (alpha_high * (x-6))``.
    name : str
        The function name (optional).
    Examples
    --------
    >>> import tensorlayer as tl
    >>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.leaky_twice_relu6(x, 0.2, 0.2), name='dense')
    Returns
    -------
    Tensor
        A ``Tensor`` in the same type as ``x``.
    References
    ----------
    - `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
    - `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__
    """
    # BUG FIX: the error messages previously claimed the range "[0, 1]",
    # but the validation below rejects 0 (0 < alpha <= 1); the messages now
    # state the actual half-open interval.
    if not isinstance(alpha_high, tf.Tensor) and not (0 < alpha_high <= 1):
        raise ValueError("`alpha_high` value must be in (0, 1]`")
    if not isinstance(alpha_low, tf.Tensor) and not (0 < alpha_low <= 1):
        raise ValueError("`alpha_low` value must be in (0, 1]`")
    with tf.name_scope(name, "leaky_twice_relu6") as name_scope:
        x = tf.convert_to_tensor(x, name="features")
        # Above zero: min(x, leaky line through (6, 6)) gives the identity
        # on [0, 6] and the alpha_high slope beyond 6.
        x_is_above_0 = tf.minimum(x, 6 * (1 - alpha_high) + alpha_high * x)
        # Below zero: the alpha_low leak, clamped at 0 from above.
        x_is_below_0 = tf.minimum(alpha_low * x, 0)
        return tf.maximum(x_is_above_0, x_is_below_0, name=name_scope)
def fully_connected(inputs,
                    num_outputs,
                    scope,
                    use_xavier=True,
                    stddev=1e-3,
                    weight_decay=0.0,
                    activation_fn=tf.nn.relu,
                    bn=False,
                    bn_decay=None,
                    is_training=None):
    """ Fully connected layer with non-linear operation.
    Args:
        inputs: 2-D tensor BxN
        num_outputs: int
        scope: str, variable scope name for the layer's variables
        use_xavier: bool, use Xavier initialization for the weights
        stddev: float, stddev for truncated-normal init when use_xavier is False
        weight_decay: float, L2 weight-decay coefficient passed to
            _variable_with_weight_decay
        activation_fn: callable or None, nonlinearity applied to the output
        bn: bool, whether to apply batch normalization
        bn_decay: float or None, decay for the batch-norm moving averages
        is_training: bool tensor, batch-norm training/inference switch
            (required when bn is True)
    Returns:
        Variable tensor of size B x num_outputs.
    """
    with tf.variable_scope(scope) as sc:
        num_input_units = inputs.get_shape()[-1].value
        weights = _variable_with_weight_decay('weights',
                                              shape=[num_input_units, num_outputs],
                                              use_xavier=use_xavier,
                                              stddev=stddev,
                                              wd=weight_decay)
        # Linear transform followed by bias.
        outputs = tf.matmul(inputs, weights)
        biases = tf.get_variable('biases', [num_outputs],
                                 initializer = tf.constant_initializer(0.0))
        outputs = tf.nn.bias_add(outputs, biases)
        # Optional batch normalization before the nonlinearity.
        if bn:
            outputs = tf.contrib.layers.batch_norm(outputs, decay = bn_decay, updates_collections = None,
                                                   epsilon = 1e-5, scale = True, is_training = is_training,
                                                   scope = 'bn')
        if activation_fn is not None:
            outputs = activation_fn(outputs)
        return outputs
def _x_orientation_rep_dict(x_orientation):
""""Helper function to create replacement dict based on x_orientation"""
if x_orientation.lower() == 'east' or x_orientation.lower() == 'e':
return {'x': 'e', 'y': 'n'}
elif x_orientation.lower() == 'north' or x_orientation.lower() == 'n':
return {'x': 'n', 'y': 'e'}
else:
raise ValueError('x_orientation not recognized.') | 5,325,932 |
def write_interrupted_test_results_to(filepath, test_start_time):
    """Writes a test results JSON file* to filepath.
    This JSON file is formatted to explain that something went wrong.
    *src/docs/testing/json_test_results_format.md
    Args:
        filepath: A path to a file to write the output to.
        test_start_time: The start time of the test run expressed as a
            floating-point offset in seconds from the UNIX epoch.
    """
    # Minimal version-3 results payload marking the run as interrupted.
    interrupted_results = {
        'interrupted': True,
        'num_failures_by_type': {},
        'seconds_since_epoch': test_start_time,
        'tests': {},
        'version': 3,
    }
    with open(filepath, 'w') as out_file:
        json.dump(interrupted_results, out_file)
def feature_evaluation(X: pd.DataFrame, y: pd.Series, output_path: str = ".") -> NoReturn:
    """
    Create scatter plot between each feature and the response.
        - Plot title specifies feature name
        - Plot title specifies Pearson Correlation between feature and response
        - Plot saved under given folder with file name including feature name
    Parameters
    ----------
    X : DataFrame of shape (n_samples, n_features)
        Design matrix of regression problem
    y : array-like of shape (n_samples, )
        Response vector to evaluate against
    output_path: str (default ".")
        Path to folder in which plots are saved
    """
    y_std = y.std()
    corr_lst = []
    for column in X:
        # Pair this feature with the response so that .cov() yields the
        # feature/response covariance; normalizing by both standard
        # deviations gives the Pearson correlation.
        df = X[[column]].copy()
        df.insert(1, 'y', y)
        c_std = X[column].std()
        corr = (df.cov() / (y_std * c_std))[column][1]
        corr_lst.append((column, corr))
        fig = go.Figure(layout=dict(title=f'The correlation between {column} and prices is {corr}'))
        fig.add_trace(go.Scatter(x=X[column], y=y, name=column, mode="markers"))
        fig.update_yaxes(title_text='House Prices')
        fig.update_xaxes(title_text=column)
        # NOTE(review): the backslash separator makes this path
        # Windows-only — confirm whether os.path.join is intended here.
        fig.write_image(output_path + f'\\{column}.jpg')
    # print(corr_lst)
def move_at_objc_to_access_note(access_notes_file, arg, offset, access_note_name):
    """Write an @objc attribute into an access notes file, then return the
    string that will replace the attribute and trailing comment."""
    # Append a YAML entry for this declaration to the access notes file.
    access_notes_file.write(u"""
- Name: '{}'
  ObjC: true""".format(access_note_name))
    # A non-empty @objc argument becomes an explicit ObjC name in the note.
    if arg:
        access_notes_file.write(u"""
  ObjCName: '{}'""".format(arg))
    # Default to shifting expected diagnostics down 1 line.
    if offset is None:
        offset = 1
    # Replacement text: adjust diagnostic offsets and expect the remark/note
    # the compiler emits when the access note supplies the attribute.
    return u"// access-note-adjust" + offsetify(offset) + u" [attr moved] " + \
        u"expected-remark{{access note for fancy tests adds attribute 'objc' to " + \
        u"this }} expected-note{{add attribute explicitly to silence this warning}}"
def test_idempotent_lambda_with_validator_util(
    config_without_jmespath: IdempotencyConfig,
    persistence_store: DynamoDBPersistenceLayer,
    lambda_apigw_event,
    timestamp_future,
    serialized_lambda_response,
    deserialized_lambda_response,
    hashed_idempotency_key_with_envelope,
    mock_function,
    lambda_context,
):
    """
    Test idempotent decorator where event with matching event key has already been succesfully processed, using the
    validator utility to unwrap the event
    """
    # Stub DynamoDB: the initial put_item fails with a conditional-check
    # error (record already exists), and the follow-up get_item returns a
    # COMPLETED record holding the previously stored response.
    stubber = stub.Stubber(persistence_store.table.meta.client)
    ddb_response = {
        "Item": {
            "id": {"S": hashed_idempotency_key_with_envelope},
            "expiration": {"N": timestamp_future},
            "data": {"S": serialized_lambda_response},
            "status": {"S": "COMPLETED"},
        }
    }
    expected_params = {
        "TableName": TABLE_NAME,
        "Key": {"id": hashed_idempotency_key_with_envelope},
        "ConsistentRead": True,
    }
    stubber.add_client_error("put_item", "ConditionalCheckFailedException")
    stubber.add_response("get_item", ddb_response, expected_params)
    stubber.activate()
    # The validator envelope unwraps the API Gateway event before the
    # idempotency key is derived.
    @validator(envelope=envelopes.API_GATEWAY_HTTP)
    @idempotent(config=config_without_jmespath, persistence_store=persistence_store)
    def lambda_handler(event, context):
        mock_function()
        return "shouldn't get here!"
    mock_function.assert_not_called()
    # The handler body must be skipped: the cached response is returned.
    lambda_resp = lambda_handler(lambda_apigw_event, lambda_context)
    assert lambda_resp == deserialized_lambda_response
    stubber.assert_no_pending_responses()
    stubber.deactivate()
def load_pdbbind_fragment_coordinates(frag1_num_atoms,
                                      frag2_num_atoms,
                                      complex_num_atoms,
                                      max_num_neighbors,
                                      neighbor_cutoff,
                                      pdbbind_dir,
                                      base_dir,
                                      datafile="INDEX_core_data.2013"):
    """Featurize PDBBind dataset.
    Parameters
    ----------
    frag1_num_atoms: int
        Maximum number of atoms in fragment 1.
    frag2_num_atoms: int
        Maximum number of atoms in fragment 2.
    complex_num_atoms: int
        Maximum number of atoms in complex.
    max_num_neighbors: int
        Maximum number of neighbors per atom.
    neighbor_cutoff: float
        Interaction cutoff [Angstrom].
    pdbbind_dir: str
        Location of PDBbind datafile.
    base_dir: str
        Location for storing featurized dataset.
    datafile: str
        Name of PDBbind datafile, optional (Default "INDEX_core_data.2013").
    Returns
    -------
    tasks: list
        PDBbind tasks.
    dataset: dc.data.DiskDataset
        PDBbind featurized dataset.
    transformers: list
        dc.trans.NLP objects.
    """
    # Create some directories for analysis
    # The base_dir holds the results of all analysis
    # NOTE(review): ``reload`` is not defined in this function or its
    # parameters — confirm it is a module-level flag, otherwise this raises
    # NameError (or, on Python 2, always evaluates truthy as a builtin).
    if not reload:
        if os.path.exists(base_dir):
            shutil.rmtree(base_dir)
    if not os.path.exists(base_dir):
        os.makedirs(base_dir)
    current_dir = os.path.dirname(os.path.realpath(__file__))
    #Make directories to store the raw and featurized datasets.
    data_dir = os.path.join(base_dir, "dataset")
    # Load PDBBind dataset
    labels_file = os.path.join(pdbbind_dir, datafile)
    tasks = ["-logKd/Ki"]
    print("About to load contents.")
    contents_df = load_pdbbind_labels(labels_file)
    ids = contents_df["PDB code"].values
    y = np.array([float(val) for val in contents_df["-logKd/Ki"].values])
    # Define featurizers
    featurizer = ComplexNeighborListFragmentAtomicCoordinates(
        frag1_num_atoms, frag2_num_atoms, complex_num_atoms, max_num_neighbors,
        neighbor_cutoff)
    # Uniform example weights.
    w = np.ones_like(y)
    #Currently featurizes with shard_size=1
    #Dataset can be reshard: dataset = dataset.reshard(48) for example
    def shard_generator():
        # Yield one (X, y, w, id) shard per PDB complex, skipping entries
        # whose featurization fails.
        for ind, pdb_code in enumerate(ids):
            print("Processing %s" % str(pdb_code))
            pdb_subdir = os.path.join(pdbbind_dir, pdb_code)
            computed_feature = compute_pdbbind_coordinate_features(
                featurizer, pdb_subdir, pdb_code)
            if computed_feature[0] is None:
                print("Bad featurization")
                continue
            else:
                X_b = np.reshape(np.array(computed_feature), (1, 9))
                y_b = y[ind]
                w_b = w[ind]
                y_b = np.reshape(y_b, (1, -1))
                w_b = np.reshape(w_b, (1, -1))
                yield (X_b, y_b, w_b, pdb_code)
    dataset = dc.data.DiskDataset.create_dataset(
        shard_generator(), data_dir=data_dir, tasks=tasks)
    transformers = []
    return tasks, dataset, transformers
def cross_validation(dictionary):
    """Yield leave-one-subject-out train/test splits for cross validation.

    Takes the mapping produced by ``load_data`` (subject -> data array of
    shape (n, 22)) and, for each subject in turn, yields a dict holding that
    subject's data as the test set and the concatenation of all the other
    subjects' data as the training set.

    Note: the same result dict object is mutated and re-yielded on every
    iteration, so consume each split before advancing the generator.

    :param dictionary: mapping of subject identifier -> ndarray (n, 22)
    :return: generator of dicts with keys 'training data', 'test data',
        'test subject'
    """
    # Materialize the views once so positional indexing works on Python 3,
    # where dict.keys()/dict.values() return non-subscriptable view objects.
    subjects = list(dictionary.keys())
    data_arrays = list(dictionary.values())
    result_dictionary = dict([('test data', np.array([])),
                              ('training data', np.array([])),
                              ('test subject', "")])
    for i in range(len(subjects)):
        data_list = np.zeros(shape=(0, 22))
        for j in range(len(subjects)):
            if j != i:
                data_list = np.append(data_list, data_arrays[j], axis=0)
        result_dictionary.update({'training data': np.array(data_list)})
        result_dictionary.update({'test data': np.array(data_arrays[i])})
        result_dictionary.update({'test subject': subjects[i]})
        yield result_dictionary
def get_version_if_modified(gh_type, repo_name, typ, force=False):
    """Return the latest version if it differs from the indexed version.

    :param gh_type: GitHub source type passed through to the lookups.
    :param repo_name: repository name.
    :param typ: artifact type to check.
    :param force: when True, always return the latest version without
        consulting the index.
    :return: the latest version string, or None when the indexed version
        already matches (no change).
    """
    latest_version = get_latest_version(gh_type, repo_name, typ)
    if force:
        return latest_version
    indexed_version = get_indexed_version(gh_type, repo_name, typ)
    if indexed_version == latest_version:
        # print() as a function call so this also runs on Python 3
        # (the original used the Python-2-only print statement).
        print('%s (%s): skipping %s' % (repo_name, gh_type, typ))
        return None
    return latest_version
def do_rest_request(**kwargs):
    """Perform a GET request and return the decoded JSON response.

    Expects either ``full_url`` or, in its absence, a combination of
    ``url`` and ``query_params``.

    :raises Exception: if neither form of URL is supplied, or if the
        server responds with a non-200 status code.
    """
    if 'full_url' in kwargs:
        query_url = kwargs['full_url']
    elif 'url' in kwargs and 'query_params' in kwargs:
        # Bug fix: the guard previously checked the non-existent key
        # 'rest_url' while the body read kwargs['url'], so the documented
        # url + query_params form always fell through to the error branch.
        query_url = kwargs['url'] + '?' + urlparse.urlencode(kwargs['query_params'])
    else:
        raise Exception('Provide either "full_url" or a combination of "url" and "query_params"')
    print("Querying {}".format(query_url))
    rest_response = requests.get(query_url, headers={'content-type': 'application/json'})
    if rest_response.status_code != 200:
        raise Exception('Cannot fetch info: {}'.format(rest_response.status_code))
    rest_response = json.loads(rest_response.text)
    return rest_response
def variable(init_val, lb=None, ub=None):
    """Create a scalar design variable in the global ``opti`` problem.

    :param init_val: Initial guess handed to the solver.
    :param lb: Optional lower bound constraint.
    :param ub: Optional upper bound constraint.
    :return: The newly created optimization variable.
    """
    new_var = opti.variable()
    opti.set_initial(new_var, init_val)
    # Register whichever bound constraints were actually supplied.
    if lb is not None:
        opti.subject_to(new_var >= lb)
    if ub is not None:
        opti.subject_to(new_var <= ub)
    return new_var
def current_user() -> str:
    """Return the username associated with the current session cookie."""
    session_id = request.get_cookie(cookie_session_name())
    cursor = get_cursor()
    cursor.execute(
        """
        select username
        from sessions
        where session_id = :session_id
        """,
        {"session_id": session_id},
    )
    row = cursor.fetchone()
    return row["username"]
def compare_outputs(output1, output2, rtol=1e-5, atol=1e-7):
    """Recursively compare two (possibly nested) dicts of quantities.

    :param output1: first mapping of name -> value (values may be nested dicts).
    :param output2: second mapping with the same structure.
    :param rtol: relative tolerance forwarded to the comparison function.
    :param atol: absolute tolerance forwarded to the comparison function.
    :raises AssertionError: if the number of entries differs or any value
        comparison fails.
    """
    assert len(list(output1.keys())) == len(
        list(output2.keys())
    ), "Different number of entries"
    for key in output1.keys():
        if isinstance(output1[key], dict):
            # Bug fix: propagate the caller's tolerances into the recursion;
            # previously nested dicts were always compared with the defaults.
            compare_outputs(output1[key], output2[key], rtol=rtol, atol=atol)
        else:
            val1, val2 = output1[key], output2[key]
            compare_fn = get_compare_function(val1, val2)
            compare_fn(val1, val2, atol=atol, rtol=rtol)
def printer_worker(prefix, pause, exitRequest, pipelineIn, pipelineOut):
    """
    Worker loop for a printing process (used with multiprocessing).
    The controller of the process(es) will be referred to as the manager
    (just to make it easier). The loop consumes items from ``pipelineIn``,
    processes them through a ``printer.Printer`` instance, and produces
    results on ``pipelineOut`` until an exit is requested.
    Parameters
    ----------
    prefix: Data for class constructor
    pause: A mutex lock used to pause/resume this process
    exitRequest: Queue used for indicating that this process should exit
    pipelineIn: Queue used for consumer data input
    pipelineOut: Queue used for producer data output
    Returns
    -------
    None
    """
    print("Printer start!")
    # Instantiate a class object for this process
    printerClass = printer.Printer(prefix)
    while (True):
        # Pause turnstile: a manager holding ``pause`` blocks this worker
        # here; releasing it immediately lets the loop continue.
        pause.acquire()
        pause.release()
        # Queues are process-safe, so no need to protect it
        # IMPORTANT: If the queue is empty it will be stuck here forever,
        # This is bad if an exit is requested, so a way to get it unstuck
        # is to have the manager manually push a few items (see manager below)
        inData = pipelineIn.get()
        # Do the work (in the class, not in this wrapper function)
        # Single Responsibility Principle
        ret, outData = printerClass.print(inData)
        # outData should also be the only thing coming out, already packed and ready to go
        # Something went wrong so we'll skip
        if (not ret):
            continue
        print("Inserting data: " + outData)
        # Gets stuck if full
        pipelineOut.put(outData)
        # Check whether a reset was called
        if (exit_requested(exitRequest)):
            break
    # Once the process reaches the end of the function it will die automatically
    print("Printer finished!")
    return
def test_feeds(mock_feeds, user, feeds):
"""
Test for:
User.iter_feeds
User.feeds
"""
assert [*user.iter_feeds()] == feeds
assert user.feeds() == feeds
limit = len(feeds) - len(feeds) // 2
assert user.feeds(limit=limit) == feeds[:limit] | 5,325,945 |
async def async_setup(hass, hassconfig):
    """Set up the integration from YAML configuration.

    Initializes the per-domain storage dict in ``hass.data`` and registers
    an EntityComponent for the domain. Always returns True (setup success).
    """
    hass.data.setdefault(DOMAIN, {})
    # Configuration for this domain may be absent entirely.
    config = hassconfig.get(DOMAIN) or {}
    hass.data[DOMAIN]['config'] = config
    # Pre-create the shared containers other parts of the integration rely on.
    hass.data[DOMAIN].setdefault('entities', {})
    hass.data[DOMAIN].setdefault('configs', {})
    hass.data[DOMAIN].setdefault('miot_main_entity', {})
    hass.data[DOMAIN].setdefault('micloud_devices', [])
    hass.data[DOMAIN].setdefault('cloud_instance_list', [])
    hass.data[DOMAIN].setdefault('event_fetcher_list', [])
    hass.data[DOMAIN].setdefault('add_handler', {})
    # The EntityComponent manages this domain's entities and polling.
    component = EntityComponent(_LOGGER, DOMAIN, hass, SCAN_INTERVAL)
    hass.data[DOMAIN]['component'] = component
    await component.async_setup(config)
    return True
def mplplot(peaklist, w=1, y_min=-0.01, y_max=1, points=800, limits=None):
    """
    A no-frills routine that plots spectral simulation data.
    Arguments
    ---------
    peaklist : [(float, float)...]
        a list of (frequency, intensity) tuples.
        NOTE: the list is sorted in place, so the caller's list is mutated.
    w : float
        peak width at half height
    y_min : float or int
        minimum intensity for the plot.
    y_max : float or int
        maximum intensity for the plot.
    points : int
        number of data points
    limits : (float, float)
        frequency limits for the plot
    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        the x (frequency) and y (intensity) arrays that were plotted.
    """
    # import matplotlib.pyplot as plt
    peaklist.sort()  # Could become costly with larger spectra
    if limits:
        try:
            l_limit, r_limit = limits
            l_limit = float(l_limit)
            r_limit = float(r_limit)
        except Exception as e:
            print(e)
            print('limits must be a tuple of two numbers')
            # return None
            raise
        if l_limit > r_limit:
            # Accept limits in either order.
            l_limit, r_limit = r_limit, l_limit
    else:
        # Default window: 50 units beyond the outermost peaks.
        l_limit = peaklist[0][0] - 50
        r_limit = peaklist[-1][0] + 50
    x = np.linspace(l_limit, r_limit, points)
    plt.ylim(y_min, y_max)
    plt.gca().invert_xaxis()  # reverses the x axis (NMR convention)
    y = add_signals(x, peaklist, w)
    # noinspection PyTypeChecker
    plt.plot(x, y)
    plt.show()
    return x, y
    # TODO: or return plt? Decide behavior
def get_node_centroids(mesh):
    """
    Calculate the node centroids of the given elements.
    Parameters
    ----------
    mesh : list of dicts or single dict
        each dict containing
        at least the following keywords
            nodes : ndarray
                Array with all node postions.
            elements : dict of ndarrays
                Contains array of nodes for elements sorted by element types.
    Returns
    -------
    result : list of dictionaries or single dict of ndarrays (like 'mesh')
        Centroids of elements sorted by element types.
    """
    # Accept a bare dict as well as a list/tuple of dicts.
    is_single = not isinstance(mesh, (list, tuple))
    meshes = [mesh] if is_single else mesh
    centroids = []
    for current in meshes:
        per_type = {}
        for elem_name in ELEM_NAMES:
            if elem_name in current["elements"]:
                # Gather the node coordinates of every element of this type
                # and average over the per-element node axis.
                node_positions = current["nodes"][current["elements"][elem_name]]
                per_type[elem_name] = np.mean(node_positions, axis=1)
        centroids.append(per_type)
    # Mirror the input shape: single dict in, single dict out.
    return centroids[0] if is_single else centroids
def find_saas_replication_price(package, tier=None, iops=None):
    """Find the price in the given package for the desired replicant volume.

    :param package: The product package of the endurance storage type
    :param tier: The tier of the primary storage volume
    :param iops: The IOPS of the primary storage volume
    :return: Returns the replication price id
    :raises ValueError: when no matching replication price is found
    """
    # Tier-based and IOPS-based volumes use different items/restrictions.
    if tier is None:
        target_value = iops
        wanted_keyname = 'REPLICATION_FOR_IOPSBASED_PERFORMANCE'
        restriction_type = 'IOPS'
    else:
        target_value = ENDURANCE_TIERS.get(tier)
        wanted_keyname = 'REPLICATION_FOR_TIERBASED_PERFORMANCE'
        restriction_type = 'STORAGE_TIER_LEVEL'
    candidates = (item for item in package['items']
                  if item['keyName'] == wanted_keyname)
    for item in candidates:
        price_id = _find_price_id(
            item['prices'],
            'performance_storage_replication',
            restriction_type,
            target_value
        )
        if price_id:
            return price_id
    raise ValueError("Could not find price for replicant volume")
def _get_option_of_highest_precedence(config, option_name):
"""looks in the config and returns the option of the highest precedence
This assumes that there are options and flags that are equivalent
Args:
config (_pytest.config.Config): The pytest config object
option_name (str): The name of the option
Returns:
str: The value of the option that is of highest precedence
None: no value is present
"""
# Try to get configs from CLI and ini
try:
cli_option = config.getoption("--{}".format(option_name))
except ValueError:
cli_option = None
try:
ini_option = config.getini(option_name)
except ValueError:
ini_option = None
highest_precedence = cli_option or ini_option
return highest_precedence | 5,325,950 |
def get_industry_categories():
    """Return an {industry_id: name} mapping per issuing department."""
    expr = STOCK_DB.industries.drop_field('last_updated')
    frame = odo(expr, pd.DataFrame)
    # One dict of {industry_id: name} per department.
    return {
        department: grp.set_index('industry_id')['name'].to_dict()
        for department, grp in frame.groupby('department')
    }
def test_get_transaction_by_transaction_id_from_budget(ynab):
    """Tests that a transaction can be retrieved from a Budget object by ID"""
    txn = ynab.budget("string").transaction("string")
    assert isinstance(txn, models.Transaction)
    assert txn.id == "string"
def rate(t, y, dt, elph_tau, pol_tau, delay, start):
    """Rate equation function for two state model. y[0] is charge transfer state,
    y[1] is polaron state, elph_tau is electron-phonon scattering constant, pol_tau is
    polaron formation constant."""
    # Name the shared terms once instead of repeating them per equation.
    scattering = (y[0] - y[1]) / elph_tau
    formation = y[0] * y[1] / pol_tau
    drive = pulse(t, dt, delay, start)
    return [drive - scattering - formation,
            scattering - formation,
            formation]
def trace_module(no_print=True):
    """Trace my_module_original exceptions.

    Exercises ``docs.support.my_module`` inside an ExDoc context so putil
    can record the exceptions each callable may raise. When ``no_print``
    is False, the collected sphinx documentation is printed per callable.

    :param no_print: suppress printing of the traced documentation.
    :return: a shallow copy of the ExDoc object with the trace results.
    :raises RuntimeError: if tracing fails for any reason.
    """
    with putil.exdoc.ExDocCxt() as exdoc_obj:
        try:
            docs.support.my_module.func('John')
            obj = docs.support.my_module.MyClass()
            obj.value = 5
            obj.value
        except Exception:
            # Narrowed from a bare ``except:`` so system-exiting exceptions
            # (KeyboardInterrupt, SystemExit) still propagate.
            raise RuntimeError(
                'Tracing did not complete successfully'
            )
    if not no_print:
        module_prefix = 'docs.support.my_module.'
        callable_names = ['func', 'MyClass.value']
        for callable_name in callable_names:
            callable_name = module_prefix + callable_name
            print('\nCallable: {0}'.format(callable_name))
            print(exdoc_obj.get_sphinx_doc(callable_name, width=70))
            print('\n')
    return copy.copy(exdoc_obj)
def data_to_seq(X, Y,
                t_lag=8,
                t_future_shift=1,
                t_future_steps=1,
                t_sw_step=1,
                X_pad_with=None):
    """Slice X and Y into sequences using a sliding window.
    Arguments:
    ----------
        X : np.ndarray with ndim == 2
        Y : np.ndarray with ndim == 2
        t_sw_step : uint (default: 1)
            Time step of the sliding window.
        t_lag : uint (default: 8)
            (t_lag - 1) past time steps used to construct a sequence of inputs.
        t_future_shift : uint (default: 1)
            How far in the future predictions are supposed to be made.
        t_future_steps : uint (default: 1)
            How many steps to be predicted from t + t_future_shift.
        X_pad_with : unused; kept for backward compatibility.
    The sequences are constructed in a way that the model can be
    trained to predict Y[t_future:t_future+t_future_steps]
    from X[t-t_lag:t] where t_future = t + t_future_shift.
    Returns:
    --------
        [X_seq, Y_seq] : list of two np.ndarrays of shapes
            (n_windows, t_lag, X.shape[1]) and
            (n_windows, t_future_steps, Y.shape[1]).
    """
    # Assume that provided X and Y are matrices and are aligned in time
    assert X.ndim == 2 and Y.ndim == 2
    assert len(X) == len(Y)
    # Pad X from the beginning so the first window is fully populated.
    X_padding_left = np.zeros((t_lag - 1, X.shape[1]))
    X = np.vstack([X_padding_left, X])
    # Last window start such that the Y slice stays inside the sequence.
    nb_t_steps = 1 + len(X) - (t_future_shift + (t_future_steps - 1))
    X_seq, Y_seq = [], []
    # range() instead of the Python-2-only xrange() (bug fix for Python 3).
    for t in range(t_lag, nb_t_steps, t_sw_step):
        t_past = t - t_lag
        t_future = t_past + t_future_shift
        X_seq.append(X[t_past:t])
        Y_seq.append(Y[t_future:t_future + t_future_steps])
    X_seq = np.asarray(X_seq)
    Y_seq = np.asarray(Y_seq)
    return [X_seq, Y_seq]
def masterbias(files,med=False,outfile=None,clobber=True,verbose=False):
    """
    Load the bias images. Overscan correct and trim them. Then average them.
    Parameters
    ----------
    files : list
       List of bias FITS files.
    med : boolean, optional
       Use the median of all the files. By default med=False and the mean is calculated.
    outfile : string, optional
       Filename to write the master bias image to.
    clobber : boolean, optional
       If the output file already exists, then overwrite it. Default is True.
    verbose : boolean, optional
       Verbose output to the screen. Default is False.
    Returns
    -------
    aim : numpy image
       The 2D master bias image (float32).
    ahead : header dictionary
       The master bias header.
    Raises
    ------
    ValueError
       If an input frame is not a bias, or the output file exists with
       clobber=False.
    Example
    -------
    bias, bhead = masterbias(bias_files)
    """
    nfiles = len(files)
    if verbose:
        print('Creating master bias using '+str(nfiles)+' files')
    # File loop
    for i in range(nfiles):
        im,head = fits.getdata(files[i],0,header=True)
        sh = im.shape
        if verbose:
            print(str(i+1)+' '+files[i]+' ['+str(sh[1])+','+str(sh[0])+']')
        # Fix header, if necessary
        # (bitwise | on the two boolean tests behaves like `or` here)
        if (head.get('TRIMSEC') is None) | (head.get('BIASSEC') is None):
            head = fixheader(head)
        # Check image type: reject frames that are clearly not biases.
        imagetyp = head.get('IMAGETYP')
        exptime = head.get('EXPTIME')
        if imagetyp is not None:
            if 'bias' not in imagetyp.lower() and 'zero' not in imagetyp.lower() and exptime != 0.0:
                raise ValueError(files[i]+' is not a bias')
        # Image processing, overscan+trim
        im2,head2 = ccdproc(im,head)
        # Initialize array on the first (already processed) frame, since
        # only then the trimmed dimensions are known.
        if i==0:
            ny,nx = im2.shape
            if med:
                # Stack all frames for a median; memory-heavy for many files.
                imarr = np.zeros((ny, nx, nfiles),float)
            else:
                # Running sum for the mean; constant memory.
                totim = np.zeros(im2.shape,float)
        if med:
            imarr[:,:,i] = im2
        else:
            totim += im2
        # Record each input file in the output header.
        if i==0: ahead=head2.copy()
        ahead['CMB'+str(i+1)] = files[i]
    # Final calculation
    if med:
        aim = np.median(imarr,axis=2)
        ahead['HISTORY'] = 'Median combine'
    else:
        aim = totim/nfiles
        ahead['HISTORY'] = 'Mean combine'
    ahead['NCOMBINE'] = nfiles
    ahead['HISTORY'] = time.ctime()+' bias combine'
    aim = aim.astype(np.float32)  # convert to 32 bit
    # Output file
    if outfile is not None:
        if os.path.exists(outfile):
            if clobber is False:
                raise ValueError(outfile+' already exists and clobber=False')
            else:
                os.remove(outfile)
        if verbose:
            print('Writing master bias to '+outfile)
        # writeto() returns None; the assignment is only a side-effect call.
        hdu = fits.PrimaryHDU(aim,ahead).writeto(outfile)
    return aim, ahead
def get_model_kind(model):
    """Returns the "kind" of the given model.
    NOTE: A model's kind is usually, but not always, the same as a model's class
    name. Specifically, the kind is different when a model overwrites the
    _get_kind() class method. Although Oppia never does this, the Apache Beam
    framework uses "kind" to refer to models extensively, so we follow the same
    convention and take special care to always return the correct value.
    Args:
        model: base_models.Model|cloud_datastore_types.Entity. The model to
            inspect.
    Returns:
        bytes. The model's kind.
    Raises:
        TypeError. When the argument is not a model.
    """
    # Accept both model instances and model classes.
    is_ndb_model = isinstance(model, base_models.BaseModel) or (
        isinstance(model, type) and issubclass(model, base_models.BaseModel))
    if is_ndb_model:
        return model._get_kind()  # pylint: disable=protected-access
    if isinstance(model, cloud_datastore_types.Entity):
        return model.kind
    raise TypeError('%r is not a model type or instance' % model)
def get_Zvalence_from_pseudo(pseudo):
    """
    Extract the number of valence electrons from a pseudopotential file.

    Parses the first line containing 'valence'; tries the UPF v2
    ``z_valence="..."`` attribute first, then the older ``<number> Z``
    layout. Returns None when neither form can be parsed.
    """
    with open(pseudo.get_file_abs_path(), 'r') as handle:
        for line in handle:
            if 'valence' not in line:
                continue
            try:
                token = line.split("z_valence=\"")[-1].split("\"")[0].strip()
                return int(float(token))
            except (ValueError, IndexError):
                try:
                    return int(float(line.split("Z")[0].strip()))
                except (ValueError, IndexError):
                    return None
def detect_data_shifts(time_series,
                       filtering=True, use_default_models=True,
                       method=None, cost=None, penalty=40):
    """
    Detect data shifts in the time series, and return a boolean mask of
    dates where these data shifts occur.
    Parameters
    ----------
    time_series : Pandas series with datetime index.
        Daily time series of a PV data stream, which can include irradiance
        and power data streams. This series represents the summed daily values
        of the particular data stream.
    filtering : Boolean, default True.
        Whether or not to filter out outliers and stale data from the time
        series. If True, then this data is filtered out before running the
        data shift detection sequence. If False, this data is not filtered
        out. Default set to True.
    use_default_models: Boolean, default True
        If True, then default change point detection search parameters are
        used. For time series shorter than 2 years in length, the search
        function is `rpt.Window` with `model='rbf'`, `width=50` and
        `penalty=30`. For time series 2 years or longer in length, the
        search function is `rpt.BottomUp` with `model='rbf'`
        and `penalty=40`.
    method: ruptures search method instance or None, default None.
        Ruptures search method instance. See
        https://centre-borelli.github.io/ruptures-docs/user-guide/.
        Only used when `use_default_models` is False.
    cost: str or None, default None
        Cost function passed to the ruptures changepoint search instance.
        See https://centre-borelli.github.io/ruptures-docs/user-guide/
        Only used when `use_default_models` is False.
    penalty: int, default 40
        Penalty value passed to the ruptures changepoint detection method.
        Default set to 40. Only used when `use_default_models` is False.
    Returns
    -------
    Pandas Series
        Series of boolean values with a datetime index, where detected
        changepoints are labeled as True, and all other values are labeled
        as False.
    .. warning:: If the passed time series is less than 2 years in length,
        it will not be corrected for seasonality. Data shift detection will
        be run on the min-max normalized time series with no seasonality
        correction.
    References
    -------
    .. [1] Perry K., and Muller, M. "Automated shift detection in sensor-based
       PV power and irradiance time series", 2022 IEEE 48th Photovoltaic
       Specialists Conference (PVSC). Submitted.
    """
    try:
        import ruptures as rpt
    except ImportError:
        raise ImportError("data_shifts() requires ruptures.")
    # Run data checks on cleaned data to make sure that the data can be run
    # successfully through the routine
    _run_data_checks(time_series)
    # Run the filtering sequence, if marked as True
    if filtering:
        time_series_filtered = _erroneous_filter(time_series)
    # Drop any duplicated data from the time series
    time_series_filtered = time_series_filtered.drop_duplicates()
    # Check if the time series is more than 2 years long. If so, remove
    # seasonality. If not, run analysis on the normalized time series
    if (time_series_filtered.index.max() -
            time_series_filtered.index.min()).days <= 730:
        time_series_processed = _preprocess_data(time_series_filtered,
                                                 remove_seasonality=False)
        seasonality_rmv = False
    else:
        # Perform pre-processing on the time series, to get the
        # seasonality-removed time series.
        time_series_processed = _preprocess_data(time_series_filtered,
                                                 remove_seasonality=True)
        seasonality_rmv = True
    points = np.array(time_series_processed.dropna())
    # If seasonality has been removed and default model is used, run
    # BottomUp method
    if (seasonality_rmv) & (use_default_models):
        algo = rpt.BottomUp(model='rbf').fit(points)
        result = algo.predict(pen=40)
    # If there is no seasonality but default model is used, run
    # Window-based method
    elif (not seasonality_rmv) & (use_default_models):
        algo = rpt.Window(model='rbf',
                          width=50).fit(points)
        result = algo.predict(pen=30)
    # Otherwise run changepoint detection with the passed parameters
    else:
        algo = method(model=cost).fit(points)
        result = algo.predict(pen=penalty)
    # Remove the last index of the time series, if present
    # (ruptures always reports the series end as a breakpoint).
    if len(points) in result:
        result.remove(len(points))
    # Return a boolean mask of dates where changepoints are detected
    time_series_processed.index.name = "datetime"
    mask = pd.Series(False, index=time_series_processed.index)
    mask.iloc[result] = True
    # Re-index the mask to include any timestamps that were
    # filtered out as outliers
    mask = mask.reindex(time_series.index, fill_value=False)
    return mask
def add_to_instapaper(account, res, limit, interests):
    """Processes and calls post on array of hackernews post objects.

    :param account: user's Instapaper account information (username, password)
    :param res: array of hackernews post objects, or a single post dict
    :param limit: maximum number of posts to add, or None for no limit
    :param interests: array of subdomains the user is interested in
    """
    username, password = account[0], account[1]
    # Single post object: submit it directly if it carries a URL.
    if not isinstance(res, list):
        if "url" in res:
            post(username, password, res["url"])
        return
    # Keep only well-formed posts (must have both a URL and a title).
    posts = [p for p in res if "url" in p and "title" in p]
    if interests:
        print("[~] Filtering posts...")
        posts = filter_posts(posts, interests)
    if limit is not None:
        added = 0
        for p in posts:
            added += post(username, password, p["url"])
            if added == limit:
                break
    else:
        for p in posts:
            post(username, password, p["url"])
def read_rds(filepath):
    """Read an RDS-format matrix into a Pandas dataframe.

    Location can be data, scratch, or results. The first column becomes
    the index. Raises ValueError if the matrix contains any NaN values.
    """
    frame = pyreadr.read_r(filepath)[None]
    if frame.isnull().values.any():
        raise ValueError("NaN's were found in the data matrix.")
    index_column = frame.columns[0]
    return frame.set_index(index_column, drop=True)
def history():
    """Show history of transactions for the logged-in user."""
    # Read Transactions database for desired elements
    transactions = db.execute("SELECT symbol, share, price, method, timestamp FROM Transactions WHERE id = :uid", uid = session["user_id"])
    # Format prices as USD strings (2 decimal places) for display.
    for transaction in transactions:
        transaction["price"] = usd(transaction["price"])
    return render_template("history.html", transactions = transactions)
def make_pdf(outfile, target=None, bypass_errors=False, remove_tmp=True,
             only_page="", es_upload=NO_ES_UP):
    """Use prince to convert several HTML files into a PDF.

    Renders the target's pages in "pdf" mode into a temp directory, then
    invokes the Prince executable over the resulting HTML files.

    :param outfile: output PDF filename; DEFAULT_PDF_FILE picks a default.
    :param target: build target name; None means the default target.
    :param bypass_errors: continue past recoverable rendering errors.
    :param remove_tmp: delete the temp working directory afterwards.
    :param only_page: restrict the build to a single matching page.
    :param es_upload: Elasticsearch upload mode forwarded to render_pages.
    """
    logger.info("rendering PDF-able versions of pages...")
    target = get_target(target)
    temp_files_path = temp_dir()
    render_pages(target=target, mode="pdf", bypass_errors=bypass_errors,
                 temp_files_path=temp_files_path, only_page=only_page,
                 es_upload=es_upload)
    # Choose a reasonable default filename if one wasn't provided yet
    if outfile == DEFAULT_PDF_FILE:
        outfile = default_pdf_name(target)
    # Prince will need the static files, so copy them over
    copy_static_files(out_path=temp_files_path)
    # Make sure the path we're going to write the PDF to exists
    if not os.path.isdir(config["out_path"]):
        logger.info("creating output folder %s" % config["out_path"])
        os.makedirs(config["out_path"])
    abs_pdf_path = os.path.abspath(os.path.join(config["out_path"], outfile))
    # Start preparing the prince command
    args = [config["prince_executable"], '--javascript', '-o', abs_pdf_path, '--no-warn-css']
    pages = get_pages(target, bypass_errors)
    if only_page:
        # Keep at most the first page matching the only_page pattern.
        pages = [p for p in pages if match_only_page(only_page, p)][:1]
        if not len(pages):
            recoverable_error("Couldn't find 'only' page %s" % only_page,
                              bypass_errors)
            return
    # Each HTML output file in the target is another arg to prince
    args += [p["html"] for p in pages]
    # Change dir to the tempfiles path; this may avoid a bug in Prince
    old_cwd = os.getcwd()
    os.chdir(temp_files_path)
    logger.info("generating PDF: running %s..." % " ".join(args))
    prince_resp = subprocess.check_output(args, universal_newlines=True)
    print(prince_resp)
    # Clean up the tempdir now that we're done using it
    os.chdir(old_cwd)
    if remove_tmp:
        remove_tree(temp_files_path)
def _load_expert_models(scenario_name, run_id, len_stream):
    """Load ExML expert models, downloading them first if necessary.

    :param scenario_name: name of the benchmark scenario.
    :param run_id: run identifier used in the remote and local paths.
    :param len_stream: number of expert models to load (one per experience).
    :return: list of models in eval mode, loaded on the CPU.
    """
    base_dir = default_dataset_location(
        f"EXML_CLVISION22_PRETRAINED_EXPERTS/{scenario_name}/run{run_id}"
    )
    weburl = (
        f"http://131.114.50.174/data/EXML_CLVISION22_PRETRAINED_EXPERTS"
        f"/{scenario_name}/run{run_id}"
    )
    experts_stream = []
    for i in range(len_stream):
        fname_i = f"{base_dir}/model_e{i}.pt"
        weburl_i = f"{weburl}/model_e{i}.pt"
        if not os.path.exists(fname_i):
            os.makedirs(base_dir, exist_ok=True)
            print(f"Downloading expert model {i}")
            urllib.request.urlretrieve(weburl_i, fname_i)
        # map_location="cpu" ensures checkpoints saved on a GPU can be
        # deserialized on CPU-only hosts (plain torch.load would try to
        # restore tensors onto the original CUDA device and fail).
        model = torch.load(fname_i, map_location="cpu")
        model.eval()
        experts_stream.append(model)
    return experts_stream
def generate_args(job_name, common, cloud_provider, image, k8s_version,
                  test_suite, job):
    """Returns a list of args fetched from the given fields."""
    # Collect the args from every config section in a fixed order.
    sections = (common, cloud_provider, image, k8s_version, test_suite, job)
    collected = []
    for section in sections:
        collected.extend(get_args(job_name, section))
    return collected
def isPrime(n):
    """
    Check whether a positive integer is prime, by trial division.

    Uses the exact integer bound ``i * i <= n`` instead of
    ``math.ceil(math.sqrt(n))`` so the result stays correct for
    arbitrarily large integers (float sqrt loses precision past 2**53).
    Only odd candidates are tried after handling 2.

    :param n: integer to test.
    :return: True if ``n`` is prime, False otherwise.
    """
    if n <= 1:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    i = 3
    while i * i <= n:
        if n % i == 0:
            return False
        i += 2
    return True
def plotCorrelation(X, Y, history=10000, name=None, group=None):
    """Plotting the correlation of two parameters X and Y over time.
    (Using a buffer in the backend).
    Args:
        :X(Record): An event parameter, e.g. hit rate
        :Y(Record): An event parameter, e.g. some motor position
    Kwargs:
        :history(int): Buffer length
            NOTE(review): not used in the body — init_data hard-codes
            history_length=100; confirm whether it should be passed through.
        :name(str): Plot name; defaults to "Corr(X,Y)".
        :group: Plot group forwarded to init_data.
    """
    if name is None:
        name = "Corr(%s,%s)" %(X.name, Y.name)
    # Register the plot with the broadcast backend only once.
    if (not name in _existingPlots):
        ipc.broadcast.init_data(name, history_length=100, group=group)
        _existingPlots[name] = True
    x,y = (X.data, Y.data)
    # xArray/yArray are module-level accumulation buffers shared across calls.
    xArray.append(x)
    yArray.append(y)
    correlation = x*y/(np.mean(xArray)*np.mean(yArray))
    ipc.new_data(name, correlation)
def reconstruction_loss(loss_type: str,
                        in_dim: Tuple[int],
                        x: torch.Tensor,
                        x_reconstr: torch.Tensor,
                        logits: bool = True,
                        ) -> torch.Tensor:
    """
    Computes per-sample reconstruction loss (mse or cross-entropy)
    without mean reduction (used in VAE objectives).

    :param loss_type: "mse" for squared error, "ce" for binary cross-entropy.
    :param in_dim: input dimensions; only used by the "ce" branch to reshape.
    :param x: target tensor, first dimension is the batch.
    :param x_reconstr: reconstruction; raw logits when ``logits`` is True.
    :param logits: whether ``x_reconstr`` holds logits (only for "ce").
    :return: tensor of per-sample losses (no mean reduction).
    :raises NotImplementedError: for any other ``loss_type``.
    """
    batch_dim = x.size(0)
    if loss_type == "mse":
        reconstr_loss = 0.5 * torch.sum(
            (x_reconstr.reshape(batch_dim, -1) - x.reshape(batch_dim, -1))**2, 1)
    elif loss_type == "ce":
        # np.prod replaces np.product, which was deprecated and then
        # removed in NumPy 2.0.
        rs = (np.prod(in_dim[:2]),)
        if len(in_dim) == 3:
            rs = rs + (in_dim[-1],)
        xe = (F.binary_cross_entropy_with_logits if
              logits else F.binary_cross_entropy)
        reconstr_loss = xe(x_reconstr.reshape(-1, *rs), x.reshape(-1, *rs),
                           reduction='none').sum(-1)
    else:
        raise NotImplementedError("Reconstruction loss must be 'mse' or 'ce'")
    return reconstr_loss
def NOR(*variables):
    """NOR.
    Return the boolean expression for the NOR of the variables. Equivalent to
    ``NOT(OR(*variables))``.
    Parameters
    ----------
    *variables : arguments.
        ``variables`` can be of arbitrary length. Each variable can be a
        hashable object, which is the label of the boolean variable, or a dict
        (or subclass of dict) representing a boolean expression.
    Return
    ------
    P : ``qubovert.PUBO`` object or same type as ``type(variables[0])``.
        The boolean expression for the logic operation.
        If ``variables[0]`` is a ``qubovert.QUBO``, ``qubovert.PCBO``,
        ``qubovert.utils.QUBOMatrix``, or ``qubovert.utils.PUBOMatrix`` object,
        then ``type(P) == type(variables[0])``.
    Example
    -------
    >>> from qubovert.sat import NOR
    >>> P = NOR(0, 1)
    >>> P
    {(0,): -1, (0, 1): 1, (1,): -1, (): 1}
    >>> P.value({0: 0, 1: 0})
    1
    >>> P.value({0: 1, 1: 1})
    0
    >>> type(P)
    qubovert._pubo.PUBO
    >>> from qubovert import boolean_var
    >>> x, y = boolean_var('x'), boolean_var('y')
    >>> P = NOR(x, y)
    >>> type(P)
    qubovert.PCBO
    """
    # NOR is simply the negation of the disjunction of the inputs.
    disjunction = OR(*variables)
    return NOT(disjunction)
def post_index(new_index, old_index, alias, index_name, settings):
    """
    Perform post-indexing tasks:
    * Optimize (which also does a refresh and a flush by default).
    * Update settings to reset number of replicas.
    * Point the alias to this new index.
    * Unflag the database.
    * Remove the old index.
    * Output the current alias configuration.
    """
    _print('Optimizing, updating settings and aliases.', alias)
    # Optimize.
    ES.indices.optimize(index=new_index)
    # Update the replicas.
    ES.indices.put_settings(index=new_index, body=settings)
    # Add and remove aliases atomically in a single update_aliases call.
    actions = [
        {'add': {'index': new_index, 'alias': alias}}
    ]
    if old_index:
        actions.append(
            {'remove': {'index': old_index, 'alias': alias}}
        )
    ES.indices.update_aliases(body=dict(actions=actions))
    _print('Unflagging the database.', alias)
    Reindexing.unflag_reindexing(alias=alias)
    _print('Removing index {index}.'.format(index=old_index), alias)
    if old_index and ES.indices.exists(index=old_index):
        ES.indices.delete(index=old_index)
    # Collect the alias configuration of every indexer for the final report.
    alias_output = ''
    for indexer in INDEXERS:
        alias = ES_INDEXES[indexer.get_mapping_type_name()]
        # NOTE(review): ``unicode`` is Python-2-only; this module presumably
        # targets Python 2 — confirm before porting.
        alias_output += unicode(ES.indices.get_aliases(index=alias)) + '\n'
    _print('Reindexation done. Current aliases configuration: '
           '{output}\n'.format(output=alias_output), alias)
def timing_run(args, shell: bool = False, stdin=None, stdout=None, stderr=None,
               environ=None, cwd=None, resources=None, identification=None, shuffle=False) -> RunResult:
    """
    Create a timing process with stream: replays a timestamped stdin script
    against a child process and records timestamped stdout/stderr.
    :param args: arguments for execution
    :param shell: use shell to execute args
    :param stdin: stdin stream (none means nothing)
    :param stdout: stdout stream (none means nothing)
    :param stderr: stderr stream (none means nothing)
    :param environ: environment variables
    :param cwd: new work dir
    :param resources: resource limit
    :param identification: user and group for execution
    :param shuffle: Shuffle the inputs with similar timestamp.
    :return: run result of this time
    """
    # Fall back to in-memory buffers for any stream the caller omitted,
    # and remember which ones we own so eclosing() closes only those.
    stdin_need_close = not stdin
    stdin = stdin or io.BytesIO()
    stdout_need_close = not stdout
    stdout = stdout or io.BytesIO()
    stderr_need_close = not stderr
    stderr = stderr or io.BytesIO()
    with eclosing(stdin, stdin_need_close) as stdin, \
            eclosing(stdout, stdout_need_close) as stdout, \
            eclosing(stderr, stderr_need_close) as stderr:
        # Parse the timestamped stdin script.
        _stdin = TimingStdin.loads(_try_read_to_bytes(stdin))
        if shuffle:
            _stdin = _stdin.to_shuffled()
        with interactive_process(
                args=args, shell=shell,
                environ=environ, cwd=cwd,
                resources=resources, identification=identification,
        ) as ip:
            # Replay each input line at (approximately) its recorded offset
            # from process start, sleeping in small slices so a finished
            # process is noticed promptly.
            for _time, _line in _stdin.lines:
                _target_time = ip.start_time + _time
                while time.time() < _target_time and not ip.completed:
                    time.sleep(max(min(0.2, _target_time - time.time()), 0.0))
                try:
                    ip.print_stdin(_line)
                except BrokenPipeError:
                    # The child closed its stdin; stop feeding input.
                    break
            ip.close_stdin()
            # Demultiplex the child's timestamped output into the two streams.
            _stdout, _stderr = [], []
            for _time, _tag, _line in ip.output_yield:
                if _tag == 'stdout':
                    _stdout.append((_time, _line))
                elif _tag == 'stderr':
                    _stderr.append((_time, _line))
                else:
                    raise ValueError('Unknown output type - {type}.'.format(type=repr(_time)))  # pragma: no cover
            ip.join()
            _try_write(stdout, TimingStdout.loads(_stdout).dumps())
            _try_write(stderr, TimingStderr.loads(_stderr).dumps())
            return ip.result
def alterMethods(cls):
    """
    Alter Monte methods on behalf of AutoHelp.
    Return the signatures of the altered methods.
    NOT_RPYTHON
    """
    atoms = []
    imports = set()
    # Deliberate mutable default: acts as a closure-local counter so each
    # generated local name (_0, _1, ...) is unique within this class.
    def nextName(nameIndex=[0]):
        name = "_%d" % nameIndex[0]
        nameIndex[0] += 1
        return name
    # Namespace handed to the exec below; the generated dispatcher needs
    # Refused plus every interned atom discovered while walking the methods.
    execNames = {"Refused": Refused}
    dispatchClauses = []
    d = {}
    # Walk the MRO and harvest Monte methods. The repacker has already placed
    # them in the correct location.
    for c in reversed(cls.__mro__):
        if hasattr(c, "_monteMethods_"):
            d.update(c._monteMethods_)
    # NOTE: Python 2 / RPython translation-time code (iteritems, exec
    # statement). Runs once per class while the interpreter is being built.
    for attr, (f, verb, args, kwargs, rv) in d.iteritems():
        # The verb is now Unicode.
        verb = verb.decode("utf-8")
        assignments = []
        if isStarArgs(args):
            # Star-args methods match on the verb alone and receive the raw
            # argument list unchanged.
            atomTest = "atom.verb == %r" % verb
            call = "self.%s(args)" % attr
        else:
            atomName = nextName()
            execNames[atomName] = atom = getAtom(verb, len(args))
            atoms.append(atom)
            atomTest = "atom is %s" % atomName
            argNames = []
            for i, arg in enumerate(args):
                argName = nextName()
                argNames.append(argName)
                assignments.append("%s = args[%d]" % (argName, i))
                if arg != "Any":
                    # Guarded argument: extend the dispatch test with a type
                    # predicate and unwrap the value before the method call.
                    unwrapperModule = wrappers[arg]
                    pred = "is" + arg
                    imports.add("from %s import %s" % (unwrapperModule, pred))
                    atomTest += " and %s(args[%d])" % (pred, i)
                    unwrapper = "unwrap" + arg
                    imports.add("from %s import %s" % (unwrapperModule,
                                                       unwrapper))
                    assignments.append("%s = %s(%s)" % (argName, unwrapper,
                                                        argName))
            for k, v in kwargs.iteritems():
                kwargName = nextName()
                argNames.append("%s=%s" % (k, kwargName))
                assignments.append("%s = namedArgs.extractStringKey(%r, None)"
                                   % (kwargName, k.decode("utf-8")))
                if v != "Any":
                    unwrapperModule = wrappers[v]
                    unwrapper = "unwrap" + v
                    imports.add("from %s import %s" % (unwrapperModule,
                                                       unwrapper))
                    # NOTE(review): the generated condition unwraps only when
                    # the extracted value *is* None, which looks inverted
                    # ("is not None" expected) -- confirm against upstream
                    # before relying on guarded named arguments.
                    assignments.append("%s = %s(%s) if %s is None else None" %
                                       (kwargName, unwrapper, kwargName, kwargName))
            call = "self.%s(%s)" % (attr, ",".join(argNames))
        retvals = []
        if rv == "Any":
            # No wrapping.
            retvals.append("return rv")
        elif rv == "Void":
            # Enforced correctness. Disobedience will not be tolerated.
            retvals.append("assert rv is None, 'habanero'")
            retvals.append("from typhon.objects.constants import NullObject")
            retvals.append("return NullObject")
        else:
            wrapperModule = wrappers[rv]
            wrapper = "wrap" + rv
            imports.add("from %s import %s" % (wrapperModule, wrapper))
            retvals.append("return %s(rv)" % wrapper)
        # One guard clause per method; py.code.Source dedents the template.
        dispatchClauses.append("""
    if %s:
        %s
        rv = %s
        %s
""" % (atomTest, ";".join(assignments), call, ";".join(retvals)))
        setattr(cls, attr, f)
    # Temporary. Soon, all classes shall receive AutoHelp, and no class will
    # have a handwritten recv().
    if dispatchClauses:
        # Python 2 exec statement: compile the dispatcher into execNames and
        # install the resulting function on the class below.
        exec py.code.Source("""
def recvNamed(self, atom, args, namedArgs):
    %s
    %s
    rv = self.mirandaMethods(atom, args, namedArgs)
    if rv is None:
        raise Refused(self, atom, args)
    else:
        return rv
""" % (";".join(imports), "\n".join(dispatchClauses))).compile() in execNames
        cls.recvNamed = execNames["recvNamed"]
    return atoms
def make_multiclouds(docs: List[Union[dict, object, str, tuple]],
                     opts: dict = None,
                     ncols: int = 3,
                     title: str = None,
                     labels: List[str] = None,
                     show: bool = True,
                     figure_opts: dict = None,
                     round: int = None
                     ):
    """Make multiclouds.

    Accepts data from a string, list of lists or tuples, a dict with
    terms as keys and counts/frequencies as values, or a dataframe.
    The best input is a dtm produced by `get_dtm_table()`.

    Args:
        docs (List[Union[dict, object, str, tuple]]): The data. Accepts a list of text strings, a list of tuples,
            or dicts with the terms as keys and the counts/frequencies as values, or a dataframe with "term" and
            "count" or "frequency" columns.
        opts (dict): The WordCloud() options.
            For testing, try {"background_color": "white", "max_words": 2000, "contour_width": 3, "contour_width": "steelblue"}
        ncols (int): The number of columns in the grid.
        title (str): The title of the grid.
        labels (List[str]): The document labels for each subplot.
        show (bool): Whether to show the plotted word clouds or return them as WordCloud objects.
        figure_opts (dict): A dict of matplotlib figure options.
        round (int): An integer (generally between 100-300) to apply a mask that rounds the word cloud.

    Returns:
        object: A list of WordCloud objects if show is set to False.

    Notes:
        - For a full list of options, see https://amueller.github.io/word_cloud/generated/wordcloud.WordCloud.html#wordcloud-wordcloud.
        - If `show=False` the function expects to be called with something like `multiclouds = make_multiclouds(data, show=False)`.
          This returns a list of WordCloud objects which can be manipulated by any of their methods, such as `to_file()`.
          See the WordCloud documentation for a list of methods.
    """
    # Work on copies so callers' dicts are never mutated, and tolerate the
    # None defaults (the original crashed on opts["mask"] and
    # figure_opts["constrained_layout"] when these were omitted).
    opts = dict(opts) if opts else {}
    figure_opts = dict(figure_opts) if figure_opts else {}
    # Process the docs data into a list
    if isinstance(docs, pd.core.frame.DataFrame):
        # Assumes a df with columns: Terms, Doc_Label, DocLabel,...
        # Transpose the df
        docs = docs.T
        # Grab the first row for the header
        new_header = docs.iloc[0]
        # Drop the first row
        docs = docs[1:]
        # Set the header row as the df header
        docs.columns = new_header
        # Convert to one dict per document
        docs = docs.to_dict(orient="records")
    # Ensure that anything that is not a list of strings is converted
    # to the appropriate format.
    elif isinstance(docs, list):
        if all(isinstance(s, str) for s in docs):
            pass
        else:
            # BUG FIX: the original built {x[0:1]: x[1:2]} dicts, i.e. dicts
            # with tuple keys and tuple values, which WordCloud cannot
            # consume. Map term -> value instead, and pass through documents
            # that are already dicts.
            docs = [
                data if isinstance(data, dict) else {x[0]: x[1] for x in data}
                for data in docs
            ]
    # List for multiple word clouds if they are to be returned.
    multiclouds = []
    # Create a rounded mask.
    if round:
        x, y = np.ogrid[:300, :300]
        mask = (x - 150) ** 2 + (y - 150) ** 2 > round ** 2
        mask = 255 * mask.astype(int)
        opts["mask"] = mask
    # Constrain the layout
    figure_opts["constrained_layout"] = True
    # Create the figure.
    fig = plt.figure(**figure_opts)
    # Add the title
    if title:
        fig.suptitle(title)
    # Calculate the number of rows and columns.
    nrows = int(np.ceil(len(docs) / ncols))
    spec = fig.add_gridspec(nrows, ncols)
    # Divide the data into rows.
    rows = list(get_rows(docs, ncols))
    # Index into `labels` across the whole grid.
    i = 0
    # Loop through the rows.
    for row, doc in enumerate(rows):
        # Loop through the documents in the row.
        for col, data in enumerate(doc):
            # Create a subplot.
            ax = fig.add_subplot(spec[row, col])
            # Generate the subplot's word cloud.
            if isinstance(data, str):
                wordcloud = WordCloud(**opts).generate_from_text(data)
            else:
                wordcloud = WordCloud(**opts).generate_from_frequencies(data)
            # If `show=True`, show the word cloud.
            if show:
                ax.imshow(wordcloud)
                ax.axis("off")
                # Set the image title from the label
                if labels:
                    ax.set_title(labels[i])
                i += 1
            # Otherwise, add the word cloud to the multiclouds list.
            else:
                multiclouds.append(wordcloud)
    # If `show=False`, return the multiclouds list.
    if not show:
        return multiclouds
def action_prop(param, val=1):
    """Build a no-argument action method that sets ``param`` to ``val``.

    The returned callable is intended to be attached to a class; invoking
    it forwards to ``self.setter(param, val)``.
    """
    def _action(self):
        self.setter(param, val)
    return _action
def find_or_create(find, create):
    """Return the result of ``find()``; if it is falsy, fall back to ``create()``."""
    found = find()
    if found:
        return found
    return create()
def Gsigma(sigma):
    """Return a one-dimensional Gaussian density G(x) with standard deviation ``sigma``.

    G(x) = exp(-x^2 / (2*sigma^2)) / sqrt(2*pi*sigma^2)
    """
    two_var = 2.0 * sigma ** 2
    norm = (math.pi * two_var) ** 0.5
    def G(x):
        return math.e ** (-(x ** 2) / two_var) / norm
    return G
def session_to_ical(session, detailed=False):
    """Serialize a session into an iCal.

    :param session: The session to serialize
    :param detailed: If True, iCal will include the session's contributions
    """
    cal = icalendar.Calendar()
    cal.add('version', '2.0')
    cal.add('prodid', '-//CERN//INDICO//EN')
    # UID linking the components back to the parent event.
    event_uid = f'indico-event-{session.event.id}@{url_parse(config.BASE_URL).host}'
    if detailed:
        from indico.modules.events.contributions.ical import generate_contribution_component
        scheduled_contribs = (Contribution.query.with_parent(session)
                              .filter(Contribution.is_scheduled)
                              .all())
        for contrib in scheduled_contribs:
            cal.add_component(generate_contribution_component(contrib, event_uid))
    else:
        cal.add_component(generate_session_component(session, event_uid))
    return cal.to_ical()
def add_selection_methods_to_method_holder():
    """
    Adds the default methods to the SelectionMethodHolder.
    """
    line_chart_handlers = {
        SelectionTypes.HorizontalLine: get_selection_line_chart_horizontal_rect,
        SelectionTypes.VerticalLine: get_selection_line_chart_vertical_rect,
        SelectionTypes.Rectangle: get_selection_line_chart_rect,
        SelectionTypes.Lasso: get_selection_line_chart_lasso,
    }
    # All bar chart variants share the same selection handlers.
    bar_chart_handlers = {
        SelectionTypes.HorizontalLine: get_selection_bar_chart_horizontal_rect,
        SelectionTypes.VerticalLine: get_selection_bar_chart_vertical_rect,
        SelectionTypes.Rectangle: get_selection_bar_chart_rect,
        SelectionTypes.Lasso: get_selection_bar_chart_lasso,
    }
    scatter_plot_handlers = {
        SelectionTypes.HorizontalLine: get_selection_scatter_plot_horizontal_rect,
        SelectionTypes.VerticalLine: get_selection_scatter_plot_vertical_rect,
        SelectionTypes.Rectangle: get_selection_scatter_plot_rect,
        SelectionTypes.Circle: get_selection_scatter_plot_circle,
        SelectionTypes.Lasso: get_selection_scatter_plot_lasso,
    }
    # TODO: Fix selection with pcp.
    pcp_handlers = {
        SelectionTypes.HorizontalLine: get_selection_parallel_coordinates_plot_horizontal_rect,
        SelectionTypes.VerticalLine: get_selection_parallel_coordinates_plot_vertical_rect,
        SelectionTypes.Rectangle: get_selection_parallel_coordinates_plot_rect,
        SelectionTypes.Lasso: get_selection_parallel_coordinates_plot_lasso,
    }
    registrations = (
        (LineChart, line_chart_handlers),
        (BarChartBase, bar_chart_handlers),
        (BarChart, bar_chart_handlers),
        (BarChartManyObjects, bar_chart_handlers),
        (BarChartOneObject, bar_chart_handlers),
        (ScatterPlot, scatter_plot_handlers),
        (ParallelCoordinatesPlot, pcp_handlers),
    )
    for chart_class, handlers in registrations:
        for selection_type, handler in handlers.items():
            SelectionMethodHolder.add_selection_method(chart_class, selection_type, handler)
def test_envelope_fails_on_message_empty_protocol_specification_id():
    """Check message.protocol_specification_id."""

    class BadMessage(Message):
        # Declares a protocol_id but no protocol specification id.
        protocol_id = "some/some:0.1.0"

    bad_message = BadMessage()
    with pytest.raises(ValueError):
        Envelope(message=bad_message, to="1", sender="1")
def list_account_roles(nextToken=None, maxResults=None, accessToken=None, accountId=None):
    """
    Lists all roles that are assigned to the user for a given AWS account.

    See also: AWS API Documentation.

    Example::

        response = client.list_account_roles(
            nextToken='string',
            maxResults=123,
            accessToken='string',
            accountId='string'
        )

    :type nextToken: string
    :param nextToken: The page token from the previous response output when you request subsequent pages.

    :type maxResults: integer
    :param maxResults: The number of items that clients can request per page.

    :type accessToken: string
    :param accessToken: [REQUIRED] The token issued by the CreateToken API call.
        For more information, see CreateToken in the AWS SSO OIDC API Reference Guide.

    :type accountId: string
    :param accountId: [REQUIRED] The identifier for the AWS account that is assigned to the user.

    :rtype: dict
    :return: ``{'nextToken': 'string', 'roleList': [{'roleName': 'string', 'accountId': 'string'}, ...]}``
        where ``nextToken`` is the token used to retrieve the next page and each
        ``roleList`` entry holds the friendly role name and the account it belongs to.

    Raises:
        SSO.Client.exceptions.InvalidRequestException
        SSO.Client.exceptions.UnauthorizedException
        SSO.Client.exceptions.TooManyRequestsException
        SSO.Client.exceptions.ResourceNotFoundException
    """
    pass
def run_script(script_filepath, config_filepath, **kwargs):
    """Run an experiment defined by a script file.

    Args:
        script_filepath (str): input script filepath
        config_filepath (str): input configuration filepath
        **kwargs: forwarded to the script's ``run`` function;
            ``manual_config_load`` selects lazy configuration loading.
    """
    # Make the script's folder, the configuration's folder and the current
    # working directory importable; inserting each at index 0 in this order
    # leaves the CWD first on sys.path.
    for extra_path in (Path(script_filepath).resolve().parent.as_posix(),
                       Path(config_filepath).resolve().parent.as_posix(),
                       os.getcwd()):
        sys.path.insert(0, extra_path)
    module = load_module(script_filepath)
    _check_script(module)
    run_fn = module.__dict__['run']
    # Setup configuration
    if kwargs.get('manual_config_load', False):
        config = _ConfigObject(config_filepath, script_filepath)
    else:
        config = _setup_config(config_filepath, script_filepath)
    logger = logging.getLogger(module.__name__)
    setup_logger(logger, logging.INFO)
    try:
        run_fn(config, logger=logger, **kwargs)
    except KeyboardInterrupt:
        logger.info("Catched KeyboardInterrupt -> exit")
    except Exception as e:  # noqa
        logger.exception("")
        raise e
def _normalize_hosts(hosts):
    """
    Helper function to transform hosts argument to
    :class:`~elasticsearch.Elasticsearch` to a list of dicts.
    """
    # if hosts are empty, just defer to defaults down the line
    if hosts is None:
        return [{}]
    # passed in just one string
    if isinstance(hosts, string_types):
        hosts = [hosts]
    out = []
    # normalize hosts to dicts
    for host in hosts:
        if isinstance(host, string_types):
            if "://" not in host:
                # urlparse needs a scheme separator to populate the netloc
                host = "//%s" % host
            parsed_url = urlparse(host)
            h = {"host": parsed_url.hostname}
            if parsed_url.port:
                h["port"] = parsed_url.port
            if parsed_url.scheme == "https":
                h["port"] = parsed_url.port or 443
                h["use_ssl"] = True
            if parsed_url.username or parsed_url.password:
                # BUG FIX: either component may be absent (None); the original
                # passed None straight to unquote(), which raises TypeError.
                h["http_auth"] = "%s:%s" % (
                    unquote(parsed_url.username or ""),
                    unquote(parsed_url.password or ""),
                )
            if parsed_url.path and parsed_url.path != "/":
                h["url_prefix"] = parsed_url.path
            out.append(h)
        else:
            out.append(host)
    return out
def simple_aggregate(raw_dir: Path):
    """
    A simple, sequential version of the aggregate functionality.

    This combines uniref sequences with < 20% sequence identity into
    a single fasta file of 200k sequences.

    Args:
        raw_dir: Directory whose subdirectories each contain exactly one
            ``.fasta`` file and one ``.xml`` (BLAST output) file.
    """
    seqrecs = []
    count = 0
    total = 0
    dirs = list(raw_dir.iterdir())
    np.random.shuffle(dirs)
    for d in dirs:
        total += 1
        fastas = sorted(d.glob("*.fasta"))
        xmls = sorted(d.glob("*.xml"))
        if len(fastas) != 1 or len(xmls) != 1:
            print("More than 1 fasta or xml file in dir %s" % str(d))
        try:
            # BUG FIX: close the XML handle; the original leaked an open
            # file per directory.
            with open(xmls[0]) as xml_handle:
                b_recs = list(NCBIXML.parse(xml_handle))
        except Exception as e:
            print("Exception encountered for dir %s: %s" % (str(d), str(e)))
            continue
        # Keep only sequences whose best identity against all BLAST records
        # is below 20%.
        identity = max((max(calc_identities(b_rec)) for b_rec in b_recs))
        if identity < .2:
            parsed_recs = [rec for rec in SeqIO.parse(fastas[0], "fasta")]
            if len(parsed_recs) != 1:
                # BUG FIX: the original referenced an undefined name ``f``
                # here, raising NameError instead of printing the warning.
                print("File %s has more than 1 record?" % str(fastas[0]))
            seqrec = parsed_recs[0]
            seqrecs.append(seqrec)
            count += 1
            if count % 5000 == 0:
                print("%d sequences added out of %d read" % (count, total))
    SeqIO.write(seqrecs, "cullUR50_20pc_13M.fasta", "fasta")
def data(self: Client) -> DataProxy:
    """Delegates to a
    :py:class:`mcipc.rcon.je.commands.data.DataProxy`
    """
    # Each access builds a fresh proxy bound to this client and rooted at
    # the 'data' command.
    return DataProxy(self, 'data')
def proximal_policy_optimization_loss(advantage, old_prediction, loss_clipping=0.2, entropy_loss=5e-3):
    """
    Clipped-surrogate PPO loss.

    https://github.com/LuEE-C/PPO-Keras/blob/master/Main.py
    # Only implemented clipping for the surrogate loss, paper said it was best

    :param advantage: advantage estimates for the sampled actions
    :param old_prediction: action probabilities under the behavior policy
    :param loss_clipping: clip range epsilon for the probability ratio
    :param entropy_loss: entropy bonus coefficient
    :return: a Keras-compatible loss(y_true, y_pred) function
    """
    def loss(y_true, y_pred):
        # One-hot y_true selects the probability of the taken action.
        new_prob = K.sum(y_true * y_pred, axis=-1)
        old_prob = K.sum(y_true * old_prediction, axis=-1)
        ratio = new_prob / (old_prob + 1e-10)
        clipped_ratio = K.clip(ratio, min_value=1 - loss_clipping, max_value=1 + loss_clipping)
        surrogate = K.minimum(ratio * advantage, clipped_ratio * advantage)
        entropy = -(new_prob * K.log(new_prob + 1e-10))
        return -K.mean(surrogate + entropy_loss * entropy)
    return loss
def tokenize_string(string):
    """Split a string up into analyzable characters.

    Returns a list where each entry is a base character grouped together
    with any combining accent marks (U+0300..U+036F) that follow it. The
    input is normalized first via ``normalize_string``.
    """
    # Any character followed by zero or more combining diacritics.
    return re.findall('.[\u0300-\u036F]*', normalize_string(string))
def test_azure_firewall_network_rule_create_command_for_firewall(requests_mock):
    """
    Scenario: Create network rule in firewall rule collection.
    Given:
     - User has provided valid credentials.
    When:
     - azure-firewall-network-rule-create called.
    Then:
     - Ensure outputs prefix is correct.
     - Ensure the firewall name updated is the same as in the context returned.
    """
    from AzureFirewall import azure_firewall_network_rule_create_command
    authorization_mock(requests_mock)
    client = get_client_mock()
    firewall_name = 'xsoar-firewall'
    firewall_url = f'{BASE_URL}/azureFirewalls/{firewall_name}'
    # PUT returns the created collection, GET the refreshed firewall state.
    create_response = json.loads(
        load_mock_response('test_data/network_rule/firewall_network_rule_collection_create.json'))
    requests_mock.put(firewall_url, json=create_response)
    get_response = json.loads(load_mock_response('test_data/firewall/firewall_get.json'))
    requests_mock.get(firewall_url, json=get_response)
    arguments = {
        'collection_name': 'my-network-rule-collection',
        'description': 'my-poc-collection',
        'destination_ports': '8080',
        'destination_type': 'ip_address',
        'destinations': '189.160.40.11,189.160.40.11',
        'firewall_name': firewall_name,
        'protocols': 'UDP,TCP',
        'rule_name': 'my-ip-rule',
        'source_ips': '189.160.40.11,189.160.40.11',
        'source_type': 'ip_address',
    }
    result = azure_firewall_network_rule_create_command(client, arguments)
    assert result.outputs_prefix == 'AzureFirewall.Firewall'
    assert result.outputs[0].get('name') == firewall_name
def onSuccessfulTorrentAdd(mediaInfoRecord, mode):
    """
    onSuccessfulTorrentAdd groups operations that should be run upon successfully initiating a download, including updating the mediaIndex.json file and sending an email
    :testedWith: TestNewTorrentController:test_onSuccessfulTorrentAdd
    :return: None
    """
    # NOTE(review): ``mode`` is unused here -- confirm callers' expectations.
    # Persist the updated record to the media index file.
    MediaIndexFileInterface.writeMediaFile(mediaInfoRecord)
    # Build and send the email notification.
    torrent = mediaInfoRecord.getTorrentRecord()
    body = (f'ADDED TORRENT: {torrent.getName()} '
            f'Latest episode: {mediaInfoRecord.getLatestEpisodeNumber()} \n\n '
            f'Magnet:{torrent.getMagnet()}')
    MailInterface.getInstance().pushMail(body, MailItemType.NEW_TORRENT)
def compute_final_metrics(source_waveforms, separated_waveforms, mixture_waveform):
    """Permutation-invariant SI-SNR, powers, and under/equal/over-separation.

    Args:
        source_waveforms: reference source signals; assumed shape
            (batch, num_sources, num_samples) -- TODO confirm against caller.
        separated_waveforms: estimated source signals, same shape as
            ``source_waveforms``.
        mixture_waveform: the mixture; tiled over the source axis below, so
            presumably shape (batch, 1, num_samples) -- verify.

    Returns:
        Dict of per-example metric tensors and activity bookkeeping (see the
        returned keys).
    """
    # Find the best estimate<->reference permutation by minimizing negative
    # gain-invariant SI-SNR, then reorder the estimates accordingly.
    perm_inv_loss = wrap(lambda tar, est: -signal_to_noise_ratio_gain_invariant(est, tar))
    _, separated_waveforms = perm_inv_loss(source_waveforms,separated_waveforms)
    # Compute separated and source powers.
    power_separated = tf.reduce_mean(separated_waveforms ** 2, axis=-1)
    power_sources = tf.reduce_mean(source_waveforms ** 2, axis=-1)
    # Compute weights for active (separated, source) pairs where source is nonzero
    # and separated power is above threshold of quietest source power - 20 dB.
    weights_active_refs = _weights_for_nonzero_refs(source_waveforms)
    weights_active_seps = _weights_for_active_seps(
        tf.boolean_mask(power_sources, weights_active_refs), power_separated)
    weights_active_pairs = tf.logical_and(weights_active_refs,
                                          weights_active_seps)
    # Compute SI-SNR.
    sisnr_separated = signal_to_noise_ratio_gain_invariant(separated_waveforms, source_waveforms)
    num_active_refs = tf.math.reduce_sum(tf.cast(weights_active_refs, tf.int32))
    num_active_seps = tf.math.reduce_sum(tf.cast(weights_active_seps, tf.int32))
    num_active_pairs = tf.math.reduce_sum(tf.cast(weights_active_pairs, tf.int32))
    # Baseline: SI-SNR of the raw mixture against each reference source.
    sisnr_mixture = signal_to_noise_ratio_gain_invariant(
        tf.tile(mixture_waveform, (1,source_waveforms.shape[1], 1)),source_waveforms)
    # Compute under/equal/over separation: fewer / as many / more active
    # estimates than active references, encoded as 0/1 floats.
    under_separation = tf.cast(tf.less(num_active_seps, num_active_refs),
                               tf.float32)
    equal_separation = tf.cast(tf.equal(num_active_seps, num_active_refs),
                               tf.float32)
    over_separation = tf.cast(tf.greater(num_active_seps, num_active_refs),
                              tf.float32)
    return {'sisnr_separated': sisnr_separated,
            'sisnr_mixture': sisnr_mixture,
            'sisnr_improvement': sisnr_separated - sisnr_mixture,
            'power_separated': power_separated,
            'power_sources': power_sources,
            'under_separation': under_separation,
            'equal_separation': equal_separation,
            'over_separation': over_separation,
            'weights_active_refs': weights_active_refs,
            'weights_active_seps': weights_active_seps,
            'weights_active_pairs': weights_active_pairs,
            'num_active_refs': num_active_refs,
            'num_active_seps': num_active_seps,
            'num_active_pairs': num_active_pairs}
def main():
    """
    Entry point to the script. Prints answers to the three questions to the
    console.
    """
    # Change this parameter to query other organizations
    repos = all_repos_filtered(api_repo_url('customerio'))
    # No need to convert to datetime objects: the GitHub API returns
    # sortable ISO timestamps, so a plain string sort is correct.
    repos = sorted(repos, key=attrgetter('updated'), reverse=True)
    # Accumulate total open issues and track the most-watched repo in a
    # single pass. (Note: the GitHub API counts pull requests as issues
    # even though the UI counts them separately.)
    issues_total = 0
    top_name, top_watchers = '', 0
    for repo in repos:
        issues_total += repo.open_issues
        if repo.watchers > top_watchers:
            top_name, top_watchers = repo.name, repo.watchers
    print("#1: Open issues across all public repositories:", issues_total)
    print("----------------")
    print("#2: List of all repositories sorted by date updated in descending",
          "order:")
    for repo in repos:
        print(repo.name)
    print("----------------")
    print("#3: Repo with most watchers:", top_name)
    print("    It has", top_watchers, "watchers.")
def test_get_config_and_set_config():
    """Does get_config and set_config work properly"""
    app = create_ctfd()
    with app.app_context():
        # A freshly created CTFd instance is marked as set up.
        assert get_config('setup') == True
        entry = set_config('TEST_CONFIG_ENTRY', 'test_config_entry')
        # The stored value round-trips through both the returned row and
        # a subsequent lookup.
        assert entry.value == 'test_config_entry'
        assert get_config('TEST_CONFIG_ENTRY') == 'test_config_entry'
    destroy_ctfd(app)
def gen_sdc_pandas_series_rolling_impl(pop, put, get_result=result_or_nan,
                                       init_result=numpy.nan):
    """Generate series rolling methods implementations based on pop/put funcs

    Args:
        pop: callable(value, nfinite, result) -> (nfinite, result); removes a
            value sliding out of the window from the accumulator.
        put: callable(value, nfinite, result) -> (nfinite, result); adds a
            value sliding into the window.
        get_result: maps (nfinite, min_periods, accumulator) to the emitted
            output value (NaN when too few finite observations).
        init_result: initial accumulator value for an empty window.

    Returns:
        An ``impl(self)`` function usable as a compiled Series.rolling method
        body; parallelized over chunks via ``prange``.
    """
    def impl(self):
        win = self._window
        minp = self._min_periods
        input_series = self._data
        input_arr = input_series._data
        length = len(input_arr)
        output_arr = numpy.empty(length, dtype=float64)
        # Each chunk is processed independently so iterations of the prange
        # loop can run in parallel; window state is rebuilt per chunk.
        chunks = parallel_chunks(length)
        for i in prange(len(chunks)):
            chunk = chunks[i]
            nfinite = 0
            result = init_result
            if win == 0:
                # Degenerate window: every position gets the empty-window value.
                for idx in range(chunk.start, chunk.stop):
                    output_arr[idx] = get_result(nfinite, minp, result)
                continue
            # Prelude [prelude_start, chunk.start): accumulate the values
            # preceding this chunk without emitting, so the chunk's first
            # positions see the correct trailing window.
            prelude_start = max(0, chunk.start - win + 1)
            prelude_stop = chunk.start
            # Interlude: emit while the window is still filling up.
            interlude_start = prelude_stop
            interlude_stop = min(prelude_start + win, chunk.stop)
            for idx in range(prelude_start, prelude_stop):
                value = input_arr[idx]
                nfinite, result = put(value, nfinite, result)
            for idx in range(interlude_start, interlude_stop):
                value = input_arr[idx]
                nfinite, result = put(value, nfinite, result)
                output_arr[idx] = get_result(nfinite, minp, result)
            # Steady state: slide the full window one position per step.
            for idx in range(interlude_stop, chunk.stop):
                put_value = input_arr[idx]
                pop_value = input_arr[idx - win]
                nfinite, result = put(put_value, nfinite, result)
                nfinite, result = pop(pop_value, nfinite, result)
                output_arr[idx] = get_result(nfinite, minp, result)
        return pandas.Series(output_arr, input_series._index,
                             name=input_series._name)
    return impl
def drop_ships_by_key_value(key_to_check, value_to_check, db):
    """
    Drop every db entry whose ``key_to_check`` field equals ``value_to_check``.

    :param key_to_check: key looked up in each ship's record
    :param value_to_check: value that marks a record for deletion
    :param db: mapping of ship name -> record dict
    :return: None; alters the db in place
    """
    # BUG FIX: iterate over a snapshot of the keys -- deleting from a dict
    # while iterating its live key view raises RuntimeError in Python 3.
    for ship in list(db):
        # BUG FIX: compare with ``==`` instead of ``is``; identity comparison
        # only worked by accident for interned values (small ints, etc.).
        if db[ship][key_to_check] == value_to_check:
            del db[ship]
def lprint(*args: Iterable):
    """Synchronized print: serializes concurrent calls on a shared lock."""
    with lprint.lock:
        print(*args)


# BUG FIX: create the lock once at import time. The original created it
# lazily inside the function guarded by hasattr(), which is itself racy:
# two threads could both observe no lock and install different ones.
lprint.lock = threading.Lock()
def gen_random_colors(num_groups, colors=None):
    """
    Generates random colors.

    Parameters
    ----------
        num_groups : int
            The number of groups for which colors should be generated.

        colors : list : optional (contains strs)
            Hex based colors that should be appended if not enough have been provided.

    Returns
    -------
        colors or colors + new_colors : list (contains strs)
            Randomly generated colors for figures and plotting.
    """
    if colors is None:
        colors = []
    # Top up with random RGBA float tuples until there is one per group.
    # (The redundant outer `if` around this loop was removed.)
    while len(colors) < num_groups:
        cryptogen = SystemRandom()
        colors.append([cryptogen.random() for _ in range(4)])
    sns.set_palette(colors)
    # BUG FIX: guard the inspection of colors[0][0] -- with num_groups == 0
    # and no provided colors the original raised IndexError here.
    if colors and isinstance(colors[0][0], float):
        # Convert RGBA floats to hex strings for non-sns use
        # (to_hex receives only the RGB components, dropping alpha).
        colors = [mpl.colors.to_hex([c[0], c[1], c[2]]).upper() for c in colors]
    return colors
def get_file_creation_date(path):
    """
    Get the file creation date.

    Note: per the Python docs, ``os.path.getctime`` is the metadata-change
    time on Unix, not a true creation time.
    """
    assert_file(path)
    return dt.datetime.fromtimestamp(os.path.getctime(path))
def eval_moses_bleu(ref, hyp):
    """
    Given a file of hypothesis and reference files,
    evaluate the BLEU score using Moses scripts.

    Returns the BLEU score as a float, or -1 if the script output could
    not be parsed.
    """
    assert os.path.isfile(hyp)
    # Multi-reference setups name the files ref0, ref1, ...
    assert os.path.isfile(ref) or os.path.isfile(ref + "0")
    assert os.path.isfile(BLEU_SCRIPT_PATH)
    proc = subprocess.Popen(
        "%s %s < %s" % (BLEU_SCRIPT_PATH, ref, hyp),
        stdout=subprocess.PIPE, shell=True)
    output = proc.communicate()[0].decode("utf-8")
    if not output.startswith("BLEU"):
        logger.warning('Impossible to parse BLEU score! "%s"' % output)
        return -1
    # The score sits between "BLEU = " and the first comma.
    return float(output[7 : output.index(",")])
def lint_mypy(session):
    """Check types with mypy."""
    # Explicit positional targets win; otherwise check the package and tests.
    targets = session.posargs or [PACKAGE_DIR, TESTS_DIR]
    session.install(".[tests,lint]")
    session.run("python", "-m", "mypy", *targets)
def build_graph(sorted_sequence):
    """
    Each node points to a list of the nodes that are reachable from it.

    A node ``b`` is reachable from ``a`` when ``b == a + 1``, ``a + 2`` or
    ``a + 3`` and ``b`` occurs in the sequence. Nodes with no successors are
    absent from the returned mapping.
    """
    elements = set(sorted_sequence)
    # Idiom: defaultdict(list) instead of defaultdict(lambda: []).
    graph = defaultdict(list)
    for element in sorted_sequence:
        for step in (1, 2, 3):
            if element + step in elements:
                graph[element].append(element + step)
    return graph
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.