| content (string, length 22–815k) | id (int64, 0–4.91M) |
|---|---|
def _plot_fronts(front_line_table, ternary_front_matrix, title_string,
                 annotation_string, output_file_name):
    """Plots one set of WPC fronts (either before or after dilation).

    :param front_line_table: See doc for `fronts_io.write_polylines_to_file`.
    :param ternary_front_matrix: numpy array created by
        `machine_learning_utils.dilate_ternary_target_images`.
    :param title_string: Title (will be placed above figure).
    :param annotation_string: Text annotation (will be placed in top left of
        figure).
    :param output_file_name: Path to output file (figure will be saved here).
    """

    # Convert the lat/long plotting domain into NARR-grid row/column limits.
    (narr_row_limits, narr_column_limits
    ) = nwp_plotting.latlng_limits_to_rowcol_limits(
        min_latitude_deg=MIN_LATITUDE_DEG, max_latitude_deg=MAX_LATITUDE_DEG,
        min_longitude_deg=MIN_LONGITUDE_DEG,
        max_longitude_deg=MAX_LONGITUDE_DEG,
        model_name=nwp_model_utils.NARR_MODEL_NAME)

    _, axes_object, basemap_object = nwp_plotting.init_basemap(
        model_name=nwp_model_utils.NARR_MODEL_NAME,
        first_row_in_full_grid=narr_row_limits[0],
        last_row_in_full_grid=narr_row_limits[1],
        first_column_in_full_grid=narr_column_limits[0],
        last_column_in_full_grid=narr_column_limits[1])

    # Geographic reference layers.
    plotting_utils.plot_coastlines(
        basemap_object=basemap_object, axes_object=axes_object,
        line_colour=BORDER_COLOUR)
    plotting_utils.plot_countries(
        basemap_object=basemap_object, axes_object=axes_object,
        line_colour=BORDER_COLOUR)
    plotting_utils.plot_states_and_provinces(
        basemap_object=basemap_object, axes_object=axes_object,
        line_colour=BORDER_COLOUR)
    plotting_utils.plot_parallels(
        basemap_object=basemap_object, axes_object=axes_object,
        bottom_left_lat_deg=-90., upper_right_lat_deg=90.,
        parallel_spacing_deg=PARALLEL_SPACING_DEG)
    plotting_utils.plot_meridians(
        basemap_object=basemap_object, axes_object=axes_object,
        bottom_left_lng_deg=0., upper_right_lng_deg=360.,
        meridian_spacing_deg=MERIDIAN_SPACING_DEG)

    # Subset the (dilated) front grid to the plotting domain; the first axis
    # is a singleton example dimension.
    this_matrix = ternary_front_matrix[
        0, narr_row_limits[0]:(narr_row_limits[1] + 1),
        narr_column_limits[0]:(narr_column_limits[1] + 1)
    ]
    front_plotting.plot_narr_grid(
        frontal_grid_matrix=this_matrix, axes_object=axes_object,
        first_row_in_narr_grid=narr_row_limits[0],
        first_column_in_narr_grid=narr_column_limits[0],
        basemap_object=basemap_object, opacity=FRONT_LINE_OPACITY)

    # Overlay each original (undilated) front polyline.
    num_fronts = len(front_line_table.index)
    for i in range(num_fronts):
        front_plotting.plot_polyline(
            latitudes_deg=front_line_table[
                front_utils.LATITUDES_COLUMN].values[i],
            longitudes_deg=front_line_table[
                front_utils.LONGITUDES_COLUMN].values[i],
            basemap_object=basemap_object, axes_object=axes_object,
            front_type=front_line_table[
                front_utils.FRONT_TYPE_COLUMN].values[i],
            line_width=FRONT_LINE_WIDTH)

    pyplot.title(title_string)
    plotting_utils.annotate_axes(
        axes_object=axes_object, annotation_string=annotation_string)

    # Bug fix: the original used a Python-2-only `print` statement; the
    # function-call form below is valid in both Python 2 and 3.
    print('Saving figure to: "{0:s}"...'.format(output_file_name))
    file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)
    pyplot.savefig(output_file_name, dpi=FIGURE_RESOLUTION_DPI)
    pyplot.close()

    imagemagick_utils.trim_whitespace(
        input_file_name=output_file_name, output_file_name=output_file_name)
def dump(file_path, spectrum, append=False,
         overwrite=False, group_name="spectrum"):
    """ Dump the spectrum to the file_path.
    Args:
      file_path (string): Location to save to.
      spectrum (:class:`spectra.Spectra`): The spectrum to save
      append (bool, optional): Open the HDF5 file in append mode ("a")
        instead of truncating it ("w"). Defaults to False.
      overwrite (bool, optional): Delete an existing group named
        ``group_name`` before writing. Defaults to False.
      group_name (string, optional): Name of the HDF5 group the spectrum
        is written into. Defaults to "spectrum".
    """
    if append:
        file_opt = "a"
    else:
        file_opt = "w"
    with h5py.File(file_path, file_opt) as file_:
        if overwrite and group_name in file_.keys(): # Delete existing group
            _logger.warning("Removing existing group %s" % group_name)
            del file_[group_name]
        # NOTE: if the group already exists and overwrite is False,
        # h5py's create_group raises here.
        group = file_.create_group(group_name)
        group.attrs["name"] = spectrum.get_name()
        group.attrs["config_name"] = spectrum.get_config().get_name()
        group.attrs["config"] = json.dumps(spectrum.get_config().dump())
        if spectrum.get_fit_config():
            group.attrs["fit_config_name"] = spectrum.get_fit_config().\
                get_name()
            group.attrs["fit_config"] = json.dumps(
                spectrum.get_fit_config().dump())
        group.attrs["num_decays"] = spectrum.get_num_decays()
        group.attrs["raw_events"] = spectrum._raw_events
        group.attrs["bipo"] = spectrum.get_bipo()
        # Empty style/ROI dicts are stored as "" rather than "{}" so that
        # readers can cheaply test for absence.
        if len(spectrum.get_style()) == 0:
            group.attrs["style"] = ""
        else:
            group.attrs["style"] = json.dumps(spectrum.get_style())
        if len(spectrum._rois) == 0:
            group.attrs["rois"] = ""
        else:
            group.attrs["rois"] = json.dumps(spectrum._rois)
        group.create_dataset("data", data=spectrum._data, compression="gzip")
    _logger.info("Saved spectrum %s to %s" % (spectrum.get_name(), file_path))
def update_bc_val(gridx, gridy, ivar, t):
    """Update Dirichlet boundary values for the velocity components.

    Evaluates the decaying sinusoidal (Taylor–Green-type) velocity field
    at time ``t`` on the [xmin, xmax, ymin, ymax] boundaries of each grid.

    Parameters
    ----------
    gridx : flowx.GridFaceX object
        The grid for the x-component of the velocity.
    gridy : flowx.GridFaceY object
        The grid for the y-component of the velocity.
    ivar : string
        Name of the velocity variable in the Grid structures.
    t : float
        Time.
    """
    decay = numpy.exp(-2 * t)
    # Boundary order: [left (xmin), right (xmax), bottom (ymin), top (ymax)].
    u_values = [-decay * numpy.cos(gridx.xmin) * numpy.sin(gridx.y),
                -decay * numpy.cos(gridx.xmax) * numpy.sin(gridx.y),
                -decay * numpy.cos(gridx.x) * numpy.sin(gridx.ymin),
                -decay * numpy.cos(gridx.x) * numpy.sin(gridx.ymax)]
    v_values = [decay * numpy.sin(gridy.xmin) * numpy.cos(gridy.y),
                decay * numpy.sin(gridy.xmax) * numpy.cos(gridy.y),
                decay * numpy.sin(gridy.x) * numpy.cos(gridy.ymin),
                decay * numpy.sin(gridy.x) * numpy.cos(gridy.ymax)]
    gridx.update_bc_val({ivar: u_values})
    gridy.update_bc_val({ivar: v_values})
    return
def countDigits(string):
    """Return the number of ASCII digits ('0'-'9') in a string.

    (Helper for countHaveTenDigits.)

    :param string: the string to scan
    :return: count of characters between '0' and '9' inclusive
    """
    # Range comparison replaces the original ten chained equality tests;
    # it matches exactly the ASCII digits '0'..'9'.
    return sum(1 for char in string if '0' <= char <= '9')
def get_process_entry(process_id: int) -> Process:
    """Get process entry
    :raises AssertionError: When illegal state: Active processes != 1
    :param process_id: specify process
    :return: Process entry
    """
    query = db.session.query(Process).filter(Process.id == process_id)
    # Count once (the original called .count() twice, issuing two queries)
    # and raise AssertionError explicitly so the check still runs when the
    # interpreter is started with -O (which strips `assert` statements).
    match_count = query.count()
    if match_count != 1:
        raise AssertionError("Active processes != 1: " + str(match_count))
    return query.first()
def shrink(filename):
    """
    Shrink the original image to half its width and height without losing
    too much quality, by averaging each 2x2 block of source pixels.
    :param filename: The directory of an image you want to process.
    :return img: SimpleImage, a shrunk image that is similar to the original image.
    """
    img = SimpleImage(filename)
    # Create a blank image that its size is 1/2 of the original image.
    img_blank = SimpleImage.blank(img.width // 2, img.height // 2)
    for x in range(img_blank.width):
        for y in range(img_blank.height):
            new_pixel = img_blank.get_pixel(x, y)
            # The four source pixels mapping onto (x, y): the 2x2 block with
            # top-left corner (2x, 2y). The original's special cases for
            # x == 0 / y == 0 reduce to this same formula (x*2 == x when
            # x == 0), so a single branch suffices.
            block = [img.get_pixel(x * 2, y * 2),
                     img.get_pixel(x * 2 + 1, y * 2),
                     img.get_pixel(x * 2, y * 2 + 1),
                     img.get_pixel(x * 2 + 1, y * 2 + 1)]
            # Average each channel over the block. (Bug fix: the original
            # corner case computed blue as `a + b + c + d // 4` — the
            # division bound only to the last term — instead of
            # `(a + b + c + d) // 4`.)
            new_pixel.red = sum(p.red for p in block) // 4
            new_pixel.green = sum(p.green for p in block) // 4
            new_pixel.blue = sum(p.blue for p in block) // 4
    return img_blank
def get_target_config():
    """
    Get details of the target database (Postgres).

    Prompts interactively for connection settings (falling back to the
    shown defaults) and returns them as a dict with keys 'username',
    'host', 'port', 'database' and 'password'. All values are strings.
    """
    print('\n------------------------------------------')
    print('Enter target database settings:')
    print('------------------------------------------')
    config = {}
    config['username'] = input('- Username on target database (default "postgres"): ') or 'postgres'
    config['host'] = input('- Hostname for target database (default "localhost"): ') or 'localhost'
    # Bug fix: the default was the int 5432 while a typed-in port is a str;
    # keep the default as a string so config['port'] has a consistent type.
    config['port'] = input('- Port for target database (default "5432"): ') or '5432'
    config['database'] = input("- Name of target database (default 'oracle_migration'): ") or "oracle_migration"
    config['password'] = getpass.getpass('- Password for target database: ')
    print('\nUsername: {}'.format(config['username']))
    print('Hostname: {}'.format(config['host']))
    print('Port: {}'.format(config['port']))
    print('Database name: {}'.format(config['database']))
    # Echo the password masked, never in clear text.
    print('Password: {}'.format('*'*len(config['password'])))
    return config
def do_cli(  # pylint: disable=too-many-locals, too-many-statements
    click_ctx,
    function_identifier: Optional[str],
    template: str,
    base_dir: Optional[str],
    build_dir: str,
    cache_dir: str,
    clean: bool,
    use_container: bool,
    cached: bool,
    parallel: bool,
    manifest_path: Optional[str],
    docker_network: Optional[str],
    skip_pull_image: bool,
    parameter_overrides: Dict,
    mode: Optional[str],
    container_env_var: Optional[Tuple[str]],
    container_env_var_file: Optional[str],
    build_image: Optional[Tuple[str]],
) -> None:
    """
    Implementation of the ``cli`` method.

    Normalizes the raw CLI options (container env vars and per-function
    build images), then constructs a BuildContext and runs the build for
    the whole template (or only ``function_identifier`` when given).
    """
    # Imported lazily so `sam --help` stays fast.
    from samcli.commands.build.build_context import BuildContext

    LOG.debug("'build' command is called")
    if cached:
        LOG.info("Starting Build use cache")
    if use_container:
        LOG.info("Starting Build inside a container")

    # Parse "KEY=VALUE"-style tuples into structured dicts for BuildContext.
    processed_env_vars = _process_env_var(container_env_var)
    processed_build_images = _process_image_options(build_image)

    # BuildContext is a context manager: it sets up/cleans the build dir.
    with BuildContext(
        function_identifier,
        template,
        base_dir,
        build_dir,
        cache_dir,
        cached,
        parallel=parallel,
        clean=clean,
        manifest_path=manifest_path,
        use_container=use_container,
        parameter_overrides=parameter_overrides,
        docker_network=docker_network,
        skip_pull_image=skip_pull_image,
        mode=mode,
        container_env_var=processed_env_vars,
        container_env_var_file=container_env_var_file,
        build_images=processed_build_images,
        aws_region=click_ctx.region,
    ) as ctx:
        ctx.run()
def add_utxos_to_set(utxo_set, utxos):
    """Add UTXOs to the set, keyed by their pointer.

    :param utxo_set: mapping from pointer to UTXO (updated in place)
    :param utxos: iterable of UTXOs, or a dict whose values are UTXOs
    """
    entries = utxos.values() if isinstance(utxos, dict) else utxos
    for entry in entries:
        utxo_set[entry.pointer] = entry
def GetGPU():
    """Get the global index of GPU.

    Reads the module-level ``option`` mapping.

    Returns
    -------
    int
        The global index of GPU.
    """
    return option['device_id']
def test_empty_chain(object_store):
    """Check that empty chain raises only the expected error."""
    # A freshly constructed chain has no head block.
    chain = Chain(object_store)
    assert chain.head is None
    # Lookup by index returns None for a missing block...
    assert chain.get_block_by_index(0) is None
    # ...while direct subscript access raises IndexError.
    with pytest.raises(IndexError):
        chain[0]
def _docker_call(method, msg, *args, **kwargs):
    """Echo *msg*, invoke ``method(*args, **kwargs)`` and report the outcome.

    A Docker API error with status 409 (conflict) is reported as a soft
    failure; any other APIError propagates to the caller.
    """
    click.echo(msg, nl=False)
    try:
        method(*args, **kwargs)
    except docker.errors.APIError as exc:
        if exc.status_code != 409:
            raise
        click.secho('Failed (retry with "--force")', fg='red')
        return
    click.secho('Succeeded', fg='green')
def get_generic_explanation(exception_type):
    """Provides a generic explanation about a particular exception.

    Accepts either an exception class or a bare name; looks the name up in
    the GENERIC registry, falling back to the generic-warning entry, the
    OSError-subclass handler, or the "no information" message.
    """
    # Classes carry __name__; anything else is assumed to already be a name.
    exception_name = getattr(exception_type, "__name__", exception_type)
    if exception_name in GENERIC:
        return GENERIC[exception_name]()
    if exception_name.endswith("Warning"):
        return GENERIC["UnknownWarning"]()
    if hasattr(exception_type, "__name__") and issubclass(exception_type, OSError):
        return os_error_subclass(exception_name)
    return no_information()
def test_runtime_config():
    """Basic test for the class RuntimeConfig."""
    # Smoke test: default construction must succeed and produce an object.
    config = RuntimeConfig()
    assert config is not None
def _normalized_bam_coverage(name, bam_input, data):
    """Run bamCoverage from deeptools but produce normalized bigWig files.

    :param name: sample name, used for the output file name and log message
    :param bam_input: path to the input BAM file
    :param data: bcbio sample data dictionary
    :return: path to the bigWig file, or None when bamCoverage is unavailable
    """
    cmd = ("{bam_coverage} --bam {bam_input} --outFileName {bw_output} "
           "--binSize 20 --effectiveGenomeSize {size} "
           "--smoothLength 60 --extendReads 150 --centerReads -p {cores} ")
    size = bam.fasta.total_sequence_length(dd.get_ref_file(data))
    cores = dd.get_num_cores(data)
    try:
        bam_coverage = config_utils.get_program("bamCoverage", data)
    except config_utils.CmdNotFound:
        logger.info("No bamCoverage found, skipping bamCoverage.")
        return None
    # Always normalize to counts per million (CPM).
    # (Removed an unused `method = dd.get_chip_method(data)` assignment.)
    cmd += "--normalizeUsing CPM "
    # Exclude mitochondrial chromosomes from the normalization factor.
    toignore = get_mitochondrial_chroms(data)
    if toignore:
        ignorenormflag = f"--ignoreForNormalization {' '.join(toignore)} "
        cmd += ignorenormflag
    # Append any user-configured extra options for bamCoverage.
    resources = config_utils.get_resources("bamCoverage", data["config"])
    if resources:
        options = resources.get("options")
        if options:
            cmd += " %s" % " ".join([str(x) for x in options])
    bw_output = os.path.join(os.path.dirname(bam_input), "%s.bw" % name)
    if utils.file_exists(bw_output):
        return bw_output
    with file_transaction(bw_output) as out_tx:
        do.run(cmd.format(**locals()), "Run bamCoverage in %s" % name)
    return bw_output
def requestPump():
    """Request a core pump.
    This will perform any queued activity.
    It is delayed slightly so that queues can implement rate limiting,
    filter extraneous events, etc.
    """
    global _isPumpPending
    global _pump
    # print("#### rp", _isPumpPending, _pump)
    # No-op if the pump does not exist yet or a pump is already scheduled.
    if not _pump or _isPumpPending:
        return
    _isPumpPending = True
    # On the main thread, start the (wx timer) pump directly.
    if threading.get_ident() == mainThreadId:
        _pump.Start(PUMP_MAX_DELAY, True)
        return
    # This isn't the main thread. wx timers cannot be run outside the main thread.
    # Therefore, Have wx start it in the main thread with a CallAfter.
    import wx
    wx.CallAfter(_pump.Start,PUMP_MAX_DELAY, True)
def simple_histogram(queryset, column, bins):
    """
    Return a histogram from data in queryset.
    :param queryset: A Queryet, Model, or Manager
    :param column: The column we are aggregating into a histogram
    :param bins: An ordered iterable of left endpoints of the bins. Must have at least two elements.
        The endpoints must be a convertible to strings by force_text
    :return: A dictionary keyed by stringified bin endpoints (delegates to
        ``multi_histogram`` with a single constant slice column)
    """
    annotated = _get_queryset(queryset).annotate(
        column_name=Value(column, output_field=CharField())
    )
    return multi_histogram(
        annotated, column, bins,
        slice_on='column_name',
        choices=((column, column),),
    )
def test_md013_good_medium_line_with_long_last_word_with_config_stern():
    """
    Test to make sure this rule does not trigger with a document that
    contains a single line the crosses the normal 80 character limit
    with a 31 character last "word" and stern mode active.
    """
    # Arrange
    scanner = MarkdownScanner()
    supplied_arguments = [
        "--set",
        # "$!True" is the pymarkdown CLI syntax for a boolean True value.
        "plugins.md013.stern=$!True",
        "--strict-config",
        "scan",
        "test/resources/rules/md013/good_medium_line_with_very_long_last_word.md",
    ]

    # In stern mode the long-last-word leniency is disabled, so the rule
    # fires and the scan exits non-zero.
    expected_return_code = 1
    expected_output = (
        "test/resources/rules/md013/good_medium_line_with_very_long_last_word.md:1:1: "
        + "MD013: Line length "
        + "[Expected: 80, Actual: 102] (line-length)"
    )
    expected_error = ""

    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)

    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
def ProbeDebuggerDir():
    """Probes the debugger installed path and returns the path.

    Checks the known install locations of Debugging Tools for Windows
    (standalone x86/x64 and Windows Kits 8.0/8.1) under %ProgramFiles%,
    then falls back to %PROGRAMW6432% for a 64-bit debugger.

    Returns:
        The first directory containing cdb, or None if none is found.
    """
    program_files = os.environ.get('ProgramFiles')
    if not program_files:
        return None
    # Candidate locations, preserving the original probing order:
    # 32-bit-on-32-bit, x86, x64, Windows Kits 8.0 (x86), Windows Kits 8.1 (x64).
    candidates = [
        '%s\\Debugging Tools For Windows' % program_files,
        '%s\\Debugging Tools For Windows (x86)' % program_files,
        '%s\\Debugging Tools For Windows (x64)' % program_files,
        '%s\\Windows Kits\\8.0\\Debuggers\\x86' % program_files,
        '%s\\Windows Kits\\8.1\\Debuggers\\x64' % program_files,
    ]
    for debugger_dir in candidates:
        if CdbExistsAtLocation(debugger_dir):
            return debugger_dir
    # 64 bit debugger on 64 bit platform (64-bit Program Files as seen
    # from a 32-bit process).
    program_files = os.environ.get('PROGRAMW6432')
    if not program_files:
        return None
    debugger_dir = '%s\\Debugging Tools For Windows (x64)' % program_files
    if CdbExistsAtLocation(debugger_dir):
        return debugger_dir
    return None
def bounding_box(points):
    """Bounding box
    Args:
        points: Array of shape (amount_of_points, dimensions)
    Returns:
        numpy.ndarray: Array of shape (dimensions, 2) holding
        [[min, max], [min, max], ...] along the dimensions of points.
    """
    # Bug fix: the original looped over `points.ndim`, which is always 2
    # for a (n_points, dims) array, so for dims != 2 the box had the wrong
    # number of rows (and e.g. 3-D points lost their z extent). Use the
    # actual number of coordinate dimensions, points.shape[1].
    num_dims = points.shape[1]
    out = np.empty((num_dims, 2))
    out[:, 0] = points.min(axis=0)
    out[:, 1] = points.max(axis=0)
    return out
def add_signer_layer(api_client, key_file, key_password, consumer_key):
    """Create and load configuration. Decorate APIClient.request with header signing.

    :param api_client: client whose ``request`` method is wrapped in place
    :param key_file: signing key file (forwarded to SignerInterceptor)
    :param key_password: password for the signing key (forwarded to SignerInterceptor)
    :param consumer_key: OAuth consumer key (forwarded to SignerInterceptor)
    """
    api_signer = SignerInterceptor(key_file, key_password, consumer_key)
    # Replace the bound method so every outgoing request is OAuth-signed.
    api_client.request = api_signer.oauth_signing(api_client.request)
def get_ts_WFI(self):
    """
    Get kinetic energy density

    Returns a (grid.Nelem, n_solver_columns) array assembled from each
    solver's ``ked_WFI`` contributions.
    """
    ts = np.zeros((self.grid.Nelem, len(self.solver[0,:]) ))
    if self.optInv.ens_spin_sym is not True:
        # Compute the kinetic energy density on every solver.
        for i in range(self.solver.shape[0]):
            for j in range(self.solver.shape[1]):
                self.solver[i,j].calc_ked_WFI()
                #ts[i,j] = self.solver[i,j].get_ked_WFI()
                #get_ked_WFI cannot be defined as a solver's method
        #Get Kinetic Energy Density
        # NOTE(review): ts[:, j] is assigned (not accumulated) inside the
        # loop over i, so for each column j only the last solver row i with
        # a non-None ked_WFI survives — confirm whether this should be a sum
        # over i instead.
        for i in range(self.solver.shape[0]):
            for j in range(self.solver.shape[1]):
                if self.solver[i,j].ked_WFI is not None:
                    ts[:,j] = np.sum( self.solver[i,j].ked_WFI, axis=1 )
    else:
        # Ensemble spin symmetry: only column 0 needs the computation...
        for i in range(self.solver.shape[0]):
            self.solver[i,0].calc_ked_WFI()
        #Get Kinetic Energy Density
        # ...but all columns are read here; columns > 0 presumably hold
        # stale or precomputed ked_WFI values — TODO confirm.
        for i in range(self.solver.shape[0]):
            for j in range(self.solver.shape[1]):
                if self.solver[i,j].ked_WFI is not None:
                    ts[:,j] = np.sum( self.solver[i,j].ked_WFI, axis=1 )
    return ts
def _correct_outlier_correlation(rpeaks: pd.DataFrame, bool_mask: np.array, corr_thres: float, **kwargs) -> np.array:
    """Apply outlier correction method 'correlation'.
    This function compute the cross-correlation coefficient between every single beat and the average of all detected
    beats. It marks beats as outlier if the cross-correlation coefficient is below a certain threshold.
    Parameters
    ----------
    rpeaks : :class:`~pandas.DataFrame`
        dataframe with detected R peaks. Output from :meth:`biopsykit.signals.ecg.EcgProcessor.ecg_process()`.
        NOTE: mutated in place — an 'RR_Interval' column is added.
    bool_mask : :class:`numpy.array`
        boolean array with beats marked as outlier.
        Results of this outlier correction method will be combined with the array using a logical 'or'
    corr_thres : float
        threshold for cross-correlation coefficient. Beats below that threshold will be marked as outlier
    **kwargs : additional parameters required for this outlier function, such as:
        * ecg_signal :class:`~pandas.DataFrame`
          dataframe with processed ECG signal. Output from :meth:`biopsykit.signals.ecg.EcgProcessor.ecg_process()`
        * sampling_rate : float
          sampling rate of recorded data in Hz
    Returns
    -------
    :class:`numpy.array`
        boolean array with beats marked as outlier. Logical 'or' combination of ``bool_mask`` and results from
        this algorithm
    """
    ecg_signal = kwargs.get("ecg_signal", None)
    sampling_rate = kwargs.get("sampling_rate", None)
    if any(v is None for v in [ecg_signal, sampling_rate]):
        raise ValueError(
            "Cannot apply outlier correction method 'correlation' because not all additionally required arguments "
            "were provided! Make sure you pass the following arguments: 'ecg_signal', 'sampling_rate'."
        )
    # signal outlier
    # segment individual heart beats
    heartbeats = nk.ecg_segment(ecg_signal["ECG_Clean"], rpeaks["R_Peak_Idx"], int(sampling_rate))
    heartbeats = nk.epochs_to_df(heartbeats)
    heartbeats_pivoted = heartbeats.pivot(index="Time", columns="Label", values="Signal")
    heartbeats = heartbeats.set_index("Index")
    # keep only beats that correspond to detected R peaks, one row per beat
    heartbeats = heartbeats.loc[heartbeats.index.intersection(rpeaks["R_Peak_Idx"])].sort_values(by="Label")
    heartbeats = heartbeats[~heartbeats.index.duplicated()]
    # re-label pivot columns with the R-peak sample indices
    heartbeats_pivoted.columns = heartbeats.index
    # compute the average over all heart beats and compute the correlation coefficient between all beats and
    # the average
    mean_beat = heartbeats_pivoted.mean(axis=1)
    heartbeats_pivoted["mean"] = mean_beat
    corr_coeff = heartbeats_pivoted.corr()["mean"].abs().sort_values(ascending=True)
    # drop the self-correlation of the mean beat (always 1.0)
    corr_coeff = corr_coeff.drop("mean")
    # compute RR intervals (in seconds) from R Peak Locations
    rpeaks["RR_Interval"] = np.ediff1d(rpeaks["R_Peak_Idx"], to_end=0) / sampling_rate
    # signal outlier: drop all beats that are below a correlation coefficient threshold
    return np.logical_or(bool_mask, rpeaks["R_Peak_Idx"].isin(corr_coeff[corr_coeff < corr_thres].index))
def formatLH(figsizex = 2, figsizey = 2, frame = False):
    """
    Apply the house matplotlib style (fonts, sizes, spine visibility).

    :param: figsizex, integer specifying how many figures should be next to each other in x-direction
    :param: figsizey, integer specifying how many figures should be next to each other in y-direction
    :param: frame, whether the right and top axis spines are drawn
    """
    import matplotlib as mpl
    # One batched rcParams update instead of individual assignments.
    mpl.rcParams.update({
        'legend.frameon': False,
        'figure.frameon': False,
        'font.sans-serif': 'Gill Sans',
        'font.size': 14,
        # Base canvas of 5.25 x 4.75 inches, subdivided per subplot count.
        'figure.figsize': (5.25 / figsizex, 4.75 / figsizey),
        'axes.labelpad': 10,
        'figure.autolayout': True,
        'legend.fontsize': 12,
        'patch.antialiased': True,
        'axes.labelsize': 'large',
        'axes.titlesize': 'x-large',
        'axes.spines.right': frame,
        'axes.spines.top': frame,
        'errorbar.capsize': 5,
    })
def _time_from_timestamp(timestamp: int) -> time:
"""
Casts a timestamp representing the number of seconds from the midnigh to a time object
Parameters
----------
timestamp : int
The number of seconds since midnight
Returns
-------
time
The associated time object
"""
SECONDS_IN_MINUTE = 60
SECONDS_IN_HOUR = 60 * SECONDS_IN_MINUTE
remaining_time = timestamp
hour, remaining_time = divmod(remaining_time, SECONDS_IN_HOUR)
minute, second = divmod(remaining_time, SECONDS_IN_MINUTE)
return time(hour, minute, second) | 34,524 |
def get_marginal_frequencies_of_spikes_in_bins(symbol_counts, number_of_bins_d):
    """
    Compute for each past bin 1...d the sum of spikes found in that bin across all
    observed symbols.
    """
    # Accumulate count-weighted binary representations of all symbols.
    total = 0
    for symbol, count in symbol_counts.items():
        total = total + emb.symbol_binary_to_array(symbol, number_of_bins_d) * count
    return np.array(total, dtype=int)
def member_stand(v, m):
    """ returns member m stand on vote v

    Returns a dict with keys 'stand' (localized label), 'class' (CSS class)
    and 'name' (member name); members with no VoteAction are reported as
    'Absent'.
    """
    va = VoteAction.objects.filter(member = m, vote = v)
    if va:
        # Map the recorded action type to its localized label.
        # NOTE(review): if no choice matches va[0].type, `stand`/`cls` are
        # never bound and the return below raises UnboundLocalError —
        # confirm VOTE_ACTION_TYPE_CHOICES always covers every type.
        for (name,string) in VOTE_ACTION_TYPE_CHOICES:
            if va[0].type==name:
                stand = _(string)
                cls = name
        return {'stand':stand, 'class':cls, 'name':va[0].member.name}
    else:
        stand=_('Absent')
        cls = 'absent'
    try:
        return {'stand':stand, 'class':cls, 'name':m.name}
    except Exception, e:
        # Best effort: log and return None when m.name cannot be read.
        logging.debug(e,exc_info=True)
        return
def pagination(cl):
    """
    Generate the series of links to the pages in a paginated list.

    Returns a template context dict containing the page range (page
    numbers, possibly interleaved with DOT ellipsis markers), whether
    pagination is required, and the "show all" URL when applicable.
    """
    paginator, page_num = cl.paginator, cl.page_num
    pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
    if not pagination_required:
        page_range = []
    else:
        ON_EACH_SIDE = 2
        ON_ENDS = 1
        # If there are 10 or fewer pages, display links to every page.
        # Otherwise, do some fancy
        # (Fix: the code checked `<= 8`, contradicting this comment and the
        # upstream Django admin implementation; restored to 10.)
        if paginator.num_pages <= 10:
            page_range = range(paginator.num_pages)
        else:
            # Insert "smart" pagination links, so that there are always ON_ENDS
            # links at either end of the list of pages, and there are always
            # ON_EACH_SIDE links at either end of the "current page" link.
            page_range = []
            if page_num > (ON_EACH_SIDE + ON_ENDS):
                page_range += [
                    *range(0, ON_ENDS),
                    DOT,
                    *range(page_num - ON_EACH_SIDE, page_num + 1),
                ]
            else:
                page_range.extend(range(0, page_num + 1))
            if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
                # Bug fix: the upper bound was `page_num + ON_EACH_SIDE - 1`,
                # which is an empty range for ON_EACH_SIDE == 2 and silently
                # dropped the page links to the right of the current page.
                page_range += [
                    *range(page_num + 1, page_num + ON_EACH_SIDE + 1),
                    DOT,
                    *range(paginator.num_pages - ON_ENDS, paginator.num_pages),
                ]
            else:
                page_range.extend(range(page_num + 1, paginator.num_pages))
    need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
    return {
        "cl": cl,
        "pagination_required": pagination_required,
        "show_all_url": need_show_all_link and cl.get_query_string({ALL_VAR: ""}),
        "page_range": page_range,
        "ALL_VAR": ALL_VAR,
        "1": 1,
    }
def set_attr(objs, attr, value):
    """Set an attribute on every object in *objs*.

    Falls back to treating *objs* as a single object when it is not
    iterable. (Fixes the original docstring, which said "Remove", and the
    fallback, which referenced an undefined name ``obj`` — a NameError.)
    """
    try:
        for o in objs:
            setattr(o, attr, value)
    except TypeError:
        # *objs* is a single, non-iterable object.
        setattr(objs, attr, value)
def ants_apply_inverse_warps_template_to_func(
    workflow, strat, num_strat, num_ants_cores, input_node, input_outfile,
    ref_node, ref_outfile, func_name, interp, input_image_type
):
    """Apply the functional-to-structural and structural-to-template warps
    inversely to functional time-series in template space to warp it back to
    native functional space.
    Parameters
    ----------
    workflow: Nipype workflow object
        the workflow containing the resources involved
    strat: C-PAC Strategy object
        a strategy with one or more resource pools
    num_strat: int
        the number of strategy objects
    num_ants_cores: int
        the number of CPU cores dedicated to ANTS anatomical-to-standard
        registration
    input_node: Nipype pointer
        pointer to the node containing the 4D functional time-series (often
        the leaf node)
    input_outfile: Nipype pointer
        pointer to the output of the node, i.e. the 4D functional time-series
        itself
    ref_node: Nipype pointer
        pointer to the node containing the reference volume for the C3D
        FSL-to-ITK affine conversion (often the mean of the functional
        time-series, which is a single volume)
    ref_outfile: Nipype pointer
        pointer to the output of ref_node, i.e. the reference volume itself
    func_name: str
        what the name of the warped functional should be when written to the
        resource pool
    interp: str
        which interpolation to use when applying the warps
    input_image_type: int
        argument taken by the ANTs apply warp tool; in this case, should be
        3 for 4D functional time-series
    Returns
    -------
    Nipype workflow node
        the apply-warp sub-workflow whose 'outputspec.output_image' holds
        the warped functional (also registered in the resource pool under
        ``func_name``)
    """
    # converts FSL-format .mat affine xfm into ANTS-format
    # .txt; .mat affine comes from Func->Anat registration
    fsl_to_itk_mni_func = create_wf_c3d_fsl_to_itk(
        name='fsl_to_itk_%s_%d' % (func_name, num_strat)
    )
    # collects series of warps to be applied
    # (inverse=True because we go template -> native functional space)
    collect_transforms_mni_func = \
        create_wf_collect_transforms(
            inverse=True,
            name='collect_transforms_%s_%d' % (func_name, num_strat)
        )
    # apply ants warps
    apply_ants_warp_mni_func = \
        create_wf_apply_ants_warp(
            inverse=True,
            name='apply_ants_warp_%s_%d' % (func_name, num_strat),
            ants_threads=int(num_ants_cores))
    apply_ants_warp_mni_func.inputs.inputspec.dimension = 3
    apply_ants_warp_mni_func.inputs.inputspec.interpolation = interp
    # input_image_type:
    # (0 or 1 or 2 or 3)
    # Option specifying the input image type of scalar
    # (default), vector, tensor, or time series.
    apply_ants_warp_mni_func.inputs.inputspec. \
        input_image_type = input_image_type
    # convert the .mat from linear Func->Anat to
    # ANTS format
    node, out_file = strat['functional_to_anat_linear_xfm']
    workflow.connect(node, out_file, fsl_to_itk_mni_func,
                     'inputspec.affine_file')
    node, out_file = strat["anatomical_brain"]
    workflow.connect(node, out_file, fsl_to_itk_mni_func,
                     'inputspec.reference_file')
    workflow.connect(ref_node, ref_outfile,
                     fsl_to_itk_mni_func,
                     'inputspec.source_file')
    workflow.connect(ref_node, ref_outfile,
                     apply_ants_warp_mni_func, 'inputspec.reference_image')
    # Field file from anatomical nonlinear registration
    node, out_file = strat['mni_to_anatomical_nonlinear_xfm']
    workflow.connect(node, out_file,
                     collect_transforms_mni_func,
                     'inputspec.warp_file')
    # initial transformation from anatomical registration
    node, out_file = strat['ants_initial_xfm']
    workflow.connect(node, out_file,
                     collect_transforms_mni_func,
                     'inputspec.linear_initial')
    # affine transformation from anatomical registration
    node, out_file = strat['ants_affine_xfm']
    workflow.connect(node, out_file,
                     collect_transforms_mni_func,
                     'inputspec.linear_affine')
    # rigid transformation from anatomical registration
    node, out_file = strat['ants_rigid_xfm']
    workflow.connect(node, out_file,
                     collect_transforms_mni_func,
                     'inputspec.linear_rigid')
    # Premat from Func->Anat linear reg and bbreg
    # (if bbreg is enabled)
    workflow.connect(fsl_to_itk_mni_func,
                     'outputspec.itk_transform',
                     collect_transforms_mni_func,
                     'inputspec.fsl_to_itk_affine')
    # this <node, out_file> pulls in directly because
    # it pulls in the leaf in some instances
    workflow.connect(input_node,
                     input_outfile,
                     apply_ants_warp_mni_func,
                     'inputspec.input_image')
    workflow.connect(collect_transforms_mni_func,
                     'outputspec.transformation_series',
                     apply_ants_warp_mni_func,
                     'inputspec.transforms')
    # register the warped functional in the strategy's resource pool
    strat.update_resource_pool({
        func_name: (apply_ants_warp_mni_func, 'outputspec.output_image')
    })
    strat.append_name(apply_ants_warp_mni_func.name)
    return apply_ants_warp_mni_func
def get_metrics_influx(query, query_index):
    """Run *query* against the InfluxDB instance selected by *query_index*.

    Host and database are looked up in ``defs.INFLUX_DETAILS``; the query
    response is returned with timestamps in epoch seconds.
    """
    host, database = defs.INFLUX_DETAILS[query_index][0], defs.INFLUX_DETAILS[query_index][1]
    client = InfluxDBClient(
        host=host,
        database=database,
        port=8086,
        timeout=5,
        retries=5)
    return client.query(query, epoch='s')
def spectra(data, freq_sel=None, prod_sel=None, time_sel=None, part_sel=None, **kwargs):
    """Plots spectra at different times and for different correlation products.

    NOTE: ``part_sel`` and ``**kwargs`` are accepted but currently unused.
    """
    plt_data = _coerce_data_shape(data, freq_sel, prod_sel, time_sel, axes=())
    # NOTE(review): `ntime` is taken from axis 2 and `nprod` from axis 1,
    # but the plot indexes axis 1 with `ii` (the time counter) and axis 2
    # with `jj` — this only lines up when both axes have equal length.
    # Confirm the intended axis order before relying on this.
    ntime = plt_data.shape[2]
    nprod = plt_data.shape[1]
    for ii in range(ntime):
        for jj in range(nprod):
            plt.plot(plt_data[:, ii, jj])
def recursive_filter(condition: Callable[[OrgBaseNode], bool], root: Iterable[OrgBaseNode]) -> Iterable[OrgBaseNode]:
    """Depth-first traversal of *root*, yielding every node satisfying *condition*.

    Args:
        condition: predicate evaluated on each node
        root: nodes to be traversed

    Yields:
        each node for which ``condition(node)`` is true
    """
    for current in root:
        if condition(current):
            yield current
        # Recurse into children, when present.
        children = current.children
        if children:
            yield from recursive_filter(condition, children)
def find_buckets(pc, target_centres, N, bucket_height=.38, bucket_radius=.15):
    """
    Locate the N largest bucket-shaped clusters near the ground, pair each
    with its nearest aruco target, and unmark bucket points as branches.

    :param pc: point-cloud DataFrame; assumed columns x, y, z, refl,
        is_branch (and aruco is written) — TODO confirm schema
    :param target_centres: DataFrame of aruco target positions (x, y, aruco)
    :param N: number of buckets to keep (the N largest clusters)
    :param bucket_height: z cut-off below which bucket points are unmarked
    :param bucket_radius: xy radius around a bucket centre

    Returns: pc, bucket_centres
    """
    ### find buckets and remove ###
    print ('finding buckets')
    # Candidate bucket points: a low z-slab (0.1 m to 0.4 m).
    buckets = pc[pc.z.between(.1, .4)]
    # voxelise to speed-up dbscan
    buckets.loc[:, 'xx'] = (buckets.x // .005) * .005
    buckets.loc[:, 'yy'] = (buckets.y // .005) * .005
    buckets.loc[:, 'zz'] = (buckets.z // .005) * .005
    buckets.sort_values(['xx', 'yy', 'zz', 'refl'], inplace=True)
    bucket_voxels = buckets[~buckets[['xx', 'yy', 'zz']].duplicated()]
    # print(buckets)
    dbscan = DBSCAN(min_samples=20, eps=.05).fit(bucket_voxels[['xx', 'yy', 'zz']])
    bucket_voxels.loc[:, 'labels_'] = dbscan.labels_
    # merge results back
    buckets = pd.merge(buckets, bucket_voxels[['xx', 'yy', 'zz', 'labels_']], on=['xx', 'yy', 'zz'])
    # find three largest targets (assumed buckets)
    labels = buckets.labels_.value_counts().index[:N]
    buckets = buckets[buckets.labels_.isin(labels)]
    bucket_centres = buckets.groupby('labels_')[['x', 'y']].mean().reset_index()
    # -1 marks "no aruco paired yet".
    bucket_centres.loc[:, 'aruco'] = -1
    try:
        # pair up aruco and buckets , identify and label bucket points
        for i, lbl in enumerate(buckets.labels_.unique()):
            bucket = buckets[buckets.labels_ == lbl]
            X, Y = bucket[['x', 'y']].mean(), target_centres[['x', 'y']].astype(float)
            dist2bucket = np.linalg.norm(X - Y, axis=1)
            aruco = target_centres.loc[np.where(dist2bucket == dist2bucket.min())].aruco.values[0]
            print ('bucket {} associated with aruco {}'.format(lbl, aruco))
            bucket_centres.loc[bucket_centres.labels_ == lbl, 'aruco'] = aruco
            # identify buckets points
            x_shift = bucket_centres[bucket_centres.aruco == aruco].x.values
            y_shift = bucket_centres[bucket_centres.aruco == aruco].y.values
            pc.dist = np.sqrt((pc.x - x_shift)**2 + (pc.y - y_shift)**2)
            idx = pc[(pc.z < bucket_height) & (pc.dist < bucket_radius) & (pc.is_branch)].index
            pc.loc[idx, 'is_branch'] = False
            # label branch base with aruco
            idx = pc[(pc.z < bucket_height + .5) & (pc.dist < bucket_radius)].index
            pc.loc[idx, 'aruco'] = aruco
    except Exception as err:
        # Debug aid: plot clusters and targets before failing.
        # NOTE(review): `raise Exception` discards the original error `err`;
        # `raise` (bare) would preserve the traceback — consider fixing.
        plt.scatter(buckets.x.loc[::100], buckets.y.loc[::100], c=buckets.labels_.loc[::100])
        plt.scatter(target_centres.x, target_centres.y)
        [plt.text(r.x, r.y, r.aruco) for ix, r in target_centres.iterrows()]
        raise Exception
    return pc, bucket_centres
def fit_model(model, state_train, action_train, num_epochs, learning_rate = 1e-2, batch_size=32, shuffle=True):
    """
    Trains a pytorch module model to predict actions from states for num_epochs passes through the dataset.
    This is used to do a (relatively naive) version of behavior cloning.
    The hope is that this will eventually serve as a keras model.fit function, but customized to our needs.
    Attributes:
        model: pytorch module implementing your controller
        state_train: numpy array (or pytorch tensor) of states (inputs to your network) you want to train over
        action_train: numpy array (or pytorch tensor) of actions (outputs of the network)
        num_epochs: how many passes through the dataset to make
        learning_rate: initial learning rate for the adam optimizer
        batch_size: minibatch size used by the DataLoader
        shuffle: whether to reshuffle the dataset every epoch
    Returns:
        Returns a list of average losses per epoch
        but note that the model is trained in place!!
    Example:
        model = nn.Sequential(
                nn.Linear(4,12),
                nn.ReLU(),
                nn.Linear(12,12),
                nn.ReLU(),
                nn.Linear(12,1)
                )
        states = np.random.randn(100,4)
        actions = np.random.randn(100,1)
        loss_hist = fit_model(model,states, actions, 200)
    """
    # Check if GPU is available , else fall back to CPU
    # TODO this might belong in module body
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")
    # Move the model to the same device the batches are sent to; otherwise
    # training would crash with a device mismatch whenever CUDA is available.
    model.to(device)
    # Normalize training data set
    state_train_norm, state_train_mean, state_train_std = normalize_data(state_train)
    action_train_norm, action_train_mean, action_train_std = normalize_data(action_train)
    state_tensor = torch.as_tensor(state_train_norm, dtype = torch.float32) # make sure that our input is a tensor
    action_tensor = torch.as_tensor(action_train_norm, dtype = torch.float32)
    training_data = data.TensorDataset(state_tensor, action_tensor)
    training_generator = data.DataLoader(training_data, batch_size=batch_size, shuffle=shuffle)
    loss_hist = []
    loss_fn = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    for epoch in range(num_epochs):
        epoch_loss = 0.0
        for local_states, local_actions in training_generator:
            # Transfer to GPU (if GPU is enabled, else this does nothing)
            local_states, local_actions = local_states.to(device), local_actions.to(device)
            # predict and calculate loss for the batch
            action_preds = model(local_states)
            loss = loss_fn(local_actions, action_preds)
            # .item() detaches the scalar; accumulating the tensor itself would
            # keep every batch's autograd graph alive (memory leak).
            epoch_loss += loss.item()  # only used for metrics
            # do the normal pytorch update
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # after each epoch append the average loss
        loss_hist.append(epoch_loss / len(state_train))
    return loss_hist
def export_item(item_task, library_home):
    """Create strm file for an item and add it to the library"""
    folder = os.path.join(library_home, item_task['section'], item_task['destination'])
    strm_path = os.path.join(folder, item_task['filename'] + '.strm')
    # Register the item first, then materialize the folder and the strm file.
    _add_to_library(item_task['videoid'], strm_path)
    _create_destination_folder(folder)
    _write_strm_file(item_task, strm_path)
    common.debug('Exported {}'.format(item_task['title']))
def _deserialize_dict(
    class_reference, data, debug_name, *, throw_on_unhandled: bool, raw_storage_mode: RawStorageMode
):
    """Deserialize a dictionary to a Python object.

    Two modes: if ``class_reference`` is a dict type, entries are deserialized
    value by value; otherwise ``data`` is treated as the attribute payload for
    an instance of ``class_reference`` (including downcast resolution and
    per-attribute parsing).
    """
    # Check if we are doing a straightforward dictionary parse first, or if it
    # has to be deserialized.
    # Validate the payload type before touching it: calling data.keys() on a
    # non-dict would raise AttributeError instead of the intended
    # DeserializeException.
    if not isinstance(data, dict):
        raise DeserializeException(
            f"Data was not dict for instance: {class_reference} for {debug_name}"
        )
    remaining_properties = set(data.keys())
    if is_dict(class_reference):
        if class_reference is dict:
            # If types of dictionary entries are not defined, do not deserialize
            return data
        key_type, value_type = dict_content_types(class_reference, debug_name)
        result = {}
        for dict_key, dict_value in data.items():
            if key_type != Any and not isinstance(dict_key, key_type):
                raise DeserializeException(
                    f"Could not deserialize key {dict_key} to type {key_type} for {debug_name}"
                )
            result[dict_key] = _deserialize(
                value_type,
                dict_value,
                f"{debug_name}.{dict_key}",
                throw_on_unhandled=throw_on_unhandled,
                raw_storage_mode=raw_storage_mode.child_mode(),
            )
            remaining_properties.remove(dict_key)
        if throw_on_unhandled and len(remaining_properties) > 0:
            raise UnhandledFieldException(
                f"The following field was unhandled: {list(remaining_properties)[0]} for {debug_name}"
            )
        return result
    # It wasn't a straight forward dictionary, so we are in deserialize mode
    class_instance = None
    # Resolve a more specific subclass if the payload carries a downcast field.
    class_reference_downcast_field = _get_downcast_field(class_reference)
    if class_reference_downcast_field:
        downcast_value = data[class_reference_downcast_field]
        new_reference = _get_downcast_class(class_reference, downcast_value)
        if new_reference is None:
            if _allows_downcast_fallback(class_reference):
                return _deserialize(
                    Dict[Any, Any],
                    data,
                    debug_name,
                    throw_on_unhandled=throw_on_unhandled,
                    raw_storage_mode=raw_storage_mode.child_mode(),
                )
            raise UndefinedDowncastException(
                f"Could not find subclass of {class_reference} with downcast identifier '{downcast_value}' for {debug_name}"
            )
        class_reference = new_reference
    # Bypass __init__ so attributes can be populated from the payload directly.
    class_instance = class_reference.__new__(class_reference)
    handled_fields = set()
    hints = typing.get_type_hints(class_reference)
    if len(hints) == 0:
        raise DeserializeException(
            f"Could not deserialize {data} into {class_reference} due to lack of type hints ({debug_name})"
        )
    for attribute_name, attribute_type in hints.items():
        if _should_ignore(class_reference, attribute_name):
            continue
        property_key = _get_key(class_reference, attribute_name)
        parser_function = _get_parser(class_reference, property_key)
        if is_classvar(attribute_type):
            if property_key in data:
                raise DeserializeException(
                    f"ClassVars cannot be set: {debug_name}.{attribute_name}"
                )
            continue
        if _uses_auto_snake(class_reference) and attribute_name.lower() != attribute_name:
            raise DeserializeException(
                f"When using auto_snake, all properties must be snake cased. Error on: {debug_name}.{attribute_name}"
            )
        using_default = False
        # Find the payload key: exact, camelCase, or PascalCase (auto_snake only).
        if property_key in data:
            value = data[property_key]
            handled_fields.add(property_key)
            property_value = parser_function(value)
        elif _uses_auto_snake(class_reference) and camel_case(property_key) in data:
            value = data[camel_case(property_key)]
            handled_fields.add(camel_case(property_key))
            property_value = parser_function(value)
        elif _uses_auto_snake(class_reference) and pascal_case(property_key) in data:
            value = data[pascal_case(property_key)]
            handled_fields.add(pascal_case(property_key))
            property_value = parser_function(value)
        else:
            if _has_default(class_reference, attribute_name):
                deserialized_value = _get_default(class_reference, attribute_name)
                using_default = True
            else:
                # Missing keys are only acceptable for Optional attributes.
                if not is_union(attribute_type) or type(None) not in union_types(
                    attribute_type, debug_name
                ):
                    raise DeserializeException(
                        f"Unexpected missing value for: {debug_name}.{attribute_name}"
                    )
                property_value = parser_function(None)
        if not using_default:
            deserialized_value = _deserialize(
                attribute_type,
                property_value,
                f"{debug_name}.{attribute_name}",
                throw_on_unhandled=throw_on_unhandled,
                raw_storage_mode=raw_storage_mode.child_mode(),
            )
        setattr(class_instance, attribute_name, deserialized_value)
    unhandled = set(data.keys()) - handled_fields
    if throw_on_unhandled and len(unhandled) > 0:
        filtered_unhandled = [
            key for key in unhandled if not _should_allow_unhandled(class_reference, key)
        ]
        if len(filtered_unhandled) > 0:
            raise UnhandledFieldException(
                f"Unhandled field: {list(filtered_unhandled)[0]} for {debug_name}"
            )
    # Run any post-construction hook defined on the class.
    _call_constructed(class_reference, class_instance)
    return class_instance
def red_bg(text):
    """Render *text* with a red background."""
    # bgcolor=1 selects red in the underlying color helper.
    return _create_color_func(text, bgcolor=1)
async def send_dir(pathDir, writer, oDir):
    """Recursively send the files of a directory.

    Takes the path to the original directory, the stream writer, and the
    name of the original directory.
    """
    if os.path.isfile(pathDir):
        await send_file(pathDir, writer, oDir)
    elif os.path.isdir(pathDir):
        # Recurse into every file/sub-directory of the original directory.
        for entry in os.listdir(pathDir):
            await send_dir(pathDir + '/' + entry, writer, oDir)
            await writer.drain()
def save_model(model, neural_net_type, bands, tile_size):
    """Save a DeepOSM tflearn model and its metadata."""
    model.save(CACHE_PATH + 'model.pickle')
    # dump the training metadata to disk, for later loading model from disk
    training_info = {'neural_net_type': neural_net_type,
                     'bands': bands,
                     'tile_size': tile_size}
    # pickle writes bytes, so the file must be opened in binary mode
    # ('w' raises TypeError on Python 3).
    with open(CACHE_PATH + MODEL_METADATA_FILENAME, 'wb') as outfile:
        pickle.dump(training_info, outfile)
def does_block_type_support_children(block_type):
    """
    Does the specified block type (e.g. "html", "vertical") support child
    blocks?
    """
    try:
        block_class = XBlock.load_class(block_type)
    except PluginMissingError:
        # We don't know if this now-uninstalled block type had children
        # but to be conservative, assume it may have.
        return True
    return block_class.has_children
def jsonDateTimeHandler(obj):
    """Serialize an object for JSON output via strftime or isoformat.

    Raises TypeError when the object provides neither method.
    """
    if hasattr(obj, "strftime"):
        # To avoid problems with the js date-time format
        return obj.strftime("%a %b %d, %Y %I:%M %p")
    if hasattr(obj, 'isoformat'):
        return obj.isoformat()
    raise TypeError(
        'Object of type %s with value of %s is not JSON serializable' %
        (type(obj), repr(obj)))
def truncate_desired(cluster, desired, min_size, max_size):
    """Do truncation of desired capacity for non-strict cases.
    :param cluster: The target cluster.
    :param desired: The expected capacity of the cluster.
    :param min_size: The NEW minimum capacity set for the cluster.
    :param max_size: The NEW maximum capacity set for the cluster.
    """
    # Clamp shrinkage: prefer the new min_size, else the cluster's current one.
    if min_size is not None:
        if desired < min_size:
            desired = min_size
            LOG.debug("Truncating shrinkage to specified min_size (%s).",
                      desired)
    elif desired < cluster.min_size:
        desired = cluster.min_size
        LOG.debug("Truncating shrinkage to cluster's min_size (%s).",
                  desired)
    # Clamp growth: a non-positive max means "unbounded" and is ignored.
    if max_size is not None:
        if max_size > 0 and desired > max_size:
            desired = max_size
            LOG.debug("Truncating growth to specified max_size (%s).",
                      desired)
    elif desired > cluster.max_size and cluster.max_size > 0:
        desired = cluster.max_size
        LOG.debug("Truncating growth to cluster's max_size (%s).",
                  desired)
    return desired
def _plot_slice(ax, slice, slice_nr, orientation, x_lab, y_lab, color_map):
    """
    Plot a slice on an ax object, along with labels and titles
    :param ax: matplotlib ax object
    :param slice: 2D numpy array with grey values
    :param slice_nr: int, the slice number (shown in the title)
    :param orientation: str, choose from ["sagittal", "frontal", "axial"]
    :param x_lab: str, label for the x axis
    :param y_lab: str, label for the y axis
    :param color_map: str, color map that you want to use for fancy slice plotting
    """
    # Transpose so the first array axis runs along x; origin="lower" places
    # index (0, 0) in the bottom-left corner.
    ax.imshow(slice.T, cmap = color_map, origin = "lower")
    ax.set_title(f'Slice #{slice_nr} - {orientation}', fontsize = 18)
    ax.set_xlabel(x_lab, fontsize = 18)
    ax.set_ylabel(y_lab, fontsize = 18)
def test_required_fields(data, fld):
    """Verify that required fields without a custom error message raise
    the default message if they are not provided.
    Arguments:
    - `data`:
    """
    data[fld] = None
    # Pydantic's default message for a disallowed None value.
    expected = "none is not an allowed value"
    with pytest.raises(ValidationError) as excinfo:
        FN124(**data)
    assert expected in str(excinfo.value)
def get_crypto_price(crypto, fiat):
    """Helper function to convert any cryptocurrency to fiat"""
    # Value of the coin expressed in BTC, parsed from "X = Y BTC"-style text.
    btc_quote = binance_convert_crypto(crypto, "BTC")
    converted_btc_value = float(btc_quote.split('=')[1].strip().split()[0])
    # grab latest bitcoin price
    fiat_quote = get_price("btc", fiat)
    btc_price = float(fiat_quote.split('=')[1].strip().split()[0])
    # converted_btc_value * latest reading
    return converted_btc_value * btc_price
def get_js_files() -> Generator[str, None, None]:
    """Yield all the js files that are needed for the users selected extensions."""
    for ext in (simple_bulma_path / "extensions").iterdir():
        # Skip extensions the user has not enabled.
        if not is_enabled(ext):
            continue
        dist_folder = ext / "dist"
        # Prefer a minified bundle; otherwise settle for any JS file found.
        # This really makes a lot of assumptions about the extension,
        # but so does everything else up until here.
        js_file = next(dist_folder.rglob("*.min.js"), None) or \
                  next(dist_folder.rglob("*.js"), None)
        if js_file:
            yield js_file.relative_to(simple_bulma_path).as_posix()
def boxblur(stream: Stream, *args, **kwargs) -> FilterableStream:
    """https://ffmpeg.org/ffmpeg-filters.html#boxblur"""
    # The ffmpeg filter name is taken from this function's own name.
    filter_name = boxblur.__name__
    return filter(stream, filter_name, *args, **kwargs)
def add_stretch(layout: QLayout):
    """
    Adds a stretcheable zone to a layout.

    NOTE(review): addStretch is defined on QBoxLayout, not the QLayout base
    class — confirm callers always pass a box layout.
    """
    layout.addStretch()
def create_netcdf_dataset(
    location,
    name,
    start_time,
    end_time,
    sweep,
    inpath=None,
    outpath="",
    chunks=None,
    engine="h5netcdf",
):
    """Create NetCDF file from radar data.

    chunks: optional dask chunking spec passed to the ODIM reader; defaults
    to no chunking. (None replaces the former mutable default ``{}`` — a
    shared dict default is a Python pitfall; behavior is unchanged.)
    """
    if chunks is None:
        chunks = {}
    radar_path = get_xpol_path(inpath=inpath, start_time=start_time, loc=location)
    file_path = os.path.join(radar_path, name)
    file_obj = list(create_filelist(os.path.join(file_path, "*"), start_time, end_time))
    vol = wrl.io.open_odim(file_obj, loader="h5py", flavour="GAMIC", chunks=chunks)
    ds = vol[sweep].data
    ds = ds.assign_coords({"location": location})
    # ds = ds.chunk({"time": 24})
    ofname = save_netcdf_dataset(ds, outpath, engine=engine)
    # Release the volume handle before returning the output path.
    del vol
    return os.path.abspath(ofname)
def fmxKeys(mKey):
    """Print constituent info for the active filters of the given MOOT key."""
    mq = MootQuery(None)
    constits = vectorOfConstitInfo()
    # The call populates `constits` in place; its return value is unused.
    mq.getActiveFilters(mKey, constits, 0)
    for ci in constits:
        print(ci.getKey(), ci.getFswId(), ci.getSchemaId(), ci.getSchemaVersionId(), ci.getInstanceId())
def flatten(lis):
    """Given a list, possibly nested to any level, return it flattened.

    :param lis: a (possibly nested) list.
    :return: a new flat list with the leaf items in original order.
    """
    new_lis = []
    for item in lis:
        # isinstance is the idiomatic type check (also accepts list subclasses).
        if isinstance(item, list):
            new_lis.extend(flatten(item))
        else:
            new_lis.append(item)
    return new_lis
def not_dumpable(py_obj, h_group, name, **kwargs):  # pragma: no cover
    """
    create_dataset stand-in attached to the loader of the dummy py_object,
    used to mimic the PyContainer class for groups in legacy hickle 4.x files.
    Raises
    ------
    RuntimeError:
        in any case as this function shall never be called
    """
    raise RuntimeError("types defined by loaders not dump able")
def name(ndims=2, ndepth=2):
    """ encrypt n and version into a standardized string """
    # Model name encodes dimensionality and network depth.
    return 'care_denoise_%dDdepth%d' % (ndims, ndepth)
def _generate_url_slug(size=10, chars=string.ascii_lowercase + string.digits):
    """
    This is for a Django project and it assumes your instance
    has a model with a slug field and a title character (char) field.
    Retries (recursively) when the generated slug already exists in redis;
    returns None if the retry limit (Python's recursion limit) is exhausted.
    Parameters
    ----------
    size: <Int>
        Size of the slug.
    chars: <string.class>
        Character class to be included in the slug.
    """
    slug = ''.join(random.choice(chars) for _ in range(size))
    if redis.exists(slug):
        try:
            # Propagate the caller's size/chars on retry; previously the
            # recursive call silently fell back to the defaults.
            return _generate_url_slug(size, chars)
        except RecursionError:
            return
    else:
        return slug
def mc_compute_stationary(P):
    """
    Computes the stationary distribution of Markov matrix P.
    Parameters
    ----------
    P : array_like(float, ndim=2)
        A discrete Markov transition matrix
    Returns
    -------
    solution : array_like(float, ndim=1)
        The stationary distribution for P
    Note: Currently only supports transition matrices with a unique
    invariant distribution. See issue 19.
    """
    n = len(P)  # P is n x n
    # Solve the transposed system (I - P + B)' x = b, where the all-ones
    # matrix B and vector b enforce the normalization sum(x) == 1.
    coeff = (np.identity(n) - P + np.ones((n, n))).T
    rhs = np.ones(n)
    return np.linalg.solve(coeff, rhs)
def isight_prepare_data_request(a_url, a_query, a_pub_key, a_prv_key):
    """Query the FireEye iSight API and return the result.

    :param a_url: base URL of the iSight API
    :param a_query: query path/parameters to request
    :param a_pub_key: public API key used to sign the request
    :param a_prv_key: private API key used to sign the request
    :return: the API response data, or False when nothing was retrieved
    """
    header = set_header(a_prv_key, a_pub_key, a_query)
    result = isight_load_data(a_url, a_query, header)
    if result:
        return result
    PySight_settings.logger.error('Something went wrong when retrieving indicators from the FireEye iSight API')
    return False
def norm_fisher_vector(v, method=('power', 'l2')):
    """
    Normalize a set of fisher vectors.
    :param v: numpy.array
        A matrix with Fisher vectors as rows (each row corresponding to an
        image).
    :param method: iterable of str
        Normalization methods to apply. Choices: 'power', 'l2'.
        (A tuple default replaces the former mutable list default — a
        shared mutable default is a Python pitfall; behavior is unchanged.)
    :return: numpy.array
        The set of normalized vectors (as a matrix).
    """
    if 'power' in method:
        # Signed square-root ("power") normalization.
        v = np.sign(v) * np.abs(v)**0.5
    if 'l2' in method:
        nrm = np.sqrt(np.sum(v**2, axis=1))
        v /= nrm.reshape(-1, 1)
        # Zero-norm rows divide to NaN; replace with a large sentinel.
        v[np.isnan(v)] = 100000.0  # some large value
    return v
def download_datasets(num=10, local_database=None, msg_flag=True, download_flag=True):
    """
    Downloads datasets and puts them in a local directory named after the dataset.
    By default downloads first 10 datasets only. User can choose the number of datasets to be downloaded.
    num: Number of datasets to download (must be >= 1).
    local_database: Optional path to a CSV master database; when None, the
    database is built by crawling the website.
    msg_flag: Controls verbosity.
    download_flag: Default is True. If set to False, only creates the directories but does not initiate download (for testing purpose).
    """
    import pandas as pd

    if local_database is not None:
        df = pd.read_csv(local_database, index_col="Dataset")
    else:
        if msg_flag:
            print(
                "Local database not supplied.\nBuilding the master database by crawling the website..."
            )
        df = build_full_dataframe(msg_flag=False)
        if msg_flag:
            print("Done!")
    # Guard clause: nothing to do for a non-positive count.
    if num < 1:
        print("Invalid entry for the number of datasets.")
        return
    for i in range(num):
        if msg_flag:
            print(f"Downloading dataset(s) for: {df['Name'][i]}")
        download_dataset_url(
            df["Datapage URL"][i],
            df["Name"][i],
            msg_flag=False,
            download_flag=download_flag,
        )
    print("\nFinished downloading.")
def get_top_funnels_df(funurl: str, funlen: int, useResolvedUrls: bool, events: DataFrame, limit_rows: int = 0) -> dict:
    """Get top funnels of specified length which contain the specified URL
    :param funurl: URL that should be contained in the funnel
    :param funlen: funnel length
    :param useResolvedUrls: indicates whether original or resolved URLs should be used
    :param events: events DataFrame
    :param limit_rows: number of rows of events DataFrame to use (use all rows if 0)
    :return: dictionary of funnels and their frequencies
    """
    # Pick the URL column to operate on.
    column = analyze_traffic.RESOLVEDURL if useResolvedUrls else analyze_traffic.PAGEURL
    if limit_rows != 0:
        events = events.head(limit_rows)
    if useResolvedUrls:
        # Populate the resolved-URL column in place before indexing.
        url_regex_resolver.resolve_urls(events, manage_resolutions.get_regex_dict(), analyze_traffic.PAGEURL, analyze_traffic.RESOLVEDURL)
    session_index = analyze_traffic.build_session_index(events, column)
    return get_funnel_lists(events, session_index, funurl, funlen, column)
def check_write_access(filepath: Path):
    """Checks that the program can write safely in a file.
    Args:
        filepath (Path): file to check write permissions.
    Raises:
        click.ClickException: if the file raises PermissionError.
    """
    try:
        # Append mode touches the file without truncating existing content.
        handle = filepath.open("a")
    except PermissionError as exc:
        raise click.ClickException("Error writing to %r" % filepath.as_posix()) from exc
    else:
        handle.close()
def count_alphabet():
    """
    Return dict which contains rating of alphabet: each lowercase ascii
    letter mapped to its percentage frequency across all .txt files in
    the ``data`` folder.
    """
    # Get all txt files in folder data
    list_file = [os.path.join("data", file)
                 for file in os.listdir("data") if file.endswith(".txt")]
    # Init result with a zero count per letter
    result = {letter: 0 for letter in string.ascii_lowercase}
    # Counting
    for file in list_file:
        # Context manager guarantees the file is closed even on errors
        # (previously files were closed manually and could leak on exception).
        with open(file, 'r') as f:
            content = f.read()
        content = slugify(content)
        content = content.replace("-", '')
        for char in content:
            if char in result:
                result[char] = result[char] + 1
    # Compute total number of counted characters
    total = sum(result.values())
    # Convert counts to percentages
    for k in result:
        result[k] = 100.0 * result[k] / total
    return result
def CompositeToBayesComposite(obj):
    """ converts a Composite to a BayesComposite

    If *obj* is already a BayesComposite, or is not a _Composite.Composite_,
    nothing will be done.
    """
    if obj.__class__ == BayesComposite:
        return
    if obj.__class__ == Composite.Composite:
        # Re-brand the instance in place and initialize the extra
        # Bayes-specific attributes.
        obj.__class__ = BayesComposite
        obj.resultProbs = None
        obj.condProbs = None
def resilience(msg="ignoring error {type}", acceptable=Exception, unacceptable=(), log_level=logging.DEBUG, pred=None):
    """Suppress exceptions raised from the wrapped scope.
    msg - format of log to print when an exception is suppressed.
    acceptable - exception or tuple of exceptions which to suppress.
    unacceptable - exception or tuple of exception which to not suppress, even if they are in `acceptable`.
    the exceptions in UNACCEPTABLE_EXCEPTIONS are always unacceptable, unless `unacceptable` is None.
    log_level - level of the log to emit when suppressing an exception.
    pred - if given, then an exception is suppressed only if pred(exception) is True.
    >>> import errno
    >>> with resilience(acceptable=OSError, pred=lambda ex: ex.errno == errno.ENOENT):
    ...     print('before')
    ...     open('non-existent-file')
    ...     print('after')
    before
    """
    # NOTE(review): the doctest uses this as a context manager, so this
    # generator is presumably wrapped with contextlib.contextmanager
    # elsewhere — confirm.
    if unacceptable is None:
        # None explicitly opts out of the always-unacceptable set too.
        unacceptable = ()
    elif isinstance(unacceptable, tuple):
        unacceptable += UNACCEPTABLE_EXCEPTIONS
    else:
        # A single exception type: wrap into a tuple before extending.
        unacceptable = (unacceptable,) + UNACCEPTABLE_EXCEPTIONS
    try:
        yield
    except unacceptable as exc:
        # Unacceptable exceptions always propagate (checked before acceptable).
        raise
    except acceptable as exc:
        # Optional predicate can veto the suppression.
        if pred and not pred(exc):
            raise
        raise_if_async_exception(exc)
        _logger.log(log_level, msg.format(exc=exc, type=exc.__class__.__qualname__))
        if log_level > logging.DEBUG:
            # Above-DEBUG suppressions also record the traceback at DEBUG.
            _logger.debug("Traceback:", exc_info=True)
def graph_to_json(obj: Graph) -> Dict[str, Any]:
    """
    Uses regular serialization but excludes "operator" field to rid of circular references
    """
    full = any_to_json(obj)
    # Drop 'operator' to prevent circular reference
    return {key: value for key, value in full.items() if key != 'operator'}
def log_ttest_vs_basal(df, basal_key):
    """Do t-tests in log space to see if sequences has the same activity as basal.
    Parameters
    ----------
    df : pd.DataFrame
        Index is sequence ID, columns are average RNA/DNA barcode counts for each replicate.
    basal_key : str
        Index value for basal.
    Returns
    -------
    pvals : pd.Series
        p-value for t-test of the null hypothesis that the log activity of a sequence is the same as that of basal.
        Does not include a p-value for basal.
    """
    log_params = df.apply(_get_lognormal_params, axis=1)
    # Pull out basal params, then drop basal so it is not tested against itself.
    basal_mean, basal_std, basal_n = log_params.loc[basal_key]
    log_params = log_params.drop(index=basal_key)

    def _pval(row):
        # Welch's t-test from summary statistics; index [1] is the p-value.
        return stats.ttest_ind_from_stats(basal_mean, basal_std, basal_n,
                                          row["mean"], row["std"], row["n"],
                                          equal_var=False)[1]

    return log_params.apply(_pval, axis=1)
def spleen_lymph_cite_seq(
    save_path: str = "data/",
    protein_join: str = "inner",
    remove_outliers: bool = True,
    run_setup_anndata: bool = True,
) -> anndata.AnnData:
    """
    Immune cells from the murine spleen and lymph nodes [GayosoSteier21]_.
    This dataset was used throughout the totalVI manuscript, and named SLN-all.
    Parameters
    ----------
    save_path
        Location to use when saving/loading the data.
    protein_join
        Whether to take an inner join or outer join of proteins.
    remove_outliers
        Whether to remove clusters annotated as doublet or low quality.
    run_setup_anndata
        If true, runs setup_anndata() on dataset before returning.
    Returns
    -------
    AnnData with batch info (``.obs['batch']``), label info (``.obs['cell_types']``),
    protein expression (``.obsm["protein_expression"]``), and tissue (``.obs['tissue']``).
    Missing protein values are zero, when ``protein_join == "outer`` and are identified during ``AnnData`` setup.
    Examples
    --------
    >>> import scvi
    >>> adata = scvi.data.spleen_lymph_cite_seq()
    """
    # Thin public wrapper: all the work happens in the private loader.
    return _load_spleen_lymph_cite_seq(
        save_path=save_path,
        protein_join=protein_join,
        remove_outliers=remove_outliers,
        run_setup_anndata=run_setup_anndata,
    )
def enable_packet_aging(duthost):
    """
    Enable packet aging feature (only on MLNX switches)
    Args:
        duthost (AnsibleHost): Device Under Test (DUT)
    Returns:
        N/A
    """
    # Guard clause: the feature only exists on Mellanox devices.
    if not isMellanoxDevice(duthost):
        return
    # Copy the helper script into the syncd container, run it, then clean up.
    duthost.copy(src="qos/files/mellanox/packets_aging.py", dest="/tmp")
    duthost.command("docker cp /tmp/packets_aging.py syncd:/")
    duthost.command("docker exec syncd python /packets_aging.py enable")
    duthost.command("docker exec syncd rm -rf /packets_aging.py")
def test_remote_constructor_valid_ssh(mock_key, valid_ssh_conn):
    """Validate Remote command runner SSH connections strings."""
    environment, user, host, port = valid_ssh_conn
    runner = Remote(environment=environment)
    # The connection string must be decomposed into its three parts.
    assert (runner.user, runner.host, runner.port) == (user, host, port)
def getInputs(path, sequenceNames):
    """Return dataset sequence paths and the newick tree string for a dataset.

    Requires setting SON_TRACE_DATASETS variable and having access to datasets.
    """
    seqPath = os.path.join(TestStatus.getPathToDataSets(), path)
    sequences = [ os.path.join(seqPath, sequence) for sequence in sequenceNames ] #Same order as tree
    # NOTE(review): the tree is read relative to `path`, not `seqPath` —
    # confirm this asymmetry with the datasets root is intentional.
    newickTreeString = parseNewickTreeFile(os.path.join(path, "tree.newick"))
    return sequences, newickTreeString
def save_model(model, model_filepath):
    """
    Export the model as a pickle file
    Args:
        model: sklearn.model_selection.GridSearchCV.
        model_filepath: String. location to save the trained model
    """
    # Binary mode is required for pickle output.
    with open(model_filepath, 'wb') as handle:
        pickle.dump(model, handle)
def strftime_local(aware_time, fmt="%Y-%m-%d %H:%M:%S"):
    """Format ``aware_time`` as local time using ``fmt``.

    Returns None for empty input (time fields may be NULL).
    """
    if not aware_time:
        return None
    if timezone.is_aware(aware_time):
        # Convert to the local timezone before formatting.
        aware_time = timezone.localtime(aware_time)
    return aware_time.strftime(fmt)
def process_message(ws, message_json):
    """ Parse at high level and output JSON of message """
    message_type = message_json['Type']
    if message_type == "Refresh":
        # Dispatch refreshes by domain (absent domain is ignored).
        domain = message_json.get('Domain')
        if domain == "Login":
            process_login_response(ws, message_json)
        elif domain == "MarketByPrice":
            process_mbp_response(message_json, True)
    if message_type == "Update":
        process_mbp_response(message_json, False)
    elif message_type == "Ping":
        # Answer server pings with a Pong and echo it for logging.
        pong_json = {'Type': 'Pong'}
        ws.send(json.dumps(pong_json))
        print("SENT:")
        print(json.dumps(pong_json, sort_keys=True, indent=2, separators=(',', ':')))
def exclude(community, index_name, facets):
    """Exclude some facets on a given index for a given community."""
    community = OARepoCommunity.get_community(community)
    _validate_facets(index_name=index_name, facets=facets)
    with db.session.begin_nested():
        # Update only this index's entry. Previously the whole mapping was
        # replaced with {index_name: facets}, silently dropping exclusions
        # recorded for other indices (and making setdefault dead code).
        excluded = community.json.setdefault('excluded_facets', {})
        excluded[index_name] = facets
        flag_modified(community, 'json')
        db.session.add(community)
        db.session.commit()
    click.secho(f'Excluded: {",".join(facets)} on index {index_name} for {community.title}', fg='green')
def filter_issues_fixed_by_prs(issues, prs, show_related_prs, show_related_issues):
    """
    Find related issues to prs and prs to issues that are fixed.
    This adds extra information to the issues and prs listings.
    """
    # Closing keywords recognized in PR bodies ("fixes #123", "closes org/repo#4", ...).
    words = [
        'close', 'closes', 'fix', 'fixes', 'fixed', 'resolve', 'resolves',
        'resolved'
    ]
    pattern = re.compile(
        r'(?P<word>' + r'|'.join(words) + r') '
        r'((?P<repo>.*?)#(?P<number>\d*)|(?P<full_repo>.*)/(?P<number_2>\d*))',
        re.IGNORECASE, )
    issue_pr_map = {}   # issue URL -> list of PRs that fix it
    pr_issue_map = {}   # PR URL -> list of issues it fixes
    for pr in prs:
        is_pr = bool(pr.get('pull_request'))
        if is_pr:
            pr_url = pr.html_url
            pr_number = pr.number
            user = pr.user
            repo_url = pr_url.split('/pull/')[0] + '/issues/'
            pr_issue_map[pr_url] = []
            body = pr.body or ''
            # Remove blanks and markdown comments
            if body:
                lines = body.splitlines()
                no_comments = [l for l in lines
                               if (l and not l.startswith("<!---"))]
                body = '\n'.join(no_comments)
            for matches in pattern.finditer(body):
                dic = matches.groupdict()
                issue_number = dic['number'] or dic['number_2'] or ''
                repo = dic['full_repo'] or dic['repo'] or repo_url
                # Repo name can't have spaces.
                if ' ' not in repo:
                    # In case spyder-ide/loghub#45 was for example used
                    if 'http' not in repo:
                        repo = 'https://github.com/' + repo
                    if '/issues' not in repo:
                        issue_url = repo + '/issues/' + issue_number
                    elif repo.endswith('/') and issue_number:
                        issue_url = repo + issue_number
                    elif issue_number:
                        issue_url = repo + '/' + issue_number
                    else:
                        issue_url = None
                else:
                    issue_url = None
                # Set the issue data
                issue_data = {'url': pr_url, 'text': pr_number, 'user': user}
                if issue_url is not None:
                    # The map is keyed by issue URL; previously this tested
                    # `issue_number in issue_pr_map`, which never matched, so
                    # a second fixing PR overwrote the first instead of being
                    # appended.
                    if issue_url in issue_pr_map:
                        issue_pr_map[issue_url].append(issue_data)
                    else:
                        issue_pr_map[issue_url] = [issue_data]
                    pr_data = {'url': issue_url, 'text': issue_number}
                    pr_issue_map[pr_url].append(pr_data)
            if show_related_issues:
                pr['loghub_related_issues'] = pr_issue_map[pr_url]
    for issue in issues:
        issue_url = issue.html_url
        if issue_url in issue_pr_map and show_related_prs:
            issue['loghub_related_pulls'] = issue_pr_map[issue_url]
    # Now sort the numbers in descending order
    for issue in issues:
        related_pulls = issue.get('loghub_related_pulls', [])
        related_pulls = sorted(
            related_pulls, key=lambda p: p['url'], reverse=True)
        issue['loghub_related_pulls'] = related_pulls
    for pr in prs:
        related_issues = pr.get('loghub_related_issues', [])
        related_issues = sorted(
            related_issues, key=lambda i: i['url'], reverse=True)
        pr['loghub_related_issues'] = related_issues
    return issues, prs
def run_job(answer: str, job: dict, grade: float, feedback: str):
    """
    Match answer to regex inside job dictionary.
    Add weight to grade if successful, else add comment to feedback.
    :param answer: Answer.
    :param job: Dictionary with regex, weight, and comment.
    :param grade: Current grade for the answer.
    :param feedback: Current feedback for the answer.
    :return: Modified answer, grade, and feedback.
    """
    found = re.search(job["regex"], answer)
    if found is None:
        feedback += job["comment"] + "\n"
    else:
        grade += job["weight"]
        # Strip the first occurrence of the matched text from the answer.
        answer = answer.replace(found[0], "", 1)
    return answer, grade, feedback
def _round_sig(value):
    """Round ``value`` to 2 significant figures (3 for values >= 90000).

    Mirrors the display-rounding used across the popup statistics; returns
    ``value`` unchanged when it cannot be rounded (e.g. 0 or None, where
    ``log10``/comparison would raise).
    """
    try:
        sig = 1 if value < 90000 else 2
        return round(value, sig - int(floor(log10(abs(value)))))
    except Exception:
        return value


def _safe_int(value, divisor=1, do_round=True):
    """Return ``int(round(value / divisor, 2))`` guarding against nulls.

    Django aggregates return ``None`` when no rows match; rounding/int-ing
    ``None`` raises, in which case 0 is returned.

    :param value: raw aggregate value (may be None).
    :param divisor: optional scale factor (e.g. 10000 for m2 -> ha).
    :param do_round: when False, skip the ``round(..., 2)`` step.
    """
    try:
        if divisor != 1:
            value = value / divisor
        if do_round:
            value = round(value, 2)
        return int(value)
    except Exception:
        return 0


def __build_data__(feature, qars):
    """
    Return all the data needed to build the Benin republic departments Layer.

    :param feature: GeoJSON feature of one department; its properties must
        contain ``NAME_0`` (country) and ``NAME_1`` (department).
    :param qars: pre-fetched QAR records, passed through to the template.
    :return: dict of statistics and chart tables used by the popup template.
    """
    department_name = feature["properties"]["NAME_1"]
    data = {
        'qars': qars,
        'department': department_name,
        # Predictions are looked up by country name, then department name.
        'predictions': data_dictionary[
            feature["properties"]["NAME_0"]][department_name]["properties"],
    }
    # Loop through all departments in Benin Republic to get the ranking
    # (by total cashew tree cover, descending).
    cover_by_department = []
    for dept in DeptSatellite.objects.all():
        name = dept.department
        cover = CommuneSatellite.objects.filter(department=name).aggregate(
            Sum('cashew_tree_cover'))['cashew_tree_cover__sum']
        cover_by_department.append((name, cover))
    sorted_by_cover = sorted(cover_by_department, reverse=True, key=lambda tup: tup[1])
    ranked_names, _ = zip(*sorted_by_cover)
    # A small logic to solve the French symbols department error when viewed
    # on local host.
    if heroku:
        position = ranked_names.index(department_name)
    else:
        position = 1
    data["position"] = position
    # Maps a ranking position (stringified index) to its display label.
    data["my_dict"] = {
        '0': "highest", '1': "2nd", '2': "3rd", '3': "4th", '4': "5th",
        '5': "6th", '6': "7th", '7': "8th", '8': "9th", '9': "10th",
        '10': "11th", '11': "lowest",
    }
    # Per-commune chart tables (tree cover converted from m2 to hectares).
    pred_dept_data = []
    pred_ground_dept_data = [['Communes', 'Satellite Prediction', 'Ground Data Estimate']]
    for commune in CommuneSatellite.objects.filter(department=department_name):
        cover_ha = round(commune.cashew_tree_cover / 10000, 2)
        pred_dept_data.append([commune.commune, cover_ha])
        pred_ground_dept_data.append([commune.commune, cover_ha, cover_ha])
    data["pred_dept_data"] = pred_dept_data
    data["pred_ground_dept_data"] = pred_ground_dept_data

    # Load statistics from the database and format them for display on
    # popups. _safe_int guards against null aggregates (rounding None raises).
    communes = CommuneSatellite.objects.filter(department=department_name)
    yields = BeninYield.objects.filter(department=department_name)
    tree_ha_pred_dept = _safe_int(
        communes.aggregate(Sum('cashew_tree_cover'))['cashew_tree_cover__sum'],
        divisor=10000)
    data["tree_ha_pred_dept"] = tree_ha_pred_dept
    surface_area_d = _safe_int(
        yields.aggregate(Sum('surface_area'))['surface_area__sum'])
    data["surface_area_d"] = surface_area_d
    total_yield_d = _safe_int(
        yields.aggregate(Sum('total_yield_kg'))['total_yield_kg__sum'])
    data["total_yield_d"] = total_yield_d
    yield_ha_d = _safe_int(
        yields.aggregate(Avg('total_yield_per_ha_kg'))['total_yield_per_ha_kg__avg'])
    data["yield_ha_d"] = yield_ha_d
    # Used only as a fallback for r_yield_tree_d below.
    yield_tree_d = _safe_int(
        yields.aggregate(Avg('total_yield_per_tree_kg'))['total_yield_per_tree_kg__avg'])
    data["yield_tree_d"] = yield_tree_d
    num_tree_d = _safe_int(
        yields.aggregate(Sum('total_number_trees'))['total_number_trees__sum'],
        do_round=False)
    data["num_tree_d"] = num_tree_d
    sick_tree_d = _safe_int(
        yields.aggregate(Sum('total_sick_trees'))['total_sick_trees__sum'],
        do_round=False)
    data["sick_tree_d"] = sick_tree_d
    out_prod_tree_d = _safe_int(
        yields.aggregate(Sum('total_trees_out_of_prod'))['total_trees_out_of_prod__sum'],
        do_round=False)
    data["out_prod_tree_d"] = out_prod_tree_d
    dead_tree_d = _safe_int(
        yields.aggregate(Sum('total_dead_trees'))['total_dead_trees__sum'])
    data["dead_tree_d"] = dead_tree_d
    region_size_d = area(feature['geometry']) / 10000
    try:
        active_trees_d = num_tree_d - sick_tree_d - out_prod_tree_d - dead_tree_d
    except Exception:
        active_trees_d = 0
    data["active_trees_d"] = active_trees_d
    # Display-friendly significant-figure rounding of the figures above.
    data["r_tree_ha_pred_dept"] = _round_sig(tree_ha_pred_dept)
    data["r_surface_area_d"] = _round_sig(surface_area_d)
    r_total_yield_d = _round_sig(total_yield_d)
    data["r_total_yield_d"] = r_total_yield_d
    r_yield_ha_d = _round_sig(yield_ha_d)
    data["r_yield_ha_d"] = r_yield_ha_d
    try:
        yield_pred_dept = int(r_yield_ha_d * tree_ha_pred_dept)
    except Exception:
        yield_pred_dept = 0
    data["yield_pred_dept"] = yield_pred_dept
    data["r_yield_pred_dept"] = _round_sig(yield_pred_dept)
    try:
        r_yield_tree_d = round(r_total_yield_d / active_trees_d)
    except Exception:
        # active_trees_d == 0 -> fall back to the DB per-tree average.
        r_yield_tree_d = yield_tree_d
    data["r_yield_tree_d"] = r_yield_tree_d
    data["r_num_tree_d"] = _round_sig(num_tree_d)
    data["r_region_size_d"] = _round_sig(region_size_d)
    return data
def hook(callback):
    """Install *callback* as a global listener on all available mouses.

    The callback is invoked each time the mouse is moved, a button status
    changes or the wheel is spun; it receives a single mouse event argument
    of type `mouse.ButtonEvent`, `mouse.WheelEvent` or `mouse.MoveEvent`.

    Returns the given callback for easier development (e.g. decorator-style
    registration).
    """
    _listener.add_handler(callback)
    return callback
def notification_list(request):
    """
    Build the context dict holding the request user's notifications.
    """
    return {"notifications": Notification.get_notifications(user=request.user)}
def filter_samples_by_detected_language_via_langid(
    samples_iterator: Iterator[Sample],
    lang_code: str,
) -> Iterator[Sample]:
    """Yield only samples whose langid-detected language equals `lang_code`.

    Each document is converted to a simple text via the method
    `slub_docsa.data.preprocess.document.document_as_concatenated_string`
    before detection; samples without any text are dropped.

    Parameters
    ----------
    samples_iterator: Iterator[Sample]
        an iterator over samples that is being filtered
    lang_code: str
        the expected language

    Returns
    -------
    Iterator[Sample]
        an iterator over samples only including samples that match the expected language
    """
    def _matches_language(sample: Sample) -> bool:
        text = document_as_concatenated_string(sample.document)
        if text is None:
            # No text available -> language cannot be confirmed, drop sample.
            return False
        detected_lang_code = detect_language_from_text_via_langid(text)
        if detected_lang_code == lang_code:
            return True
        logger.debug(
            "document '%s' with unexpected detected language of '%s'",
            sample.document.uri,
            detected_lang_code
        )
        logger.debug("document text begins with: %s", text[:100])
        return False

    return filter_samples_by_condition(samples_iterator, _matches_language)
async def test_state_update(hass):
    """Test water heater is updated accordingly to data.

    Starts from the mock defaults, mutates the mocked system data (tank
    temperature and hot-water operating mode), then advances time so the
    integration refreshes and reflects the new values.
    """
    assert await setup_multimatic(hass)
    # Initial state comes straight from the SystemManagerMock defaults.
    _assert_state(hass, OperatingModes.AUTO, HotWater.MIN_TARGET_TEMP, 45, "off")
    dhw = SystemManagerMock.data["get_dhw"]
    # Simulate fresh data arriving from the API.
    SystemManagerMock.data["DomesticHotWaterTankTemperature"].value = 65
    dhw.hotwater.operating_mode = OperatingModes.ON
    dhw.hotwater.target_high = 45
    # Jump past the next update interval so the entity re-reads the mock.
    await goto_future(hass)
    _assert_state(hass, OperatingModes.ON, 45, 65, "off")
def predictClass(x, mus, sigmas, X_train, number_of_classes, class_probabilities):
    """
    For every model, calculate the likelihood of each class and pick the class
    with maximum likelihood.

    :param x: The datapoint we want to derive the class for.
    :param mus: A list with the mean vector for each method. First three are for first class, next three for
    second class, etc.
    :param sigmas: A list with the covariance matrix for each method. Same as mus.
    :param X_train: The train set - needed for Parzen Windows method.
    :param number_of_classes: The number of different classes in the dataset.
    :param class_probabilities: An array with the probability of each class.
    :return: A vector with the predicted classes by each model.
    """
    predictions = []
    # Parametric methods: Bayes classifier rule with a Gaussian likelihood.
    models_count = len(mus) // 2
    for model_idx in range(models_count):
        class_scores = []
        for cls in range(number_of_classes):
            # Entries for the same method are `models_count` apart in the lists.
            flat_idx = model_idx + cls * models_count
            class_scores.append(
                gaussian(x, mus[flat_idx], sigmas[flat_idx]) * class_probabilities[cls])
        predictions.append(np.argmax(class_scores))
    # Non-parametric method (Parzen windows), same Bayes classifier rule.
    class_scores = [question_d(X_train, x) * class_probabilities[cls]
                    for cls in range(number_of_classes)]
    predictions.append(np.argmax(class_scores))
    return predictions
def create_secret_id(vault, name, version=None):
    """Build the KeyVaultId of a secret.

    :param vault: The vault uri.
    :type vault: str
    :param name: The secret name.
    :type name: str
    :param version: The secret version.
    :type version: str
    :rtype: KeyVaultId
    """
    collection = 'secrets'
    return create_object_id(collection, vault, name, version)
def test_keyword__Table__3(address_book, KeywordFactory, browser):
    """A visitor is allowed to see the keywords in the `Table`."""
    # Create one keyword so the table has a row to render.
    KeywordFactory(address_book, u'Arbeit')
    browser.login('visitor')
    browser.open(browser.KEYWORDS_LIST_URL)
    # The table body should list exactly the keyword created above.
    assert ['Arbeit'] == browser.etree.xpath('//tbody/tr/td/a/text()')
def config_output_page():
    """
    Configuration landing page for outputs.

    :return: rendered config.html
    """
    config_type = "output"
    config_file = ConfigFile()
    # Load existing configuration from the default file, if it exists.
    config_file.load_from_file(DEFAULT_CONFIG_FILE)
    cdb = config_file.get_cdb()
    cdb.update_path(config_type)
    outputs = [config_file.get_output_from_data(doc) for doc in cdb.get_all()]
    output_types = config_file.get_outputs_available()
    config_descr = """
    Outputs act as stores - seperate from the local database - for host information
    """
    return render_template('config.html', items=outputs, config_type=config_type,
                           config_descr=config_descr, item_types=output_types)
def compute_all_aggregator_metrics(
    per_plan_confidences: np.ndarray,
    predictions: np.ndarray,
    ground_truth: np.ndarray,
    metric_name: Optional[str] = None
):
    """Compute every aggregator/base-metric combination over a batch.

    Batch size B, we assume a consistent number of predictions D per scene.

    per_plan_confidences: np.ndarray, shape (B, D), we assume that all
        prediction requests have the same number of proposed plans here.
    predictions: np.ndarray, shape (B, D, T, 2)
    ground_truth: np.ndarray, shape (B, T, 2), there is only one
        ground_truth trajectory for each prediction request.
    metric_name: Optional[str], if specified, compute a particular metric only.
    """
    # Select which base metrics / aggregators to compute.
    if metric_name is None:
        selected_bases = VALID_BASE_METRICS
        selected_aggs = VALID_AGGREGATORS
    else:
        selected_bases = [m for m in VALID_BASE_METRICS if m.upper() in metric_name]
        if not selected_bases:
            raise ValueError(f'Invalid metric name {metric_name} specified.')
        selected_aggs = [a for a in VALID_AGGREGATORS if a in metric_name]
        if not selected_aggs:
            raise ValueError(f'Invalid metric name {metric_name} specified.')
    results = defaultdict(list)
    for base_name in selected_bases:
        if base_name == 'ade':
            base_fn = average_displacement_error
        elif base_name == 'fde':
            base_fn = final_displacement_error
        else:
            raise NotImplementedError
        # For each prediction request:
        for req_preds, req_gt, req_confs in zip(
                predictions, ground_truth, per_plan_confidences):
            per_plan_losses = base_fn(predicted=req_preds, ground_truth=req_gt)
            for agg in selected_aggs:
                results[f'{agg}{base_name.upper()}'].append(
                    aggregate_prediction_request_losses(
                        aggregator=agg,
                        per_plan_losses=per_plan_losses,
                        per_plan_weights=_softmax_normalize(req_confs)))
    return {key: np.stack(values) for key, values in results.items()}
def query_url_base(_url, _proxy=True, _isPC=True, _isPhone=False):
    """Fetch a static page with ``requests`` (cannot scrape dynamic content).

    :param _url: URL to request.
    :param _proxy: route the request through the local SOCKS5 proxy.
    :param _isPC: send a desktop Chrome User-Agent.
    :param _isPhone: send a mobile Safari User-Agent (only when ``_isPC`` is
        false).
    :return: dict with any matched fields (currently ``'title'``) plus the
        page converted to plain text under ``'text'``.
    """
    _result = {}
    # Fix: header value was misspelled 'kepp-alive'.
    _headers = {'Connection': 'keep-alive'}
    if _isPC:
        _headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_2_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36'
    elif _isPhone:
        _headers['User-Agent'] = 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1'
    _proxy = {
        'http': 'socks5://127.0.0.1:1080',
        'https': 'socks5://127.0.0.1:1080',
    } if _proxy else None
    _pattern_dict = {
        'title': r"<(title|TITLE)>(?P<title>[^<>]+)</(title|TITLE)>"}
    # NOTE(review): verify=False disables TLS certificate checking — only
    # acceptable for trusted scraping targets; consider removing.
    response = requests.post(_url, proxies=_proxy, headers=_headers, verify=False, timeout=30)
    content = response.text
    for key, pattern in _pattern_dict.items():
        _match = re.search(pattern, content)
        # Bug fix: the guard used to test `re.match` (the function object,
        # always truthy), so a failed search crashed on `.groupdict()` below.
        if not _match:
            continue
        _result[key] = _match.groupdict()[key]
    _result['text'] = html2text(content)
    return _result
def timefstring(dtobj, tz_name=True):
    """Standardize the format used for timestamp string format.

    When *tz_name* is true the three-letter timezone abbreviation is
    appended (empty for naive datetimes); otherwise the literal suffix
    ``NTZ`` ("no timezone") is used.
    """
    base_format = "%Y-%m-%d_%H:%M:%S"
    if tz_name:
        return dtobj.strftime(base_format + "%Z")
    return dtobj.strftime(base_format) + "NTZ"
def is_block(modules):
    """Check whether *modules* is a ShuffleUnit building block."""
    return isinstance(modules, (ShuffleUnit, ))
def run(job_input: IJobInput):
    """
    Entry point required for VDK to recognise this script as a Data Job step.

    VDK provides to every python step an object - job_input - that has methods for:

    * executing queries to OLAP Database;
    * ingesting data into a database;
    * processing data into a database.

    See IJobInput documentation for more details.
    """
    log.info(f"Starting job step {__name__}")
    # Example ingestion: send a single record to the hello_world table.
    payload = {"id": "Hello World!"}
    job_input.send_object_for_ingestion(
        payload=payload, destination_table="hello_world"
    )
def add_data(
    dates=None,
    product="AOD15",
    *,
    inv_type=None,
    latlonbox=None,
    siteid=None,
    daily=False,
    lunar=False,
    #
    # post-proc
    freq=None,
    detect_dust=False,
    interp_to_aod_values=None,
    #
    # joblib
    n_procs=1,
    verbose=10,
):
    """Load AERONET data from the AERONET Web Service.
    Parameters
    ----------
    dates : array-like of datetime-like
        Expressing the desired min and max dates to retrieve.
        If unset, the current day will be fetched.
    product : str
    inv_type : str
        Inversion product type.
    latlonbox : array-like of float
        ``[lat1, lon1, lat2, lon2]``,
        where ``lat1, lon1`` is the lower-left corner
        and ``lat2, lon2`` is the upper-right corner.
    siteid : str
        Site identifier string.
        See https://aeronet.gsfc.nasa.gov/aeronet_locations_v3.txt for all valid site IDs.
        .. warning::
           Whether you will obtain data depends on the sites active
           during the `dates` time period.
        .. note::
           `siteid` takes precendence over `latlonbox`
           if both are specified.
    daily : bool
        Load daily averaged data.
    lunar : bool
        Load provisional lunar "Direct Moon" data instead of the default "Direct Sun".
        Only for non-inversion products.
    freq : str
        Frequency used to resample the DataFrame.
    detect_dust : bool
    interp_to_aod_values : array-like of float
        Values to interpolate AOD values to.
        Currently requires pytspack.
    n_procs : int
        For joblib.
    verbose : int
        For joblib.
    Returns
    -------
    pandas.DataFrame
    """
    a = AERONET()
    if interp_to_aod_values is not None:
        interp_to_aod_values = np.asarray(interp_to_aod_values)
    # Shared query options forwarded to each fetch (parallel or serial).
    kwargs = dict(
        product=product,
        inv_type=inv_type,
        latlonbox=latlonbox,
        siteid=siteid,
        daily=daily,
        lunar=lunar,
        detect_dust=detect_dust,
        interp_to_aod_values=interp_to_aod_values,
    )
    # -1 is the joblib convention for "use all processors".
    requested_parallel = n_procs > 1 or n_procs == -1
    if has_joblib and requested_parallel:
        # Split up by day
        # NOTE(review): this path assumes `dates` supports .min()/.max()
        # (e.g. a pandas DatetimeIndex); `dates=None` would fail here.
        min_date = dates.min()
        max_date = dates.max()
        days = pd.date_range(start=min_date, end=max_date, freq="D")  # TODO: subtract 1?
        days1 = days + pd.Timedelta(days=1)
        # One joblib task per day; resampling is deferred (freq=None) until
        # after the per-day results are concatenated.
        dfs = Parallel(n_jobs=n_procs, verbose=verbose)(
            delayed(_parallel_aeronet_call)(pd.DatetimeIndex([d1, d2]), **kwargs, freq=None)
            for d1, d2 in zip(days, days1)
        )
        # Adjacent day windows overlap at the boundary; drop the duplicates.
        df = pd.concat(dfs, ignore_index=True).drop_duplicates()
        if freq is not None:
            df.index = df.time
            df = df.groupby("siteid").resample(freq).mean().reset_index()
        return df.reset_index(drop=True)
    else:
        if not has_joblib and requested_parallel:
            print(
                "Please install joblib to use the parallel feature of monetio.aeronet. "
                "Proceeding in serial mode..."
            )
        # Serial path: a single fetch; resampling handled inside add_data.
        df = a.add_data(
            dates=dates,
            **kwargs,
            freq=freq,
        )
    return df
def create_tiled_cogs(
    input_file: str,
    output_directory: str,
    raise_on_fail: bool = True,
) -> None:
    """Split tiff into tiles and create COGs.

    Args:
        input_file (str): Path to the source GeoTIFF.
        output_directory (str): The directory to which the COGs will be written.
        raise_on_fail (bool, optional): Whether to raise error on failure.
            Defaults to True.

    Returns:
        None
    """
    logger.info(f"Retiling {input_file}")
    try:
        with TemporaryDirectory() as tmp_dir:
            cmd = [
                "gdal_retile.py",
                "-ps",
                str(TILING_PIXEL_SIZE[0]),
                str(TILING_PIXEL_SIZE[1]),
                "-targetDir",
                tmp_dir,
                input_file,
            ]
            # Fix: pre-bind `output` so the `finally` log below cannot hit an
            # unbound local if check_output raises something unexpected.
            output = None
            try:
                output = check_output(cmd)
            except CalledProcessError as e:
                output = e.output
                raise
            finally:
                logger.info(f"output: {str(output)}")
            for tile_path in glob(f"{tmp_dir}/*.tif"):
                # Fix: use a distinct name instead of rebinding the
                # `input_file` parameter, so the error log in the except
                # clause reports the original source file, not a tile.
                output_file = os.path.join(output_directory,
                                           os.path.basename(tile_path))
                with rasterio.open(tile_path, "r") as dataset:
                    contains_data = dataset.read().any()
                # Exclude empty files
                if contains_data:
                    create_cog(tile_path, output_file, raise_on_fail, False)
    except Exception:
        logger.error("Failed to process {}".format(input_file))
        if raise_on_fail:
            raise
def _func(*args, **kwargs):
"""Test function used in some tests."""
return args, kwargs | 34,592 |
def combine_parallel_circuits(IVprev_cols, pvconst):
    """
    Combine crosstied circuits in a substring into one series IV curve.

    :param IVprev_cols: lists of IV curves of crosstied and series circuits
    :param pvconst: PV constants object providing ``calcParallel`` and
        ``calcSeries``
    :return: combined series IV curve
    """
    row_currents = []
    row_voltages = []
    row_iscs = []
    row_imaxes = []
    # Combine the crosstied circuits column-by-column into parallel rows.
    for column_curves in zip(*IVprev_cols):
        currents, voltages = zip(*column_curves)
        currents = np.asarray(currents)
        voltages = np.asarray(voltages)
        i_row, v_row = pvconst.calcParallel(
            currents, voltages, voltages.max(), voltages.min()
        )
        row_currents.append(i_row)
        row_voltages.append(v_row)
        # Short-circuit current of the row (current at V = 0).
        row_iscs.append(np.interp(np.float64(0), v_row, i_row))
        row_imaxes.append(i_row.max())
    return pvconst.calcSeries(
        np.asarray(row_currents), np.asarray(row_voltages),
        np.asarray(row_iscs).mean(), np.asarray(row_imaxes).max()
    )
def update_symlinks_generic(dirname, attr, values):
    """(Re)create ``<attr>/<value>/<dirname>`` symlinks to ``_data/<dirname>``.

    dirname should be without "_data"; the working directory must be the
    project root.

    :param dirname: name of the data directory under ``_data``
    :param attr: attribute directory grouping the per-value subdirectories
    :param values: iterable of value names; one symlink is (re)created each
    """
    for value in values:
        value_dir = Path(os.path.join(attr, value))
        # parents=True so a missing attribute directory is created too.
        value_dir.mkdir(parents=True, exist_ok=True)
        dst = os.path.join(value_dir, dirname)
        # Fix: lexists (not exists) so stale/broken symlinks are also
        # replaced instead of making os.symlink raise FileExistsError.
        if os.path.lexists(dst):
            os.unlink(dst)
        os.symlink(os.path.join("..", "..", "_data", dirname), dst)
def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str,
                     module_file: str, serving_model_dir: str,
                     metadata_path: str) -> tfx.dsl.Pipeline:
  """Creates a penguin pipeline with TFX.

  The pipeline wires together CsvExampleGen, Trainer, a latest-blessed-model
  Resolver, Evaluator, and Pusher.

  Args:
    pipeline_name: Name identifying the pipeline.
    pipeline_root: Root directory for pipeline artifacts.
    data_root: Directory containing the CSV input data.
    module_file: Path of the user module with the training code.
    serving_model_dir: Directory blessed models are pushed to.
    metadata_path: Path of the SQLite ML Metadata store.
  """
  # Brings data into the pipeline.
  example_gen = tfx.components.CsvExampleGen(input_base=data_root)
  # Uses user-provided Python function that trains a model.
  trainer = tfx.components.Trainer(
      module_file=module_file,
      examples=example_gen.outputs['examples'],
      train_args=tfx.proto.TrainArgs(num_steps=100),
      eval_args=tfx.proto.EvalArgs(num_steps=5))
  # NEW: Get the latest blessed model for Evaluator.
  model_resolver = tfx.dsl.Resolver(
      strategy_class=tfx.dsl.experimental.LatestBlessedModelStrategy,
      model=tfx.dsl.Channel(type=tfx.types.standard_artifacts.Model),
      model_blessing=tfx.dsl.Channel(
          type=tfx.types.standard_artifacts.ModelBlessing)).with_id(
              'latest_blessed_model_resolver')
  # NEW: Uses TFMA to compute evaluation statistics over features of a model and
  #   perform quality validation of a candidate model (compared to a baseline).
  eval_config = tfma.EvalConfig(
      model_specs=[tfma.ModelSpec(label_key='species')],
      slicing_specs=[
          # An empty slice spec means the overall slice, i.e. the whole dataset.
          tfma.SlicingSpec(),
          # Calculate metrics for each penguin species.
          tfma.SlicingSpec(feature_keys=['species']),
      ],
      metrics_specs=[
          tfma.MetricsSpec(per_slice_thresholds={
              'sparse_categorical_accuracy':
                  tfma.config.PerSliceMetricThresholds(thresholds=[
                      tfma.PerSliceMetricThreshold(
                          slicing_specs=[tfma.SlicingSpec()],
                          threshold=tfma.MetricThreshold(
                              value_threshold=tfma.GenericValueThreshold(
                                  lower_bound={'value': 0.6}),
                              # Change threshold will be ignored if there is no
                              # baseline model resolved from MLMD (first run).
                              change_threshold=tfma.GenericChangeThreshold(
                                  direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                                  absolute={'value': -1e-10}))
                      )]),
          })],
      )
  evaluator = tfx.components.Evaluator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'],
      baseline_model=model_resolver.outputs['model'],
      eval_config=eval_config)
  # Checks whether the model passed the validation steps and pushes the model
  # to a file destination if check passed.
  pusher = tfx.components.Pusher(
      model=trainer.outputs['model'],
      model_blessing=evaluator.outputs['blessing'],  # Pass an evaluation result.
      push_destination=tfx.proto.PushDestination(
          filesystem=tfx.proto.PushDestination.Filesystem(
              base_directory=serving_model_dir)))
  components = [
      example_gen,
      trainer,
      # Following two components were added to the pipeline.
      model_resolver,
      evaluator,
      pusher,
  ]
  return tfx.dsl.Pipeline(
      pipeline_name=pipeline_name,
      pipeline_root=pipeline_root,
      metadata_connection_config=tfx.orchestration.metadata
      .sqlite_metadata_connection_config(metadata_path),
      components=components)
def mask_conv2d(module, c_in, c_out):
    """Apply input/output channel masks when *module* is a Conv2d; no-op otherwise."""
    if isinstance(module, torch.nn.Conv2d):
        mask_conv2d_in_channels(module, c_in)
        mask_conv2d_out_channels(module, c_out)
def create_global_step() -> tf.Variable:
  """Creates a `tf.Variable` suitable for use as a global step counter.

  Creating and managing a global step variable may be necessary for
  `AbstractTrainer` subclasses that perform multiple parameter updates per
  `Controller` "step", or use different optimizers on different steps.

  In these cases, an `optimizer.iterations` property generally can't be used
  directly, since it would correspond to parameter updates instead of
  iterations in the `Controller`'s training loop. Such use cases should simply
  call `step.assign_add(1)` at the end of each step.

  Returns:
    A non-trainable scalar `tf.Variable` of dtype `tf.int64`, with only the
    first replica's value retained when synchronizing across replicas in
    a distributed setting.
  """
  step = tf.Variable(
      initial_value=0,
      dtype=tf.int64,
      trainable=False,
      aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
  )
  return step
def supported_coins_balance(balance, tickers):
    """
    Return the balance with non-supported coins removed.

    A coin counts as supported when a ``<coin>/BTC`` pair exists in *tickers*.
    """
    result = {}
    for coin, amount in balance.items():
        if coin == "BTC":
            continue
        if f"{coin}/BTC" in tickers:
            result[coin] = amount
        else:
            # NOTE(review): mirrors the original behavior — an unsupported
            # coin's amount is stored under the "BTC" key (overwriting any
            # previous value). This looks suspicious; confirm the intent.
            try:
                result["BTC"] = amount
            except KeyError:
                print("BTC not in balance")
    return result
def _identity_map(size):
"""Function returning list of lambdas mapping vector to itself."""
return [lambda x, id: x[id] for _ in range(size)] | 34,599 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.