| content (string, lengths 22-815k) | id (int64, 0-4.91M) |
|---|---|
def _GetDatabaseLookupFunction(filename, flaps, omega_hat, thrust_coeff):
"""Produces a lookup function from an aero database file."""
db = load_database.AeroDatabase(filename)
def _Lookup(alpha, beta, dflaps=None, domega=None):
if dflaps is None:
dflaps = np.zeros((system_types.kNumFlaps,))
if domega is None:
domega = np.zeros((3,))
return db.CalcFMCoeff(alpha, beta, flaps + dflaps, omega_hat + domega,
thrust_coeff)
return _Lookup, db.format
| 5,343,800
|
def parse_copy_core_dump(raw_result):
"""
Parse the raw output of the 'copy core-dump' command.
:param str raw_result: copy core-dump raw result string.
:rtype: dict
:return: The parsed result of the copy core-dump to server, e.g.:
::
{
'status': 'success',
'reason': 'core dump copied'
}
"""
if "Error code " in raw_result:
return {"status": "failed", "reason": "Error found while coping"}
if "No coredump found for" in raw_result:
return {"status": "failed", "reason": "no core dump found"}
if "Failed to validate instance ID" in raw_result:
return {"status": "failed", "reason": "instance ID not valid"}
if "ssh: connect to host" in raw_result:
return {"status": "failed", "reason": "ssh-connection issue for SFTP"}
if (
"copying ..." in raw_result and
"Sent " in raw_result and
"bytes" in raw_result and
"seconds" in raw_result
):
return {"status": "success", "reason": "core dump copied"}
else:
return {"status": "failed", "reason": "undefined error"}
| 5,343,801
|
def _handle_requirements(hass, component, name):
"""Install the requirements for a component."""
if hass.config.skip_pip or not hasattr(component, 'REQUIREMENTS'):
return True
for req in component.REQUIREMENTS:
if not pkg_util.install_package(req, target=hass.config.path('lib')):
_LOGGER.error('Not initializing %s because could not install '
'dependency %s', name, req)
return False
return True
| 5,343,802
|
def info_fits(fitsfilename, **kwargs):
"""
Print the information about a fits file.
Parameters
----------
fitsfilename : str
Path to the fits file.
**kwargs: optional
Optional arguments to the astropy.io.fits.open() function. E.g.
"output_verify" can be set to ignore, in case of non-standard header.
"""
with ap_fits.open(fitsfilename, memmap=True, **kwargs) as hdulist:
hdulist.info()
| 5,343,803
|
def ticket_id_url(workspace, number):
"""
The url for a specific ticket in a specific workspace
:param workspace: The workspace
:param number: The number of the ticket
:return: The url to fetch that specific ticket
"""
return basic_url + 'spaces/' + workspace + '/tickets/' + number + '.json'
| 5,343,804
|
def get_cifar10(data_path):
"""Returns cifar10 dataset.
Args:
data_path: dataset location.
Returns:
tuple (training instances, training labels,
testing instances, testing labels)
Instances are arrays of shape (# of instances, dimension).
"""
x_train = np.zeros((50000, 3072))
y_train = np.zeros((50000,), dtype=int)
x_val = np.zeros((10000, 3072))
y_val = np.zeros((10000,), dtype=int)
cur = 0
for batch_index in range(1, 6):
with tf.gfile.Open(
os.path.join(data_path,
"cifar-10-batches-py/data_batch_%d" % batch_index),
"rb") as fo:
batch_data = cPickle.load(fo)
m = batch_data["data"].shape[0]
x_train[cur:cur + m, :] = batch_data["data"].astype(np.float32)
y_train[cur:cur + m] = np.array(batch_data["labels"])
cur += m
assert cur == 50000
with tf.gfile.Open(
os.path.join(data_path, "cifar-10-batches-py/test_batch"), "rb") as fo:
batch_data = cPickle.load(fo)
x_val = batch_data["data"].astype(np.float32)
y_val = np.array(batch_data["labels"])
x_train /= 255.0
x_val /= 255.0
return (x_train, y_train, x_val, y_val)
| 5,343,805
|
def compute_lifting_parameter(lamb, lambda_plane_idxs, lambda_offset_idxs, cutoff):
"""One way to compute a per-particle "4D" offset in terms of an adjustable lamb and
constant per-particle parameters.
Notes
-----
(ytz): this initializes the 4th dimension to a fixed plane, adjusted by an offset
and then scaled by the cutoff.
lambda_plane_idxs are typically 0 or 1 and allow us to turn off an interaction
independent of the lambda value.
lambda_offset_idxs are typically 0 or 1, and allow us to adjust the w coordinate
in a lambda-dependent way.
"""
w = cutoff * (lambda_plane_idxs + lambda_offset_idxs * lamb)
return w
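# Small illustrative example (values are arbitrary): with cutoff=1.2, a particle with
# lambda_plane_idx=0 and lambda_offset_idx=1 moves from w=0 at lamb=0 to w=cutoff at
# lamb=1, while a particle with lambda_plane_idx=1 and lambda_offset_idx=0 stays pinned
# at w=cutoff for every lamb.
import numpy as np

lambda_plane_idxs = np.array([0, 1])
lambda_offset_idxs = np.array([1, 0])
for lamb in (0.0, 0.5, 1.0):
    w = compute_lifting_parameter(lamb, lambda_plane_idxs, lambda_offset_idxs, cutoff=1.2)
    print(lamb, w)  # e.g. 0.5 -> [0.6 1.2]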
| 5,343,806
|
def create_controller():
"""
1. Check the token
2. Call the worker method
"""
minimum_buffer_min = 3
if views.ds_token_ok(minimum_buffer_min):
# 2. Call the worker method
# More data validation would be a good idea here
# Strip anything other than characters listed
pattern = re.compile('([^\w \-\@\.\,])+')
signer_email = pattern.sub('', request.form.get('signer_email'))
signer_name = pattern.sub('', request.form.get('signer_name'))
cc_email = pattern.sub('', request.form.get('cc_email'))
cc_name = pattern.sub('', request.form.get('cc_name'))
envelope_args = {
'signer_email': signer_email,
'signer_name': signer_name,
'cc_email': cc_email,
'cc_name': cc_name,
'status': 'sent',
}
args = {
'account_id': session['ds_account_id'],
'base_path': session['ds_base_path'],
'ds_access_token': session['ds_access_token'],
'envelope_args': envelope_args
}
try:
results = worker(args)
except ApiException as err:
error_body_json = err and hasattr(err, 'body') and err.body
# we can pull the DocuSign error code and message from the response body
error_body = json.loads(error_body_json)
error_code = error_body and 'errorCode' in error_body and error_body['errorCode']
error_message = error_body and 'message' in error_body and error_body['message']
# In production, may want to provide customized error messages and
# remediation advice to the user.
return render_template('error.html',
err=err,
error_code=error_code,
error_message=error_message
)
if results:
session["envelope_id"] = results["envelope_id"] # Save for use by other examples
# which need an envelopeId
return render_template('example_done.html',
title="Envelope sent",
h1="Envelope sent",
message=f"""The envelope has been created and sent!<br/>
Envelope ID {results["envelope_id"]}."""
)
else:
flash('Sorry, you need to re-authenticate.')
# We could store the parameters of the requested operation
# so it could be restarted automatically.
# But since it should be rare to have a token issue here,
# we'll make the user re-enter the form data after
# authentication.
session['eg'] = url_for(eg)
return redirect(url_for('ds_must_authenticate'))
| 5,343,807
|
def reproduce_load_profile(neural_model, simulation_model: CHPP_HWT, input_data, logger):
"""
Tries to follow a real load profile
"""
# make sure the random seeds are different in each process
#np.random.seed(int.from_bytes(os.urandom(4), byteorder='little'))
temperature, powers, heat_demand = input_data
time_step_count = powers.shape[0]
# save initial states to restore them later
result = {}
result['temp_offset'] = max(-min(temperature) + 60, 0)
temperature += result['temp_offset']
# determine the initial state
simulation_model.eval() # sample with eval() setting
simulation_model.chpp.mode = 0 if powers[0] > -3000 else 1
simulation_model.chpp.min_off_time = 900
simulation_model.chpp.min_on_time = 900
simulation_model.chpp.dwell_time = 900
simulation_model.hwt.temperature = temperature[0]
simulation_model.demand.demand = heat_demand[0]
simulation_model.demand.forecast_series = heat_demand[1:].reshape(-1,1)
neural_model.load_state(simulation_model.state)
simulation_model.train() # strict constraints (which the ANN should have learned)
# do a forecast in order to predetermine the external input and the mask required to update inputs
sampling_parameters = {}
forecast, forecast_mask = simulation_model.forecast(time_step_count, **sampling_parameters)
result['infeasible_at'] = time_step_count
result['classified_infeasible_at'] = time_step_count
delta_temp_ann = []
delta_temp_sim = []
for step, power in enumerate(powers):
ann_feasible = neural_model.feasible_actions
sim_feasible = simulation_model.feasible_actions
delta_temp_ann.append(neural_model.state[-2] - temperature[step])
delta_temp_sim.append(simulation_model.state[-2] - temperature[step])
# identify the correct action to follow
if power > -3000: # off
action_choice = simulation_model.chpp.state_matrix[simulation_model.chpp.mode][0][0]
else: # on
action_choice = simulation_model.chpp.state_matrix[simulation_model.chpp.mode][1][0]
if not np.isin(action_choice, sim_feasible) and result['infeasible_at'] >= time_step_count:
# infeasible action and therefore an infeasible load profile
# an entry smaller than time_step_count means it has already been detected as infeasible
result['infeasible_at'] = step
if not np.isin(action_choice, ann_feasible) and result['classified_infeasible_at'] >= time_step_count:
# action deemed infeasible
# an entry smaller than time_step_count means it has already been detected as infeasible
result['classified_infeasible_at'] = step
# keep going to see whether the simulation model can reproduce the schedule or not
# while a not detected infeasibility is actually an error at this moment,
# the remaining load schedule could still provide further indications that it is actually infeasible
# (proceeding like this is also required for comparability with Bremer2015)
state, interaction = neural_model.transition(action_choice)
simulation_model.transition(action_choice)
if step + 1 < time_step_count:
# post processing to incorporate forecasts
neural_model.state = state * (1-forecast_mask[step+1]) + forecast_mask[step+1] * forecast[step+1]
#else:
# reached final step without stopping due to a detected infeasibility
result['delta_temp'] = delta_temp_ann
result['[delta_temp]'] = delta_temp_sim
return result
| 5,343,808
|
def session_pca(imgs, mask_img, parameters,
n_components=20,
confounds=None,
memory_level=0,
memory=Memory(cachedir=None),
verbose=0,
copy=True):
"""Filter, mask and compute PCA on Niimg-like objects
This is a helper function that first calls `base_masker.filter_and_mask`
and then applies a PCA to reduce the number of time series.
Parameters
----------
imgs: list of Niimg-like objects
See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.
List of subject data
mask_img: Niimg-like object
See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.
Mask to apply on the data
parameters: dictionary
Dictionary of parameters passed to `filter_and_mask`. Please see the
documentation of the `NiftiMasker` for more information.
confounds: CSV file path or 2D matrix
This parameter is passed to signal.clean. Please see the
corresponding documentation for details.
n_components: integer, optional
Number of components to be extracted by the PCA
memory_level: integer, optional
Integer indicating the level of memorization. The higher, the more
function calls are cached.
memory: joblib.Memory
Used to cache the function calls.
verbose: integer, optional
Indicate the level of verbosity (0 means no messages).
copy: boolean, optional
Whether or not data should be copied
"""
data, affine = cache(
filter_and_mask, memory, memory_level=memory_level,
func_memory_level=2,
ignore=['verbose', 'memory', 'memory_level', 'copy'])(
imgs, mask_img, parameters,
memory_level=memory_level,
memory=memory,
verbose=verbose,
confounds=confounds,
copy=copy)
if n_components <= data.shape[0] // 4:
U, S, _ = randomized_svd(data.T, n_components)
else:
U, S, _ = linalg.svd(data.T, full_matrices=False)
U = U.T[:n_components].copy()
S = S[:n_components]
return U, S
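# Standalone sketch of the SVD selection rule used above, on synthetic data rather than
# a masked fMRI session: randomized SVD when the requested rank is small relative to the
# number of samples, full SVD otherwise.
import numpy as np
from scipy import linalg
from sklearn.utils.extmath import randomized_svd

data = np.random.RandomState(0).rand(40, 200)  # (n_timepoints, n_voxels)
n_components = 5
if n_components <= data.shape[0] // 4:
    U, S, _ = randomized_svd(data.T, n_components)
else:
    U, S, _ = linalg.svd(data.T, full_matrices=False)
U = U.T[:n_components].copy()
S = S[:n_components]
print(U.shape, S.shape)  # (5, 200) (5,)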
| 5,343,809
|
def numpy_napoleon(prnt_doc, child_doc):
"""Behaves identically to the 'numpy' style, but abides by the docstring sections
specified by the "Napoleon" standard.
For more info regarding the Napoleon standard, see:
http://sphinxcontrib-napoleon.readthedocs.io/en/latest/index.html#docstring-sections
Example:
- parent's docstring:
''' Parent's line
Keyword Arguments
-----------------
x: int
description of x
y: Union[None, int]
description of y
Raises
------
NotImplemented Error'''
- child's docstring:
''' Child's line
Returns
-------
int
Notes
-----
notes blah blah'''
- docstring that is ultimately inherited:
''' Child's line
Keyword Arguments
-----------------
x: int
description of x
y: Union[None, int]
description of y
Returns
-------
int
Notes
-----
notes blah blah'''
"""
return merge_numpy_napoleon_docs(prnt_doc, child_doc)
| 5,343,810
|
def search_unique_identities_slice(db, term, offset, limit):
"""Look for unique identities using slicing.
This function returns those unique identities which match with the
given `term`. The term will be compared with name, email, username
and source values of each identity. When an empty term is given,
all unique identities will be returned. The results are limited
by `offset` (starting on 0) and `limit`.
Along with the list of unique identities, this function returns
the total number of unique identities that match the given `term`.
:param db: database manager
:param term: term to match with unique identities data
:param offset: return results starting on this position
:param limit: maximum number of unique identities to return
:raises InvalidValueError: raised when either the given value of
`offset` or `limit` is lower than zero
"""
uidentities = []
pattern = '%' + term + '%' if term else None
if offset < 0:
raise InvalidValueError('offset must be greater than or equal to 0 - %s given'
% str(offset))
if limit < 0:
raise InvalidValueError('limit must be greater than or equal to 0 - %s given'
% str(limit))
with db.connect() as session:
query = session.query(UniqueIdentity).\
join(Identity).\
filter(UniqueIdentity.uuid == Identity.uuid)
if pattern:
query = query.filter(Identity.name.like(pattern)
| Identity.email.like(pattern)
| Identity.username.like(pattern)
| Identity.source.like(pattern))
query = query.group_by(UniqueIdentity).\
order_by(UniqueIdentity.uuid)
# Get the total number of unique identities for that search
nuids = query.count()
start = offset
end = offset + limit
uidentities = query.slice(start, end).all()
# Detach objects from the session
session.expunge_all()
return uidentities, nuids
| 5,343,811
|
def get_change_description(req_sheet, row_num):
""" Accessor for Change Description
Args:
req_sheet: A variable holding an Excel Workbook sheet in memory.
row_num: A variable holding the row # of the data being accessed.
Returns:
A string value of the Change Description
"""
return (req_sheet['B' + str(row_num)].value)
| 5,343,812
|
def reduce_pca(data_df, n_components=None):
"""
Uses PCA to reduce dimension.
Parameters:
data_df (DataFrame): The input data in DataFrame format
n_components (float): The number of components to reduce to. If the number is between 0 and 1, enough
components are kept to explain that fraction of the variance. Default is all components.
returns:
DataFrame: returns the data in the reduced dimension
"""
new_df = data_df.reset_index(drop=True)
data_np = new_df.to_numpy()
#Standardize the data by removing the mean and scaling to unit variance
pca_np = StandardScaler().fit_transform(data_np)
pca = PCA(n_components)
embedded = pca.fit_transform(pca_np)
return(pd.DataFrame(embedded, index=data_df.index))
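# Hedged usage sketch with a synthetic DataFrame (column names are arbitrary); it assumes
# the module's own sklearn imports (StandardScaler, PCA) and pandas/numpy are in scope.
import numpy as np
import pandas as pd

rng = np.random.RandomState(0)
base = rng.rand(100, 1)
demo_df = pd.DataFrame(base + 0.05 * rng.rand(100, 5), columns=list("abcde"))
reduced = reduce_pca(demo_df, n_components=2)
print(reduced.shape)  # (100, 2)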
| 5,343,813
|
def get_rule_set_map_file(cntlr, map_name, mode='r'):
"""Get the location of the rule set map
The rule set map will be in the application data folder for the xule plugin. An initial copy is in the
plugin folder for xule. If the map is not found in the application data folder, the initial copy is copied
to the application folder.
:param cntlr: Arelle controler
:type cntlr: Cntlr
:yield: an open file object for the rule set map
:rtype: file object
"""
rule_set_map_file_name = get_rule_set_map_file_name(cntlr, map_name)
if not os.path.isfile(rule_set_map_file_name):
# See if there is an initial copy in the plugin folder
if os.path.isabs(map_name):
initial_copy_file_name = map_name
else:
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
initial_copy_file_name = os.path.join(current_dir, map_name)
if not os.path.isfile(initial_copy_file_name):
raise xrt.XuleMissingRuleSetMap("Cannot find rule set map file for '{}'. This file is needed to determine which rule set to use.".format(map_name))
os.makedirs(os.path.dirname(rule_set_map_file_name), exist_ok=True)
shutil.copyfile(initial_copy_file_name, rule_set_map_file_name)
# Open the rule set map file
try:
rule_set_map_file_object = open(rule_set_map_file_name, mode)
except:
raise XuleProcessingError(_("Unable to open map file at {}".format(rule_set_map_file_name)))
yield rule_set_map_file_object
# Clean up
rule_set_map_file_object.close()
| 5,343,814
|
def create_disjoint_intervals(draw,
dtype,
n_intervals=10,
dt=1,
time_range=(0, 100),
channel_range=(2000, 2119),
length_range=(1, 1), ):
"""
Function which generates a hypothesis strategy for a fixed number
of disjoint intervals
:param dtype: Can be any strax-like dtype either with endtime or
dt and length field.
:param n_intervals: How many disjoint intervals should be returned.
:param dt: Sampling field, only needed for length + dt fields.
:param time_range: Time range in which random numbers will be
generated.
:param channel_range: Range of channels for which the disjoint
intervals will be generated. For a single channel set min/max
equal.
:param length_range: Range how long time intervals can be.
:return: hypothesis strategy which can be used in @given
Note:
You can use create_disjoint_intervals().example() to see an
example.
If you do not want to specify the bounds for any of the "_range"
parameters set the corresponding bound to None.
Somehow hypothesis complains that the creation of these events
takes too long ~2 s for 50 intervals. You can disable the
corresponding health checks via:" @settings(
suppress_health_check=[hypothesis.HealthCheck.large_base_example,
hypothesis.HealthCheck.too_slow])"
"""
n = 0
if not hasattr(dtype, 'fields'):
# Convert dtype into numpy dtype
dtype = np.dtype(dtype)
is_dt = True
if 'endtime' in dtype.fields:
# Check whether interval uses dt fields or endtime
is_dt = False
strategy_example = np.zeros(n_intervals, dtype)
if is_dt:
strategy_example['dt'] = dt
while n < n_intervals:
# Create interval values:
time = draw(hst.integers(*time_range))
channel = draw(hst.integers(*channel_range))
length = draw(hst.integers(*length_range))
# Check if objects are disjoint:
if _test_disjoint(strategy_example[:n], time, length, channel, dt):
strategy_example[n]['time'] = time
strategy_example[n]['channel'] = channel
if is_dt:
strategy_example[n]['length'] = length
else:
strategy_example[n]['endtime'] = time + int(length * dt)
n += 1
return strategy_example
| 5,343,815
|
def get_deleted_resources():
"""Get a list of resources that failed to be deleted in OVN.
Get a list of resources that have been deleted from neutron but not
in OVN. Once a resource is deleted in Neutron the ``standard_attr_id``
foreign key in the ovn_revision_numbers table will be set to NULL.
Upon successfully deleting the resource in OVN the entry in the
ovn_revision_number should also be deleted but if something fails
the entry will be kept and returned in this list so the maintenance
thread can later fix it.
"""
sort_order = sa.case(value=models.OVNRevisionNumbers.resource_type,
whens=ovn_const.MAINTENANCE_DELETE_TYPE_ORDER)
session = db_api.get_reader_session()
with session.begin():
return session.query(models.OVNRevisionNumbers).filter_by(
standard_attr_id=None).order_by(sort_order).all()
| 5,343,816
|
def load_base_schema(base_schema=None, verbose=False):
"""Load base schema, schema contains base classes for
sub-classing in user schemas.
"""
_base = base_schema or BASE_SCHEMA or []
_base_schema = []
if "schema.org" in _base:
_base_schema.append(
load_schemaorg(verbose=verbose)
)
if "bioschemas" in _base:
_base_schema.append(
load_bioschemas(verbose=verbose)
)
_base_schema = merge_schema(*_base_schema)
return _base_schema
| 5,343,817
|
def file_based_convert_examples_for_bilinear(examples,
max_seq_length,
tokenizer,
output_file,
do_copa=False):
"""Convert a set of `InputExample`s to a TFRecord file."""
dirname = os.path.dirname(output_file)
if not tf.gfile.Exists(dirname):
tf.gfile.MakeDirs(dirname)
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 1000 == 0:
tf.logging.info("DANITER:Writing example %d of %d" %
(ex_index, len(examples)))
input_feature = convert_single_example_for_bilinear(ex_index, example,
max_seq_length,
tokenizer, do_copa)
features = collections.OrderedDict()
input_size = 3 if do_copa else 5
for i in range(input_size):
features["input_ids" + str(i)] = create_int_feature(
input_feature.input_ids[i])
features["input_mask" + str(i)] = create_int_feature(
input_feature.input_mask[i])
features["segment_ids" + str(i)] = create_int_feature(
input_feature.segment_ids[i])
features["labels"] = create_int_feature(input_feature.labels)
features["label_types"] = create_int_feature([4])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
| 5,343,818
|
def ShowX86UserStack(thread, user_lib_info = None):
""" Display user space stack frame and pc addresses.
params:
thread: obj referencing thread value
returns:
Nothing
"""
iss = Cast(thread.machine.iss, 'x86_saved_state_t *')
abi = int(iss.flavor)
user_ip = 0
user_frame = 0
user_abi_ret_offset = 0
if abi == 0xf:
debuglog("User process is 64 bit")
user_ip = iss.uss.ss_64.isf.rip
user_frame = iss.uss.ss_64.rbp
user_abi_ret_offset = 8
user_abi_type = "uint64_t"
else:
debuglog("user process is 32 bit")
user_ip = iss.uss.ss_32.eip
user_frame = iss.uss.ss_32.ebp
user_abi_ret_offset = 4
user_abi_type = "uint32_t"
if user_ip == 0:
print "This activation does not appear to have a valid user context."
return False
cur_ip = user_ip
cur_frame = user_frame
debuglog("ip= 0x%x , fr = 0x%x " % (cur_ip, cur_frame))
frameformat = "{0:d} FP: 0x{1:x} PC: 0x{2:x}"
if user_lib_info is not None:
frameformat = "{0:d} {3: <30s} 0x{2:x}"
print frameformat.format(0, cur_frame, cur_ip, GetBinaryNameForPC(cur_ip, user_lib_info))
print kern.Symbolicate(cur_ip)
frameno = 0
while True:
frameno = frameno + 1
frame = GetUserDataAsString(thread.task, unsigned(cur_frame), user_abi_ret_offset*2)
cur_ip = _ExtractDataFromString(frame, user_abi_ret_offset, user_abi_type)
cur_frame = _ExtractDataFromString(frame, 0, user_abi_type)
if not cur_frame or cur_frame == 0x0000000800000008:
break
print frameformat.format(frameno, cur_frame, cur_ip, GetBinaryNameForPC(cur_ip, user_lib_info))
print kern.Symbolicate(cur_ip)
return
| 5,343,819
|
def endgame_score_connectfour(board, is_current_player_maximizer) :
"""Given an endgame board, returns 1000 if the maximizer has won,
-1000 if the minimizer has won, or 0 in case of a tie."""
chains_1 = board.get_all_chains(current_player=is_current_player_maximizer)
chains_2 = board.get_all_chains(current_player= not(is_current_player_maximizer))
for chain in chains_1:
if len(chain) == 4:
return 1000
for chain in chains_2:
if len(chain) == 4:
return -1000
return 0
| 5,343,820
|
def identify(path_or_file):
"""
Accepts a single file or list of files, Returns a list of Image file names
:param path_or_file:
:return: list of Image file names
"""
files = []
# Included capitalized formats
supported_formats = set(
IMAGE_FORMATS[0] + tuple(map(lambda x: x.upper(), IMAGE_FORMATS[0])))
if os.path.isdir(path_or_file):
for img_format in supported_formats:
files.extend(
glob.iglob(os.path.join(path_or_file, '*.%s' % img_format)))
elif os.path.isfile(path_or_file):
# If its a single file, ignoring file extensions
files = [path_or_file]
if files:
return files
raise IOError(
"%s: No image files have been scheduled for processing" % path_or_file)
| 5,343,821
|
def add_colorbar(im, aspect=20, pad_fraction=0.5, **kwargs):
"""Add a vertical color bar to an image plot.
Taken from https://stackoverflow.com/questions/18195758/set-matplotlib-colorbar-size-to-match-graph
"""
divider = axes_grid1.make_axes_locatable(im.axes)
width = axes_grid1.axes_size.AxesY(im.axes, aspect=1.0 / aspect)
pad = axes_grid1.axes_size.Fraction(pad_fraction, width)
current_ax = plt.gca()
cax = divider.append_axes("right", size=width, pad=pad)
plt.sca(current_ax)
return im.axes.figure.colorbar(im, cax=cax, **kwargs)
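# Minimal matplotlib usage sketch with random image data; it assumes the module already
# imports `matplotlib.pyplot as plt` and `from mpl_toolkits import axes_grid1` as the
# function above requires. The colorbar height matches the image axes.
import numpy as np
import matplotlib.pyplot as plt

img = plt.imshow(np.random.rand(32, 32), cmap="viridis")
add_colorbar(img)
plt.show()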
| 5,343,822
|
async def stream_capture(samplerate, channels, device, buffersize, dtype='float32'):
"""Generator that yields blocks of input data
captured from sounddevice InputStream into NumPy arrays.
The audio callback pushes the captured data to `in_queue`,
while the generator consumes the queue and yields the captured data.
"""
assert buffersize != 0
in_queue = asyncio.Queue()
loop = asyncio.get_event_loop()
"""
callback(indata: numpy.ndarray, frames: int,
time: CData, status: CallbackFlags) -> None
"""
def callback(indata, frame_count, time_info, status):
loop.call_soon_threadsafe(in_queue.put_nowait, (indata.copy(), status))
stream = sd.InputStream(samplerate=samplerate,
device=device,
channels=channels,
callback=callback,
dtype=dtype,
blocksize=buffersize)
with stream:
while True:
indata, status = await in_queue.get()
yield indata, status
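# Hedged consumption sketch: `stream_capture` is an async generator, so it is driven with
# `async for`. The sample rate, channel count and default device below are assumptions
# that depend on the local sounddevice setup.
async def print_block_shapes():
    blocks = 0
    async for indata, status in stream_capture(samplerate=44100, channels=1,
                                               device=None, buffersize=1024):
        print(indata.shape, status)
        blocks += 1
        if blocks >= 5:  # stop after a few blocks for the sketch
            break

# asyncio.run(print_block_shapes())  # uncomment to run against a working input device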
| 5,343,823
|
def test_create_with_kargs():
"""Test create passing named arguments"""
name = helper.user.name()
user_key = helper.user.key()
user_id = user_key.public_key
status = rbac.user.new(
signer_user_id=user_id, signer_keypair=user_key, user_id=user_id, name=name
)
assert len(status) == 1
assert status[0]["status"] == "COMMITTED"
user = rbac.user.get(object_id=user_id)
assert user.user_id == user_id
assert user.name == name
| 5,343,824
|
def wait_procs(procs, timeout, callback=None):
"""Convenience function which waits for a list of processes to
terminate.
Return a (gone, alive) tuple indicating which processes
are gone and which ones are still alive.
The gone ones will have a new 'retcode' attribute indicating
process exit status (may be None).
'callback' is a callable function which gets called every
time a process terminates (a Process instance is passed as
callback argument).
Function will return as soon as all processes terminate or when
timeout occurs.
Typical use case is:
- send SIGTERM to a list of processes
- give them some time to terminate
- send SIGKILL to those ones which are still alive
Example:
>>> def on_terminate(proc):
... print("process {} terminated".format(proc))
...
>>> for p in procs:
... p.terminate()
...
>>> gone, still_alive = wait_procs(procs, 3, callback=on_terminate)
>>> for p in still_alive:
... p.kill()
"""
def assert_gone(proc, timeout):
try:
retcode = proc.wait(timeout=timeout)
except TimeoutExpired:
pass
else:
if retcode is not None or not proc.is_running():
proc.retcode = retcode
gone.add(proc)
if callback is not None:
callback(proc)
timer = getattr(time, 'monotonic', time.time)
gone = set()
alive = set(procs)
if callback is not None and not callable(callback):
raise TypeError("callback %r is not a callable" % callable)
deadline = timer() + timeout
while alive:
if timeout <= 0:
break
for proc in alive:
# Make sure that every complete iteration (all processes)
# will last max 1 sec.
# We do this because we don't want to wait too long on a
# single process: in case it terminates too late other
# processes may disappear in the meantime and their PID
# reused.
try:
max_timeout = 1.0 / (len(alive) - len(gone))
except ZeroDivisionError:
max_timeout = 1.0 # one alive remaining
timeout = min((deadline - timer()), max_timeout)
if timeout <= 0:
break
assert_gone(proc, timeout)
alive = alive - gone
if alive:
# Last attempt over processes survived so far.
# timeout == 0 won't make this function wait any further.
for proc in alive:
assert_gone(proc, 0)
alive = alive - gone
return (list(gone), list(alive))
| 5,343,825
|
def select(sel, truecase, falsecase):
""" Multiplexer returning falsecase for select==0, otherwise truecase.
:param WireVector sel: used as the select input to the multiplexer
:param WireVector falsecase: the WireVector selected if select==0
:param WireVector truecase: the WireVector selected if select==1
Example of mux as "ternary operator" to take the min of 'a' and 5:
select( a<5, truecase=a, falsecase=5)
"""
sel, f, t = (as_wires(w) for w in (sel, falsecase, truecase))
f, t = match_bitwidth(f, t)
outwire = WireVector(bitwidth=len(f))
net = LogicNet(op='x', op_param=None, args=(sel, f, t), dests=(outwire,))
working_block().add_net(net) # this includes sanity check on the mux
return outwire
| 5,343,826
|
def fish_collision(sprite1, sprite2):
"""Algorithm for determining if there is a collision between the sprites."""
if sprite1 == sprite2:
return False
else:
return collide_circle(sprite1, sprite2)
| 5,343,827
|
def normalizePeriodList(periods):
"""
Normalize the list of periods by merging overlapping or consecutive ranges
and sorting the list by each periods start.
@param periods: a list of L{Period}. The list is changed in place.
"""
# First sort the list
def sortPeriods(p1, p2):
"""
Compare two periods. Sort by their start and then end times.
A period is a L{Period}.
@param p1: first period
@param p2: second period
@return: 1 if p1>p2, 0 if p1==p2, -1 if p1<p2
"""
assert isinstance(p1, Period), "Period is not a Period: %r" % (p1,)
assert isinstance(p2, Period), "Period is not a Period: %r" % (p2,)
if p1.getStart() == p2.getStart():
cmp1 = p1.getEnd()
cmp2 = p2.getEnd()
else:
cmp1 = p1.getStart()
cmp2 = p2.getStart()
return compareDateTime(cmp1, cmp2)
for period in periods:
period.adjustToUTC()
periods.sort(cmp=sortPeriods)
# Now merge overlaps and consecutive periods
index = None
p = None
pe = None
for i in xrange(len(periods)):
if p is None:
index = i
p = periods[i]
pe = p.getEnd()
continue
ie = periods[i].getEnd()
if (pe >= periods[i].getStart()):
if ie > pe:
periods[index] = Period(periods[index].getStart(), ie)
pe = ie
periods[i] = None
else:
index = i
p = periods[i]
pe = p.getEnd()
periods[:] = [x for x in periods if x]
| 5,343,828
|
def assemble(the_type: Callable[..., TypeT],
profile: Optional[str] = None,
**kwargs: Any) -> TypeT:
"""Create an instance of a certain type,
using constructor injection if needed."""
ready_result = _create(the_type, profile)
if ready_result is not None:
return ready_result
signature = inspect.signature(the_type)
parameters = _get_parameters(signature)
arguments: Dict[str, Any] = kwargs
uses_manual_args = False
for parameter_name, parameter_type in parameters.items():
if parameter_name in arguments:
uses_manual_args = True
continue
if _is_list_type(parameter_type):
parameter_components = _get_components(
_get_list_type_elem_type(parameter_type), profile)
arguments[parameter_name] = list(map(assemble,
map(lambda comp: comp.get_type(),
parameter_components)))
else:
parameter_component = _get_component(parameter_type, profile)
param_factory = _get_factory(parameter_type, profile)
if parameter_component is not None:
arguments[parameter_name] = assemble(
parameter_component.get_type(), profile) # parameter_type?
elif param_factory:
arguments[parameter_name] = param_factory.get_instance()
result = the_type(**arguments)
stored_component = _get_component(the_type, profile)
if stored_component and not uses_manual_args:
stored_component.set_instance_if_singleton(result)
return result
| 5,343,829
|
def validate_ip_ranges(*ip_addresses_ranges) -> None:
"""
IP addresses range validation.
Args:
ip_addresses_ranges (list): List of IP addresses ranges to validate.
Returns:
None
"""
for ip_range in ip_addresses_ranges:
if ip_range:
ip_interval = ip_range.split('-')
if len(ip_interval) != 2:
raise ValueError(
"The argument of IP address range should be in the format of:"
" 'X.X.X.X'-'Y.Y.Y.Y' where X and Y are numbers between zero to 255.")
for ip_address in ip_interval:
if not (re.match(ipv4Regex, ip_address) or re.match(ipv6Regex, ip_address)):
raise ValueError(
"IP address should be in the format of: 'X.X.X.X' where x is a number between 0 to 255.")
| 5,343,830
|
def test_attn_lstm_embedding():
"""Test invoking AttnLSTMEmbedding."""
max_depth = 5
n_test = 5
n_support = 11
n_feat = 10
test = np.random.rand(n_test, n_feat).astype(np.float32)
support = np.random.rand(n_support, n_feat).astype(np.float32)
layer = layers.AttnLSTMEmbedding(n_test, n_support, n_feat, max_depth)
test_out, support_out = layer([test, support])
assert test_out.shape == (n_test, n_feat)
assert support_out.shape == (n_support, n_feat)
assert len(layer.trainable_variables) == 4
| 5,343,831
|
def coord_ijk_to_xyz(affine, coords):
"""
Converts voxel `coords` in cartesian space to `affine` space
Parameters
----------
affine : (4, 4) array-like
Affine matrix
coords : (N,) list of list
Image coordinate values, where each entry is a length three list of int
denoting ijk coordinates in cartesian space
Returns
------
xyz : (N, 3) numpy.ndarray
Provided `coords` in `affine` space
"""
coords = _check_coord_inputs(coords)
mni_coords = np.dot(affine, coords)[:3].T
return mni_coords
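# Standalone sketch of the underlying math, independent of the private
# _check_coord_inputs helper: voxel indices are made homogeneous and multiplied by the
# affine, keeping the first three rows. The affine values are illustrative.
import numpy as np

affine = np.array([[2., 0., 0., -90.],
                   [0., 2., 0., -126.],
                   [0., 0., 2., -72.],
                   [0., 0., 0., 1.]])
ijk = np.array([[45, 63, 36], [0, 0, 0]])                   # (N, 3) voxel indices
homogeneous = np.column_stack([ijk, np.ones(len(ijk))]).T   # (4, N)
xyz = np.dot(affine, homogeneous)[:3].T                     # (N, 3) world coordinates
print(xyz)  # first row [0, 0, 0], second row [-90, -126, -72]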
| 5,343,832
|
def _get_create_statement(server, temp_datadir,
frm_file, version,
options, quiet=False):
"""Get the CREATE statement for the .frm file
This method attempts to read the CREATE statement by copying the .frm file,
altering the storage engine in the .frm file to MEMORY and issuing a SHOW
CREATE statement for the table/view.
If this method returns None, the operation was successful and the CREATE
statement was printed. If a string is returned, there was at least one
error (which will be printed) and the .frm file was not readable.
The returned frm file path can be used to tell the user to use the
diagnostic mode for reading files byte-by-byte. See the method
read_frm_files_diagnostic() above.
server[in] Server instance
temp_datadir[in] New data directory
frm_file[in] Tuple containing (db, table, path) for .frm file
version[in] Version string for the current server
options[in] Options from user
Returns string - None on success, path to frm file on error
"""
verbosity = int(options.get("verbosity", 0))
quiet = options.get("quiet", False)
new_engine = options.get("new_engine", None)
frm_dir = options.get("frm_dir", ".{0}".format(os.sep))
user = options.get('user', 'root')
if not quiet:
print "#\n# Reading the %s.frm file." % frm_file[1]
try:
# 1) copy the file
db = frm_file[0]
if not db or db == ".":
db = "test"
db_name = db + "_temp"
new_path = os.path.normpath(os.path.join(temp_datadir, db_name))
if not os.path.exists(new_path):
os.mkdir(new_path)
new_frm = os.path.join(new_path, frm_file[1] + ".frm")
# Check name for decoding and decode
try:
if requires_decoding(frm_file[1]):
new_frm_file = decode(frm_file[1])
frm_file = (frm_file[0], new_frm_file, frm_file[2])
shutil.copy(frm_file[2], new_path)
# Check name for encoding and encode
elif requires_encoding(frm_file[1]):
new_frm_file = encode(frm_file[1]) + ".frm"
new_frm = os.path.join(new_path, new_frm_file)
shutil.copy(frm_file[2], new_frm)
else:
shutil.copy(frm_file[2], new_path)
except:
_, e, _ = sys.exc_info()
print("ERROR: {0}".format(e))
# Set permissons on copied file if user context in play
if user_change_as_root(options):
subprocess.call(['chown', '-R', user, new_path])
subprocess.call(['chgrp', '-R', user, new_path])
server.exec_query("CREATE DATABASE IF NOT EXISTS %s" % db_name)
frm = FrmReader(db_name, frm_file[1], new_frm, options)
frm_type = frm.get_type()
server.exec_query("FLUSH TABLES")
if frm_type == "TABLE":
# 2) change engine if it is a table
current_engine = frm.change_storage_engine()
# Abort read if restricted engine found
if current_engine[1].upper() in _CANNOT_READ_ENGINE:
print ("ERROR: Cannot process tables with the %s storage "
"engine. Please use the diagnostic mode to read the "
"%s file." % (current_engine[1].upper(), frm_file[1]))
return frm_file[2]
# Check server version
server_version = None
if version and len(current_engine) > 1 and current_engine[2]:
server_version = (int(current_engine[2][0]),
int(current_engine[2][1:3]),
int(current_engine[2][3:]))
if verbosity > 1 and not quiet:
print ("# Server version in file: %s.%s.%s" %
server_version)
if not server.check_version_compat(server_version[0],
server_version[1],
server_version[2]):
versions = (server_version[0], server_version[1],
server_version[2], version[0], version[1],
version[2])
print ("ERROR: The server version for this "
"file is too low. It requires a server version "
"%s.%s.%s or higher but your server is version "
"%s.%s.%s. Try using a newer server or use "
"diagnostic mode." % versions)
return frm_file[2]
# 3) show CREATE TABLE
res = server.exec_query("SHOW CREATE TABLE `%s`.`%s`" %
(db_name, frm_file[1]))
create_str = res[0][1]
if new_engine:
create_str = create_str.replace("ENGINE=MEMORY",
"ENGINE=%s" % new_engine)
elif not current_engine[1].upper() == "MEMORY":
create_str = create_str.replace("ENGINE=MEMORY",
"ENGINE=%s" %
current_engine[1])
if frm_file[0] and not frm_file[0] == ".":
create_str = create_str.replace("CREATE TABLE ",
"CREATE TABLE `%s`." %
frm_file[0])
# if requested, generate the new .frm with the altered engine
if new_engine:
server.exec_query("ALTER TABLE `{0}`.`{1}` "
"ENGINE={2}".format(db_name,
frm_file[1],
new_engine))
new_frm_file = os.path.join(frm_dir,
"{0}.frm".format(frm_file[1]))
if os.path.exists(new_frm_file):
print("#\n# WARNING: Unable to create new .frm file. "
"File exists.")
else:
try:
shutil.copyfile(new_frm, new_frm_file)
print("# Copy of .frm file with new storage "
"engine saved as {0}.".format(new_frm_file))
except (IOError, OSError, shutil.Error) as e:
print("# WARNING: Unable to create new .frm file. "
"Error: {0}".format(e))
elif frm_type == "VIEW":
# 5) show CREATE VIEW
res = server.exec_query("SHOW CREATE VIEW %s.%s" %
(db_name, frm_file[1]))
create_str = res[0][1]
if frm_file[0]:
create_str = create_str.replace("CREATE VIEW ",
"CREATE VIEW `%s`." %
frm_file[0])
# Now we must replace the string for storage engine!
print "#\n# CREATE statement for %s:\n#\n" % frm_file[2]
print create_str
print
if frm_type == "TABLE" and options.get("show_stats", False):
frm.show_statistics()
except:
print ("ERROR: Failed to correctly read the .frm file. Please try "
"reading the file with the --diagnostic mode.")
return frm_file[2]
return None
| 5,343,833
|
def delete_driver_vehicle(driver):
"""delete driver"""
try:
driver.vehicle = None
driver.save()
return driver, "success"
except Exception as err:
logger.error("deleteVehicleForDriverRecord@error")
logger.error(err)
return None, str(err)
| 5,343,834
|
def passivity(s: npy.ndarray) -> npy.ndarray:
"""
Passivity metric for a multi-port network.
A metric which is proportional to the amount of power lost in a
multiport network, depending on the excitation port. Specifically,
this returns a matrix whose diagonals are equal to the total
power received at all ports, normalized to the power at a single
excitation port.
mathematically, this is a test for unitary-ness of the
s-parameter matrix [#]_.
for two port this is
.. math::
\sqrt( |S_{11}|^2 + |S_{21}|^2 \, , \, |S_{22}|^2+|S_{12}|^2)
in general it is
.. math::
\\sqrt( S^H \\cdot S)
where :math:`H` is conjugate transpose of S, and :math:`\\cdot`
is dot product.
Note
----
The total amount of power dissipated in a network depends on the
port matches. For example, given a matched attenuator, this metric
will yield the attenuation value. However, if the attenuator is
cascaded with a mismatch, the power dissipated will not be equivalent
to the attenuator value, nor equal for each excitation port.
Returns
-------
passivity : :class:`numpy.ndarray` of shape fxnxn
References
------------
.. [#] http://en.wikipedia.org/wiki/Scattering_parameters#Lossless_networks
"""
if s.shape[-1] == 1:
raise (ValueError('Doesn\'t exist for one ports'))
pas_mat = s.copy()
for f in range(len(s)):
pas_mat[f, :, :] = npy.sqrt(npy.dot(s[f, :, :].conj().T, s[f, :, :]))
return pas_mat
| 5,343,835
|
def sample_distance(sampleA, sampleB, sigma):
"""
I know this isn't the best distance measure, alright.
"""
# RBF!
gamma = 1 / (2 * sigma**2)
similarity = np.exp(-gamma*(np.linalg.norm(sampleA - sampleB)**2))
distance = 1 - similarity
return distance
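# Worked example: identical samples give distance 0, and the distance approaches 1 as the
# samples move apart (sigma=1.0 is an arbitrary bandwidth choice; assumes numpy is
# imported as np in this module).
import numpy as np

a = np.array([0.0, 0.0])
b = np.array([3.0, 4.0])                 # Euclidean norm 5
print(sample_distance(a, a, sigma=1.0))  # 0.0
print(sample_distance(a, b, sigma=1.0))  # ~1.0, i.e. 1 - exp(-12.5)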
| 5,343,836
|
def test_electronic_type(fixture_code, generate_structure):
"""Test ``PwBandsWorkChain.get_builder_from_protocol`` with ``electronic_type`` keyword."""
code = fixture_code('quantumespresso.pw')
structure = generate_structure()
with pytest.raises(NotImplementedError):
for electronic_type in [ElectronicType.AUTOMATIC]:
PwBandsWorkChain.get_builder_from_protocol(code, structure, electronic_type=electronic_type)
builder = PwBandsWorkChain.get_builder_from_protocol(code, structure, electronic_type=ElectronicType.INSULATOR)
for namespace in [builder.relax['base'], builder.scf, builder.bands]:
parameters = namespace['pw']['parameters'].get_dict()
assert parameters['SYSTEM']['occupations'] == 'fixed'
assert 'degauss' not in parameters['SYSTEM']
assert 'smearing' not in parameters['SYSTEM']
| 5,343,837
|
def replace_non_unique(
in_filename, out_filename, search_string="text_to_replace", prefix=""
):
"""parse the document 'in_filename' and replace all
instances of the string 'search_string' with the
unique string 'prefix_x'.
Parameters
----------
in_filename : name of the file to parse
out_filename : name of the file to write. If
out_filename is the same as in_filename
then the old document will be overwritten.
search_string : target string to search and replace. Note:
when applying this function to svg layout documents
take care that you don't target strings that are
common strings in the SVG language. e.g. 'label'
prefix : the prefix will be appended with a number and used
to replace each occurrence of the search string.
"""
with open(in_filename, "rt") as inf:
strdta = inf.read()
slist = strdta.split(search_string)
outstr = "".join([s + prefix + "_%s" % (i) for i, s in enumerate(slist[:-1])])
outstr += slist[-1]
with open(out_filename, "wt") as outf:
outf.write(outstr)
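# Hedged usage sketch with a temporary file: every occurrence of the placeholder string
# gets a unique numbered suffix.
import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    src = os.path.join(tmp, "layout.svg")
    dst = os.path.join(tmp, "layout_unique.svg")
    with open(src, "wt") as fh:
        fh.write("<g id='text_to_replace'/><g id='text_to_replace'/>")
    replace_non_unique(src, dst, search_string="text_to_replace", prefix="elem")
    with open(dst, "rt") as fh:
        print(fh.read())  # <g id='elem_0'/><g id='elem_1'/>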
| 5,343,838
|
def copy_slice(src, dst, iproc, parameter):
"""
Copies SPECFEM model slice
:type src: str
:param src: source location to copy slice from
:type dst: str
:param dst: destination location to copy slice to
:type parameter: str
:param parameter: parameters to copy, e.g. 'vs', 'vp'
:type iproc: int
:param iproc: processor/slice number to copy
"""
filename = os.path.basename(_get_filename(src, iproc))
copyfile(os.path.join(src, filename),
os.path.join(dst, filename))
| 5,343,839
|
def read_sequence_item(fp, is_implicit_VR, is_little_endian, encoding,
offset=0):
"""Read and return a single sequence item, i.e. a Dataset"""
seq_item_tell = fp.tell() + offset
if is_little_endian:
tag_length_format = "<HHL"
else:
tag_length_format = ">HHL"
try:
bytes_read = fp.read(8)
group, element, length = unpack(tag_length_format, bytes_read)
except BaseException:
raise IOError("No tag to read at file position "
"{0:05x}".format(fp.tell() + offset))
tag = (group, element)
if tag == SequenceDelimiterTag: # No more items, time to stop reading
logger.debug(
"{0:08x}: {1}".format(fp.tell() - 8 + offset, "End of Sequence"))
if length != 0:
logger.warning("Expected 0x00000000 after delimiter, found 0x%x, "
"at position 0x%x" % (
length, fp.tell() - 4 + offset))
return None
if tag != ItemTag:
logger.warning("Expected sequence item with tag %s at file position "
"0x%x" % (ItemTag, fp.tell() - 4 + offset))
else:
logger.debug("{0:08x}: {1} Found Item tag (start of item)".format(
fp.tell() - 4 + offset, bytes2hex(bytes_read)))
if length == 0xFFFFFFFF:
ds = read_dataset(fp, is_implicit_VR, is_little_endian,
bytelength=None, parent_encoding=encoding)
ds.is_undefined_length_sequence_item = True
else:
ds = read_dataset(fp, is_implicit_VR, is_little_endian, length,
parent_encoding=encoding)
ds.is_undefined_length_sequence_item = False
logger.debug("%08x: Finished sequence item" % (fp.tell() + offset,))
ds.seq_item_tell = seq_item_tell
return ds
| 5,343,840
|
def check_setup():
"""Check the global parameters."""
global MESHIFY_USERNAME, MESHIFY_PASSWORD, MESHIFY_AUTH, MESHIFY_BASE_URL
if not MESHIFY_USERNAME or not MESHIFY_PASSWORD:
print("Simplify the usage by setting the meshify username and password as environment variables MESHIFY_USERNAME and MESHIFY_PASSWORD")
MESHIFY_USERNAME = input("Meshify Username: ")
MESHIFY_PASSWORD = getpass.getpass("Meshify Password: ")
MESHIFY_AUTH = requests.auth.HTTPBasicAuth(MESHIFY_USERNAME, MESHIFY_PASSWORD)
if not MESHIFY_BASE_URL:
print("Simplify the usage by setting the environment variable MESHIFY_BASE_URL")
MESHIFY_BASE_URL = input("Meshify Base URL: ")
| 5,343,841
|
def main():
"""
Run the script.
"""
| 5,343,842
|
def get_filters(query_metadata: QueryMetadataTable) -> Dict[VertexPath, Set[FilterInfo]]:
"""Get the filters at each VertexPath."""
filters: Dict[VertexPath, Set[FilterInfo]] = {}
for location, _ in query_metadata.registered_locations:
filter_infos = query_metadata.get_filter_infos(location)
filters.setdefault(_get_location_vertex_path(location), set()).update(filter_infos)
return filters
| 5,343,843
|
def get_shuffled_matrix(pssm_mat, iterations, return_dict, ShufflingType):
"""
The function generates a list of shuffled matrices.
Parameters
----------
pssm_mat : pandas DataFrame
PSSM profile.
iterations : int
Number of iterations of shuffling.
return_dict : dict
Shared dictionary from the multiprocessing manager used to collect results.
ShufflingType : str
Variant of shuffling.
"""
shuffled_matrix = []
for i in range(iterations):
shuffled_matrix.append(shuffle_matrix(pssm_mat, ShufflingType))
return_dict[os.getpid()] = shuffled_matrix
| 5,343,844
|
def auxiliary_equations(*, F, T_degC, I_sc_A_0, I_rs_1_A_0, n_1_0, I_rs_2_0_A, n_2_0, R_s_Ohm_0, G_p_S_0, E_g_eV_0, N_s,
T_degC_0=T_degC_stc):
"""
Computes the auxiliary equations at F and T_degC for the 8-parameter DDM-G.
Inputs (any broadcast-compatible combination of scalars and numpy arrays):
Same as current_sum_at_diode_node().
Outputs (device-level, at each combination of broadcast inputs, return type is numpy.float64 for all scalar inputs):
dict containing:
I_ph_A photocurrent
I_rs_1_A first diode reverse-saturation current
n_1 first diode ideality factor
I_rs_2_A second diode reverse-saturation current
n_2 second diode ideality factor
R_s_Ohm series resistance
G_p_S parallel conductance
N_s integer number of cells in series in each parallel string
T_degC temperature
"""
# Temperatures must be in Kelvin.
T_K = convert_temperature(T_degC, 'Celsius', 'Kelvin')
T_K_0 = convert_temperature(T_degC_0, 'Celsius', 'Kelvin')
# Optimization.
V_therm_factor_V_0 = (N_s * k_B_J_per_K * T_K_0) / q_C
# Compute variables at operating condition.
# Compute band gap (constant).
E_g_eV = E_g_eV_0
# Compute first diode ideality factor (constant).
n_1 = n_1_0
# Compute first reverse-saturation current at T_degC (this is independent of F, I_sc_A_0, R_s_Ohm_0, and G_p_S_0).
I_rs_1_A = I_rs_1_A_0 * (T_K / T_K_0)**3 * numpy.exp(E_g_eV / (n_1 * k_B_eV_per_K) * (1 / T_K_0 - 1 / T_K))
# Compute second diode ideality factor (constant).
n_2 = n_2_0
# Compute second reverse-saturation current at T_degC (this is independent of F, I_sc_A_0, R_s_Ohm_0, and G_p_S_0).
I_rs_2_A = I_rs_2_0_A * (T_K / T_K_0)**(5/2) * numpy.exp(E_g_eV / (n_2 * k_B_eV_per_K) * (1 / T_K_0 - 1 / T_K))
# Compute series resistance (constant).
R_s_Ohm = R_s_Ohm_0
# Compute parallel conductance (constant).
G_p_S = G_p_S_0
# Compute parallel conductance (photo-conductive shunt).
# G_p_S = F * G_p_S_0
# Compute photo-generated current at F and T_degC (V=0 with I=Isc for this).
expr1 = I_sc_A_0 * F
expr2 = expr1 * R_s_Ohm
I_ph_A = expr1 + I_rs_1_A * numpy.expm1(expr2 / (V_therm_factor_V_0 * n_1)) + \
I_rs_2_A * numpy.expm1(expr2 / (V_therm_factor_V_0 * n_2)) + G_p_S * expr2
return {'I_ph_A': I_ph_A, 'I_rs_1_A': I_rs_1_A, 'n_1': n_1, 'I_rs_2_A': I_rs_2_A, 'n_2': n_2, 'R_s_Ohm': R_s_Ohm,
'G_p_S': G_p_S, 'N_s': N_s, 'T_degC': T_degC}
| 5,343,845
|
def detector(name_file: str, chk_video_det, xy_coord: list, frame_zoom: int, size_detect: int,
lab_o_proc, window, frame_shift, play_speed, but_start, but_pause) -> str:
"""Данная функция производит поиск движения в заданной области, в текущем файле.
name_file - Имя файла, который передается в обработку
chk_video_det - Флаг отображения окна воспроизведения при поиске
xy_coord - Список координат зоны поиска
frame_zoom - Коэффициент сжатия видео при отображении
size_detect - Размер детектируемого объекта
lab_o_proc - Ссылка на метку для отображения прогресса
window - Ссылка на окно
frame_shift - Сдвиг фреймов при обнаружении движения
play_speed - Пропуск фреймов для ускорения
but_start - Кнопка Старт
but_pause - Кнопка Пауза
"""
if but_start['text'] == 'Старт':
return "OK"
none_frame: int = 0 # Counter used to check for empty frames
start_detect = time.time() # Record the time processing of the video file started
cap = cv2.VideoCapture(name_file) # Capture video from the file
# cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('H', '2', '6', '4'))
off_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) # Get the total number of frames
frame_width_det = (cap.get(cv2.CAP_PROP_FRAME_WIDTH)) # Get the size of the source video
frame_height_det = (cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
output = cv2.VideoWriter(name_file[:-4] + "_detect" + name_file[len(name_file) - 4:],
cv2.VideoWriter_fourcc('H', '2', '6', '4'), 20,
(int(frame_width_det), int(frame_height_det))) # Output parameters MJPG PIM1 XVID
if chk_video_det:
cv2.namedWindow(name_file, 0) # Define the output window
_, x_win, y_win = window.geometry().split('+')
cv2.moveWindow(name_file, int(x_win)+350, int(y_win))
while True: # Frames are processed and displayed in a loop
if but_pause['text'] == 'Продолжить':
cap.release()
output.release()
cv2.destroyAllWindows()
return 'Pause'
if but_start['text'] == 'Старт':
cap.release()
output.release()
cv2.destroyAllWindows()
break
ret1, frame1 = cap.read()
# This offset groups together the outlines of a moving object
for _ in range(frame_shift):
cap.read()
ret2, frame2 = cap.read()
# This offset is used to speed up processing
for _ in range(play_speed):
cap.read()
if cap.get(cv2.CAP_PROP_POS_FRAMES) == off_frames:
break
if not ret1 * ret2:
none_frame += 1
if none_frame > 10:
print('The allowed number of empty frames was exceeded. Starting file recovery.')
output.release() # Close the output file
cv2.destroyAllWindows()
os.remove(f'{name_file[:-4]}_detect{name_file[len(name_file) - 4:]}') # Delete it
return 'Correct' # Return a flag indicating that recovery should be started
continue
# frame1=frame1[y1_search:y2_search,x1_search:x2_search] # Crop the frame to the required size. May come in handy
# frame2=frame2[y1_search:y2_search,x1_search:x2_search]
# Show progress as a percentage
lab_o_proc["text"] = str(cap.get(cv2.CAP_PROP_POS_FRAMES) * 100 // off_frames + 1) + " %"
window.update() # Update the window so the progress is redrawn
if ret2:
if chk_video_det:
# Method used to visualize the array of frames
frame1 = algorithm_detector_1(frame1, frame2, xy_coord, frame_zoom, size_detect, output)
cv2.imshow(name_file, frame1)
cv2.resizeWindow(name_file, int(frame_width_det) // 2,
int(frame_height_det) // 2) # Set the size of the output window
else:
break
if chk_video_det and cv2.getWindowProperty(name_file, 1) == 1: # Exit the program when the window is closed
break
if cv2.waitKey(2) == 27: # Exit on ESC
break
cap.release()
output.release()
# Check how many frames were saved
output = cv2.VideoCapture(name_file[:-4] + "_detect" + name_file[len(name_file) - 4:])
frames_output = int(output.get(cv2.CAP_PROP_FRAME_COUNT))
output.release()
cv2.destroyAllWindows()
if frames_output == 0: # If no frames were saved, delete the file
os.remove(f'{name_file[:-4]}_detect{name_file[len(name_file) - 4:]}') # Delete it
end_detect = time.time() # Time the processing of the video file finished
# Print the time spent processing the file
print(name_file, '->', str(time.strftime("%M:%S", time.localtime(end_detect - start_detect))))
return 'OK'
| 5,343,846
|
def get_highest_confidence_transcript_for_each_session(
transcripts: List[db_models.Transcript],
) -> List[db_models.Transcript]:
"""
Filter down a list of transcript documents to just a single transcript
per session, taking the highest-confidence transcript document.
Parameters
----------
transcripts: List[db_models.Transcript]
List of transcript database documents.
Returns
-------
transcripts: List[db_models.Transcript]
Filtered list of transcript database documents where only a single transcript
exists for each referenced session.
"""
# We can't use pandas groupby because sessions objects can't be naively compared
# Instead we create a Dict of session id to document model
# We update as we iterate through list of all transcripts
selected_transcripts: Dict[str, pd.Series] = {}
for transcript in transcripts:
referenced_session_id = transcript.session_ref.ref.id
if referenced_session_id not in selected_transcripts:
selected_transcripts[referenced_session_id] = transcript
# Multiple transcripts for a single session
# pick the higher confidence
elif (
transcript.confidence
> selected_transcripts[referenced_session_id].confidence
):
selected_transcripts[referenced_session_id] = transcript
return list(selected_transcripts.values())
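# The same "keep the highest-confidence item per key" pattern, sketched with plain tuples
# instead of database documents (purely illustrative).
from typing import Dict, Tuple

def highest_confidence_per_key(items):
    selected: Dict[str, Tuple[str, float]] = {}
    for key, confidence in items:
        if key not in selected or confidence > selected[key][1]:
            selected[key] = (key, confidence)
    return list(selected.values())

print(highest_confidence_per_key([("s1", 0.7), ("s1", 0.9), ("s2", 0.5)]))
# [('s1', 0.9), ('s2', 0.5)]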
| 5,343,847
|
def load_precip_legacy(data, valid, tile_bounds):
"""Compute a Legacy Precip product for dates prior to 1 Jan 2014"""
LOG.debug("called")
ts = 12 * 24 # 5 minute
midnight, tomorrow = get_sts_ets_at_localhour(valid, 0)
now = midnight
m5 = np.zeros((ts, *data["solar"].shape), np.float16)
tidx = 0
filenames = []
indices = []
# Load up the n0r data, every 5 minutes
while now < tomorrow:
utcvalid = now.astimezone(UTC)
fn = utcvalid.strftime(
"/mesonet/ARCHIVE/data/%Y/%m/%d/GIS/uscomp/n0r_%Y%m%d%H%M.png"
)
if os.path.isfile(fn):
if tidx >= ts:
# Abort as we are in CST->CDT
break
filenames.append(fn)
indices.append(tidx)
else:
LOG.warning("missing: %s", fn)
now += datetime.timedelta(minutes=5)
tidx += 1
for tidx, filename in zip(indices, filenames):
m5[tidx, :, :] = _reader(filename, tile_bounds)
LOG.debug("finished loading N0R Composites")
m5 = np.transpose(m5, (1, 2, 0)).copy()
LOG.debug("transposed the data!")
m5total = np.sum(m5, 2)
LOG.debug("computed sum(m5)")
wm5 = m5 / m5total[:, :, None]
LOG.debug("computed weights of m5")
minute2 = np.arange(0, 60 * 24, 2)
minute5 = np.arange(0, 60 * 24, 5)
def _compute(yidx, xidx):
"""expensive computation that needs vectorized, somehow"""
s4total = data["stage4"][yidx, xidx]
# any stage IV totals less than 0.4mm are ignored, so effectively 0
if s4total < 0.4:
return
# Interpolate weights to a 2 minute interval grid
# we divide by 2.5 to downscale the 5 minute values to 2 minute
weights = np.interp(minute2, minute5, wm5[yidx, xidx, :]) / 2.5
# Now apply the weights to the s4total
data["precip"][yidx, xidx, :] = weights * s4total
for x in range(data["solar"].shape[1]):
for y in range(data["solar"].shape[0]):
_compute(y, x)
LOG.debug("finished precip calculation")
| 5,343,848
|
def get_groups_data():
"""
Get all groups, get all users for each group and sort groups by users
:return:
"""
groups = [group["name"] for group in jira.get_groups(limit=200)["groups"]]
groups_and_users = [get_all_users(group) for group in groups]
groups_and_users = [sort_users_in_group(group) for group in groups_and_users]
return groups_and_users
| 5,343,849
|
def get_sep():
"""Returns the appropriate filepath separator char depending on OS and
xonsh options set
"""
if ON_WINDOWS and builtins.__xonsh__.env.get("FORCE_POSIX_PATHS"):
return os.altsep
else:
return os.sep
| 5,343,850
|
def hindu_lunar_holiday(l_month, l_day, g_year):
"""Return the list of fixed dates of occurrences of Hindu lunar
month, month, day, day, in Gregorian year, g_year."""
l_year = hindu_lunar_year(
hindu_lunar_from_fixed(gregorian_new_year(g_year)))
date1 = hindu_date_occur(l_month, l_day, l_year)
date2 = hindu_date_occur(l_month, l_day, l_year + 1)
return list_range([date1, date2], gregorian_year_range(g_year))
| 5,343,851
|
def identify_all_failure_paths(network_df_in,edge_failure_set,flow_dataframe,path_criteria):
"""Identify all paths that contain an edge
Parameters
---------
network_df_in - Pandas DataFrame of network
edge_failure_set - List of string edge ID's
flow_dataframe - Pandas DataFrame of list of edge paths
path_criteria - String name of column of edge paths in flow dataframe
Outputs
-------
network_df - Pandas DataFrame of network
With removed edges
edge_path_index - List of integer indexes
Of locations of paths in flow dataframe
"""
edge_path_index = []
network_df = copy.deepcopy(network_df_in)
for edge in edge_failure_set:
network_df = network_df[network_df.edge_id != edge]
edge_path_index += flow_dataframe.loc[flow_dataframe[path_criteria].str.contains(
"'{}'".format(edge))].index.tolist()
edge_path_index = list(set(edge_path_index))
return network_df, edge_path_index
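# Hedged usage sketch with tiny synthetic frames; column names follow the docstring and
# the `copy` module import of the original file is assumed.
import pandas as pd

net_df = pd.DataFrame({"edge_id": ["e1", "e2", "e3"]})
flows = pd.DataFrame({"edge_path": [str(["e1", "e2"]), str(["e3"])]})
remaining, hit_paths = identify_all_failure_paths(net_df, ["e2"], flows, "edge_path")
print(remaining.edge_id.tolist())  # ['e1', 'e3']
print(hit_paths)                   # [0], only the first flow path uses 'e2'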
| 5,343,852
|
def wikipedia_wtap_setup():
"""
A commander has 5 tanks, 2 aircraft and 1 sea vessel and is told to
engage 3 targets with values 5,10,20 ...
"""
tanks = ["tank-{}".format(i) for i in range(5)]
aircrafts = ["aircraft-{}".format(i) for i in range(2)]
ships = ["ship-{}".format(i) for i in range(1)]
weapons = tanks + aircrafts + ships
target_values = {1: 5, 2: 10, 3: 20}
tank_probabilities = [
(1, 0.3),
(2, 0.2),
(3, 0.5),
]
aircraft_probabilities = [
(1, 0.1),
(2, 0.6),
(3, 0.5),
]
sea_vessel_probabilities = [
(1, 0.4),
(2, 0.5),
(3, 0.4)
]
category_and_probabilities = [
(tanks, tank_probabilities),
(aircrafts, aircraft_probabilities),
(ships, sea_vessel_probabilities)
]
probabilities = []
for category, probs in category_and_probabilities:
for vehicle in category:
for prob in probs:
probabilities.append((vehicle,) + prob)
g = Graph(from_list=probabilities)
return g, weapons, target_values
| 5,343,853
|
def allowed_file(filename: str) -> bool:
"""Determines whether filename is allowable
Parameters
----------
filename : str
a filename
Returns
-------
bool
True if allowed
"""
return "." in filename and filename.rsplit(".", 1)[1].lower() in ALLOWED_EXTENSIONS
| 5,343,854
|
def share_is_mounted(details):
"""Check if dev/share/etc is mounted, returns bool."""
mounted = False
if PLATFORM == 'Darwin':
# Weak and naive text search
proc = run_program(['mount'], check=False)
for line in proc.stdout.splitlines():
if f'{details["Address"]}/{details["Share"]}' in line:
mounted = True
break
elif PLATFORM == 'Linux':
cmd = [
'findmnt',
'--list',
'--json',
'--invert',
'--types', (
'autofs,binfmt_misc,bpf,cgroup,cgroup2,configfs,debugfs,devpts,'
'devtmpfs,hugetlbfs,mqueue,proc,pstore,securityfs,sysfs,tmpfs'
),
'--output', 'SOURCE',
]
mount_data = get_json_from_command(cmd)
for row in mount_data.get('filesystems', []):
if row['source'] == f'//{details["Address"]}/{details["Share"]}':
mounted = True
break
#elif PLATFORM == 'Windows':
# Done
return mounted
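# Hedged usage sketch: PLATFORM, run_program and get_json_from_command come from
# the surrounding module; the share details below are purely illustrative.
details = {'Address': '192.168.1.10', 'Share': 'backups'}
if not share_is_mounted(details):
    print('//{Address}/{Share} is not mounted'.format(**details))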
| 5,343,855
|
def update_bitweights(realization, asgn, tileids, tg_ids, tg_ids2idx, bitweights):
"""
Update bit weights for assigned science targets
"""
for tileid in tileids:
        try:
            # Find which targets were assigned on this tile
            adata = asgn.tile_location_target(tileid)
            for loc, tgid in adata.items():
                idx = tg_ids2idx[tgid]
                bitweights[realization * len(tg_ids) + idx] = True
        except Exception:
            # No assignment data for this tile; skip it
            pass
return bitweights
| 5,343,856
|
def load_from_input_flags(params, params_source, input_flags):
"""Update params dictionary with input flags.
Args:
params: Python dictionary of hyperparameters.
params_source: Python dictionary to record source of hyperparameters.
input_flags: All the flags with non-null value of overridden
hyperparameters.
Returns:
Python dict of hyperparameters.
"""
if params is None:
raise ValueError(
'Input dictionary is empty. It is expected to be loaded with default '
'values')
if not isinstance(params, dict):
raise ValueError(
'The base parameter set must be a Python dict, was: {}'.format(
type(params)))
for key in params:
flag_value = input_flags.get_flag_value(key, None)
if flag_value is not None:
params[key] = flag_value
params_source[key] = 'Command-line flags'
return params, params_source
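# Minimal sketch of the flags interface this function relies on: only
# get_flag_value(key, default) is needed (absl-style flag objects provide it);
# the _FakeFlags class and parameter names are illustrative assumptions.
class _FakeFlags:
    def __init__(self, values):
        self._values = values
    def get_flag_value(self, key, default):
        return self._values.get(key, default)

params = {'learning_rate': 0.1, 'batch_size': 64}
source = {key: 'default' for key in params}
params, source = load_from_input_flags(params, source, _FakeFlags({'batch_size': 128}))
# params['batch_size'] -> 128, source['batch_size'] -> 'Command-line flags'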
| 5,343,857
|
def find_tiles_reverse(catalog):
"""Lookup the tile name given an RA, DEC pair."""
N = len(tiles.index)
i = 0
catalog['TILENAME'] = "NONE"
catalog['STATUS'] = "new"
for tile in tiles[["URAMIN", "URAMAX", "UDECMIN", "UDECMAX",
"TILENAME"]].itertuples():
pb(i + 1, N)
idx, ramin, ramax, decmin, decmax, tilename = tile
if ramin > ramax:
found = catalog[((catalog.RA > ramin) | (catalog.RA < ramax)) &
(catalog.DEC > decmin) & (catalog.DEC < decmax)]
else:
found = catalog[(catalog.RA > ramin) & (catalog.RA < ramax) & \
(catalog.DEC > decmin) & (catalog.DEC < decmax)]
catalog.loc[found.index, 'TILENAME'] = tilename
i += 1
| 5,343,858
|
def hal(_module_patch):
"""Simulated hal module"""
import hal
return hal
| 5,343,859
|
def test_load_plugins(use_plugin_file, workflow):
"""
test loading plugins
"""
plugins_files = [inspect.getfile(PushImagePlugin)] if use_plugin_file else []
runner = PluginsRunner(workflow, [], plugin_files=plugins_files)
assert runner.plugin_classes is not None
assert len(runner.plugin_classes) > 0
# Randomly verify the plugin existence
assert AddFilesystemPlugin.key in runner.plugin_classes
assert TagAndPushPlugin.key in runner.plugin_classes
if use_plugin_file:
assert PushImagePlugin.key in runner.plugin_classes
assert CleanupPlugin.key in runner.plugin_classes
| 5,343,860
|
def train(args):
""" Function used for training any of the architectures, given an input parse.
"""
def validate(epoch):
model.to_test()
disparities = np.zeros((val_n_img, 256, 512), dtype=np.float32)
model.set_new_loss_item(epoch, train=False)
# For a WGAN architecture we need to access gradients.
if 'wgan' not in args.architecture:
torch.set_grad_enabled(False)
for i, data in enumerate(val_loader):
# Get the losses for the model for this epoch.
model.set_input(data)
model.forward()
model.add_running_loss_val(epoch)
if 'wgan' not in args.architecture:
torch.set_grad_enabled(True)
# Store the running loss for the validation images.
model.make_running_loss(epoch, val_n_img, train=False)
return
n_img, loader = prepare_dataloader(args, 'train')
val_n_img, val_loader = prepare_dataloader(args, 'val')
model = create_architecture(args)
model.set_data_loader(loader)
if not args.resume:
# We keep track of the aggregated losses per epoch in a dict. For
# now the pre-training train loss is set to zero. The pre-training
# validation loss will be computed.
best_val_loss = float('Inf')
# Compute loss per image (computation keeps number of images over
        # batch size in mind, to compensate for partial batches being forwarded).
validate(-1)
pre_validation_update(model.losses[-1]['val'])
else:
best_val_loss = min([model.losses[epoch]['val']['G'] for epoch in model.losses.keys()])
running_val_loss = 0.0
for epoch in range(model.start_epoch, args.epochs):
model.update_learning_rate(epoch, args.learning_rate)
c_time = time.time()
model.to_train()
model.set_new_loss_item(epoch)
# Run a single training epoch. Generalizes to WGAN variants as well.
model.run_epoch(epoch, n_img)
        # Run validation for this epoch; validate() stores its losses on the model and returns None.
validate(epoch)
# Print an update of training, val losses. Possibly also do full evaluation of depth maps.
print_epoch_update(epoch, time.time() - c_time, model.losses)
# Make a checkpoint, so training can be resumed.
running_val_loss = model.losses[epoch]['val']['G']
is_best = running_val_loss < best_val_loss
if is_best:
best_val_loss = running_val_loss
model.save_checkpoint(epoch, is_best, best_val_loss)
print('Finished Training. Best validation loss:\t{:.3f}'.format(best_val_loss))
# Save the model of the final epoch. If another model was better, also save it separately as best.
model.save_networks('final')
if running_val_loss != best_val_loss:
model.save_best_networks()
model.save_losses()
| 5,343,861
|
def test_register_context_decl_cls1(collector, context_decl):
"""Test handling context class issues : failed import no such module.
"""
tb = {}
context_decl.context = 'exopy_pulses.foo:BaseContext'
context_decl.register(collector, tb)
assert 'exopy_pulses.BaseContext' in tb
assert 'import' in tb['exopy_pulses.BaseContext']
| 5,343,862
|
def elements(all_isotopes=True):
"""
Loads a DataFrame of all elements and isotopes.
Scraped from https://www.webelements.com/
Returns
-------
pandas DataFrame with columns (element, atomic_number, isotope, atomic_weight, percent)
"""
el = pd.read_pickle(pkgrs.resource_filename('latools', 'resources/elements.pkl'))
if all_isotopes:
return el.set_index('element')
else:
def wmean(g):
return (g.atomic_weight * g.percent).sum() / 100
iel = el.groupby('element').apply(wmean)
iel.name = 'atomic_weight'
return iel
| 5,343,863
|
def prefixes(seq):
"""
Generate all prefixes of a sequence.
Examples
========
>>> from sympy.utilities.iterables import prefixes
>>> list(prefixes([1,2,3,4]))
[[1], [1, 2], [1, 2, 3], [1, 2, 3, 4]]
"""
n = len(seq)
    for i in range(n):
yield seq[:i+1]
| 5,343,864
|
def sample_colors(config, image):
"""
Samples the source and target colors for replacement
"""
for entry in config['palette']:
sample_color(config, image, entry, 'source_color_box', 'source_color')
sample_color(config, image, entry, 'target_color_box', 'target_color')
| 5,343,865
|
def datas(draw):
"""TODO expand to include all optional parameters."""
metric = draw(ascii())
filter = draw(filters())
return flow.Data(metric, filter=filter)
| 5,343,866
|
def create_steps_sequence(num_steps: Numeric, axis: str) -> typing.List[typing.Tuple[float, str]]:
"""
Returns a list of num_steps tuples: [float, str], with given string parameter, and
    the floating-point parameter increasing linearly from 0 towards 1.
Example:
>>> create_steps_sequence(5, 'X')
[(0.0, 'X'), (0.2, 'X'), (0.4, 'X'), (0.6, 'X'), (0.8, 'X')]
"""
if isinstance(num_steps, float):
num_steps = int(num_steps)
if num_steps == 0:
return []
sequence = []
for step in range(num_steps):
sequence.append((step * 1.0 / num_steps, axis))
return sequence
| 5,343,867
|
def disable_doze_light(ad):
"""Force the device not in doze light mode.
Args:
ad: android device object.
Returns:
True if device is not in doze light mode.
False otherwise.
"""
ad.adb.shell("dumpsys battery reset")
ad.adb.shell("cmd deviceidle disable light")
adb_shell_result = ad.adb.shell("dumpsys deviceidle get light").decode(
'utf-8')
if not adb_shell_result.startswith(DozeModeStatus.ACTIVE):
info = ("dumpsys deviceidle get light: {}".format(adb_shell_result))
print(info)
return False
return True
| 5,343,868
|
def jobs():
""" List all jobs """
return jsonify(job.get_jobs())
| 5,343,869
|
def test_failover(ReplicationGroupId=None, NodeGroupId=None):
"""
    Represents the input of a TestFailover operation which tests automatic failover on a specified node group (called shard in the console) in a replication group (called cluster in the console).
    For more information, see Testing Multi-AZ with Automatic Failover in the ElastiCache User Guide.
See also: AWS API Documentation
:example: response = client.test_failover(
ReplicationGroupId='string',
NodeGroupId='string'
)
:type ReplicationGroupId: string
:param ReplicationGroupId: [REQUIRED]
The name of the replication group (console: cluster) whose automatic failover is being tested by this operation.
:type NodeGroupId: string
:param NodeGroupId: [REQUIRED]
The name of the node group (called shard in the console) in this replication group on which automatic failover is to be tested. You may test automatic failover on up to 5 node groups in any rolling 24-hour period.
:rtype: dict
:return: {
'ReplicationGroup': {
'ReplicationGroupId': 'string',
'Description': 'string',
'Status': 'string',
'PendingModifiedValues': {
'PrimaryClusterId': 'string',
'AutomaticFailoverStatus': 'enabled'|'disabled'
},
'MemberClusters': [
'string',
],
'NodeGroups': [
{
'NodeGroupId': 'string',
'Status': 'string',
'PrimaryEndpoint': {
'Address': 'string',
'Port': 123
},
'Slots': 'string',
'NodeGroupMembers': [
{
'CacheClusterId': 'string',
'CacheNodeId': 'string',
'ReadEndpoint': {
'Address': 'string',
'Port': 123
},
'PreferredAvailabilityZone': 'string',
'CurrentRole': 'string'
},
]
},
],
'SnapshottingClusterId': 'string',
'AutomaticFailover': 'enabled'|'disabled'|'enabling'|'disabling',
'ConfigurationEndpoint': {
'Address': 'string',
'Port': 123
},
'SnapshotRetentionLimit': 123,
'SnapshotWindow': 'string',
'ClusterEnabled': True|False,
'CacheNodeType': 'string'
}
}
:returns:
Viewing ElastiCache Events in the ElastiCache User Guide
DescribeEvents in the ElastiCache API Reference
"""
pass
| 5,343,870
|
def get_pipelines():
"""Get pipelines."""
return PIPELINES
| 5,343,871
|
def retry_lost_downloader_jobs() -> None:
"""Retry downloader jobs that went too long without being started.
Idea: at some point this function could integrate with the spot
instances to determine if jobs are hanging due to a lack of
instances. A naive time-based implementation like this could end
up retrying every single queued job if there were a long period
during which the price of spot instance is higher than our bid
price.
"""
minimum_creation_time = timezone.now() - MAX_QUEUE_TIME
lost_jobs = DownloaderJob.objects.filter(
success=None,
retried=False,
start_time=None,
end_time=None,
created_at__lt=minimum_creation_time
)
handle_downloader_jobs(lost_jobs)
| 5,343,872
|
def tokenize(s):
"""
Tokenize on parenthesis, punctuation, spaces and American units followed by a slash.
We sometimes give American units and metric units for baking recipes. For example:
    * 2 tablespoons/30 milliliters milk or cream
* 2 1/2 cups/300 grams all-purpose flour
The recipe database only allows for one unit, and we want to use the American one.
But we must split the text on "cups/" etc. in order to pick it up.
"""
return filter(None, re.split(r"([,()])?\s+", clump_fractions(normalise(s))))
| 5,343,873
|
def makeColorMatrix(n, bg_color, bg_alpha, ix=None,
fg_color=[228/255.0, 26/255.0, 28/255.0], fg_alpha=1.0):
"""
Construct the RGBA color parameter for a matplotlib plot.
This function is intended to allow for a set of "foreground" points to be
colored according to integer labels (e.g. according to clustering output),
while "background" points are all colored something else (e.g. light gray).
It is used primarily in the interactive plot tools for DeBaCl but can also
be used directly by a user to build a scatterplot from scratch using more
complicated DeBaCl output. Note this function can be used to build an RGBA
color matrix for any aspect of a plot, including point face color, edge
color, and line color, despite use of the term "points" in the descriptions
below.
Parameters
----------
n : int
Number of data points.
bg_color : list of floats
A list with three entries, specifying a color in RGB format.
bg_alpha : float
Specifies background point opacity.
ix : list of ints, optional
Identifies foreground points by index. Default is None, which does not
distinguish between foreground and background points.
fg_color : list of ints or list of floats, optional
Only relevant if 'ix' is specified. If 'fg_color' is a list of integers
then each entry in 'fg_color' indicates the color of the corresponding
foreground point. If 'fg_color' is a list of 3 floats, then all
foreground points will be that RGB color. The default is to color all
foreground points red.
fg_alpha : float, optional
Opacity of the foreground points.
Returns
-------
rgba : 2D numpy array
An 'n' x 4 RGBA array, where each row corresponds to a plot point.
"""
    rgba = np.zeros((n, 4), dtype=float)  # np.float was removed in NumPy 1.24; the builtin float is equivalent
rgba[:, 0:3] = bg_color
rgba[:, 3] = bg_alpha
if ix is not None:
if np.array(fg_color).dtype.kind == 'i':
palette = Palette()
fg_color = palette.applyColorset(fg_color)
rgba[ix, 0:3] = fg_color
rgba[ix, 3] = fg_alpha
return rgba
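# Illustrative call: 10 points on a light gray background, with the first three
# highlighted in the default red foreground color.
import numpy as np  # required by makeColorMatrix itself
colors = makeColorMatrix(10, bg_color=[0.8, 0.8, 0.8], bg_alpha=0.4, ix=[0, 1, 2])
# colors.shape == (10, 4); rows 0-2 are opaque red, the rest translucent gray.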
| 5,343,874
|
def closest_pair(points):
"""
    Closest pair of points, O(N log N)
    Verify: http://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=CGL_5_A&lang=ja
    :param list of Point points:
    :rtype: (float, (Point, Point))
    :return: (distance, pair of points)
"""
assert len(points) >= 2
def _rec(xsorted):
"""
:param list of Point xsorted:
:rtype: (float, (Point, Point))
"""
n = len(xsorted)
if n <= 2:
return xsorted[0].dist(xsorted[1]), (xsorted[0], xsorted[1])
if n <= 3:
            # brute force over all pairs
d = INF
pair = None
for p, q in itertools.combinations(xsorted, r=2):
if p.dist(q) < d:
d = p.dist(q)
pair = p, q
return d, pair
        # divide and conquer
        # closest pair within each half
ld, lp = _rec(xsorted[:n // 2])
rd, rp = _rec(xsorted[n // 2:])
if ld <= rd:
d = ld
ret_pair = lp
else:
d = rd
ret_pair = rp
mid_x = xsorted[n // 2].x
        # collect the points within d of the dividing line x = mid_x
mid_points = []
for p in xsorted:
# if abs(p.x - mid_x) < d:
if abs(p.x - mid_x) - d < -EPS:
mid_points.append(p)
        # update the answer if any pair among these is closer than d
mid_points.sort(key=lambda p: p.y)
mid_n = len(mid_points)
for i in range(mid_n - 1):
j = i + 1
p = mid_points[i]
q = mid_points[j]
# while q.y - p.y < d
while (q.y - p.y) - d < -EPS:
pq_d = p.dist(q)
if pq_d < d:
d = pq_d
ret_pair = p, q
j += 1
if j >= mid_n:
break
q = mid_points[j]
return d, ret_pair
return _rec(list(sorted(points, key=lambda p: p.x)))
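# Minimal runnable sketch: a stand-in Point with the .x/.y/.dist() interface the
# routine expects; the real module presumably supplies its own Point, INF and EPS.
import itertools
import math

INF = float('inf')
EPS = 1e-9

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y
    def dist(self, other):
        return math.hypot(self.x - other.x, self.y - other.y)

d, pair = closest_pair([Point(0, 0), Point(3, 4), Point(1, 1)])
# d == math.sqrt(2); pair holds the points at (0, 0) and (1, 1)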
| 5,343,875
|
def updateParamsPTC(lattice, bunch):
"""
Updates Twiss parameters of lattice.
Updates element parameters.
Updates synchronous particle parameters of the bunch.
"""
(betax, betay, alphax, alphay, etax, etapx) =\
ptc_get_twiss_init_()
lattice.betax0 = betax
lattice.betay0 = betay
lattice.alphax0 = alphax
lattice.alphay0 = alphay
lattice.etax0 = etax
lattice.etapx0 = etapx
(nNodes, nHarm, lRing, gammaT) = ptc_get_ini_params_()
lattice.nNodes = nNodes
lattice.nHarm = nHarm
lattice.lRing = lRing
lattice.gammaT = gammaT
for node in lattice.getNodes():
node_index = node.getParam("node_index")
length = node.getLength()
ptc_get_twiss_for_node_(node_index)
node.setparams(node_index, length,\
betax, betay, alphax, alphay,\
etax, etapx)
setBunchParamsPTC(bunch)
| 5,343,876
|
def less_equals(l,r):
"""
| Forms constraint :math:`l \leq r`.
:param l: number,
:ref:`scalar object<scalar_ref>` or
:ref:`multidimensional object<multi_ref>`.
:param r: number,
:ref:`scalar object<scalar_ref>` or
:ref:`multidimensional object<multi_ref>`.
:return: :ref:`constraint<constr_obj>` or
:ref:`list of constraints<constr_list_obj>`.
"""
return compare(l,LESS_EQUALS,r)
| 5,343,877
|
def ensureImageMode(tex : Image, mode="RGBA") -> Image:
"""Ensure the passed image is in a given mode. If it is not, convert it.
https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes
:param Image tex: The image whose mode to check
:param str mode: The mode to ensure and convert to if needed
:return: tex if it is of the given mode. tex converted to mode otherwise.
:rtype: Image
"""
return tex if tex.mode == mode else tex.convert(mode)
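# Illustration with Pillow: a palette image gets converted, while an image
# already in the requested mode is returned untouched.
from PIL import Image
img = Image.new("P", (4, 4))
print(ensureImageMode(img).mode)       # RGBA
print(ensureImageMode(img, "L").mode)  # L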
| 5,343,878
|
def generate_dada_filelist(filename):
""" Generate a list of DADA files from start filename
Args:
filename (str): Path to file. e.g.
/data/dprice/2020-07-23-02:33:07.587_0000000000000000.000000.dada
Returns:
flist (list): A list of all associated files
"""
bn = os.path.basename(filename)
dn = os.path.dirname(filename)
bn_root = '_'.join(bn.split('_')[:-1]) # Strips off _000.000.dada bit
flist = sorted(glob.glob(os.path.join(dn, bn_root + '_*.dada')))
return flist
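# Illustration of the glob pattern the function builds (no files required on disk):
import os
fname = '/data/dprice/2020-07-23-02:33:07.587_0000000000000000.000000.dada'
root = '_'.join(os.path.basename(fname).split('_')[:-1])
print(os.path.join(os.path.dirname(fname), root + '_*.dada'))
# -> /data/dprice/2020-07-23-02:33:07.587_*.dada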
| 5,343,879
|
def buildWheels(buildDir, requirements):
"""build wheels
:param buildDir: directory to put wheels in (under 'wheelhouse')
:type buildDir: string
:param requirements: name of file holding names of Python packages
:type requirements: string
"""
wheelhouse = os.path.join(buildDir, 'wheelhouse')
if os.path.exists(wheelhouse):
shutil.rmtree(wheelhouse)
subprocess.check_call(['pip', 'wheel', '--requirement', requirements, '--wheel-dir', wheelhouse])
subprocess.check_call(['pip', 'wheel', 'setuptools==15.2', '--wheel-dir', wheelhouse])
subprocess.check_call(['pip', 'wheel', '.', '--wheel-dir', wheelhouse])
return wheelhouse
| 5,343,880
|
def code2name(code: int) -> str:
""" Convert prefecture code to name """
return __code2name[code]
| 5,343,881
|
def newton(backward_differences, max_num_iters, newton_coefficient, ode_fn_vec,
order, step_size, time, tol, unitary, upper):
"""Runs Newton's method to solve the BDF equation."""
initial_guess = tf.reduce_sum(
tf1.where(
tf.range(MAX_ORDER + 1) <= order,
backward_differences[:MAX_ORDER + 1],
tf.zeros_like(backward_differences)[:MAX_ORDER + 1]),
axis=0)
rhs_constant_term = newton_coefficient * tf.reduce_sum(
tf1.where(
tf.range(1, MAX_ORDER + 1) <= order, RECIPROCAL_SUMS[1:, np.newaxis] *
backward_differences[1:MAX_ORDER + 1],
tf.zeros_like(backward_differences)[1:MAX_ORDER + 1]),
axis=0)
next_time = time + step_size
step_size_cast = tf.cast(step_size, backward_differences.dtype)
real_dtype = tf.abs(backward_differences).dtype
def newton_body(iterand):
"""Performs one iteration of Newton's method."""
next_backward_difference = iterand.next_backward_difference
next_state_vec = iterand.next_state_vec
rhs = newton_coefficient * step_size_cast * ode_fn_vec(
next_time,
next_state_vec) - rhs_constant_term - next_backward_difference
delta = tf.squeeze(
tf.linalg.triangular_solve(
upper,
tf.matmul(tf.transpose(unitary), rhs[:, tf.newaxis]),
lower=False))
num_iters = iterand.num_iters + 1
next_backward_difference += delta
next_state_vec += delta
delta_norm = tf.cast(tf.norm(delta), real_dtype)
lipschitz_const = delta_norm / iterand.prev_delta_norm
# Stop if method has converged.
approx_dist_to_sol = lipschitz_const / (1. - lipschitz_const) * delta_norm
close_to_sol = approx_dist_to_sol < tol
delta_norm_is_zero = tf.equal(delta_norm, tf.constant(0., dtype=real_dtype))
converged = close_to_sol | delta_norm_is_zero
finished = converged
# Stop if any of the following conditions are met:
# (A) We have hit the maximum number of iterations.
# (B) The method is converging too slowly.
# (C) The method is not expected to converge.
too_slow = lipschitz_const > 1.
finished = finished | too_slow
if max_num_iters is not None:
too_many_iters = tf.equal(num_iters, max_num_iters)
num_iters_left = max_num_iters - num_iters
num_iters_left_cast = tf.cast(num_iters_left, real_dtype)
wont_converge = (
approx_dist_to_sol * lipschitz_const**num_iters_left_cast > tol)
finished = finished | too_many_iters | wont_converge
return [
_NewtonIterand(
converged=converged,
finished=finished,
next_backward_difference=next_backward_difference,
next_state_vec=next_state_vec,
num_iters=num_iters,
prev_delta_norm=delta_norm)
]
iterand = _NewtonIterand(
converged=False,
finished=False,
next_backward_difference=tf.zeros_like(initial_guess),
next_state_vec=tf.identity(initial_guess),
num_iters=0,
prev_delta_norm=tf.constant(np.array(-0.), dtype=real_dtype))
[iterand] = tf.while_loop(lambda iterand: tf.logical_not(iterand.finished),
newton_body, [iterand])
return (iterand.converged, iterand.next_backward_difference,
iterand.next_state_vec, iterand.num_iters)
| 5,343,882
|
def mock_handler(
handler: RequestHandler, uri: str = 'https://hub.example.com', method: str = 'GET', **settings: dict
) -> RequestHandler:
"""Instantiate a Handler in a mock application"""
application = Application(
hub=Mock(base_url='/hub/', server=Mock(base_url='/hub/'),),
cookie_secret=os.urandom(32),
db=Mock(rollback=Mock(return_value=None)),
**settings,
)
request = HTTPServerRequest(method=method, uri=uri, connection=Mock(),)
handler = RequestHandler(application=application, request=request,)
handler._transforms = []
return handler
| 5,343,883
|
def input(channel):
"""
    Read the (simulated) value of a GPIO pin.
    :param channel: GPIO channel number (ignored by this mock)
    :return: LOW or HIGH, chosen at random
"""
return LOW if random.random() < 0.5 else HIGH
| 5,343,884
|
def kml_start(params):
"""Define basic kml
header string"""
kmlstart = '''
<Document>
<name>%s</name>
<open>1</open>
<description>%s</description>
'''
return kmlstart % (params[0], params[1])
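# Illustrative call: params is any two-element sequence of (name, description).
print(kml_start(['Survey tracks', 'GPS tracks collected during the 2020 field season']))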
| 5,343,885
|
def add_item_to_do_list():
"""
Asks users to keep entering items to add to a new To Do list until they enter the word 'stop'
:return: to do list with new items
"""
    # One straightforward implementation of the behaviour described above:
    to_do_list = []
    item = input("Enter an item to add to the list (or 'stop' to finish): ")
    while item.lower() != 'stop':
        to_do_list.append(item)
        item = input("Enter an item to add to the list (or 'stop' to finish): ")
    return to_do_list
| 5,343,886
|
def handle_connect_event():
"""
A new web client established a connection.
"""
LOGGER.info('[%s] connected' % request.remote_addr)
global thread
    if thread is None:
thread = SOCKETIO.start_background_task(target=ack)
emit('connected')
| 5,343,887
|
def parse(tokens):
"""Currently parse just supports fn, variable and constant definitions."""
context = Context()
context.tokens = tokens
while tokens:
parse_token(context)
if context.stack:
raise CompileError("after parsing, there are still words on the stack!!:\n{0}".format(
context.stack))
return context
| 5,343,888
|
def fetch_params_check():
"""If is_fetch is ticked, this function checks that all the necessary parameters for the fetch are entered."""
str_error = [] # type:List
if TIME_FIELD == '' or TIME_FIELD is None:
str_error.append("Index time field is not configured.")
if FETCH_INDEX == '' or FETCH_INDEX is None:
str_error.append("Index is not configured.")
if FETCH_QUERY == '' or FETCH_QUERY is None:
str_error.append("Query by which to fetch incidents is not configured.")
if len(str_error) > 0:
return_error("Got the following errors in test:\nFetches incidents is enabled.\n" + '\n'.join(str_error))
| 5,343,889
|
def _create_rpc_callback(label, result_counter):
"""Creates RPC callback function.
Args:
label: The correct label for the predicted example.
result_counter: Counter for the prediction result.
Returns:
The callback function.
"""
def _callback(result_future):
"""Callback function.
Calculates the statistics for the prediction result.
Args:
result_future: Result future of the RPC.
"""
exception = result_future.exception()
if exception:
result_counter.inc_error()
print(exception)
else:
sys.stdout.write('.')
sys.stdout.flush()
response = numpy.array(
result_future.result().outputs['scores'].float_val)
prediction = numpy.argmax(response)
if label != prediction:
result_counter.inc_error()
result_counter.inc_done()
result_counter.dec_active()
return _callback
| 5,343,890
|
def get_file_iterator(options):
"""
returns a sequence of files
raises IOError if problemmatic
raises ValueError if problemmatic
"""
# -------- BUILD FILE ITERATOR/GENERATOR --------
if options.f is not None:
files = options.f
elif options.l is not None:
try:
lfile = open(options.l, 'r')
# make a generator of non-blank lines
files = (line.strip() for line in lfile if line.strip())
except IOError:
msg = "{0} does not exist.".format(options.l)
raise IOError(msg)
else:
msg = "Must provide input files or file list."
raise ValueError(msg)
return files
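# Hedged sketch: 'options' only needs .f and .l attributes, so an
# argparse.Namespace works as a stand-in for the real options object.
from argparse import Namespace
files = get_file_iterator(Namespace(f=['a.txt', 'b.txt'], l=None))
print(list(files))  # ['a.txt', 'b.txt']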
| 5,343,891
|
def entropy_from_CT(SA, CT):
"""
Calculates specific entropy of seawater.
Parameters
----------
SA : array_like
Absolute salinity [g kg :sup:`-1`]
CT : array_like
Conservative Temperature [:math:`^\circ` C (ITS-90)]
Returns
-------
entropy : array_like
specific entropy [J kg :sup:`-1` K :sup:`-1`]
Examples
--------
>>> import gsw
>>> SA = [34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324]
>>> CT = [28.8099, 28.4392, 22.7862, 10.2262, 6.8272, 4.3236]
>>> gsw.entropy_from_CT(SA, CT)
array([ 400.38916315, 395.43781023, 319.86680989, 146.79103279,
98.64714648, 62.79185763])
References
----------
.. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
of seawater - 2010: Calculation and use of thermodynamic properties.
Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
UNESCO (English), 196 pp. See appendix A.10.
"""
SA = np.maximum(SA, 0)
pt0 = pt_from_CT(SA, CT)
return -gibbs(n0, n1, n0, SA, pt0, 0)
| 5,343,892
|
def checking_log(input_pdb_path: str, output_log_path: str, properties: dict = None, **kwargs) -> int:
"""Create :class:`CheckingLog <model.checking_log.CheckingLog>` class and
execute the :meth:`launch() <model.checking_log.CheckingLog.launch>` method."""
return CheckingLog(input_pdb_path=input_pdb_path,
output_log_path=output_log_path,
properties=properties, **kwargs).launch()
| 5,343,893
|
def _repoint_files_json_dir(filename: str, source_folder: str, target_folder: str, working_folder: str) -> Optional[str]:
""" Repoints the DIR entry in the JSON file to the target folder
Arguments:
filename: the file to load and process
source_folder: the source folder to replace with target folder; if empty or None, a best guess is applied
target_folder: the target folder for the DIR entries
working_folder: the working folder to place the updated file in
Return:
        The name of the adjusted JSON file when successful; otherwise None is returned
    Notes:
        The new file will have the same name as the original, but will be in the working folder. If a file by that name
already exists in the working folder, it will be overwritten.
"""
# Check parameters
if not os.path.isfile(filename):
msg = 'Invalid file specified to repoint files JSON "%s"' % filename
logging.warning(msg)
return None
if not os.path.isdir(working_folder):
msg = 'Invalid working folder specified to repoint files JSON "%s"' % working_folder
logging.warning(msg)
return None
# Load the JSON
file_json = _load_json_file(filename)
if file_json is None:
msg = 'Unable to load JSON file when repointing files JSON "%s"' % filename
logging.warning(msg)
return None
if not isinstance(file_json, dict):
msg = 'Unknown JSON format when repointing files JSON "%s"' % filename
logging.warning(msg)
return None
if 'FILE_LIST' not in file_json:
msg = 'JSON missing FILE_LIST key when repointing files JSON "%s"' % filename
logging.warning(msg)
return None
new_file = os.path.join(working_folder, os.path.basename(filename))
all_files = file_json['FILE_LIST']
if not isinstance(all_files, list) and not isinstance(all_files, tuple) and not isinstance(all_files, set):
msg = 'FILE_LIST value is not a list of files for repointing files JSON "%s"' % filename
logging.warning(msg)
return None
try:
# Make sure we have a source folder to work with
if not source_folder:
cur_path = all_files[0]['DIR']
            if cur_path[-1:] == '/' or cur_path[-1:] == '\\':
cur_path = cur_path[:len(cur_path) - 1]
source_folder = os.path.dirname(cur_path)
# Run through the files that we have
new_files = []
for one_file in all_files:
cur_file = {**one_file}
if cur_file['DIR'].startswith(source_folder):
cur_file['DIR'] = _replace_folder_path(cur_file['DIR'], source_folder, target_folder)
new_files.append(cur_file)
with open(new_file, 'w', encoding='utf8') as out_file:
json.dump({"FILE_LIST": new_files}, out_file, indent=2)
except Exception:
msg = 'Exception caught while repointing files JSON: "%s"' % filename
logging.exception(msg)
new_file = None
return new_file
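# Hedged end-to-end sketch (assumes the module's private helpers
# _load_json_file/_replace_folder_path behave as their names suggest;
# all paths below are temporary and purely illustrative):
import json, os, tempfile
src_dir = tempfile.mkdtemp()
work_dir = tempfile.mkdtemp()
manifest = os.path.join(src_dir, 'files.json')
with open(manifest, 'w', encoding='utf8') as fh:
    json.dump({'FILE_LIST': [{'DIR': '/old/run/images', 'FILE': 'a.tif'}]}, fh)
new_manifest = _repoint_files_json_dir(manifest, '/old/run', '/new/run', work_dir)
# new_manifest points at work_dir/files.json with DIR rewritten to '/new/run/images'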
| 5,343,894
|
def get_registry_by_name(cli_ctx, registry_name, resource_group_name=None):
"""Returns a tuple of Registry object and resource group name.
:param str registry_name: The name of container registry
:param str resource_group_name: The name of resource group
"""
resource_group_name = get_resource_group_name_by_registry_name(
cli_ctx, registry_name, resource_group_name)
client = cf_acr_registries(cli_ctx)
return client.get(resource_group_name, registry_name), resource_group_name
| 5,343,895
|
def plot_karyotype_summary(haploid_coverage,
chromosomes,
chrom_length,
output_dir,
bed_filename,
bed_file_sep=',',
binsize=1000000,
overlap=50000,
cov_min=5,
cov_max=200,
min_PL_length=3000000,
chroms_with_text=None):
"""
Plots karyotype summary for the whole genome with data preparation.
    :param haploid_coverage: the average coverage of haploid regions (or half the average coverage of diploid regions)
:param chromosomes: list of chromosomes in the genome (list of str)
:param chrom_length: list of chromosome lengths (list of int)
:param output_dir: the path to the directory where PE_fullchrom_[chrom].txt files are located (str)
:param bed_filename: the path to the bed file of the sample with ploidy and LOH information (str)
:param bed_file_sep: bed file separator (default: ',') (str)
:param binsize: the binsize used for moving average (default: 1000000) (int)
:param overlap: the overlap used for moving average (default: 50000) (int, smaller than binsize)
:param cov_min: the minimum coverage for a position to be included (default: 5) (int)
    :param cov_max: the maximum coverage for a position to be included (default: 200) (int)
:param min_PL_length: the minimal length of a region to be plotted (default: 3000000) (int)
:param chroms_with_text: the list of chromosomes to be indicated with text on the plot (list of str) (If there are many short chromosomes or they have long names, it is useful to only indicate a few with text on the plot.)
:returns: a matplotlib figure
"""
real_pos, dr, dr_25, dr_75, baf, baf_25, baf_75 = __get_BAF_and_DR(avg_dip_cov=haploid_coverage * 2,
chroms=chromosomes,
chrom_length_list=chrom_length,
datadir=output_dir,
binsize=binsize,
overlap=overlap,
cov_min=cov_min,
cov_max=cov_max)
s0, s1, loh_pos, loh = __get_PL_and_LOH(bed_filename=bed_filename,
chroms=chromosomes,
chrom_lenght_list=chrom_length,
bed_file_sep=bed_file_sep,
numtoplot=5000,
minlength=min_PL_length)
f = __plot_karyotype(real_pos=real_pos,
dr=dr,
dr_25=dr_25,
dr_75=dr_75,
baf=baf,
baf_25=baf_25,
baf_75=baf_75,
s0=s0,
s1=s1,
loh_pos=loh_pos,
loh=loh,
all_chroms=chromosomes,
chrom_length_list=chrom_length,
chroms_with_text=chroms_with_text)
return f
| 5,343,896
|
def create_percentile_rasters(
raster_path, output_path, units_short, units_long, start_value,
percentile_list, aoi_shape_path):
"""Creates a percentile (quartile) raster based on the raster_dataset. An
attribute table is also constructed for the raster_dataset that
displays the ranges provided by taking the quartile of values.
The following inputs are required:
raster_path - A uri to a gdal raster dataset with data of type integer
output_path - A String for the destination of new raster
units_short - A String that represents the shorthand for the units
of the raster values (ex: kW/m)
units_long - A String that represents the description of the units
of the raster values (ex: wave power per unit width of
wave crest length (kW/m))
start_value - A String representing the first value that goes to the
first percentile range (start_value - percentile_one)
percentile_list - a python list of the percentiles ranges
ex: [25, 50, 75, 90]
aoi_shape_path - a uri to an OGR polygon shapefile to clip the
rasters to
return - Nothing """
    LOGGER.debug('Create Percentile Rasters')
# If the output_path is already a file, delete it
if os.path.isfile(output_path):
os.remove(output_path)
def raster_percentile(band):
"""Operation to use in vectorize_datasets that takes
the pixels of 'band' and groups them together based on
their percentile ranges.
band - A gdal raster band
returns - An integer that places each pixel into a group
"""
return bisect(percentiles, band)
# Get the percentile values for each percentile
percentiles = calculate_percentiles_from_raster(
raster_path, percentile_list)
LOGGER.debug('percentiles_list : %s', percentiles)
    # Get the percentile ranges as strings so that they can be added to an output
# table
percentile_ranges = create_percentile_ranges(
percentiles, units_short, units_long, start_value)
# Add the start_value to the beginning of the percentiles so that any value
# before the start value is set to nodata
percentiles.insert(0, int(start_value))
# Set nodata to a very small negative number
nodata = -9999919
pixel_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(raster_path)
# Classify the pixels of raster_dataset into groups and write
# then to output
pygeoprocessing.geoprocessing.vectorize_datasets(
[raster_path], raster_percentile, output_path, gdal.GDT_Int32,
nodata, pixel_size, 'intersection',
assert_datasets_projected=False, aoi_uri=aoi_shape_path)
# Create percentile groups of how percentile ranges are classified
# using bisect function on a raster
percentile_groups = np.arange(1, len(percentiles) + 1)
# Get the pixel count for each group
pixel_count = count_pixels_groups(output_path, percentile_groups)
LOGGER.debug('number of pixels per group: : %s', pixel_count)
# Initialize a dictionary where percentile groups map to a string
# of corresponding percentile ranges. Used to create RAT
perc_dict = {}
for index in xrange(len(percentile_groups)):
perc_dict[percentile_groups[index]] = percentile_ranges[index]
col_name = "Val_Range"
pygeoprocessing.geoprocessing.create_rat_uri(output_path, perc_dict, col_name)
# Initialize a dictionary to map percentile groups to percentile range
# string and pixel count. Used for creating CSV table
table_dict = {}
for index in xrange(len(percentile_groups)):
table_dict[index] = {}
table_dict[index]['id'] = percentile_groups[index]
table_dict[index]['Value Range'] = percentile_ranges[index]
table_dict[index]['Pixel Count'] = pixel_count[index]
attribute_table_uri = output_path[:-4] + '.csv'
column_names = ['id', 'Value Range', 'Pixel Count']
create_attribute_csv_table(attribute_table_uri, column_names, table_dict)
| 5,343,897
|
def probs_to_mu_sigma(probs):
"""Calculate mean and covariance matrix for each channel of probs
tensor of keypoint probabilites [N, C, H, W]
mean calculated on a grid of scale [-1, 1]
Parameters
----------
probs : torch.Tensor
tensor of shape [N, C, H, W] where each channel along axis 1
is interpreted as a probability density.
Returns
-------
mu : torch.Tensor
tensor of shape [N, C, 2] representing partwise mean coordinates
of x and y for each item in the batch
sigma : torch.Tensor
tensor of shape [N, C, 2, 2] representing covariance matrix
for each item in the batch
"""
bn, nk, h, w = shape_as_list(probs)
y_t = tile(torch.linspace(-1, 1, h).view(h, 1), w, 1)
x_t = tile(torch.linspace(-1, 1, w).view(1, w), h, 0)
y_t = torch.unsqueeze(y_t, dim=-1)
x_t = torch.unsqueeze(x_t, dim=-1)
meshgrid = torch.cat([y_t, x_t], dim=-1)
if probs.is_cuda:
meshgrid = meshgrid.to(probs.device)
mu = torch.einsum("ijl,akij->akl", meshgrid, probs)
mu_out_prod = torch.einsum("akm,akn->akmn", mu, mu)
mesh_out_prod = torch.einsum("ijm,ijn->ijmn", meshgrid, meshgrid)
sigma = torch.einsum("ijmn,akij->akmn", mesh_out_prod, probs) - mu_out_prod
return mu, sigma
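# Illustrative usage (shape_as_list and tile are helpers from the surrounding
# module): softmax-normalised heatmaps stand in for keypoint probability maps.
import torch
logits = torch.randn(2, 3, 16, 16)  # [N, C, H, W]
probs = torch.softmax(logits.view(2, 3, -1), dim=-1).view(2, 3, 16, 16)
mu, sigma = probs_to_mu_sigma(probs)
print(mu.shape, sigma.shape)  # torch.Size([2, 3, 2]) torch.Size([2, 3, 2, 2])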
| 5,343,898
|
def fetch_biomart_genes_mm9():
"""Fetches mm9 genes from Ensembl via biomart."""
return _fetch_genes_biomart(
host='http://may2012.archive.ensembl.org',
gene_name_attr='external_gene_id')
| 5,343,899
|