| content (string, length 22–815k) | id (int64, 0–4.91M) |
|---|---|
def birth(sim):
"""Similar to create agent, but just one individual"""
age = 0
qualification = int(sim.seed.gammavariate(3, 3))
qualification = min(qualification, 20)
money = sim.seed.randrange(20, 40)
month = sim.seed.randrange(1, 13, 1)
gender = sim.seed.choice(['Male', 'Female'])
sim.total_pop += 1
a = Agent((sim.total_pop - 1), gender, age, qualification, money, month)
return a
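A minimal stand-alone sketch of the qualification draw above, assuming sim.seed behaves like a seeded random.Random instance (that assumption, and every name below, is illustrative rather than taken from the source):
import random

rng = random.Random(42)
# gamma(shape=3, scale=3) draws, truncated to int and capped at 20
samples = [min(int(rng.gammavariate(3, 3)), 20) for _ in range(5)]
print(samples)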
| 23,200
|
def offer_better_greeting():
"""Give player optional compliments."""
player = request.args["person"]
# if they didn't tick box, `wants_compliments` won't be
# in query args -- so let's use safe `.get()` method of
# dict-like things
wants = request.args.get("wants_compliments")
nice_things = sample(COMPLIMENTS, 3) if wants else []
return render_template("compliments.html",
compliments=nice_things,
name=player)
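A hypothetical sketch of how this view might be wired into a Flask app; the app object, the COMPLIMENTS list, the route and the template name are assumptions, not taken from the source:
from random import sample
from flask import Flask, request, render_template

app = Flask(__name__)
COMPLIMENTS = ["smart", "kind", "funny"]  # placeholder list

app.add_url_rule("/greet", view_func=offer_better_greeting)
# A GET request like /greet?person=Ada&wants_compliments=on would render the
# template with three sampled compliments; omitting the checkbox renders none.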
| 23,201
|
def get_trip_length(grouped_counts):
"""
Gets the frequency of the length of a trip for a customer
Args:
grouped_counts (Pandas.DataFrame): The grouped dataframe returned from
a get_trips method call
Returns:
Pandas.DataFrame: the dataframe containing the frequencies for each
trip length (in days)
"""
return frequency(grouped_counts, 0)
| 23,202
|
def extract_begin_end(data):
""" Finds nif:beginIndex and nif:endIndex values.
:param data: Data sent by the client.
:return: Begin index and end index, -1 if error.
"""
try:
begin = data.split("nif:beginIndex")[1].split("\"")[1]
end = data.split("nif:endIndex")[1].split("\"")[1]
return int(begin), int(end)
except IndexError:
return -1, -1
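A worked example on a made-up NIF-style payload, showing how the two splits pull out the quoted index values and how the IndexError fallback behaves:
# The payload text below is invented for illustration only.
data = 'nif:beginIndex "12"^^xsd:nonNegativeInteger ; nif:endIndex "27"^^xsd:nonNegativeInteger .'
print(extract_begin_end(data))          # (12, 27)
print(extract_begin_end("no indices"))  # (-1, -1) via the IndexError fallback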
| 23,203
|
def setup_parameters():
"""
Helper routine to fill in all relevant parameters
Note that this file will be used for all versions of SDC, containing more than necessary for each individual run
Returns:
description (dict)
controller_params (dict)
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1E-08
level_params['dt'] = 1E-02
level_params['nsweeps'] = [3, 1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['collocation_class'] = CollGaussRadau_Right
sweeper_params['num_nodes'] = [3]
sweeper_params['QI'] = ['LU']
sweeper_params['QE'] = ['EE']
sweeper_params['initial_guess'] = 'zero'
# This comes as read-in for the problem class
problem_params = dict()
problem_params['nu'] = 2
problem_params['L'] = 1.0
problem_params['nvars'] = [(256, 256), (64, 64)]
problem_params['eps'] = [0.04, 0.16]
problem_params['radius'] = 0.25
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 20
controller_params['hook_class'] = monitor
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = None # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = None # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh_fft2d
return description, controller_params
| 23,204
|
def create_scope(api_client, scope, initial_manage_principal):
"""
Creates a new secret scope with given name.
"""
SecretApi(api_client).create_scope(scope, initial_manage_principal)
| 23,205
|
def add_quick_closing(request_id, days, date, tz_name, content):
"""Create and store an acknowledgement-determination response followed by a closing-determination response for
the specified request and update the request accordingly.
Args:
request_id: FOIL request ID
days: days until request completion
date: date of request completion
tz_name: client's timezone name
content: body text associated with the acknowledgment/closing
"""
# Acknowledgement actions
request = Requests.query.filter_by(id=request_id).one()
if not request.was_acknowledged:
previous_due_date = {'due_date': request.due_date.isoformat()}
previous_status = request.status
new_due_date = _get_new_due_date(request_id, days, date, tz_name)
update_object(
{'due_date': new_due_date,
'status': request_status.IN_PROGRESS},
Requests,
request_id
)
privacy = RELEASE_AND_PUBLIC
acknowledgement_response = Determinations(
request_id,
privacy,
determination_type.ACKNOWLEDGMENT,
None,
new_due_date,
)
create_object(acknowledgement_response)
create_response_event(event_type.REQ_ACKNOWLEDGED, acknowledgement_response, previous_value=previous_due_date)
create_request_info_event(
request_id,
type_=event_type.REQ_STATUS_CHANGED,
previous_value={'status': previous_status},
new_value={'status': request.status}
)
else:
raise UserRequestException(action='acknowledge',
request_id=request_id,
reason='Request has already been acknowledged')
# Closing actions
if request.status != request_status.CLOSED and (
request.was_acknowledged or request.was_reopened):
previous_status = request.status
previous_date_closed = request.date_closed.isoformat() if request.date_closed else None
update_vals = {'status': request_status.CLOSED}
if not calendar.isbusday(datetime.utcnow()) or datetime.utcnow().date() < request.date_submitted.date():
update_vals['date_closed'] = get_next_business_day()
else:
update_vals['date_closed'] = datetime.utcnow()
if not request.privacy['agency_request_summary'] and request.agency_request_summary is not None:
    update_vals['agency_request_summary_release_date'] = calendar.addbusdays(
        datetime.utcnow(), RELEASE_PUBLIC_DAYS)
update_object(
    update_vals,
    Requests,
    request_id,
    es_update=False
)
create_request_info_event(
request_id,
type_=event_type.REQ_STATUS_CHANGED,
previous_value={'status': previous_status, 'date_closed': previous_date_closed},
new_value={'status': request.status, 'date_closed': request.date_closed.isoformat()}
)
reason = Reasons.query.filter_by(title='Fulfilled via Walk In').one()
if not calendar.isbusday(datetime.utcnow()) or datetime.utcnow().date() < request.date_submitted.date():
# push the closing date to the next business day if it is a weekend/holiday
# or if it is before the date submitted
closing_response = Determinations(
request_id,
RELEASE_AND_PUBLIC,
determination_type.CLOSING,
format_determination_reasons([reason.id]),
date_modified=get_next_business_day()
)
else:
closing_response = Determinations(
request_id,
RELEASE_AND_PUBLIC,
determination_type.CLOSING,
format_determination_reasons([reason.id])
)
create_object(closing_response)
create_response_event(event_type.REQ_CLOSED, closing_response)
request.es_update()
else:
raise UserRequestException(action='close',
request_id=request_id,
reason='Request is already closed or has not been acknowledged')
email_id = _send_response_email(request_id,
privacy,
content,
'Request {} Acknowledged and Closed'.format(request_id))
# Create 2 CommunicationMethod objects, one for each determination
_create_communication_method(acknowledgement_response.id, email_id, response_type.EMAIL)
_create_communication_method(closing_response.id, email_id, response_type.EMAIL)
| 23,206
|
def get_operation(op, inplanes, outplanes, stride, conv_type):
"""Set up conv and pool operations."""
kernel_size = Ops.ops_to_kernel_size[op]
padding = [(k - 1) // 2 for k in kernel_size]
if op in Ops.pooling_ops:
if inplanes == outplanes:
return nn.AvgPool2d(kernel_size, stride=stride, padding=padding)
else:
return nn.Sequential(nn.Conv2d(inplanes, outplanes, 1, 1, 0),
nn.AvgPool2d(kernel_size, stride=stride, padding=padding))
else:
if conv_type == 'depthwise_separable':
return depthwise_separable_conv_general(inplanes, outplanes, stride, kernel_size, padding)
else:
return nn.Conv2d(inplanes, outplanes, kernel_size, stride, padding=padding)
| 23,207
|
def test_create_subscription_metadata(
caplog, auth_client, user, session, mock_stripe_customer
):
"""Successful checkout session updates metadata on Stripe Customer"""
mock_stripe_customer.retrieve.return_value.metadata = {}
mock_stripe_customer.retrieve.return_value.email = user.email
url = reverse("billing:checkout_success")
query_params = {"session_id": factories.id("sess")}
with caplog.at_level("ERROR"):
response = auth_client.get(url, query_params)
assert 302 == response.status_code
assert settings.CHECKOUT_SUCCESS_URL == response.url
assert mock_stripe_customer.retrieve.call_count == 1
assert mock_stripe_customer.modify.call_count == 1
assert len(caplog.records) == 0
| 23,208
|
def encode(
structure_klifs_ids, fingerprints_filepath=None, local_klifs_download_path=None, n_cores=1
):
"""
Encode structures.
Parameters
----------
structure_klifs_ids : list of int
Structure KLIFS IDs.
fingerprints_filepath : str or pathlib.Path
Path to output json file. Default None.
local_klifs_download_path : str or None
If path to local KLIFS download is given, set up local KLIFS session.
If None is given, set up remote KLIFS session.
n_cores : int
Number of cores used to generate fingerprints.
Returns
-------
kissim.encoding.FingerprintGenerator
Fingerprints.
"""
# Set up KLIFS session
klifs_session = _setup_klifs_session(local_klifs_download_path)
# Generate fingerprints
fingerprints = FingerprintGenerator.from_structure_klifs_ids(
structure_klifs_ids, klifs_session, n_cores
)
# Optionally: Save fingerprints to json file
if fingerprints_filepath:
logger.info(f"Write fingerprints to file: {fingerprints_filepath}")
fingerprints.to_json(fingerprints_filepath)
return fingerprints
| 23,209
|
def log_like_repressed(params, data_rep):
"""Conv wrapper for log likelihood for 2-state promoter w/
transcription bursts and repression.
data_rep: a list of arrays, each of which is n x 2, of form
data[:, 0] = SORTED unique mRNA counts
data[:, 1] = frequency of each mRNA count
Note the data pre-processing here, credit to Manuel for this observation:
'The likelihood asks for unique mRNA entries and their corresponding
counts to speed up the process of computing the probability distribution.
Instead of computing the probability of 3 mRNAs n times, it computes it
once and multiplies the value by n.'
This also reduces the size of the data arrays by ~10-fold,
which reduces the time penalty of emcee's pickling
to share the data within the multiprocessing Pool.
"""
# kR_list contains, in order, kRon_0p5, kRon_1, kRon_2, kRon_10, kRoff
k_burst, mean_burst, *kR_list = params
params_local = np.array([k_burst, mean_burst, 0, kR_list[-1]])
target = 0
for i, expt in enumerate(data_rep):
max_m = expt[0].max()
# kRoff is never plugged in below b/c loop terminates first
params_local[2] = kR_list[i]
# note log_probs contains values for ALL m < max_m,
# not just those in the data set...
log_probs = srep.models.log_prob_m_bursty_rep(max_m, *params_local)
# ...so extract just the ones we want & multiply by their occurrence
target += np.sum(expt[1] * log_probs[expt[0]])
return target
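A minimal numpy sketch (with made-up counts) of the pre-processing the docstring describes: collapsing raw mRNA counts into sorted unique values and their frequencies, arranged so that expt[0] holds the unique counts and expt[1] their frequencies, which is how the loop above indexes each entry:
import numpy as np

raw_counts = np.array([3, 0, 3, 7, 0, 3])          # illustrative raw mRNA counts
vals, freqs = np.unique(raw_counts, return_counts=True)  # np.unique sorts the values
expt = np.stack([vals, freqs])   # expt[0] = unique counts, expt[1] = frequencies
print(expt)                      # [[0 3 7]
                                 #  [2 3 1]]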
| 23,210
|
def test_wrapper_calls_of_on_non_wrapper():
"""
The ExtensionClass protocol is respected even for non-Acquisition
objects.
>>> class MyBase(ExtensionClass.Base):
... def __of__(self, other):
... print("Of called")
... return 42
>>> class Impl(Acquisition.Implicit):
... pass
If we have a wrapper around an object that is an extension class,
but not an Acquisition wrapper:
>>> root = Impl()
>>> wrapper = Acquisition.ImplicitAcquisitionWrapper(MyBase(), root)
And access that object itself through a wrapper:
>>> root.child = Impl()
>>> root.child.wrapper = wrapper
The `__of__` protocol is respected implicitly:
>>> root.child.wrapper
Of called
42
Here it is explicitly:
>>> wrapper.__of__(root.child)
Of called
42
"""
| 23,211
|
def selectBroadcastData(request):
"""
检索黑广播
:param request:
:return:
"""
pass
| 23,212
|
def freq_mask(spec, F=30, num_masks=1, pad_value=0.):
"""Frequency masking
Args:
spec (torch.Tensor): input tensor of shape `(dim, T)`
F (int): maximum width of each mask
num_masks (int): number of masks
pad_value (float): value for padding
Returns:
freq masked tensor (torch.Tensor): output tensor of shape `(dim, T)`
"""
cloned = spec.clone()
num_mel_channels = cloned.size(0)
for i in range(num_masks):
f = np.random.randint(0, F + 1)
f_zero = np.random.randint(0, num_mel_channels - f + 1)
if f == 0:
continue
cloned[f_zero:f_zero + f] = pad_value
return cloned
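A short usage sketch, assuming a torch tensor shaped (dim, T) as in the docstring; the spectrogram here is just random data:
import torch
import numpy as np

spec = torch.randn(80, 200)                       # (dim, T)
masked = freq_mask(spec, F=30, num_masks=2, pad_value=0.)
print(masked.eq(0).all(dim=1).sum().item())       # number of fully masked channels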
| 23,213
|
def prepare_file_hierarchy(path):
"""
Create a temporary folder structure like the following:
test_find_dotenv0/
└── child1
    ├── child2
    │   └── child3
    │       └── child4
    └── .env
Then try to automatically `find_dotenv` starting in `child4`
"""
curr_dir = path
dirs = []
for f in ['child1', 'child2', 'child3', 'child4']:
curr_dir /= f
dirs.append(curr_dir)
curr_dir.mkdir()
return (dirs[0], dirs[-1])
| 23,214
|
def test_config_rule_errors(): # pylint: disable=too-many-statements
"""Test various error conditions in ConfigRule instantiation."""
client = boto3.client("config", region_name=TEST_REGION)
# Missing fields (ParamValidationError) caught by botocore and not
# tested here: ConfigRule.Source, ConfigRule.ConfigRuleName
managed_rule = managed_config_rule()
managed_rule["ConfigRuleArn"] = "arn"
with pytest.raises(ClientError) as exc:
client.put_config_rule(ConfigRule=managed_rule)
err = exc.value.response["Error"]
assert err["Code"] == "InvalidParameterValueException"
assert (
"ConfigRule Arn and Id can not be specified when creating a new "
"ConfigRule." in err["Message"]
)
managed_rule = managed_config_rule()
bad_json_string = "{'name': 'test', 'type': null, }"
managed_rule["InputParameters"] = bad_json_string
with pytest.raises(ClientError) as exc:
client.put_config_rule(ConfigRule=managed_rule)
err = exc.value.response["Error"]
assert err["Code"] == "InvalidParameterValueException"
assert (
f"Invalid json {bad_json_string} passed in the InputParameters field"
in err["Message"]
)
managed_rule = managed_config_rule()
managed_rule["MaximumExecutionFrequency"] = "HOUR"
with pytest.raises(ClientError) as exc:
client.put_config_rule(ConfigRule=managed_rule)
err = exc.value.response["Error"]
assert err["Code"] == "ValidationException"
assert (
"Member must satisfy enum value set: {One_Hour, Six_Hours, "
"Three_Hours, Twelve_Hours, TwentyFour_Hours}" in err["Message"]
)
managed_rule = managed_config_rule()
managed_rule["ConfigRuleState"] = "BOGUS"
with pytest.raises(ClientError) as exc:
client.put_config_rule(ConfigRule=managed_rule)
err = exc.value.response["Error"]
assert err["Code"] == "ValidationException"
assert (
"Value 'BOGUS' at 'configRule.configRuleState' failed to satisfy "
"constraint: Member must satisfy enum value set: {ACTIVE, "
"DELETING, DELETING_RESULTS, EVALUATING}" in err["Message"]
)
managed_rule = managed_config_rule()
managed_rule["ConfigRuleState"] = "DELETING"
with pytest.raises(ClientError) as exc:
client.put_config_rule(ConfigRule=managed_rule)
err = exc.value.response["Error"]
assert err["Code"] == "InvalidParameterValueException"
assert (
"The ConfigRuleState DELETING is invalid. Only the following values "
"are permitted: ACTIVE" in err["Message"]
)
managed_rule = managed_config_rule()
managed_rule["CreatedBy"] = "tester"
with pytest.raises(ClientError) as exc:
client.put_config_rule(ConfigRule=managed_rule)
err = exc.value.response["Error"]
assert err["Code"] == "InvalidParameterValueException"
assert (
"AWS Config populates the CreatedBy field for ServiceLinkedConfigRule. "
"Try again without populating the CreatedBy field" in err["Message"]
)
| 23,215
|
def collateReadCounts(inputs, outputs):
"""
Collate read counts from samtools flagstat output into a table.
"""
# Note expected input and output directories are effectively hard-coded
in_dir = sambam_dir
out_dir = results_dir
flag_file = outputs[-1]
print "Collating read counts"
runStageCheck('collateReadcounts', flag_file, in_dir, out_dir)
| 23,216
|
def cmd2dict(cmd):
"""Returns a dictionary of what to replace each value by."""
pixel_count = cmd[cmd.shape[0] - 1, cmd.shape[1] - 1]
scaling_dict = dict()
for i in range(0, cmd.shape[0]):
scaling_dict[cmd[i, 0]] = round(
((cmd[i, 1] - cmd[0, 1]) / (pixel_count - cmd[0, 1])) * 255
)
return scaling_dict
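This appears to be the classic histogram-equalisation lookup: column 0 of cmd holds the pixel value and column 1 the cumulative pixel count, rescaled to the 0-255 range. A tiny worked example with a made-up table:
import numpy as np

# Toy cumulative-distribution table (values invented for illustration).
cmd = np.array([[ 10,  4],
                [ 50, 13],
                [200, 16]])
print(cmd2dict(cmd))   # maps 10 -> 0, 50 -> 191, 200 -> 255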
| 23,217
|
def cached_part(query, cache=None):
"""Get cached part of the query.
Use either supplied cache object or global cache object (default).
In the process, the query is split into two parts: the beginning of the query
and the remainder. The function tries to find the longest possible beginning of the
query which is cached, then returns the cached state and the remainder of the query.
(query == state.query + "/" + remainder)
"""
if cache is None:
cache = get_cache()
if isinstance(
cache, NoCache
): # Just an optimization - to avoid looping over all query splits
return State(), encode(decode(query))
for key, remainder in all_splits(query):
if key == "":
return State(), remainder
if cache.contains(key):
state = cache.get(key)
if state is None:
continue
return state, remainder
# Should never get here, but this is a sensible default:
return State(), encode(decode(query))
| 23,218
|
def exist_key(bucket: str, key: str) -> bool:
"""Exist key or not.
Args:
bucket (str): S3 bucket name.
key (str): Object key.
Returns:
bool: True if the key exists, False otherwise.
"""
try:
s3.Object(bucket, key).get()
except s3.meta.client.exceptions.NoSuchKey:
return False
return True
| 23,219
|
def load_all_files(dir):
"""Returns all of the csharp source files."""
result = []
for root, dirnames, filenames in os.walk(dir):
if 'obj\\' not in root and 'bin\\' not in root:
for name in fnmatch.filter(filenames, '*.cs'):
result.append(SourceFile(os.path.join(root, name)))
return result
| 23,220
|
def glove4gensim(file_dir):
"""
A function that modifies the pretrained GloVe file so it could be integrated with this framework
[Note] You can download the vectors used in this code at
https://nlp.stanford.edu/projects/glove/ (make sure to unzip the files)
:param file_dir: file directory of the downloaded file
e.g., file_dir='/home/USERNAME/embeddings/word2vec/GoogleNews-vectors-negative300.bin'
:return: None
"""
from gensim.scripts.glove2word2vec import glove2word2vec
# load the vectors on gensim
assert file_dir.endswith('.txt'), "For downloaded GloVe, the input file should be a .txt"
glove2word2vec(file_dir,file_dir.replace('.txt','.vec'))
file_dir = file_dir.replace('.txt','.vec')
model = KeyedVectors.load_word2vec_format(file_dir,binary=file_dir.endswith('.bin'))
# save only the .wv part of the model, it's much faster
new_file_dir = file_dir.replace('.vec','.wv')
model.wv.save(new_file_dir)
# delete the intermediate .vec file (file_dir was repointed to it above)
os.remove(file_dir)
print("Removed previous file ",file_dir)
# try loading the new file
model = KeyedVectors.load(new_file_dir, mmap='r')
print("Loaded in gensim! %d word embeddings, %d dimensions"%(len(model.vocab),len(model['a'])))
return
| 23,221
|
def calculate_bleu_score(candidate_file: str, reference_file: str) -> float:
"""
Calculates the average BLEU score of the given files, interpreting each line as a sentence.
Partially taken from https://stackoverflow.com/a/49886758/3918865.
Args:
candidate_file: the name of the file that contains the candidate sentences (hypotheses)
reference_file: the name of the file that contains the reference sentences (targets)
Returns:
the average BLEU score
"""
with open(candidate_file, 'r') as f:
    candidate = f.readlines()
with open(reference_file, 'r') as f:
    reference = f.readlines()
num_candidates = len(candidate)
reference = reference[:num_candidates]
assert len(reference) == len(candidate), 'Make sure there are at least as many references as candidates.'
score = 0.
for i in range(len(reference)):
ref = reference[i].strip()
cand = candidate[i].strip()
score_i = sentence_bleu([ref.split()], cand.split(), weights=(0.5, 0.5))
score += score_i
score /= num_candidates
return score
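A usage sketch with two throwaway files, assuming the sentence_bleu used above is nltk.translate.bleu_score.sentence_bleu:
import os
import tempfile

cand = tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False)
ref = tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False)
cand.write("the cat sat on the mat\n"); cand.close()
ref.write("the cat sat on the mat\n"); ref.close()
print(calculate_bleu_score(cand.name, ref.name))  # identical sentences -> BLEU close to 1.0
os.remove(cand.name); os.remove(ref.name)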
| 23,222
|
def _read_id_not_in_dict(read_ids, read_dict):
"""Return True if all read_ids in a list are not in the read_dict keys, otherwise False"""
for read_id in read_ids:
if read_id not in read_dict:
return True
return False
| 23,223
|
def generate_spectra_products(dataset, prdcfg):
"""
generates spectra products. Accepted product types:
'AMPLITUDE_PHASE_ANGLE_DOPPLER': Makes an angle Doppler plot of
complex spectra or IQ data. The plot can be along azimuth or along
range. The module and the phase of the signal are plotted
separately.
User defined parameters:
along_azi : bool
If true the plot is performed along azimuth, otherwise
along elevation. Default true
ang : float
The fixed angle (deg). Default 0.
rng : float
The fixed range (m). Default 0.
ang_tol : float
The fixed angle tolerance (deg). Default 1.
rng_tol : float
The fixed range tolerance (m). Default 50.
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
ampli_vmin, ampli_vmax, phase_vmin, phase_vmax : float or None
Minimum and maximum of the color scale for the module and
phase
'AMPLITUDE_PHASE_DOPPLER': Plots a complex Doppler spectrum or IQ data
making two separate plots for the module and phase of the signal
User defined parameters:
azi, ele, rng : float
azimuth and elevation (deg) and range (m) of the ray to
plot
azi_tol, ele_tol, rng_tol : float
azimuth and elevation (deg) and range (m) tolerance
respect to nominal position to plot. Default 1, 1, 50.
ind_ray, ind_rng : int
index of the ray and range to plot. Alternative to
defining its antenna coordinates
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
ampli_vmin, ampli_vmax, phase_vmin, phase_vmax : float or None
Minimum and maximum of the color scale for the module and
phase
'AMPLITUDE_PHASE_RANGE_DOPPLER': Plots complex spectra or IQ data
range-Doppler making two separate plots for the module and phase
of the signal.
User defined parameters:
azi, ele : float
azimuth and elevation (deg) of the ray to plot
azi_tol, ele_tol : float
azimuth and elevation (deg) tolerance respect to nominal
position to plot. Default 1, 1.
ind_ray : int
index of the ray to plot. Alternative to
defining its antenna coordinates
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
ampli_vmin, ampli_vmax, phase_vmin, phase_vmax : float or None
Minimum and maximum of the color scale for the module and
phase
'AMPLITUDE_PHASE_TIME_DOPPLER': Plots complex spectra or IQ data
time-Doppler making two separate plots for the module and phase of
the signal
User defined parameters:
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity' or
'Doppler_frequency'
ampli_vmin, ampli_vmax, phase_vmin, phase_vmax : float or None
Minimum and maximum of the color scale for the module and
phase
plot_type : str
Can be 'final' or 'temporal'. If final the data is only
plotted at the end of the processing
'ANGLE_DOPPLER': Makes an angle Doppler plot. The plot can be along
azimuth or along range
User defined parameters:
along_azi : bool
If true the plot is performed along azimuth, otherwise
along elevation. Default true
ang : float
The fixed angle (deg). Default 0.
rng : float
The fixed range (m). Default 0.
ang_tol : float
The fixed angle tolerance (deg). Default 1.
rng_tol : float
The fixed range tolerance (m). Default 50.
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
vmin, vmax : float or None
Minimum and maximum of the color scale
'COMPLEX_ANGLE_DOPPLER': Makes an angle Doppler plot of complex
spectra or IQ data. The plot can be along azimuth or along range.
The real and imaginary parts are plotted separately
User defined parameters:
along_azi : bool
If true the plot is performed along azimuth, otherwise
along elevation. Default true
ang : float
The fixed angle (deg). Default 0.
rng : float
The fixed range (m). Default 0.
ang_tol : float
The fixed angle tolerance (deg). Default 1.
rng_tol : float
The fixed range tolerance (m). Default 50.
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
vmin, vmax : float or None
Minimum and maximum of the color scale
'COMPLEX_DOPPLER': Plots a complex Doppler spectrum or IQ data making
two separate plots for the real and imaginary parts
User defined parameters:
azi, ele, rng : float
azimuth and elevation (deg) and range (m) of the ray to
plot
azi_tol, ele_tol, rng_tol : float
azimuth and elevation (deg) and range (m) tolerance
respect to nominal position to plot. Default 1, 1, 50.
ind_ray, ind_rng : int
index of the ray and range to plot. Alternative to
defining its antenna coordinates
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
vmin, vmax : float or None
Minimum and maximum of the color scale
'COMPLEX_RANGE_DOPPLER': Plots the complex spectra or IQ data
range-Doppler making two separate plots for the real and imaginary
parts
User defined parameters:
azi, ele : float
azimuth and elevation (deg) of the ray to plot
azi_tol, ele_tol : float
azimuth and elevation (deg) tolerance respect to nominal
position to plot. Default 1, 1.
ind_ray : int
index of the ray to plot. Alternative to
defining its antenna coordinates
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
vmin, vmax : float or None
Minimum and maximum of the color scale
'COMPLEX_TIME_DOPPLER': Plots the complex spectra or IQ data
time-Doppler making two separate plots for the real and imaginary
parts
User defined parameters:
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity' or
'Doppler_frequency'
vmin, vmax : float or None
Minimum and maximum of the color scale
plot_type : str
Can be 'final' or 'temporal'. If final the data is only
plotted at the end of the processing
'DOPPLER': Plots a Doppler spectrum variable or IQ data variable
User defined parameters:
azi, ele, rng : float
azimuth and elevation (deg) and range (m) of the ray to
plot
azi_tol, ele_tol, rng_tol : float
azimuth and elevation (deg) and range (m) tolerance
respect to nominal position to plot. Default 1, 1, 50.
ind_ray, ind_rng : int
index of the ray and range to plot. Alternative to
defining its antenna coordinates
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
vmin, vmax : float or None
Minimum and maximum of the color scale
'RANGE_DOPPLER': Makes a range-Doppler plot of spectral or IQ data
User defined parameters:
azi, ele : float
azimuth and elevation (deg) of the ray to plot
azi_tol, ele_tol : float
azimuth and elevation (deg) tolerance respect to nominal
position to plot. Default 1, 1.
ind_ray : int
index of the ray to plot. Alternative to
defining its antenna coordinates
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
vmin, vmax : float or None
Minimum and maximum of the color scale
'SAVEALL': Saves radar spectra or IQ volume data including all or a
list of user-defined fields in a netcdf file
User defined parameters:
datatypes: list of str or None
The list of data types to save. If it is None, all fields
in the radar object will be saved
physical: Bool
If True the data will be saved in physical units (floats).
Otherwise it will be quantized and saved as binary
'SAVEVOL': Saves one field of a radar spectra or IQ volume data in a
netcdf file
User defined parameters:
physical: Bool
If True the data will be saved in physical units (floats).
Otherwise it will be quantized and saved as binary
'TIME_DOPPLER': Makes a time-Doppler plot of spectral or IQ data at a
point of interest.
User defined parameters:
xaxis_info : str
The xaxis type. Can be 'Doppler_velocity',
'Doppler_frequency' or 'pulse_number'
vmin, vmax : float or None
Minimum and maximum of the color scale
plot_type : str
Can be 'final' or 'temporal'. If final the data is only
plotted at the end of the processing
Parameters
----------
dataset : spectra
spectra object
prdcfg : dictionary of dictionaries
product configuration dictionary of dictionaries
Returns
-------
None or name of generated files
"""
dssavedir = prdcfg['dsname']
if 'dssavename' in prdcfg:
dssavedir = prdcfg['dssavename']
if prdcfg['type'] == 'RANGE_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
azi = prdcfg.get('azi', None)
ele = prdcfg.get('ele', None)
azi_tol = prdcfg.get('azi_tol', 1.)
ele_tol = prdcfg.get('ele_tol', 1.)
if azi is None or ele is None:
ind_ray = prdcfg.get('ind_ray', 0)
azi = dataset['radar_out'].azimuth['data'][ind_ray]
ele = dataset['radar_out'].elevation['data'][ind_ray]
else:
ind_ray = find_ray_index(
dataset['radar_out'].elevation['data'],
dataset['radar_out'].azimuth['data'], ele, azi,
ele_tol=ele_tol, azi_tol=azi_tol)
if ind_ray is None:
warn('Ray azi='+str(azi)+', ele='+str(ele) +
' out of radar coverage')
return None
gateinfo = 'az'+'{:.1f}'.format(azi)+'el'+'{:.1f}'.format(ele)
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'range_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if dataset['radar_out'].ngates == 1:
plot_Doppler(
dataset['radar_out'], field_name, ind_ray, 0, prdcfg,
fname_list, xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
else:
plot_range_Doppler(
dataset['radar_out'], field_name, ind_ray, prdcfg, fname_list,
xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'ANGLE_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
along_azi = prdcfg.get('along_azi', True)
ang = prdcfg.get('ang', 0)
rng = prdcfg.get('rng', 0)
ang_tol = prdcfg.get('ang_tol', 1.)
rng_tol = prdcfg.get('rng_tol', 50.)
ind_rng = find_rng_index(
dataset['radar_out'].range['data'], rng, rng_tol=rng_tol)
if ind_rng is None:
warn('No data at rng='+str(rng))
return None
if along_azi:
ind_rays = np.where(np.logical_and(
dataset['radar_out'].elevation['data'] <= ang+ang_tol,
dataset['radar_out'].elevation['data'] >= ang-ang_tol))[0]
else:
ind_rays = np.where(np.logical_and(
dataset['radar_out'].azimuth['data'] <= ang+ang_tol,
dataset['radar_out'].azimuth['data'] >= ang-ang_tol))[0]
if ind_rays.size == 0:
warn('No data for angle '+str(ang))
return None
# sort angles
if along_azi:
ang_selected = dataset['radar_out'].azimuth['data'][ind_rays]
else:
ang_selected = dataset['radar_out'].elevation['data'][ind_rays]
ind_rays = ind_rays[np.argsort(ang_selected)]
if along_azi:
gateinfo = 'azi'+'{:.1f}'.format(ang)+'rng'+'{:.1f}'.format(rng)
else:
gateinfo = 'ele'+'{:.1f}'.format(ang)+'rng'+'{:.1f}'.format(rng)
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'range_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if ind_rays.size == 1:
plot_Doppler(
dataset['radar_out'], field_name, ind_rays, ind_rng, prdcfg,
fname_list, xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
else:
plot_angle_Doppler(
dataset['radar_out'], field_name, ang, ind_rays, ind_rng,
prdcfg, fname_list, xaxis_info=xaxis_info,
along_azi=along_azi, vmin=vmin, vmax=vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'TIME_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
xmin = prdcfg.get('xmin', None)
xmax = prdcfg.get('xmax', None)
ymin = prdcfg.get('ymin', None)
ymax = prdcfg.get('ymax', None)
plot_type = prdcfg.get('plot_type', 'final')
if plot_type == 'final' and not dataset['final']:
return None
if 'antenna_coordinates_az_el_r' in dataset:
az = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][0])
el = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][1])
r = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][2])
gateinfo = ('az'+az+'r'+r+'el'+el)
else:
lon = '{:.3f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][0])
lat = '{:.3f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][1])
alt = '{:.1f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][2])
gateinfo = ('lon'+lon+'lat'+lat+'alt'+alt)
time_info = datetime_from_radar(dataset['radar_out'])
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=time_info)
fname_list = make_filename(
'time_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=time_info, runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if dataset['radar_out'].nrays == 1:
plot_Doppler(
dataset['radar_out'], field_name, 0, 0, prdcfg, fname_list,
xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
else:
plot_time_Doppler(
dataset['radar_out'], field_name, prdcfg, fname_list,
xaxis_info=xaxis_info, vmin=vmin, vmax=vmax, xmin=xmin,
xmax=xmax, ymin=ymin, ymax=ymax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
azi = prdcfg.get('azi', None)
ele = prdcfg.get('ele', None)
rng = prdcfg.get('rng', None)
azi_tol = prdcfg.get('azi_tol', 1.)
ele_tol = prdcfg.get('ele_tol', 1.)
rng_tol = prdcfg.get('rng_tol', 50.)
if azi is None or ele is None or rng is None:
ind_ray = prdcfg.get('ind_ray', 0)
ind_rng = prdcfg.get('ind_rng', 0)
azi = dataset['radar_out'].azimuth['data'][ind_ray]
ele = dataset['radar_out'].elevation['data'][ind_ray]
rng = dataset['radar_out'].range['data'][ind_rng]
else:
ind_ray = find_ray_index(
dataset['radar_out'].elevation['data'],
dataset['radar_out'].azimuth['data'], ele, azi,
ele_tol=ele_tol, azi_tol=azi_tol)
ind_rng = find_rng_index(
dataset['radar_out'].range['data'], rng, rng_tol=rng_tol)
if ind_rng is None or ind_ray is None:
warn('Point azi='+str(azi)+', ele='+str(ele)+', rng='+str(rng) +
' out of radar coverage')
return None
gateinfo = (
'az'+'{:.1f}'.format(azi)+'el'+'{:.1f}'.format(ele) +
'r'+'{:.1f}'.format(rng))
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
plot_Doppler(
dataset['radar_out'], field_name, ind_ray, ind_rng, prdcfg,
fname_list, xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'COMPLEX_RANGE_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
azi = prdcfg.get('azi', None)
ele = prdcfg.get('ele', None)
azi_tol = prdcfg.get('azi_tol', 1.)
ele_tol = prdcfg.get('ele_tol', 1.)
if azi is None or ele is None:
ind_ray = prdcfg.get('ind_ray', 0)
azi = dataset['radar_out'].azimuth['data'][ind_ray]
ele = dataset['radar_out'].elevation['data'][ind_ray]
else:
ind_ray = find_ray_index(
dataset['radar_out'].elevation['data'],
dataset['radar_out'].azimuth['data'], ele, azi,
ele_tol=ele_tol, azi_tol=azi_tol)
if ind_ray is None:
warn('Ray azi='+str(azi)+', ele='+str(ele) +
' out of radar coverage')
return None
gateinfo = 'az'+'{:.1f}'.format(azi)+'el'+'{:.1f}'.format(ele)
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'c_range_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if dataset['radar_out'].ngates == 1:
plot_complex_Doppler(
dataset['radar_out'], field_name, ind_ray, 0, prdcfg,
fname_list, xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
else:
plot_complex_range_Doppler(
dataset['radar_out'], field_name, ind_ray, prdcfg, fname_list,
xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'COMPLEX_ANGLE_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
along_azi = prdcfg.get('along_azi', True)
ang = prdcfg.get('ang', 0)
rng = prdcfg.get('rng', 0)
ang_tol = prdcfg.get('ang_tol', 1.)
rng_tol = prdcfg.get('rng_tol', 50.)
ind_rng = find_rng_index(
dataset['radar_out'].range['data'], rng, rng_tol=rng_tol)
if ind_rng is None:
warn('No data at rng='+str(rng))
return None
if along_azi:
ind_rays = np.where(np.logical_and(
dataset['radar_out'].elevation['data'] <= ang+ang_tol,
dataset['radar_out'].elevation['data'] >= ang-ang_tol))[0]
else:
ind_rays = np.where(np.logical_and(
dataset['radar_out'].azimuth['data'] <= ang+ang_tol,
dataset['radar_out'].azimuth['data'] >= ang-ang_tol))[0]
if ind_rays.size == 0:
warn('No data for angle '+str(ang))
return None
# sort angles
if along_azi:
ang_selected = dataset['radar_out'].azimuth['data'][ind_rays]
else:
ang_selected = dataset['radar_out'].elevation['data'][ind_rays]
ind_rays = ind_rays[np.argsort(ang_selected)]
if along_azi:
gateinfo = 'azi'+'{:.1f}'.format(ang)+'rng'+'{:.1f}'.format(rng)
else:
gateinfo = 'ele'+'{:.1f}'.format(ang)+'rng'+'{:.1f}'.format(rng)
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'range_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if ind_rays.size == 1:
plot_complex_Doppler(
dataset['radar_out'], field_name, ind_rays, ind_rng, prdcfg,
fname_list, xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
else:
plot_complex_angle_Doppler(
dataset['radar_out'], field_name, ang, ind_rays, ind_rng,
prdcfg, fname_list, xaxis_info=xaxis_info,
along_azi=along_azi, vmin=vmin, vmax=vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'COMPLEX_TIME_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
plot_type = prdcfg.get('plot_type', 'final')
if plot_type == 'final' and not dataset['final']:
return None
if 'antenna_coordinates_az_el_r' in dataset:
az = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][0])
el = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][1])
r = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][2])
gateinfo = ('az'+az+'r'+r+'el'+el)
else:
lon = '{:.3f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][0])
lat = '{:.3f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][1])
alt = '{:.1f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][2])
gateinfo = ('lon'+lon+'lat'+lat+'alt'+alt)
time_info = datetime_from_radar(dataset['radar_out'])
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=time_info)
fname_list = make_filename(
'c_time_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=time_info, runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if dataset['radar_out'].nrays == 1:
plot_complex_Doppler(
dataset['radar_out'], field_name, 0, 0, prdcfg, fname_list,
xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
else:
plot_complex_time_Doppler(
dataset['radar_out'], field_name, prdcfg, fname_list,
xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'COMPLEX_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
azi = prdcfg.get('azi', None)
ele = prdcfg.get('ele', None)
rng = prdcfg.get('rng', None)
azi_tol = prdcfg.get('azi_tol', 1.)
ele_tol = prdcfg.get('ele_tol', 1.)
rng_tol = prdcfg.get('rng_tol', 50.)
if azi is None or ele is None or rng is None:
ind_ray = prdcfg.get('ind_ray', 0)
ind_rng = prdcfg.get('ind_rng', 0)
azi = dataset['radar_out'].azimuth['data'][ind_ray]
ele = dataset['radar_out'].elevation['data'][ind_ray]
rng = dataset['radar_out'].range['data'][ind_rng]
else:
ind_ray = find_ray_index(
dataset['radar_out'].elevation['data'],
dataset['radar_out'].azimuth['data'], ele, azi,
ele_tol=ele_tol, azi_tol=azi_tol)
ind_rng = find_rng_index(
dataset['radar_out'].range['data'], rng, rng_tol=rng_tol)
if ind_rng is None or ind_ray is None:
warn('Point azi='+str(azi)+', ele='+str(ele)+', rng='+str(rng) +
' out of radar coverage')
return None
gateinfo = (
'az'+'{:.1f}'.format(azi)+'el'+'{:.1f}'.format(ele) +
'r'+'{:.1f}'.format(rng))
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
vmin = prdcfg.get('vmin', None)
vmax = prdcfg.get('vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'c_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
plot_complex_Doppler(
dataset['radar_out'], field_name, ind_ray, ind_rng, prdcfg,
fname_list, xaxis_info=xaxis_info, vmin=vmin, vmax=vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'AMPLITUDE_PHASE_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
azi = prdcfg.get('azi', None)
ele = prdcfg.get('ele', None)
rng = prdcfg.get('rng', None)
azi_tol = prdcfg.get('azi_tol', 1.)
ele_tol = prdcfg.get('ele_tol', 1.)
rng_tol = prdcfg.get('rng_tol', 50.)
if azi is None or ele is None or rng is None:
ind_ray = prdcfg.get('ind_ray', 0)
ind_rng = prdcfg.get('ind_rng', 0)
azi = dataset['radar_out'].azimuth['data'][ind_ray]
ele = dataset['radar_out'].elevation['data'][ind_ray]
rng = dataset['radar_out'].range['data'][ind_rng]
else:
ind_ray = find_ray_index(
dataset['radar_out'].elevation['data'],
dataset['radar_out'].azimuth['data'], ele, azi,
ele_tol=ele_tol, azi_tol=azi_tol)
ind_rng = find_rng_index(
dataset['radar_out'].range['data'], rng, rng_tol=rng_tol)
if ind_rng is None or ind_ray is None:
warn('Point azi='+str(azi)+', ele='+str(ele)+', rng='+str(rng) +
' out of radar coverage')
return None
gateinfo = (
'az'+'{:.1f}'.format(azi)+'el'+'{:.1f}'.format(ele) +
'r'+'{:.1f}'.format(rng))
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
ampli_vmin = prdcfg.get('ampli_vmin', None)
ampli_vmax = prdcfg.get('ampli_vmax', None)
phase_vmin = prdcfg.get('phase_vmin', None)
phase_vmax = prdcfg.get('phase_vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'ap_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
plot_amp_phase_Doppler(
dataset['radar_out'], field_name, ind_ray, ind_rng, prdcfg,
fname_list, xaxis_info=xaxis_info, ampli_vmin=ampli_vmin,
ampli_vmax=ampli_vmax, phase_vmin=phase_vmin,
phase_vmax=phase_vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'AMPLITUDE_PHASE_RANGE_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
azi = prdcfg.get('azi', None)
ele = prdcfg.get('ele', None)
azi_tol = prdcfg.get('azi_tol', 1.)
ele_tol = prdcfg.get('ele_tol', 1.)
if azi is None or ele is None:
ind_ray = prdcfg.get('ind_ray', 0)
azi = dataset['radar_out'].azimuth['data'][ind_ray]
ele = dataset['radar_out'].elevation['data'][ind_ray]
else:
ind_ray = find_ray_index(
dataset['radar_out'].elevation['data'],
dataset['radar_out'].azimuth['data'], ele, azi,
ele_tol=ele_tol, azi_tol=azi_tol)
if ind_ray is None:
warn('Ray azi='+str(azi)+', ele='+str(ele) +
' out of radar coverage')
return None
gateinfo = 'az'+'{:.1f}'.format(azi)+'el'+'{:.1f}'.format(ele)
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
ampli_vmin = prdcfg.get('ampli_vmin', None)
ampli_vmax = prdcfg.get('ampli_vmax', None)
phase_vmin = prdcfg.get('phase_vmin', None)
phase_vmax = prdcfg.get('phase_vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'ap_range_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if dataset['radar_out'].ngates == 1:
plot_amp_phase_Doppler(
dataset['radar_out'], field_name, ind_ray, 0, prdcfg,
fname_list, xaxis_info=xaxis_info, ampli_vmin=ampli_vmin,
ampli_vmax=ampli_vmax, phase_vmin=phase_vmin,
phase_vmax=phase_vmax)
else:
plot_amp_phase_range_Doppler(
dataset['radar_out'], field_name, ind_ray, prdcfg, fname_list,
xaxis_info=xaxis_info, ampli_vmin=ampli_vmin,
ampli_vmax=ampli_vmax, phase_vmin=phase_vmin,
phase_vmax=phase_vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'AMPLITUDE_PHASE_ANGLE_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
along_azi = prdcfg.get('along_azi', True)
ang = prdcfg.get('ang', 0)
rng = prdcfg.get('rng', 0)
ang_tol = prdcfg.get('ang_tol', 1.)
rng_tol = prdcfg.get('rng_tol', 50.)
ind_rng = find_rng_index(
dataset['radar_out'].range['data'], rng, rng_tol=rng_tol)
if ind_rng is None:
warn('No data at rng='+str(rng))
return None
if along_azi:
ind_rays = np.where(np.logical_and(
dataset['radar_out'].elevation['data'] <= ang+ang_tol,
dataset['radar_out'].elevation['data'] >= ang-ang_tol))[0]
else:
ind_rays = np.where(np.logical_and(
dataset['radar_out'].azimuth['data'] <= ang+ang_tol,
dataset['radar_out'].azimuth['data'] >= ang-ang_tol))[0]
if ind_rays.size == 0:
warn('No data for angle '+str(ang))
return None
# sort angles
if along_azi:
ang_selected = dataset['radar_out'].azimuth['data'][ind_rays]
else:
ang_selected = dataset['radar_out'].elevation['data'][ind_rays]
ind_rays = ind_rays[np.argsort(ang_selected)]
if along_azi:
gateinfo = 'azi'+'{:.1f}'.format(ang)+'rng'+'{:.1f}'.format(rng)
else:
gateinfo = 'ele'+'{:.1f}'.format(ang)+'rng'+'{:.1f}'.format(rng)
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
ampli_vmin = prdcfg.get('ampli_vmin', None)
ampli_vmax = prdcfg.get('ampli_vmax', None)
phase_vmin = prdcfg.get('phase_vmin', None)
phase_vmax = prdcfg.get('phase_vmax', None)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname_list = make_filename(
'range_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if ind_rays.size == 1:
plot_amp_phase_Doppler(
dataset['radar_out'], field_name, ind_rays, ind_rng, prdcfg,
fname_list, xaxis_info=xaxis_info, ampli_vmin=ampli_vmin,
ampli_vmax=ampli_vmax, phase_vmin=phase_vmin,
phase_vmax=phase_vmax)
else:
plot_amp_phase_angle_Doppler(
dataset['radar_out'], field_name, ang, ind_rays, ind_rng,
prdcfg, fname_list, xaxis_info=xaxis_info,
along_azi=along_azi, ampli_vmin=ampli_vmin,
ampli_vmax=ampli_vmax, phase_vmin=phase_vmin,
phase_vmax=phase_vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'AMPLITUDE_PHASE_TIME_DOPPLER':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
# user defined values
xaxis_info = prdcfg.get('xaxis_info', 'Doppler_velocity')
ampli_vmin = prdcfg.get('ampli_vmin', None)
ampli_vmax = prdcfg.get('ampli_vmax', None)
phase_vmin = prdcfg.get('phase_vmin', None)
phase_vmax = prdcfg.get('phase_vmax', None)
plot_type = prdcfg.get('plot_type', 'final')
if plot_type == 'final' and not dataset['final']:
return None
if 'antenna_coordinates_az_el_r' in dataset:
az = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][0])
el = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][1])
r = '{:.1f}'.format(dataset['antenna_coordinates_az_el_r'][2])
gateinfo = ('az'+az+'r'+r+'el'+el)
else:
lon = '{:.3f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][0])
lat = '{:.3f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][1])
alt = '{:.1f}'.format(
dataset['point_coordinates_WGS84_lon_lat_alt'][2])
gateinfo = ('lon'+lon+'lat'+lat+'alt'+alt)
time_info = datetime_from_radar(dataset['radar_out'])
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=time_info)
fname_list = make_filename(
'ap_time_Doppler', prdcfg['dstype'], prdcfg['voltype'],
prdcfg['imgformat'], prdcfginfo=gateinfo,
timeinfo=time_info, runinfo=prdcfg['runinfo'])
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
if dataset['radar_out'].nrays == 1:
plot_amp_phase_Doppler(
dataset['radar_out'], field_name, 0, 0, prdcfg, fname_list,
xaxis_info=xaxis_info, ampli_vmin=ampli_vmin,
ampli_vmax=ampli_vmax, phase_vmin=phase_vmin,
phase_vmax=phase_vmax)
else:
plot_amp_phase_time_Doppler(
dataset['radar_out'], field_name, prdcfg, fname_list,
xaxis_info=xaxis_info, ampli_vmin=ampli_vmin,
ampli_vmax=ampli_vmax, phase_vmin=phase_vmin,
phase_vmax=phase_vmax)
print('----- save to '+' '.join(fname_list))
return fname_list
if prdcfg['type'] == 'SAVEVOL':
field_name = get_fieldname_pyart(prdcfg['voltype'])
if field_name not in dataset['radar_out'].fields:
warn(
' Field type ' + field_name +
' not available in data set. Skipping product ' +
prdcfg['type'])
return None
file_type = prdcfg.get('file_type', 'nc')
physical = prdcfg.get('physical', True)
new_dataset = deepcopy(dataset['radar_out'])
new_dataset.fields = dict()
new_dataset.add_field(
field_name, dataset['radar_out'].fields[field_name])
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname = make_filename(
'savevol', prdcfg['dstype'], prdcfg['voltype'], [file_type],
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])[0]
fname = savedir+fname
pyart.aux_io.write_spectra(fname, new_dataset, physical=physical)
print('saved file: '+fname)
return fname
if prdcfg['type'] == 'SAVEALL':
file_type = prdcfg.get('file_type', 'nc')
datatypes = prdcfg.get('datatypes', None)
physical = prdcfg.get('physical', True)
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=prdcfg['timeinfo'])
fname = make_filename(
'savevol', prdcfg['dstype'], 'all_fields', [file_type],
timeinfo=prdcfg['timeinfo'], runinfo=prdcfg['runinfo'])[0]
fname = savedir+fname
field_names = None
if datatypes is not None:
field_names = []
for datatype in datatypes:
field_names.append(get_fieldname_pyart(datatype))
if field_names is not None:
radar_aux = deepcopy(dataset['radar_out'])
radar_aux.fields = dict()
for field_name in field_names:
if field_name not in dataset['radar_out'].fields:
warn(field_name+' not in radar object')
else:
radar_aux.add_field(
field_name,
dataset['radar_out'].fields[field_name])
else:
radar_aux = dataset['radar_out']
pyart.aux_io.write_spectra(fname, radar_aux, physical=physical)
print('saved file: '+fname)
return fname
warn(' Unsupported product type: ' + prdcfg['type'])
return None
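For orientation, an illustrative prdcfg for the 'DOPPLER' branch above. The key set is inferred from the prdcfg lookups in that branch; the concrete values (paths, data type names) are placeholders, not taken from the source:
prdcfg_example = {
    'type': 'DOPPLER',
    'dsname': 'spectra_dataset',
    'dstype': 'SPECTRA',
    'voltype': 'sPhZ',              # placeholder data type name
    'basepath': '/data/products/',
    'procname': 'proc',
    'prdname': 'doppler_plot',
    'imgformat': ['png'],
    'timeinfo': None,               # normally a datetime supplied by the framework
    'runinfo': '',
    # user defined values read by the DOPPLER branch
    'azi': 0., 'ele': 10., 'rng': 5000.,
    'azi_tol': 1., 'ele_tol': 1., 'rng_tol': 50.,
    'xaxis_info': 'Doppler_velocity',
    'vmin': None, 'vmax': None,
}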
| 23,224
|
def odds_or_evens(my_bool, nums):
"""Returns all of the odd or
even numbers from a list"""
return_list = []
for num in nums:
if my_bool:
if num % 2 == 0:
return_list.append(num)
else:
if num % 2 != 0:
return_list.append(num)
return return_list
| 23,225
|
def configure_db():
"""Configure database.
Establish the database, create an engine if needed, and register
the models.
"""
global _DB_ENGINE
if not _DB_ENGINE:
_DB_ENGINE = session.get_engine(sqlite_fk=True)
register_models()
| 23,226
|
def p_ir_header(p):
# type: (YaccProduction) -> None
"""
ir-header : ir-header-decl ir-header
"""
p[0] = [p[1]] + p[2]
| 23,227
|
def demo(event=None, context=None):
"""Shows how to use the role functions."""
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
print('-'*80)
print("AWS Identity and Account Management role cleanup.")
print('-'*80)
role = list_roles()
print("Roles which have not been used: ", len(role))
for r in role or []:
#remove-role-from-instance-profile
remove_instanceprofile_role(r)
#detach-role-policy
detach_policy(r)
#delete-role-policy
delete_role_policy(r)
delete_role(r)
print("Deleted ", r)
print("Thanks for using!")
| 23,228
|
def load_tas_lookup():
"""Load/update the TAS table to reflect the latest list."""
logger.info('Loading TAS')
loadTas()
| 23,229
|
def local_mass_diagonal(quad_data, basis):
"""Constructs the elemental mass matrix, diagonal version
Arguments:
quad_data - Quadrature points and weights
basis - Basis and respective derivatives
Returns:
Mass matrix M, where m_ii = \int_k psi_i psi_i
"""
return numpy.sum(quad_data.w*basis.psi.T**2, axis=1)
| 23,230
|
def xtrct_grid(
input_path: Path,
output_path: Path,
xmin: float,
xmax: float,
ymin: float,
ymax: float,
n_levels: int = None,
):
"""Export a cropped spatial subdomain from an ARL packed file."""
# Requires >=0.4 degree padding between the parent domain extent and crop domain
# extent on all sides or will fail reporting:
# At line 386 of file ../source/xtrct_grid.f
# Fortran runtime error: Bad value during integer read
input_dirname = _enforce_trailing_slash(str(input_path.parent))
input_basename = input_path.name
# Fetch number of vertical levels contained in file from header.
if not n_levels:
with open(input_path, "rb") as f:
metadata = f.read(166)
n_levels = int(metadata[149:152])
stdin = "\n".join(
(
input_dirname,
input_basename,
f"{ymin} {xmin}",
f"{ymax} {xmax}",
f"{n_levels}",
"",
)
).encode("utf-8")
with tempfile.TemporaryDirectory() as xtrct_wd:
logger.debug(f"Executing xtrct_grid: {input_basename}")
proc = subprocess.run(
XTRCT_GRID,
input=stdin,
cwd=xtrct_wd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
logger.info(f"Successfully executed xtrct_grid: {input_basename}")
if proc.returncode != 0:
raise XtrctGridException(proc.stdout + proc.stderr)
# Default filename output by xtrct_grid fortran utility.
extract_bin = Path(xtrct_wd) / "extract.bin"
extract_bin.rename(output_path)
| 23,231
|
def create_saved_group(uuid=None):
"""Create and save a Sample Group with all the fixings (plus gravy)."""
if uuid is None:
uuid = uuid4()
analysis_result = AnalysisResultMeta().save()
group_description = 'Includes factory-produced analysis results from all display_modules'
sample_group = SampleGroup(name='Fuzz Testing',
analysis_result=analysis_result,
description=group_description)
sample_group.id = uuid
db.session.add(sample_group)
db.session.commit()
# Add the results
analysis_result.average_genome_size = wrap_result(AGSFactory())
analysis_result.card_amr_genes = wrap_result(CARDGenesFactory())
analysis_result.functional_genes = wrap_result(FunctionalGenesFactory())
analysis_result.hmp = wrap_result(HMPFactory())
analysis_result.macrobe_abundance = wrap_result(MacrobeFactory())
analysis_result.methyltransferases = wrap_result(MethylsFactory())
analysis_result.microbe_directory = wrap_result(MicrobeDirectoryFactory())
analysis_result.pathways = wrap_result(PathwayFactory())
analysis_result.read_stats = wrap_result(ReadStatsFactory())
analysis_result.reads_classified = wrap_result(ReadsClassifiedFactory())
analysis_result.sample_similarity = wrap_result(create_mvp_sample_similarity())
# analysis_result.taxon_abundance =
analysis_result.virulence_factors = wrap_result(VFDBFactory())
analysis_result.save()
return sample_group
| 23,232
|
def words_to_indexes(tree):
"""Return a new tree based on the original tree, such that the leaf values
    are replaced by their indexes."""
out = copy.deepcopy(tree)
leaves = out.leaves()
for index in range(0, len(leaves)):
path = out.leaf_treeposition(index)
out[path] = index + 1
return out
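# Hedged usage sketch, assuming an nltk.Tree input (nltk is an assumption
# here; any tree exposing leaves() and leaf_treeposition() should work):
from nltk import Tree
t = Tree.fromstring("(S (NP the dog) (VP barks))")
print(t.leaves())           # ['the', 'dog', 'barks']
print(words_to_indexes(t))  # (S (NP 1 2) (VP 3))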
| 23,233
|
def get_stoplist_names():
"""Return list of stoplist names"""
config = configuration()
return [name for name, value in config.items('stoplists')]
| 23,234
|
def any_(criterions):
"""Return a stop criterion that given a list `criterions` of stop criterions
only returns True, if any of the criterions returns True.
This basically implements a logical OR for stop criterions.
"""
def inner(info):
return any(c(info) for c in criterions)
return inner
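# Hedged usage sketch: the criterions below are illustrative lambdas that
# inspect a hypothetical `info` dict carrying 'n_iter' and 'loss' keys.
stop = any_([lambda info: info['n_iter'] >= 100,
             lambda info: info['loss'] < 1e-3])
print(stop({'n_iter': 10, 'loss': 0.5}))   # False
print(stop({'n_iter': 120, 'loss': 0.5}))  # True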
| 23,235
|
def make_movie_noah(imgname, movname, indexsz='05', framerate=10, imgdir=None, rm_images=False,
save_into_subdir=False, start_number=0, framestep=1):
"""Create a movie from a sequence of images using the ffmpeg supplied with ilpm.
Options allow for deleting folder automatically after making movie.
Will run './ffmpeg', '-framerate', str(int(framerate)), '-i', imgname + '%' + indexsz + 'd.png', movname + '.mov',
'-vcodec', 'libx264', '-profile:v', 'main', '-crf', '12', '-threads', '0', '-r', '100', '-pix_fmt', 'yuv420p'])
Parameters
----------
imgname : str
path and filename for the images to turn into a movie
movname : str
path and filename for output movie
indexsz : str
string specifier for the number of indices at the end of each image (ie 'file_000.png' would merit '03')
framerate : int (float may be allowed)
The frame rate at which to write the movie
imgdir : str or None
folder to delete if rm_images and save_into_subdir are both True, ie folder containing the images
rm_images : bool
Remove the images from disk after writing to movie
save_into_subdir : bool
The images are saved into a folder which can be deleted after writing to a movie, if rm_images is True and
imgdir is not None
"""
subprocess.call(
['/Users/stephane/Documents/git/takumi/library/image_processing/ffmpeg',
'-framerate', str(int(framerate)),
'-start_number', str(start_number),
'-i', imgname + '%' + indexsz + 'd.png',
movname + '.mov',
'-vcodec', 'libx264', '-profile:v', 'main', '-crf', '12', '-threads', '0', '-r', '100', '-pix_fmt', 'yuv420p'])
# Delete the original images
if rm_images:
print('Deleting the original images...')
if save_into_subdir and imgdir is not None:
print('Deleting folder ' + imgdir)
subprocess.call(['rm', '-r', imgdir])
else:
print('Deleting folder contents ' + imgdir + imgname + '*.png')
            # Use a shell so the wildcard is expanded; a plain argv list would not glob.
            subprocess.call('rm -r ' + imgdir + imgname + '*.png', shell=True)
| 23,236
|
def validate_basic(params, length, allow_infnan=False, title=None):
"""
Validate parameter vector for basic correctness.
Parameters
----------
params : array_like
Array of parameters to validate.
length : int
Expected length of the parameter vector.
allow_infnan : bool, optional
Whether or not to allow `params` to contain -np.Inf, np.Inf, and
np.nan. Default is False.
title : str, optional
Description of the parameters (e.g. "autoregressive") to use in error
messages.
Returns
-------
params : ndarray
Array of validated parameters.
Notes
-----
Basic check that the parameters are numeric and that they are the right
shape. Optionally checks for NaN / infinite values.
"""
title = '' if title is None else ' for %s' % title
# Check for invalid type and coerce to non-integer
try:
params = np.array(params, dtype=object)
is_complex = [isinstance(p, complex) for p in params.ravel()]
dtype = complex if any(is_complex) else float
params = np.array(params, dtype=dtype)
except TypeError:
raise ValueError('Parameters vector%s includes invalid values.'
% title)
# Check for NaN, inf
if not allow_infnan and (np.any(np.isnan(params)) or
np.any(np.isinf(params))):
raise ValueError('Parameters vector%s includes NaN or Inf values.'
% title)
params = np.atleast_1d(np.squeeze(params))
# Check for right number of parameters
if params.shape != (length,):
plural = '' if length == 1 else 's'
raise ValueError('Specification%s implies %d parameter%s, but'
' values with shape %s were provided.'
% (title, length, plural, params.shape))
return params
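# Hedged usage sketch (illustrative values):
print(validate_basic([0.5, -0.2], length=2))  # [ 0.5 -0.2]
# validate_basic([0.5, np.nan], length=2)                  -> ValueError (NaN not allowed)
# validate_basic([0.5], length=2, title="autoregressive")  -> ValueError (wrong length)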
| 23,237
|
def unsorted_segment_sum(data, segment_ids, num_segments):
"""
Computes the sum along segments of a tensor. Analogous to tf.unsorted_segment_sum.
:param data: A tensor whose segments are to be summed.
:param segment_ids: The segment indices tensor.
:param num_segments: The number of segments.
:return: A tensor of same data type as the data argument.
"""
assert all([i in data.shape for i in segment_ids.shape]), "segment_ids.shape should be a prefix of data.shape"
# segment_ids is a 1-D tensor repeat it to have the same shape as data
if len(segment_ids.shape) == 1:
s = torch.prod(torch.tensor(data.shape[1:])).long()
segment_ids = segment_ids.repeat_interleave(s).view(segment_ids.shape[0], *data.shape[1:])
assert data.shape == segment_ids.shape, "data.shape and segment_ids.shape should be equal"
shape = [num_segments] + list(data.shape[1:])
tensor = torch.zeros(*shape).scatter_add(0, segment_ids, data.float())
tensor = tensor.type(data.dtype)
return tensor
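# Hedged usage sketch mirroring tf.unsorted_segment_sum semantics
# (assumes torch is already imported, as in the function above):
data = torch.tensor([[1., 2.], [3., 4.], [5., 6.]])
segment_ids = torch.tensor([0, 1, 0])
print(unsorted_segment_sum(data, segment_ids, num_segments=2))
# tensor([[6., 8.],
#         [3., 4.]])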
| 23,238
|
def plot_classmap(VGGCAM_weight_path, img_path, label,
nb_classes, num_input_channels=1024, ratio=16):
"""
Plot class activation map of trained VGGCAM model
args: VGGCAM_weight_path (str) path to trained keras VGGCAM weights
img_path (str) path to the image for which we get the activation map
label (int) label (0 to nb_classes-1) of the class activation map to plot
nb_classes (int) number of classes
num_input_channels (int) number of conv filters to add
in before the GAP layer
ratio (int) upsampling ratio (16 * 14 = 224)
"""
# Load and compile model
model = VGGCAM(nb_classes, num_input_channels)
model.load_weights(VGGCAM_weight_path)
model.compile(loss="categorical_crossentropy", optimizer="sgd")
# Load and format data
im = cv2.resize(cv2.imread(img_path), (224, 224)).astype(np.float32)
# Get a copy of the original image
im_ori = im.copy().astype(np.uint8)
# VGG model normalisations
im[:,:,0] -= 103.939
im[:,:,1] -= 116.779
im[:,:,2] -= 123.68
im = im.transpose((2,0,1))
batch_size = 1
classmap = get_classmap(model,
im.reshape(1, 3, 224, 224),
nb_classes,
batch_size,
num_input_channels=num_input_channels,
ratio=ratio)
plt.imshow(im_ori)
plt.imshow(classmap[0, label, :, :],
cmap="jet",
alpha=0.5,
interpolation='nearest')
plt.show()
raw_input()
| 23,239
|
def create_parameters(address: str) -> dict:
"""Create parameters for address.
this function create parameters for having request from geocoder
and than return dictionary of parameters
Args:
address (str): the address for create parameters
Returns:
dict: takes the api key and Geocode from an other class and returns the dictionary
"""
address_to_string = address.replace(" ", "+")
params = {'apikey': developer_key,
'geocode': address_to_string}
return params
| 23,240
|
def test_remove_connected_item():
"""Test adding canvas constraint."""
canvas = Canvas()
from gaphas.aspect import ConnectionSink, Connector
l1 = Line(canvas.connections)
canvas.add(l1)
b1 = Box(canvas.connections)
canvas.add(b1)
number_cons1 = len(canvas.solver.constraints)
b2 = Box(canvas.connections)
canvas.add(b2)
number_cons2 = len(canvas.solver.constraints)
conn = Connector(l1, l1.handles()[0], canvas.connections)
sink = ConnectionSink(b1)
conn.connect(sink)
assert canvas.connections.get_connection(l1.handles()[0])
conn = Connector(l1, l1.handles()[1], canvas.connections)
sink = ConnectionSink(b2)
conn.connect(sink)
assert canvas.connections.get_connection(l1.handles()[1])
assert number_cons2 + 2 == len(canvas.solver.constraints)
canvas.remove(b1)
# Expecting a class + line connected at one end only
assert number_cons1 + 1 == len(canvas.solver.constraints)
| 23,241
|
def make_default_storage_backend_connected_handler():
"""Set the default storage-backend.connected state so that the default
handler in layer-openstack can run.
Convoluted, because charms.reactive will only run handlers in the reactive
or hooks directory.
"""
reactive.set_state('charms.openstack.do-default-storage-backend.connected')
| 23,242
|
def convert_upsample(builder, layer, input_names, output_names, keras_layer):
"""Convert convolution layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
# Get input and output names
input_name, output_name = (input_names[0], output_names[0])
if isinstance(keras_layer, keras.layers.convolutional.UpSampling1D):
fh, fw = 1, keras_layer.length
else: # 2D
fh, fw = keras_layer.size
builder.add_upsample(
name=layer,
scaling_factor_h=fh,
scaling_factor_w=fw,
input_name=input_name,
output_name=output_name,
)
| 23,243
|
def standardizeName(name):
"""
Remove stuff not used by bngl
"""
sbml2BnglTranslationDict = {
"^": "",
"'": "",
"*": "m",
" ": "_",
"#": "sh",
":": "_",
"α": "a",
"β": "b",
"γ": "g",
" ": "",
"+": "pl",
"/": "_",
":": "_",
"-": "_",
".": "_",
"?": "unkn",
",": "_",
"(": "",
")": "",
"[": "",
"]": "",
# "(": "__",
# ")": "__",
# "[": "__",
# "]": "__",
">": "_",
"<": "_",
}
for element in sbml2BnglTranslationDict:
name = name.replace(element, sbml2BnglTranslationDict[element])
name = re.sub("[\W]", "", name)
return name
| 23,244
|
def _learning_rate_decay(hparams, warmup_steps=0):
"""Learning rate decay multiplier."""
scheme = hparams.learning_rate_decay_scheme
warmup_steps = tf.to_float(warmup_steps)
global_step = tf.to_float(tf.train.get_or_create_global_step())
if not scheme or scheme == "none":
return tf.constant(1.)
tf.logging.info("Applying learning rate decay: %s.", scheme)
if scheme == "exp":
decay_steps = hparams.learning_rate_decay_steps
p = (global_step - warmup_steps) / decay_steps
if hparams.learning_rate_decay_staircase:
p = tf.floor(p)
return tf.pow(hparams.learning_rate_decay_rate, p)
if scheme == "piecewise":
return _piecewise_learning_rate(global_step,
hparams.learning_rate_boundaries,
hparams.learning_rate_multiples)
if scheme == "cosine":
cycle_steps = hparams.learning_rate_cosine_cycle_steps
cycle_position = global_step % (2 * cycle_steps)
cycle_position = cycle_steps - tf.abs(cycle_steps - cycle_position)
return 0.5 * (1 + tf.cos(np.pi * cycle_position / cycle_steps))
if scheme == "cyclelinear10x":
# Cycle the rate linearly by 10x every warmup_steps, up and down.
cycle_steps = warmup_steps
cycle_position = global_step % (2 * cycle_steps)
cycle_position = tf.to_float( # Normalize to the interval [-1, 1].
cycle_position - cycle_steps) / float(cycle_steps)
cycle_position = 1.0 - tf.abs(cycle_position) # 0 to 1 and back to 0.
return (cycle_position + 0.1) * 3.0 # 10x difference each cycle (0.3-3).
if scheme == "sqrt":
return _legacy_sqrt_decay(global_step - warmup_steps)
raise ValueError("Unrecognized learning rate decay scheme: %s" %
hparams.learning_rate_decay_scheme)
| 23,245
|
def nextjs_build_action(ctx, srcs, out):
"""Run a production build of the vite project
Args:
ctx: arguments description, can be
multiline with additional indentation.
srcs: source files
out: output directory
"""
# setup the args passed to vite
launcher_args = ctx.actions.args()
launcher_args.add_all([
"build",
])
launcher_args.add_all(ctx.attr.args)
outputs = []
outputs.append(out)
execution_requirements = {}
if "no-remote-exec" in ctx.attr.tags:
execution_requirements = {"no-remote-exec": "1"}
run_node(
ctx = ctx,
# progress_message = "Building nextjs project %s [nextjs]" % outputs[0].short_path,
inputs = depset(srcs),
outputs = outputs,
arguments = [launcher_args],
# execution_requirements = execution_requirements,
mnemonic = "next",
executable = "_next",
# link_workspace_root = True,
env = {"BUILD_DIR": out.path},
)
| 23,246
|
def new_model(save_dir, integer_tokens, batch_size=128,
vocab_size=50000, embedding_size=128,
num_negative=64, num_steps=100001,
num_skips=2, skip_window=1):
"""
Create a new Word2Vec model with token
vectors generated in the 'tokens' step.
Parameters
----------
save_dir : str
Path to the output directory where model will be saved
integer_tokens : str
Path to the 1D token vectors
"""
# Create TF graph
with tf.device('/gpu:0'):
graph = tf.Graph()
with graph.as_default():
# If we are on the first run, initialize everything as normal
train_inputs = tf.placeholder(tf.int32, shape=[batch_size],
name="train_inputs")
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1],
name="train_labels")
with tf.device('/cpu:0'):
# Start embeddings w/ values uniformly distributed
# between -1 and 1
embeddings = tf.Variable(tf.random_uniform([
vocab_size,
embedding_size
], -1.0, 1.0), name="embeddings")
# Translates the train_inputs into the corresponding embedding
embed = tf.nn.embedding_lookup(embeddings, train_inputs,
name="embedding_op")
# Construct the variables for the noise contrastive estimation
nce_weights = tf.Variable(tf.truncated_normal([
vocab_size,
embedding_size
], stddev=1.0 / math.sqrt(embedding_size)), name="nce_weights")
nce_biases = tf.Variable(tf.zeros([vocab_size]), name="nce_biases")
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(tf.nn.nce_loss(
weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_negative,
num_classes=vocab_size
), name="loss")
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1,
keep_dims=True), name="norm")
normalized_embeddings = embeddings / norm
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session(graph=graph) as session:
init.run()
tf.add_to_collection('optimizer', optimizer)
data_index = 0
average_loss = 0
        for step in range(num_steps):
good_batch = False
while not good_batch:
data_index, batch_inputs, batch_labels = generate_batch(
integer_tokens,
data_index,
batch_size,
num_skips,
skip_window
)
good_batch = is_batch_good(batch_inputs)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
final_embeddings = normalized_embeddings.eval()
saver.save(session, os.path.join(save_dir, 'embeddings_model'))
return final_embeddings
| 23,247
|
def read_junction_report(filename: Union[str, Path]) -> Tuple[str, List[str]]:
"""
tab-delimited with header:
chr left right strand num_transcript num_sample genome annotation label
    yields: (label, records) tuples, grouping consecutive rows that share a label
    """
    with open(filename, "r") as fi:
        reader = DictReader(fi, delimiter="\t")
r = next(reader)
cur_label, cur = r["label"], [r]
for r in reader:
if r["label"] != cur_label:
yield cur_label, cur
cur_label, cur = r["label"], [r]
else:
cur.append(r)
yield cur_label, cur
| 23,248
|
def Register_User():
"""Validates register form data and saves it to the database"""
# Check if the fields are filled out
if not (request.form['username'] and request.form['email'] and request.form['password'] and request.form['passwordConf']):
return redirect(url_for('Register', message = "Please fill out all the fields"))
else:
# Ensure passwords match
if request.form['password'] != request.form['passwordConf']:
return redirect(url_for('Register', message = "Passwords do not match"))
# Ensure name is only _, a-z, A-Z, 0-9, and space
if not re.search(r'^[\w_ ]+$', request.form['username']):
return redirect(url_for('Register', message = "Username can only contain _, a-z, A-Z, 0-9 and spaces."))
# Ensure a valid email
if not re.search(r'^[a-zA-Z0-9]+[\._]?[a-zA-Z0-9]+[@]\w+[.]\w+$', request.form['email']):
return redirect(url_for('Register', message = "Invalid email"))
# Connect to DB
with engine.connect() as con:
# Check if username is taken
try:
statement = text("SELECT COUNT(1) FROM user WHERE (username = :username)")
result = con.execute(statement, username = request.form['username']).scalar()
except SQLAlchemyError as e:
return redirect(url_for('Error', title = "Error: Validating user availability", msg = type(e), back = "Register_User"))
except:
return redirect(url_for('Error', title = "Error", msg = "<class 'blog.UnhandledError'>", back = "Register_User"))
if result > 0:
return redirect(url_for('Register', message = "Username is already taken"))
# Check if email is taken
try:
statement = text("SELECT COUNT(1) FROM user WHERE (email = :email)")
result = con.execute(statement, email = request.form['email']).scalar()
except SQLAlchemyError as e:
return redirect(url_for('Error', title = "Error: Validating user availability", msg = type(e), back = "Register_User"))
except:
return redirect(url_for('Error', title = "Error", msg = "<class 'blog.UnhandledError'>", back = "Register_User"))
if result > 0:
return redirect(url_for('Register', message = "Email is already taken"))
# Create new user and add to the database
try:
new_user = User(request.form['username'], request.form['email'], request.form['password'])
db.session.add(new_user)
db.session.commit()
except:
return redirect(url_for('Error', title = "Error", msg = "<class 'blog.UnhandledError'>", back = "Register_User"))
# Get the new user's ID to log them in
try:
statement = text("SELECT id FROM user WHERE (username = :username)")
result = con.execute(statement, username = request.form['username']).scalar()
except:
return redirect(url_for('Error', title = "Error: Login failed", msg = "REGISTRATION WAS SUCCESSFUL. Something went wrong loging you in. Please login."))
# Log the new user in with a session
session['user_id'] = result
# Redirect to the new user's profile
return redirect(url_for('Own_Profile'))
| 23,249
|
def check_events(ai_settings, screen, ship, bullets):
"""响应按键和鼠标事件"""
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
check_keydown_events(event, ai_settings, screen, ship, bullets)
elif event.type == pygame.KEYUP:
check_keyup_events(event, ai_settings, screen, ship, bullets)
| 23,250
|
def test_x_wrong_ndims():
"""Test that a runtime error is thrown when x has the wrong shape."""
z = np.zeros((0, 2))
x = [[np.array([0., 0.]), np.array([[1., 0.]]), np.array([[0., 1.]]), np.array([[1., 1.]])],
[z, z, z, z], [z], []]
assert_failure(x=x)
| 23,251
|
def extreme_rank(df, col, n, bottom=True, keep=[]):
"""
Calculate the n top or bottom of a given series
"""
    t = df[list(keep)+[col]].sort_values(col, ascending=bottom).iloc[:n]
count = t['NO_MUNICIPIO'].value_counts()
count.name = '#'
perc = t['NO_MUNICIPIO'].value_counts(normalize=True)
perc.name = '%'
return pd.concat([count, perc], axis=1), t
| 23,252
|
def ss_octile(y):
"""Obtain the octile summary statistic.
The statistic reaches the optimal performance upon a high number of
observations. According to Allingham et al. (2009), it is more stable than ss_robust.
Parameters
----------
y : array_like
Yielded points.
Returns
-------
    array_like of the shape (batch_size, dim_ss=7, dim_ss_point)
"""
octiles = np.linspace(12.5, 87.5, 7)
E1, E2, E3, E4, E5, E6, E7 = np.percentile(y, octiles, axis=1)
# Combining the summary statistics.
ss_octile = np.hstack((E1, E2, E3, E4, E5, E6, E7))
ss_octile = ss_octile[:, :, np.newaxis]
return ss_octile
| 23,253
|
def englishToFrench(englishText):
"""Translates English to French"""
model_id='en-fr'
fr_text = language_translator.translate(
text=englishText,
model_id=model_id).get_result()
return(fr_text['translations'][0]['translation'])
| 23,254
|
def calc_out_of_plane_angle(a, b, c, d):
"""
Calculate the out of plane angle of the A-D vector
to the A-B-C plane
Returns the value in radians and a boolean telling if b-a-c are near-collinear
"""
collinear_cutoff = 175./180.
collinear = 0
if abs(calc_angle(b, a, c)) > np.pi * collinear_cutoff:
collinear = 1
rab = b - a
rac = c - a
rad = d - a
rab /= np.linalg.norm(rab)
rac /= np.linalg.norm(rac)
rad /= np.linalg.norm(rad)
n = np.cross(rab,rac)
n /= np.linalg.norm(n)
sin = np.dot(n,rad)
ang = np.arcsin(sin)
return ang, collinear
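# Hedged worked example (assumes numpy arrays and the calc_angle helper used
# above): with D along the normal of the A-B-C plane the angle is pi/2.
a = np.array([0., 0., 0.])
b = np.array([1., 0., 0.])
c = np.array([0., 1., 0.])
d = np.array([0., 0., 1.])
ang, collinear = calc_out_of_plane_angle(a, b, c, d)
# ang ~ 1.5708 rad (90 degrees), collinear == 0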
| 23,255
|
async def median(ctx, *args):
"""
Generate median from given dataset.
Returns the median for the given array or a dataframe containing means for every column
"""
param_def = {"fcai": "False", "tpose": "False", "dataframe": ""}
argchecked = check_args(param_rec=args, param_def=param_def)
if type(argchecked) == str:
await ctx.send(argchecked)
return
elif type(argchecked) == dict:
fcai = argchecked.get("fcai")
tpose = argchecked.get("tpose")
dataframe = argchecked.get("dataframe")
median = mediana(
dataframe=dataframe,
fcai=bool(distutils.util.strtobool(fcai)),
tpose=bool(distutils.util.strtobool(tpose)),
)
await ctx.send(median)
| 23,256
|
def compute_distances(X, Y):
"""
Computes the Mahalanobis distances between X and Y, for the special case
where covariance between components is 0.
Args:
X (np.ndarray):
3D array that represents our population of gaussians. It is
assumed that X[0] is the 2D matrix containing the coordinates
of the centroids and X[1] represents the 2D matrix of variances.
Y (np.ndarray):
2D or 3D array that can represent either a data matrix or a
DE population. If it represents a population, only the centroids
are taken into consideration.
Returns: np.ndarray
A matrix that contains all distances for each row of X to all rows
of Y, computed with the variances found in X.
"""
assert X.ndim == 3 and X.shape[0] == 2, \
'X must have shape (2,_,_)'
assert Y.ndim == 2 or (Y.ndim == 3 and Y.shape[0] == 2), \
'Y must have shape (_,_) or (2,_,_)'
m = X.shape[1]
if Y.ndim == 2:
n = Y.shape[0]
points = Y
else:
n = Y.shape[1]
points = Y[0]
centers = X[0]
sigmas = X[1]
dist_matrix = np.empty((m, n), dtype=X.dtype)
for i in range(m):
# Broadcasting
diff = (centers[i] - points) / sigmas[i]
# This computes the sum of the pairwise products of the rows. In other
# words, it computes sum([x[i] * y[i] for i in range(x.shape[0])]).
dist_matrix[i, :] = np.einsum('ij,ij->i', diff, diff)
return dist_matrix
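# Hedged worked example: one Gaussian centred at (0, 0) with unit variances
# against a single point (3, 4); the squared scaled distance is 3**2 + 4**2.
X = np.array([[[0., 0.]],    # centroids, shape (1, 2)
              [[1., 1.]]])   # variances, shape (1, 2)
Y = np.array([[3., 4.]])
print(compute_distances(X, Y))  # [[25.]]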
| 23,257
|
def third_level_command_3():
"""Third level command under 2nd level 2"""
| 23,258
|
def test_assure_valid_device_configuration():
"""Test if invalid configurations are updated correctly"""
conf = {
CONF_USERNAME: "bbbrrrqqq@exa@mple.com",
CONF_PASSWORD: "PasswordPassword",
"test": "test2",
CONF_DEVICES: [
{CONF_TOKEN: "ABCDEF"},
{CONF_TOKEN: "12345", CONF_TOKEN_KEY: "4444", CONF_ID: "9876543210"},
{CONF_TOKEN: "12345", CONF_TOKEN_KEY: "4444", CONF_IP_ADDRESS: "192.0.2.3"},
{CONF_PASSWORD: "ABC"},
{
CONF_TOKEN: "12345",
CONF_TOKEN_KEY: "4444",
CONF_IP_ADDRESS: "192.0.2.3",
CONF_DISCOVERY: DISCOVERY_CLOUD,
},
{CONF_TOKEN: "ABCDEF"},
],
}
valid = _assure_valid_device_configuration(conf, conf[CONF_DEVICES][0])
assert not valid
assert conf[CONF_DEVICES][0][CONF_DISCOVERY] == DISCOVERY_CLOUD
valid = _assure_valid_device_configuration(conf, conf[CONF_DEVICES][0])
assert valid
valid = _assure_valid_device_configuration(conf, conf[CONF_DEVICES][1])
assert not valid
assert conf[CONF_DEVICES][1][CONF_DISCOVERY] == DISCOVERY_WAIT
valid = _assure_valid_device_configuration(conf, conf[CONF_DEVICES][1])
assert valid
valid = _assure_valid_device_configuration(conf, conf[CONF_DEVICES][2])
assert not valid
assert conf[CONF_DEVICES][2][CONF_DISCOVERY] == DISCOVERY_LAN
valid = _assure_valid_device_configuration(conf, conf[CONF_DEVICES][3])
assert not valid
assert conf[CONF_DEVICES][3][CONF_DISCOVERY] == DISCOVERY_CLOUD
valid = _assure_valid_device_configuration(conf, conf[CONF_DEVICES][4])
assert valid
assert conf[CONF_DEVICES][4][CONF_DISCOVERY] == DISCOVERY_CLOUD
conf.pop(CONF_PASSWORD)
valid = _assure_valid_device_configuration(conf, conf[CONF_DEVICES][5])
assert not valid
assert conf[CONF_DEVICES][5][CONF_DISCOVERY] == DISCOVERY_IGNORE
| 23,259
|
def op_item_info():
"""Helper that compiles item info spec and all common module specs
:return dict
"""
item_spec = dict(
item=dict(
type="str",
required=True
),
flatten_fields_by_label=dict(
type="bool",
default=True
),
# Direct users to field_info module instead
field=dict(
type="str",
removed_from_collection="onepassword.connect",
removed_in_version="3.0.0",
),
vault=dict(
type="str"
)
)
item_spec.update(common_options())
return item_spec
| 23,260
|
def unpack_condition(tup):
"""
Convert a condition to a list of values.
Notes
-----
Rules for keys of conditions dicts:
(1) If it's numeric, treat as a point value
(2) If it's a tuple with one element, treat as a point value
(3) If it's a tuple with two elements, treat as lower/upper limits and guess a step size.
(4) If it's a tuple with three elements, treat as lower/upper/step
(5) If it's a list, ndarray or other non-tuple ordered iterable, use those values directly.
"""
if isinstance(tup, tuple):
if len(tup) == 1:
return [float(tup[0])]
elif len(tup) == 2:
            return np.arange(tup[0], tup[1], dtype=float)
elif len(tup) == 3:
            return np.arange(tup[0], tup[1], tup[2], dtype=float)
else:
raise ValueError('Condition tuple is length {}'.format(len(tup)))
    elif isinstance(tup, collections.abc.Iterable):
return [float(x) for x in tup]
else:
return [float(tup)]
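# Hedged worked examples of the rules listed in the docstring:
# unpack_condition(300)              -> [300.0]                    (rule 1)
# unpack_condition((300,))           -> [300.0]                    (rule 2)
# unpack_condition((300, 303))       -> array([300., 301., 302.])  (rule 3, step of 1)
# unpack_condition((300, 310, 5))    -> array([300., 305.])        (rule 4)
# unpack_condition([300, 350, 400])  -> [300.0, 350.0, 400.0]      (rule 5)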
| 23,261
|
def run(doc, preset_mc: bool):
"""Create Graph through classfication values."""
mc = doc._.MajorClaim
adus = doc._.ADU_Sents
if isinstance(mc, list):
mc = mc[0]
if mc == []:
mc = adus.pop(0)
elif not mc:
mc = adus.pop(0)
relations = compare_all(adus, mc)
if config["adu"]["MC"]["method"] == "relations" and not preset_mc:
mc = mc_from_relations.run_spacy(adus, relations)
relations = compare_all(adus, mc)
graph = ag.Graph(name=doc._.key.split("/")[-1])
mc_node = ag.Node(
key=graph.keygen(), text=mc, category=ag.NodeCategory.I, major_claim=True
)
graph.add_node(mc_node)
outer_adus = [a for a in adus if not a == mc]
inner_adus = []
nodes = dict()
connections = dict()
connections[mc] = []
for adu in outer_adus:
main_rel = relations[adu]["main"]
if relations[adu][mc].probability >= main_rel.probability * 0.90:
logging.debug("MC Match")
if relations[adu][mc].classification == RelationClass.ATTACK:
snode = ag.Node(
key=graph.keygen(),
text="Default Conflict",
category=ag.NodeCategory.CA,
)
elif relations[adu][mc].classification == RelationClass.SUPPORT:
snode = ag.Node(
key=graph.keygen(),
text="Default Inference",
category=ag.NodeCategory.RA,
)
else:
snode = None
if snode:
cnode = ag.Node(
key=graph.keygen(), text=adu, category=ag.NodeCategory.I
)
nodes[adu] = cnode
graph.add_edge(ag.Edge(key=graph.keygen(), start=cnode, end=snode))
graph.add_edge(ag.Edge(key=graph.keygen(), start=snode, end=mc_node))
outer_adus.remove(adu)
inner_adus.append(adu)
if len(graph.incoming_nodes[mc_node]) == 0:
iterator = 0
snode = None
designated_adu = None
        while snode is None and iterator < len(outer_adus):
designated_adu = outer_adus[iterator]
if relations[designated_adu][mc].classification == RelationClass.ATTACK:
snode = ag.Node(
key=graph.keygen(),
text="Default Conflict",
category=ag.NodeCategory.CA,
)
elif relations[designated_adu][mc].classification == RelationClass.SUPPORT:
snode = ag.Node(
key=graph.keygen(),
text="Default Inference",
category=ag.NodeCategory.RA,
)
else:
iterator += 1
snode = None
if not snode or not designated_adu:
if outer_adus == []:
logging.info("No ADUs classified, aborting")
return graph
else:
designated_adu = outer_adus[0]
            snode = ag.Node(
key=graph.keygen(),
text="Default Inference",
category=ag.NodeCategory.RA,
)
cnode = ag.Node(
key=graph.keygen(), text=designated_adu, category=ag.NodeCategory.I
)
nodes[designated_adu] = cnode
graph.add_edge(ag.Edge(key=graph.keygen(), start=cnode, end=snode))
graph.add_edge(ag.Edge(key=graph.keygen(), start=snode, end=mc_node))
outer_adus.remove(designated_adu)
inner_adus.append(designated_adu)
max_iter = 0
while len(outer_adus) > 0 and max_iter < 40000:
max_iter += 1
for adu in outer_adus:
inner_found = False
for adu2 in inner_adus:
if adu2 == adu:
pass
elif (
relations[adu][adu2].probability
>= relations[adu]["main"].probability * 0.98
):
logging.debug("Match")
if relations[adu][adu2].classification == RelationClass.ATTACK:
snode = ag.Node(
key=graph.keygen(),
text="Default Conflict",
category=ag.NodeCategory.CA,
)
elif relations[adu][adu2].classification == RelationClass.SUPPORT:
snode = ag.Node(
key=graph.keygen(),
text="Default Inference",
category=ag.NodeCategory.RA,
)
else:
snode = None
if snode:
if adu in nodes:
cnode1 = nodes[adu]
else:
cnode1 = ag.Node(
key=graph.keygen(),
text=adu,
category=ag.NodeCategory.I,
)
nodes[adu] = cnode1
if adu2 in nodes:
cnode2 = nodes[adu2]
else:
cnode2 = ag.Node(
key=graph.keygen(),
text=adu2,
category=ag.NodeCategory.I,
)
nodes[adu2] = cnode2
graph.add_edge(
ag.Edge(key=graph.keygen(), start=cnode1, end=snode)
)
graph.add_edge(
ag.Edge(key=graph.keygen(), start=snode, end=cnode2)
)
inner_found = True
if inner_found:
outer_adus.remove(adu)
inner_adus.append(adu)
if len(outer_adus) > 0:
for adu in outer_adus:
snode = ag.Node(
key=graph.keygen(),
text="Default Inference",
category=ag.NodeCategory.RA,
)
cnode = ag.Node(key=graph.keygen(), text=adu, category=ag.NodeCategory.I,)
graph.add_edge(ag.Edge(key=graph.keygen(), start=cnode, end=snode))
graph.add_edge(ag.Edge(key=graph.keygen(), start=snode, end=mc_node))
return graph
| 23,262
|
def get_wfs_with_parameter(parameter, wf_class='Workflow'):
"""
Find workflows of a given class, with a given parameter (which must be a
node)
:param parameter: an AiiDA node
:param wf_class: the name of the workflow class
:return: an AiiDA query set with all workflows that have this parameter
"""
from aiida.common.datastructures import wf_data_types
from aiida.orm.workflow import Workflow
try:
from aiida.backends.djsite.db import models
except ImportError:
from aiida.djsite.db import models
# Find attributes with this name
qdata = models.DbWorkflowData.objects.filter(aiida_obj=parameter,
data_type=wf_data_types.PARAMETER)
# Find workflows with those attributes
if wf_class == 'Workflow':
qwf = Workflow.query(data__in=qdata)
else:
qwf = Workflow.query(module_class=wf_class,data__in=qdata)
#q2 = wf_class.query(data__in=q1)
# return a Django QuerySet with the resulting class instances
return qwf.distinct().order_by('ctime')
| 23,263
|
def find_checkpoint_in_dir(model_dir):
"""tf.train.latest_checkpoint will find checkpoints if
'checkpoint' file is present in the directory.
"""
checkpoint_path = tf.train.latest_checkpoint(model_dir)
if checkpoint_path:
return checkpoint_path
# tf.train.latest_checkpoint did not find anything. Find .ckpt file
# manually.
files = glob.glob(os.path.join(model_dir, "*.ckpt*"))
if len(files) == 0:
return None
# Use last file for consistency if more than one (may not actually be
# "latest").
checkpoint_path = sorted(files)[-1]
# Trim after .ckpt-* segment. For example:
# model.ckpt-257706.data-00000-of-00002 -> model.ckpt-257706
parts = checkpoint_path.split(".")
ckpt_index = [i for i in range(len(parts)) if "ckpt" in parts[i]][0]
checkpoint_path = ".".join(parts[: ckpt_index + 1])
return checkpoint_path
| 23,264
|
def map_iou(boxes_true, boxes_pred, scores, thresholds = [0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75]):
"""
    Mean average precision at different intersection over union (IoU) thresholds
input:
        boxes_true: Mx4 numpy array of ground-truth bounding boxes of one image.
bbox format: (x1, y1, w, h)
boxes_pred: Nx4 numpy array of predicted bounding boxes of one image.
bbox format: (x1, y1, w, h)
scores: length N numpy array of scores associated with predicted bboxes
        thresholds: IoU thresholds to evaluate mean average precision on
output:
map: mean average precision of the image
"""
# According to the introduction, images with no ground truth bboxes will not be
# included in the map score unless there is a false positive detection (?)
# return None if both are empty, don't count the image in final evaluation (?)
if len(boxes_true) == 0 and len(boxes_pred) == 0:
return None
assert boxes_true.shape[1] == 4 or boxes_pred.shape[1] == 4, "boxes should be 2D arrays with shape[1]=4"
if len(boxes_pred):
assert len(scores) == len(boxes_pred), "boxes_pred and scores should be same length"
# sort boxes_pred by scores in decreasing order
boxes_pred = boxes_pred[np.argsort(scores)[::-1], :]
map_total = 0
# loop over thresholds
for t in thresholds:
matched_bt = set()
tp, fn = 0, 0
for i, bt in enumerate(boxes_true):
matched = False
for j, bp in enumerate(boxes_pred):
miou = calculate_iou(bt, bp)
if miou >= t and not matched and j not in matched_bt:
matched = True
tp += 1 # bt is matched for the first time, count as TP
matched_bt.add(j)
if not matched:
fn += 1 # bt has no match, count as FN
fp = len(boxes_pred) - len(matched_bt) # FP is the bp that not matched to any bt
m = tp / (tp + fn + fp)
map_total += m
return map_total / len(thresholds)
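# Hedged usage sketch (assumes the calculate_iou helper used above computes a
# standard IoU on (x1, y1, w, h) boxes): a single perfectly matching
# prediction scores 1.0 at every threshold.
boxes_true = np.array([[10, 10, 50, 80]])
boxes_pred = np.array([[10, 10, 50, 80]])
scores = np.array([0.9])
print(map_iou(boxes_true, boxes_pred, scores))  # 1.0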
| 23,265
|
def draw_output_summary(model):
""" reads the data saved in the model class and depending on this data
chooses a visualization method to present the results with the help
of draw_optimization_overview """
if 'time_series' in model.log:
        # no optimization has happened.
# hence, cost/predictions/parameters is 0-dim
fig = plt.figure()
ax = plt.subplot(1,1,1)
ax = draw_model_output(ax,model)
ax.title.set_text('Model Output')
else:
fig = draw_optimization_overview(model)
return fig
| 23,266
|
def _Userinfo(args):
"""Print the userinfo for this token, if possible."""
userinfo = apitools_base.GetUserinfo(
oauth2client.client.AccessTokenCredentials(args.access_token,
'oauth2l/1.0'))
if args.format == 'json':
print(_PrettyJson(userinfo))
else:
print(_CompactJson(userinfo))
| 23,267
|
def ShowSkmemArena(cmd_args=None) :
""" Show the global list of skmem arenas
"""
i = 1
arhead = kern.globals.skmem_arena_head
for ar in IterateTAILQ_HEAD(arhead, "ar_link") :
format_string = "{:>4d}: 0x{:<08x} {:<6s} {:>5d} KB \"{:<s}\""
print format_string.format(i, ar, SkmemArenaTypeAsString(ar.ar_type), ar.ar_mapsize >> 10, str(ar.ar_name))
i += 1
| 23,268
|
def general_spline_interpolation(xs, ys, p, knots=None):
"""
NOTE: SLOW SINCE IT USES B()
xs,ys: interpolation points
p: degree
knots: If None, use p+1-regular from xs[0] to slightly past x[1]
returns cs, knots
"""
# number of interpolation points (and also control points)
m = len(xs)
assert(len(ys) == m)
# use p+1-regular knot vector with ends equal to first sample and slightly
# past last sample
    if knots is None:
knots = uniform_regular_knot_vector(m, p, t0=xs[0], t1=xs[-1]+0.001)
# create matrix A
A = np.zeros((m,m))
for row in range(m):
for col in range(m):
A[row, col] = B(col, p, xs[row], knots)
# compute control points
cs = np.linalg.inv(A).dot(np.array(ys))
return cs, knots
| 23,269
|
def number_from_string(s):
"""
Parse and return number from string.
Return float only if number is not an int. Assume number can be parsed from
string.
"""
try:
return int(s)
except ValueError:
return float(s)
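# Hedged usage sketch:
print(number_from_string("42"))    # 42 (int)
print(number_from_string("3.14"))  # 3.14 (float)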
| 23,270
|
def ennAvgPool(inplanes,
kernel_size=1,
stride=None,
padding=0,
ceil_mode=False):
"""enn Average Pooling."""
in_type = build_enn_divide_feature(inplanes)
return enn.PointwiseAvgPool(
in_type,
kernel_size,
stride=stride,
padding=padding,
ceil_mode=ceil_mode)
| 23,271
|
def acceptable(*args, acceptables):
"""
    Return True if every character in the StringVars passed as arguments is in acceptables, else return False.
"""
for arg in args:
for char in arg:
if char.lower() not in acceptables:
return False
return True
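# Hedged usage sketch (illustrative whitelist):
allowed = set("abcdefghijklmnopqrstuvwxyz0123456789")
print(acceptable("Hello", "42", acceptables=allowed))  # True
print(acceptable("Hello!", acceptables=allowed))       # False ('!' rejected)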
| 23,272
|
def get_confusion_matrix(
ground_truth: np.ndarray,
predictions: np.ndarray,
labels: Optional[List[Union[str, float]]] = None) -> np.ndarray:
"""
Computes a confusion matrix based on predictions and ground truth vectors.
The confusion matrix (a.k.a. contingency table) has predictions in rows
and ground truth in columns. If the value order is not provide via the
``labels`` parameter, the ordering is based on the alphanumeric sorting
of the unique values in both of the input arrays.
Parameters
----------
ground_truth : numpy.ndarray
An array holding the *true* target values.
predictions : numpy.ndarray
An array holding *predictions* of the target values.
labels : List[string, number], optional (default=None)
If a certain ordering of the labels in the confusion matrix is desired,
it can be specified via this parameter. By default alphanumeric sorting
is used.
Warns
-----
UserWarning
Some of the labels provided by the user are not present in either of
the input arrays.
Raises
------
IncorrectShapeError
The ``ground_truth`` and/or ``labels`` vectors are not 1-dimensional.
The length of these two arrays does not agree.
TypeError
The ``labels`` parameter is not a list.
ValueError
The ``labels`` list empty, it contains duplicate entries or some of the
labels present in either of the input array are not accounted for by
the ``labels`` list.
Returns
-------
confusion_matrix : numpy.ndarray
A confusion matrix.
"""
if not fuav.is_1d_array(ground_truth):
raise IncorrectShapeError('The ground truth vector has to be '
'1-dimensional numpy array.')
if not fuav.is_1d_array(predictions):
raise IncorrectShapeError('The predictions vector has to be '
'1-dimensional numpy array.')
if ground_truth.shape[0] != predictions.shape[0]:
raise IncorrectShapeError('Both the ground truth and the predictions '
'vectors have to have the same length.')
all_values = np.concatenate([ground_truth, predictions])
if labels is None:
ordering = np.sort(np.unique(all_values)).tolist()
elif isinstance(labels, list):
if not labels:
raise ValueError('The labels list cannot be empty.')
labels_set = set(labels)
if len(labels_set) != len(labels):
raise ValueError('The labels list contains duplicates.')
extra_labels = labels_set.difference(all_values)
if extra_labels:
warnings.warn(
'Some of the given labels are not present in either of the '
'input arrays: {}.'.format(extra_labels), UserWarning)
unaccounted_labels = set(all_values).difference(labels_set)
if unaccounted_labels:
raise ValueError('The following labels are present in the input '
'arrays but were not given in the labels '
'parameter: {}.'.format(unaccounted_labels))
ordering = labels
else:
raise TypeError('The labels parameter has to either a list or None.')
confusion_matrix_list = []
for pred in ordering:
pdt = predictions == pred
row = [np.logical_and(pdt, ground_truth == i).sum() for i in ordering]
confusion_matrix_list.append(row)
confusion_matrix = np.array(confusion_matrix_list)
return confusion_matrix
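# Hedged worked example (assumes the fatf/numpy helpers imported above are
# available): predictions index the rows, ground truth the columns.
gt = np.array(['a', 'a', 'b'])
pred = np.array(['a', 'b', 'b'])
print(get_confusion_matrix(gt, pred))
# [[1 0]
#  [1 1]]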
| 23,273
|
def atom_explicit_hydrogen_valences(gra):
""" explicit hydrogen valences, by atom
"""
return dict_.transform_values(atom_explicit_hydrogen_keys(gra), len)
| 23,274
|
def get_average(pixels):
"""
Given a list of pixels, finds the average red, blue, and green values
Input:
pixels (List[Pixel]): list of pixels to be averaged
Returns:
rgb (List[int]): list of average red, green, blue values across pixels respectively
Assumes you are returning in the order: [red, green, blue]
"""
# rgb of each pixel
pixel_r = 0
pixel_g = 0
pixel_b = 0
# how many pixels in the list[pixels]
n = 0
for pixel in pixels:
n += 1
pixel_r += pixel.red
pixel_g += pixel.green
pixel_b += pixel.blue
pixel_avg = [pixel_r//n, pixel_g//n, pixel_b//n]
return pixel_avg
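# Hedged usage sketch with a stand-in Pixel type (the real Pixel class is
# assumed to expose .red, .green and .blue attributes):
from collections import namedtuple
Pixel = namedtuple('Pixel', ['red', 'green', 'blue'])
print(get_average([Pixel(255, 0, 0), Pixel(0, 0, 255)]))  # [127, 0, 127]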
| 23,275
|
def send_transaction(to, value, token):
"""Sends transaction."""
password = getpass.getpass('Password from keystore: ') # Prompt the user for a password of keystore file
configuration = Configuration().load_configuration()
api = get_api()
try:
if token is None:
# send ETH transaction
tx_hash, tx_cost_eth = api.send_transaction(configuration,
password,
to,
value)
else:
# send erc20 transaction
tx_hash, tx_cost_eth = api.send_transaction(configuration,
password,
to,
value,
token)
click.echo('Hash of the transaction: %s' % str(tx_hash.hex()))
click.echo('Transaction cost was: %sETH' % str(tx_cost_eth))
except InsufficientFundsException:
click.echo('Insufficient ETH funds! Check balance on your address.')
except InsufficientERC20FundsException:
click.echo('Insufficient ERC20 token funds! Check balance on your address.')
except InvalidAddress:
click.echo('Invalid recipient(to) address!')
except InvalidValueException:
click.echo('Invalid value to send!')
except InvalidPasswordException:
click.echo('Incorrect password!')
except InfuraErrorException:
click.echo('Wallet is not connected to Ethereum network!')
except ERC20NotExistsException:
click.echo('This token is not added to the wallet!')
| 23,276
|
def _leading_space_count(line):
"""Return number of leading spaces in line."""
i = 0
while i < len(line) and line[i] == ' ':
i += 1
return i
| 23,277
|
def _get_marker_indices(marker, line):
""" method to find the start and end parameter markers
on a template file line. Used by write_to_template()
"""
indices = [i for i, ltr in enumerate(line) if ltr == marker]
start = indices[0:-1:2]
end = [i + 1 for i in indices[1::2]]
assert len(start) == len(end)
return start, end
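# Hedged worked example: '~' markers bracket two parameters in a template line.
line = "rch ~ par1 ~  stage ~ par2 ~"
start, end = _get_marker_indices('~', line)
# indices of '~' are [4, 11, 20, 27], so start == [4, 20] and end == [12, 28]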
| 23,278
|
def get_number_of_images(dir):
"""
Returns number of files in given directory
Input:
dir - full path of directory
Output:
number of files in directory
"""
return len([name for name in os.listdir(dir) if os.path.isfile(os.path.join(dir, name))])
| 23,279
|
def seg_liver_train(config, train_df, val_df,
gpu_id, number_slices, batch_size, iter_mean_grad, max_training_iters_1,
max_training_iters_2, max_training_iters_3, save_step, display_step,
ini_learning_rate, boundaries, values):
"""
    train_df: Training DF
    val_df: Testing DF used to evaluate.
"""
task_name = 'seg_liver'
# \seg_liver_ck\networks\seg_liver.ckpt
### config constants ###
root_folder = config.root_folder
database_root = config.database_root
logs_path = config.get_log('seg_liver')
imagenet_ckpt = config.imagenet_ckpt
finetune = config.fine_tune
trained_weights = config.old_weights
print("finetune", finetune)
if finetune == 0:
print("loading weights path of vgg-16 or resnet",imagenet_ckpt)
print("logs_path", logs_path)
else:
print("trained weights", trained_weights)
# D:\L_pipe\liver_open\liverseg-2017-nipsws\train_files\seg_liver_ck\networks\seg_liver.ckpt
# train_file = os.path.join(root_folder, 'seg_DatasetList', 'training_volume_3.txt')
# val_file = os.path.join(root_folder, 'seg_DatasetList', 'testing_volume_3.txt')
dataset = Dataset(train_df, None, val_df, database_root, number_slices, store_memory=False)
# Train the network
with tf.Graph().as_default():
with tf.device('/gpu:' + str(gpu_id)):
global_step = tf.Variable(0, name='global_step', trainable=False)
learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)
segmentation.train_seg(dataset, trained_weights, imagenet_ckpt, 1, learning_rate, logs_path, max_training_iters_1, save_step,
display_step, global_step, number_slices=number_slices, iter_mean_grad=iter_mean_grad,
batch_size=batch_size, resume_training=False, finetune = finetune)
with tf.Graph().as_default():
with tf.device('/gpu:' + str(gpu_id)):
global_step = tf.Variable(0, name='global_step', trainable=False)
learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)
segmentation.train_seg(dataset, trained_weights, imagenet_ckpt, 2, learning_rate, logs_path, max_training_iters_2, save_step,
display_step, global_step, number_slices=number_slices, iter_mean_grad=iter_mean_grad,
batch_size=batch_size, resume_training=True, finetune = config.fine_tune)
with tf.Graph().as_default():
with tf.device('/gpu:' + str(gpu_id)):
global_step = tf.Variable(0, name='global_step', trainable=False)
learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)
segmentation.train_seg(dataset, trained_weights, imagenet_ckpt, 3, learning_rate, logs_path, max_training_iters_3, save_step,
display_step, global_step, number_slices=number_slices, iter_mean_grad=iter_mean_grad,
batch_size=batch_size, resume_training=True, finetune = config.fine_tune)
| 23,280
|
def test_keywords_and_flat_sections(keywords_and_flat_sections):
"""Test an input made of keywords and two unnested sections, interspersed."""
ref_dict = dict(reference)
ref_dict["topsect"] = dict(reference)
ref_dict["foo<bar>"] = dict(reference)
grammar = getkw.grammar()
tokens = lexer.parse_string_to_dict(grammar, keywords_and_flat_sections)
assert tokens == ref_dict
# dump to JSON
getkw_json = StringIO()
json.dump(tokens, getkw_json, indent=4)
del tokens
# load from JSON
tokens = json.loads(getkw_json.getvalue())
assert tokens == ref_dict
| 23,281
|
def get_wrapper_depth(wrapper):
"""Return depth of wrapper function.
.. versionadded:: 3.0
"""
return wrapper.__wrapped__.__wrappers__ + (1 - wrapper.__depth__)
| 23,282
|
def apiTest():
"""Tests the API connection to lmessage. Returns true if it is connected."""
try:
result = api.add(2, 3)
    except Exception:
return False
return result == 5
| 23,283
|
def generations(pots, notes):
"""Return next generation of pots after rules in notes."""
next_gen = []
for pot in pots.keys():
pattern = ''
for i in range(-2, 3):
pattern += pots.get(pot + i, '.')
next_gen.append((pot, notes[pattern]))
# Check edges for new pots
left = min(pots)
pattern = ''
for i in range(left - 3, left + 2):
pattern += pots.get(i, '.')
if notes[pattern] == '#':
pots[left - 1] = '#'
right = max(pots)
pattern = ''
for i in range(right - 1, right + 4):
pattern += pots.get(i, '.')
if notes[pattern] == '#':
pots[right + 1] = '#'
for change in next_gen:
pots[change[0]] = change[1]
| 23,284
|
def signal_handler(sig, frame):
"""Handle ctrl+c interrupt.
    Without this handler, every time a ctrl+c interrupt is received, the server shuts down and
proceeds to the next iteration in the loop rather than exiting the program altogether.
"""
print("************Received CTRL-C. Will exit in 1 second************")
time.sleep(1)
sys.exit(0)
| 23,285
|
def get_distribution(dist_name):
"""Fetches a scipy distribution class by name"""
from scipy import stats as dists
if dist_name not in dists.__all__:
return None
cls = getattr(dists, dist_name)
return cls
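# Hedged usage sketch: fetch the normal distribution and evaluate its pdf.
norm = get_distribution('norm')
print(norm.pdf(0))                  # ~0.3989
print(get_distribution('no_such'))  # None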
| 23,286
|
async def async_remove_entry(hass, config_entry):
"""Handle removal of an entry."""
# remove persistent data
config = config_entry.data
name = config.get(CONF_NAME)
filenames = give_persistent_filename(hass, name)
if os.path.isfile(filenames.get("curve_filename")):
os.remove(filenames.get("curve_filename"))
if os.path.isfile(filenames.get("persistent_data_filename")):
os.remove(filenames.get("persistent_data_filename"))
| 23,287
|
async def handle_countdown_reminders():
""" Handle countdowns after starting.
Countdowns created afterwards are handled by the cd create command.
"""
reminders = []
for tag, cd in dict(time_cfg.data["countdown"]).items():
dt = pendulum.parse(cd["time"], tz=cd["tz"])
cd = dict(cd)
cd["tag"] = tag
cd["dt"] = dt
reminders.append(cd)
if not reminders:
return
# Go through the reminders starting at the newest one
for cd in sorted(reminders, key=itemgetter("dt")):
# Find in how many seconds the countdown will finish
seconds = (cd["dt"].diff(pendulum.now(cd["tz"])).in_seconds())
        # If the next reminder is more than a month away, don't bother waiting.
if seconds > 60 * 60 * 24 * 30:
return
# In case of multiple countdowns at once, set a threshold at -10 seconds
# If below, remove the countdown and continue
if seconds < -10:
del time_cfg.data["countdown"][cd["tag"]]
await time_cfg.asyncsave()
continue
seconds = max(seconds, 0)
await wait_for_reminder(cd, seconds)
| 23,288
|
def ece(y_probs, y_preds, y_true, balanced=False, bins="fd", **bin_args):
"""Compute the expected calibration error (ECE).
Parameters:
y_probs (np.array): predicted class probabilities
y_preds (np.array): predicted class labels
y_true (np.array): true class labels
Returns:
exp_ce (float): expected calibration error
"""
sklearn.utils.check_consistent_length(y_preds, y_true)
# define the bin function
def bin_func(y_probs_bin, y_preds_bin, y_true_bin):
acc = (y_preds_bin == y_true_bin).mean()
conf = y_probs_bin.mean()
return abs(acc - conf)
# define the balanced bin function
def balanced_bin_func(y_probs_bin, y_preds_bin, y_true_bin):
balacc = sklearn.metrics.balanced_accuracy_score(y_true_bin, y_preds_bin)
conf = y_probs_bin.mean()
return abs(balacc - conf)
# compute the full result
bin_indices = utils.get_bin_indices(y_probs, bins=bins, lower=0, upper=1, **bin_args)
func = balanced_bin_func if balanced else bin_func
return utils.binning(y_probs, y_preds, y_true, bin_indices, func)
| 23,289
|
def _call_or_get(value, menu=None, choice=None, string=None, obj=None, caller=None):
"""
Call the value, if appropriate, or just return it.
Args:
value (any): the value to obtain. It might be a callable (see note).
Keyword Args:
menu (BuildingMenu, optional): the building menu to pass to value
if it is a callable.
choice (Choice, optional): the choice to pass to value if a callable.
string (str, optional): the raw string to pass to value if a callback.
obj (Object): the object to pass to value if a callable.
caller (Account or Object, optional): the caller to pass to value
if a callable.
Returns:
The value itself. If the argument is a function, call it with
specific arguments (see note).
Note:
If `value` is a function, call it with varying arguments. The
list of arguments will depend on the argument names in your callable.
- An argument named `menu` will contain the building menu or None.
- The `choice` argument will contain the choice or None.
- The `string` argument will contain the raw string or None.
- The `obj` argument will contain the object or None.
- The `caller` argument will contain the caller or None.
- Any other argument will contain the object (`obj`).
Thus, you could define callbacks like this:
def on_enter(menu, caller, obj):
def on_nomatch(string, choice, menu):
def on_leave(caller, room): # note that room will contain `obj`
"""
if callable(value):
# Check the function arguments
kwargs = {}
spec = getargspec(value)
args = spec.args
if spec.keywords:
kwargs.update(dict(menu=menu, choice=choice, string=string, obj=obj, caller=caller))
else:
if "menu" in args:
kwargs["menu"] = menu
if "choice" in args:
kwargs["choice"] = choice
if "string" in args:
kwargs["string"] = string
if "obj" in args:
kwargs["obj"] = obj
if "caller" in args:
kwargs["caller"] = caller
# Fill missing arguments
for arg in args:
if arg not in kwargs:
kwargs[arg] = obj
# Call the function and return its return value
return value(**kwargs)
return value
| 23,290
|
def div(a, b, num_threads=None, direction='left'):
"""Divide multithreaded
Args
a (np.ndarray or scalar): Numpy array or scalar
b (np.ndarray or scalar): Numpy array or scalar
num_threads : Number of threads to be used, overrides threads as set by
mtalg.set_num_threads()
direction : 'left' or 'right' to decide if a or b is modified
"""
__multithreaded_opr_direction(a, b, _div_inplace, num_threads, direction=direction)
| 23,291
|
def is_shared_object(s):
"""
Return True if s looks like a shared object file.
Example: librt.so.1
"""
    so = re.compile(r'^[\w_\-]+\.so\.[0-9]+\.*.[0-9]*$', re.IGNORECASE).match
return so(s)
| 23,292
|
def get_name(properties, lang):
"""Return the Place name from the properties field of the elastic response
Here 'name' corresponds to the POI name in the language of the user request (i.e. 'name:{lang}' field).
If lang is None or if name:lang is not in the properties
Then name receives the local name value
'local_name' corresponds to the name in the language of the country where the POI is located.
>>> get_name({}, 'fr') is None
True
>>> get_name({'name':'spontini', 'name:en':'spontinien', 'name:fr':'spontinifr'}, None)
'spontini'
>>> get_name({'name':'spontini', 'name:en':'spontinien', 'name:fr':'spontinifr'}, 'cz')
'spontini'
>>> get_name({'name':'spontini', 'name:en':'spontinien', 'name:fr':'spontinifr'}, 'fr')
'spontinifr'
"""
name = properties.get(f"name:{lang}")
if name is None:
name = properties.get("name")
return name
| 23,293
|
def get_task(name):
"""Return the chosen task."""
tasks_json = load_json('tasks.json')
return tasks_json[name]
| 23,294
|
def exportToXml(stations):
"""
    Export stations data to XML
"""
root = ET.Element("stations")
for aStation in stations:
station = ET.SubElement(root, "station")
station.set('lat', aStation['lat'])
station.set('lon', aStation['lon'])
station.set('name', aStation['name'])
station.set('city', aStation['city'])
station.set('source', ",".join((str(x) for x in aStation['source'])))
station.set('type', aStation['type'])
if 'orientation' in aStation:
station.set('orientation', aStation['orientation'])
if 'road' in aStation:
station.set('road', aStation['road'])
tree = ET.ElementTree(root)
# Writing to file XML a valid XML encoded in UTF-8 (because Unicode FTW)
tree.write("stations.xml", pretty_print=True, encoding="utf-8", xml_declaration=True)
| 23,295
|
def aws_credentials(request: pytest.fixture, aws_utils: pytest.fixture, profile_name: str):
"""
Fixture for setting up temporary AWS credentials from assume role.
:param request: _pytest.fixtures.SubRequest class that handles getting
a pytest fixture from a pytest function/fixture.
:param aws_utils: aws_utils fixture.
:param profile_name: Named AWS profile to store temporary credentials.
"""
aws_credentials_obj = AwsCredentials(profile_name)
original_access_key, original_secret_access_key, original_token = aws_credentials_obj.get_aws_credentials()
aws_credentials_obj.set_aws_credentials_by_session(aws_utils.assume_session())
def teardown():
# Reset to the named profile using the original AWS credentials
aws_credentials_obj.set_aws_credentials(original_access_key, original_secret_access_key, original_token)
request.addfinalizer(teardown)
return aws_credentials_obj
| 23,296
|
def mean_predictions(predicted):
"""
    Calculate the mean of predictions that overlap. This is done mostly to be able to plot what the model is doing.
-------------------------------------------------------
Args:
predicted : numpy array
Numpy array with shape (Number points to predict - prediction length -1, predictions length)
-------------------------------------------------------
return:
predictions_mean : list
list with len of number to predict where each position is the mean of all predictions to that step
"""
array_global = [[] for _ in range((predicted.shape[0] + predicted.shape[1]))]
for i in range(predicted.shape[0]):
for l, value in enumerate(predicted[i]):
array_global[i + l].append((float(value)))
predictions_mean = []
for i in range(len(array_global) - 1):
predictions_mean.append(np.array(array_global[i]).mean())
return predictions_mean
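# Hedged worked example: two overlapping 2-step forecasts are averaged
# position by position (assumes numpy is imported, as in the function above).
predicted = np.array([[1., 2.],
                      [3., 4.]])
print(mean_predictions(predicted))  # [1.0, 2.5, 4.0]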
| 23,297
|
def test_serverspec_binary_file(host):
"""
Tests if shellcheck binary is file type.
"""
assert host.file(PACKAGE_BINARY).is_file
| 23,298
|
def update_diskspace(dmfilestat, cached=None):
"""Update diskspace field in dmfilestat object"""
try:
# search both results directory and raw data directory
search_dirs = [
dmfilestat.result.get_report_dir(),
dmfilestat.result.experiment.expDir,
]
if not cached:
cached = dm_utils.get_walk_filelist(
search_dirs, list_dir=dmfilestat.result.get_report_dir()
)
total_size = 0
# Create a list of files eligible to process
# exclude onboard_results folder if thumbnail or if fullchip was reanalyzed from signal processing
sigproc_results_dir = os.path.join(
dmfilestat.result.get_report_dir(), "sigproc_results"
)
exclude_onboard_results = dmfilestat.result.isThumbnail or (
"onboard_results" not in os.path.realpath(sigproc_results_dir)
)
for start_dir in search_dirs:
to_process = []
if os.path.isdir(start_dir):
to_process, _ = dm_utils._file_selector(
start_dir,
dmfilestat.dmfileset.include,
dmfilestat.dmfileset.exclude,
[],
exclude_onboard_results,
add_linked_sigproc=True,
cached=cached,
)
# process files in list
for path in to_process[1:]:
try:
# logger.debug("%d %s %s" % (j, 'diskspace', path), extra = logid)
if not os.path.islink(path):
total_size += os.lstat(path)[6]
except Exception as inst:
if inst.errno == errno.ENOENT:
pass
else:
errmsg = "update_diskspace %s" % (inst)
logger.error(errmsg, extra=logid)
diskspace = float(total_size) / (1024 * 1024)
except:
diskspace = None
raise
finally:
dmfilestat.diskspace = diskspace
dmfilestat.save()
return diskspace
| 23,299
|