| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
beam_pipeline_args: Text) -> pipeline.Pipeline:
"""Custom component demo pipeline."""
examples = external_input(data_root)
# Brings data into the pipeline or otherwise joins/converts training data.
example_gen = CsvExampleGen(input=examples)
hello = component.HelloComponent(
input_data=example_gen.outputs['examples'], name=u'HelloWorld')
# Computes statistics over data for visualization and example validation.
statistics_gen = StatisticsGen(examples=hello.outputs['output_data'])
return pipeline.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=pipeline_root,
components=[example_gen, hello, statistics_gen],
enable_cache=True,
beam_pipeline_args=beam_pipeline_args
)
| 19,000
|
def toggleAction(*args, **kwargs):
"""A decorator which identifies a class method as a toggle action. """
return ActionFactory(ToggleAction, *args, **kwargs)
| 19,001
|
def getHistograph(dataset = {}, variable = ""):
"""
Calculates a histogram-like summary on a variable in a dataset
    and returns a dictionary. The keys of the dictionary are the unique items
    of the selected variable. The value for each key is the number
    of times that item occurred in the data set.
"""
data = getDatalist(dataGraph = dataset['DATA'], varGraph = dataset['VARIABLES'], variable = variable)
return histograph(data)
| 19,002
|
async def test_update_system_data_v3(
event_loop, v3_server, v3_sensors_json, v3_settings_json, v3_subscriptions_json
):
"""Test getting updated data for a v3 system."""
async with v3_server:
v3_server.add(
"api.simplisafe.com",
f"/v1/users/{TEST_USER_ID}/subscriptions",
"get",
aresponses.Response(text=json.dumps(v3_subscriptions_json), status=200),
)
v3_server.add(
"api.simplisafe.com",
f"/v1/ss3/subscriptions/{TEST_SUBSCRIPTION_ID}/sensors",
"get",
aresponses.Response(text=json.dumps(v3_sensors_json), status=200),
)
v3_server.add(
"api.simplisafe.com",
f"/v1/ss3/subscriptions/{TEST_SUBSCRIPTION_ID}/settings/pins",
"get",
aresponses.Response(text=json.dumps(v3_settings_json), status=200),
)
async with aiohttp.ClientSession(loop=event_loop) as websession:
api = await API.login_via_credentials(TEST_EMAIL, TEST_PASSWORD, websession)
systems = await api.get_systems()
system = systems[TEST_SYSTEM_ID]
await system.update()
assert system.serial == TEST_SYSTEM_SERIAL_NO
assert system.system_id == TEST_SYSTEM_ID
assert system.api._access_token == TEST_ACCESS_TOKEN
assert len(system.sensors) == 21
| 19,003
|
def p_expr_list_not_empty(p):
"""
expr_list_not_empty : expr COMMA expr_list_not_empty
| expr
"""
if len(p) == 4:
p[0] = [p[1]] + p[3]
else:
p[0] = [p[1]]
| 19,004
|
def _get_client_by_settings(
client_cls, # type: Type[BaseClient]
bk_app_code=None, # type: Optional[str]
bk_app_secret=None, # type: Optional[str]
accept_language=None, # type: Optional[str]
**kwargs
):
"""Returns a client according to the django settings"""
client = client_cls(**kwargs)
client.update_bkapi_authorization(
bk_app_code=bk_app_code or settings.get(SettingKeys.APP_CODE),
bk_app_secret=bk_app_secret or settings.get(SettingKeys.APP_SECRET),
)
    # disable global https verify unless it is explicitly enabled in settings
    if not settings.get(SettingKeys.BK_API_CLIENT_ENABLE_SSL_VERIFY):
client.disable_ssl_verify()
if accept_language:
client.session.set_accept_language(accept_language)
return client
| 19,005
|
def watch(process):
"""Watch the log output from a process using tail -f."""
with lcd(root_dir):
local('tail -f logs/%s.log -s 0.5' % process)
| 19,006
|
def create_and_configure_jinja_environment(
dirs, autoescape=True, handler=None, default_locale='en_US'):
"""Sets up an environment and gets jinja template."""
# Defer to avoid circular import.
from controllers import sites
locale = None
app_context = sites.get_course_for_current_request()
if app_context:
locale = app_context.get_current_locale()
if not locale:
locale = app_context.default_locale
if not locale:
locale = default_locale
jinja_environment = create_jinja_environment(
jinja2.FileSystemLoader(dirs), locale=locale, autoescape=autoescape)
jinja_environment.filters['gcb_tags'] = get_gcb_tags_filter(handler)
return jinja_environment
| 19,007
|
def bootstrap_cost(target_values, class_probability_matrix, cost_function,
num_replicates):
"""Bootstraps cost for one set of examples.
E = number of examples
K = number of classes
B = number of bootstrap replicates
:param target_values: length-E numpy array of target values (integers in
range 0...[K - 1]).
:param class_probability_matrix: E-by-K numpy array of predicted
probabilities.
:param cost_function: Cost function, used to evaluate predicted
probabilities. Must be negatively oriented (so that lower values are
better), with the following inputs and outputs.
Input: target_values: Same as input to this method.
Input: class_probability_matrix: Same as input to this method.
Output: cost: Scalar value.
:param num_replicates: Number of bootstrap replicates.
:return: cost_values: length-B numpy array of cost values.
"""
error_checking.assert_is_integer(num_replicates)
error_checking.assert_is_geq(num_replicates, 1)
cost_values = numpy.full(num_replicates, numpy.nan)
if num_replicates == 1:
cost_values[0] = cost_function(target_values,
class_probability_matrix)
else:
for k in range(num_replicates):
_, these_indices = bootstrapping.draw_sample(target_values)
cost_values[k] = cost_function(
target_values[these_indices],
class_probability_matrix[these_indices, ...]
)
print('Average cost = {0:.4f}'.format(numpy.mean(cost_values)))
return cost_values
| 19,008
|
def jdeblend_bob(src_fm, bobbed):
"""
Stronger version of jdeblend() that uses a bobbed clip to deblend.
Parameters:
clip src_fm: Source after field matching, must have field=3 and low cthresh.
        clip bobbed: Bobbed source.
Example:
src =
from havsfunc import QTGMC
qtg = QTGMC(src, TFF=True, SourceMatch=3)
vfm = src.vivtc.VFM(order=1, field=3, cthresh=3)
dblend = jdeblend_bob(vfm, qtg)
dblend = jdeblend_kf(dblend, vfm)
"""
bob0 = bobbed.std.SelectEvery(2, 0)
bob1 = bobbed.std.SelectEvery(2, 1)
ab0, bc0, c0 = bob0, bob0[1:] + bob0[-1], bob0[2:] + bob0[-2]
a1, ab1, bc1 = bob1[0] + bob1[:-1], bob1, bob1[1:] + bob1[-1]
dbd = core.std.Expr([a1, ab1, ab0, bc1, bc0, c0], 'y x - z + b c - a + + 2 /')
dbd = core.std.ShufflePlanes([bc0, dbd], [0, 1, 2], vs.YUV)
select_src = [src_fm.std.SelectEvery(5, i) for i in range(5)]
select_dbd = [dbd.std.SelectEvery(5, i) for i in range(5)]
inters = _inter_pattern(select_src, select_dbd)
return core.std.FrameEval(src_fm, partial(_jdeblend_eval, src=src_fm, inters=inters), [src_fm, src_fm[0]+src_fm[:-1]])
| 19,009
|
def ParseArgs():
"""Parse the command line options, returning an options object."""
usage = 'Usage: %prog [options] LIST|GET|LATEST'
option_parser = optparse.OptionParser(usage)
AddCommandLineOptions(option_parser)
log_helper.AddCommandLineOptions(option_parser)
options, args = option_parser.parse_args()
if not options.repo_url:
option_parser.error('--repo-url is required')
if len(args) == 1:
action = args[0].lower()
if action in ('list', 'latest', 'get'):
return options, action
option_parser.error(
'A single repository action (LIST, GET, or LATEST) is required')
| 19,010
|
def generate_table_definition(schema_and_table, column_info,
primary_key=None, foreign_keys=None,
diststyle=None, distkey=None, sortkey=None):
"""Return a CREATE TABLE statement as a string."""
if not column_info:
raise Exception('No columns specified for {}'.format(schema_and_table))
out = io.StringIO()
out.write('CREATE TABLE {} (\n'.format(schema_and_table))
columns_count = len(column_info)
for i, (column, type_) in enumerate(column_info):
out.write(' "{}" {}'.format(column, type_))
if (i < columns_count - 1) or primary_key or foreign_keys:
out.write(',')
out.write('\n')
if primary_key:
out.write(' PRIMARY KEY({})'.format(primary_key))
if foreign_keys:
out.write(',')
out.write('\n')
foreign_keys = foreign_keys or []
foreign_keys_count = len(foreign_keys)
for i, (key, reftable, refcolumn) in enumerate(foreign_keys):
out.write(' FOREIGN KEY({}) REFERENCES {}({})'.format(
key, reftable, refcolumn
))
if i < foreign_keys_count - 1:
out.write(',')
out.write('\n')
out.write(')\n')
if diststyle:
out.write('DISTSTYLE {}\n'.format(diststyle))
if distkey:
out.write('DISTKEY({})\n'.format(distkey))
if sortkey:
if isinstance(sortkey, str):
out.write('SORTKEY({})\n'.format(sortkey))
elif len(sortkey) == 1:
out.write('SORTKEY({})\n'.format(sortkey[0]))
else:
out.write('COMPOUND SORTKEY({})\n'.format(', '.join(sortkey)))
return out.getvalue()
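# Usage sketch (added for illustration; the schema, table and column names
# below are hypothetical, not taken from the original source):
ddl = generate_table_definition(
    'analytics.events',
    [('event_id', 'BIGINT'), ('user_id', 'BIGINT'), ('payload', 'VARCHAR(1024)')],
    primary_key='event_id',
    distkey='user_id',
    sortkey=['event_id', 'user_id'],
)
print(ddl)  # CREATE TABLE ... with PRIMARY KEY, DISTKEY and COMPOUND SORTKEY clauses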
| 19,011
|
def gradient_descent_update(x, gradx, learning_rate):
"""
Performs a gradient descent update.
"""
# Return the new value for x
return x - learning_rate * gradx
| 19,012
|
def nth_permutation(n, size=0):
"""nth permutation of 0..size-1
where n is from 0 to size! - 1
"""
lehmer = int_to_lehmer(n, size)
return lehmer_to_permutation(lehmer)
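# int_to_lehmer and lehmer_to_permutation are not defined in this snippet; a
# minimal sketch consistent with the docstring (lexicographic ordering via the
# factorial number system) is assumed below:
def int_to_lehmer(n, size):
    """Decompose n into its Lehmer code (factorial-base digits), most significant first."""
    lehmer = [0] * size
    for i in range(2, size + 1):
        n, lehmer[size - i] = divmod(n, i)
    return lehmer

def lehmer_to_permutation(lehmer):
    """Convert a Lehmer code into the corresponding permutation of 0..len-1."""
    pool = list(range(len(lehmer)))
    return [pool.pop(digit) for digit in lehmer]

# e.g. nth_permutation(0, 3) -> [0, 1, 2]; nth_permutation(5, 3) -> [2, 1, 0]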
| 19,013
|
def signin(request):
"""
    View for logging in a user.
    """
    if request.user.is_authenticated:
        return redirect('/')
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return_var = redirect('/')
else:
form = AuthenticationForm(request.POST)
return_var = render(request, 'registration/login.html', {'form': form})
else:
form = AuthenticationForm()
return_var = render(request, 'registration/login.html', {'form': form})
return return_var
| 19,014
|
def PlotDensity(data_list, ax, args):
""" Plot one dimensional data as density curves.
Args:
data_list: a list of Data objects
ax: a matplotlib axis object
args: an argparse arguments object
"""
# first create density list, then pass that to PlotLineScatter
density_list = []
for data in data_list:
d = Data()
d.label = data.label
density = gaussian_kde(data.y)
x_data = numpy.linspace(numpy.min(data.y),
numpy.max(data.y),
args.density_num_bins)
if args.density_covariance is not None:
density.covariance_factor = lambda : args.density_covariance
density._compute_covariance() # bad mojo calling privates like this
d.x = numpy.array(x_data)
d.y = numpy.array(density(x_data))
density_list.append(d)
PlotLineScatter(density_list, ax, args)
| 19,015
|
def write_video(filename, video_array, fps, video_codec='libx264', options=None):
"""
    Writes a 4d tensor in [T, H, W, C] format to a video file
Parameters
----------
filename : str
path where the video will be saved
video_array : Tensor[T, H, W, C]
tensor containing the individual frames, as a uint8 tensor in [T, H, W, C] format
fps : Number
frames per second
"""
_check_av_available()
video_array = torch.as_tensor(video_array, dtype=torch.uint8).numpy()
container = av.open(filename, mode='w')
stream = container.add_stream(video_codec, rate=fps)
stream.width = video_array.shape[2]
stream.height = video_array.shape[1]
stream.pix_fmt = 'yuv420p' if video_codec != 'libx264rgb' else 'rgb24'
stream.options = options or {}
for img in video_array:
frame = av.VideoFrame.from_ndarray(img, format='rgb24')
frame.pict_type = 'NONE'
for packet in stream.encode(frame):
container.mux(packet)
# Flush stream
for packet in stream.encode():
container.mux(packet)
# Close the file
container.close()
| 19,016
|
def unconfigure_ip_prefix_list(device, prefix_list_name, seq, ip_address):
""" unconfigure prefix-list
Args:
device (`obj`): device to execute on
        prefix_list_name (`str`): prefix-list name
        seq (`int`): sequence number of the prefix-list entry
        ip_address (`str`): IP address to remove (configured as a /32 entry)
Return:
None
Raises:
SubCommandFailure
"""
try:
device.configure(
"no ip prefix-list {prefix_list_name} seq {seq} permit "
"{ip_address}/32".format(prefix_list_name=prefix_list_name,
seq=seq, ip_address=ip_address)
)
except SubCommandFailure as e:
raise SubCommandFailure(
"Failed to unconfigure prefix-list {prefix_list_name} for "
"{ip_address}, Error: {error}".format(
prefix_list_name=prefix_list_name, ip_address=ip_address,
error=e
)
)
| 19,017
|
def compare_features(f1, f2):
"""Comparison method for feature sorting."""
def get_prefix(feature):
if feature.startswith('e1-'): return 'e1'
if feature.startswith('e2-'): return 'e2'
if feature.startswith('e-'): return 'e'
if feature.startswith('t-'): return 't'
return 'x'
prefixes = {'e': 1, 't': 2, 'e1': 3, 'e2': 4}
p1 = get_prefix(f1)
p2 = get_prefix(f2)
    # order by the prefix ranking defined above; unknown prefixes ('x') sort last
    prefix_comparison = cmp(prefixes.get(p1, 5), prefixes.get(p2, 5))
return cmp(f1, f2) if prefix_comparison == 0 else prefix_comparison
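# Note (added): cmp() is a Python 2 builtin. If this comparator is run under
# Python 3, a minimal shim such as the following is assumed:
def cmp(a, b):
    return (a > b) - (a < b)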
| 19,018
|
def set_backup_count(backup_count):
"""
    Set the maximum number of log files to keep for rotating logging. If the parameter cannot be
    converted to an int, the value is left unchanged.
:param backup_count: int number of how many files to keep to rotate logs.
:return: None
"""
global _backup_count
try:
_backup_count = int(backup_count)
except ValueError:
pass
| 19,019
|
def localized_index(lang):
"""
Example view demonstrating rendering a simple HTML page.
"""
context = make_context()
context['lang'] = lang
context['content'] = context['COPY']['content-%s' % lang]
context['form'] = context['COPY']['form-%s' % lang]
context['share'] = context['COPY']['share-%s' % lang]
context['calendar'] = context['COPY']['calendar-%s' % lang]
context['initial_card'] = context['COPY']['config']['initial_card'].__unicode__()
context['cards'] = _make_card_list(lang)
context['us_states'] = us.states.STATES
return make_response(render_template('index.html', **context))
| 19,020
|
def build_output_unit_vqa(q_encoding, m_last, num_choices, apply_dropout,
scope='output_unit', reuse=None):
"""
Apply a 2-layer fully-connected network to predict answers. Apply dropout
if specified.
Input:
q_encoding: [N, d], tf.float32
m_last: [N, d], tf.float32
Return:
vqa_scores: [N, num_choices], tf.float32
"""
output_dim = cfg.MODEL.VQA_OUTPUT_DIM
with tf.variable_scope(scope, reuse=reuse):
if cfg.MODEL.VQA_OUTPUT_USE_QUESTION:
fc1 = fc_elu(
'fc1', tf.concat([q_encoding, m_last], axis=1),
output_dim=output_dim)
else:
fc1 = fc_elu('fc1_wo_q', m_last, output_dim=output_dim)
if apply_dropout:
fc1 = tf.nn.dropout(fc1, cfg.TRAIN.DROPOUT_KEEP_PROB)
fc2 = fc('fc2', fc1, output_dim=num_choices,
biases_initializer=tf.constant_initializer(
cfg.TRAIN.VQA_SCORE_INIT))
vqa_scores = fc2
return vqa_scores
| 19,021
|
def coding_problem_16(length):
"""
You run a sneaker website and want to record the last N order ids in a log. Implement a data structure to
accomplish this, with the following API:
record(order_id): adds the order_id to the log
get_last(i): gets the ith last element from the log. i is guaranteed to be smaller than or equal to N.
You should be as efficient with time and space as possible.
Example:
>>> log = coding_problem_16(10)
>>> for id in xrange(20):
... log.record(id)
>>> log.get_last(0)
[]
>>> log.get_last(1)
[19]
>>> log.get_last(5)
[15, 16, 17, 18, 19]
>>> log.record(20)
>>> log.record(21)
>>> log.get_last(1)
[21]
>>> log.get_last(3)
[19, 20, 21]
"""
class OrdersLog(object):
def __init__(self, num):
self.circular_buffer = [None] * num
self.current_index = 0
def record(self, order_id):
self.circular_buffer[self.current_index] = order_id
self.current_index += 1
if self.current_index == len(self.circular_buffer):
self.current_index = 0
def get_last(self, num):
start_index = self.current_index - num
if start_index < 0: # wrap around
return self.circular_buffer[start_index:] + self.circular_buffer[:self.current_index]
else: # no wrapping required
return self.circular_buffer[start_index:self.current_index]
return OrdersLog(length)
| 19,022
|
def _check_dimensions(n_grobs, nrow = None, ncol = None):
"""
Internal function to provide non-Null nrow and ncol numbers
    given a number of images and potentially some information about the
desired nrow/ncols.
Arguments:
----------
n_grobs: int, number of images to be organized
nrow: int, number of rows user wants (Default is None)
ncol: int, number of columns user wants (Default is None)
Returns:
--------
(nrow, ncol) tuple that meets user desires or errors if cannot meet
users expectation
"""
if nrow is None and ncol is None:
nrow = int(np.ceil(np.sqrt(n_grobs)))
ncol = int(np.ceil(n_grobs/nrow))
if nrow is None:
nrow = int(np.ceil(n_grobs/ncol))
if ncol is None:
ncol = int(np.ceil(n_grobs/nrow))
assert n_grobs <= nrow * ncol, \
"nrow * ncol < the number of grobs, please correct"
return nrow, ncol
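# Illustrative behaviour (added):
# _check_dimensions(7)         -> (3, 3)   nrow = ceil(sqrt(7)) = 3, ncol = ceil(7 / 3) = 3
# _check_dimensions(7, ncol=2) -> (4, 2)   nrow = ceil(7 / 2) = 4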
| 19,023
|
def year_parse(s: str) -> int:
"""Parses a year from a string."""
regex = r"((?:19|20)\d{2})(?:$|[-/]\d{2}[-/]\d{2})"
try:
year = int(re.findall(regex, str(s))[0])
except IndexError:
year = None
return year
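# Illustrative behaviour of the regex above (added):
# year_parse("2015-06-01")       -> 2015  (year followed by -MM-DD)
# year_parse("released in 1999") -> 1999  (year at end of string)
# year_parse("no year here")     -> None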
| 19,024
|
def sampleDistribution(d):
"""
Expects d to be a list of tuples
The first element should be the probability
If the tuples are of length 2 then it returns the second element
Otherwise it returns the suffix tuple
"""
# {{{
import random
z = float(sum(t[0] for t in d))
if z == 0.0:
eprint("sampleDistribution: z = 0")
eprint(d)
r = random.random()
u = 0.0
for index, t in enumerate(d):
p = t[0] / z
# This extra condition is needed for floating-point bullshit
if r <= u + p or index == len(d) - 1:
if len(t) <= 2:
return t[1]
else:
return t[1:]
u += p
assert False
| 19,025
|
def logout():
"""
    `/logout` endpoint
Logs out a user and redirects to the index page.
"""
logout_user()
flash("You are logged out.", "info")
return redirect(url_for("main.index"))
| 19,026
|
def convertExcelToUe():
""" Convert CSV exported file from Excel to a CSV file readable by UE4 for String Tables """
for f in csvFiles:
fName = splitext(f)[0]
try:
file = open(join(inputPath, f), "r", encoding="utf8")
fContent = file.read()
lines = fContent.split("\n")
newF = open(join(outputPath, fName + newFileSuffix), "w", encoding="utf-16le")
for l in lines:
l = reformatLine(l)
newLine = l.replace(';', ',', 1) + '\n'
newF.write(newLine)
# Close files
file.close()
newF.close()
# File created message
print("[+] " + fName + newFileSuffix + " created in Output folder.")
except Exception as e:
            print('[-] An error occurred: ' + str(e))
quit()
| 19,027
|
def players_season_totals(season_end_year, playoffs=False, skip_totals=False,
output_type=None, output_file_path=None, output_write_option=None, json_options=None):
"""
scrape the "Totals" stats of all players from a single year
Args:
season_end_year (int): year in which the season ends, e.g. 2019 for 2018-2019 season
playoffs (bool): whether to grab the playoffs (True) or regular season (False) table
        skip_totals (bool): whether (True) or not (False) to skip the rows representing
the complete year of a player that is traded (no effect for the playoffs)
output_type (str): either csv or json, if you want to save that type of file
output_file_path (str): file you want to save to
output_write_option (str): whether to write (default) or append
json_options (dict): dictionary of options to pass to the json writer
Returns:
a list of rows; each row is a dictionary with items named from COLUMN_RENAMER
"""
try:
values = http_client.players_season_totals(season_end_year,
skip_totals=skip_totals, playoffs=playoffs)
except requests.exceptions.HTTPError as http_error:
if http_error.response.status_code == requests.codes.not_found:
raise InvalidSeason(season_end_year=season_end_year)
else:
raise http_error
return output.output(
values=values,
output_type=output_type,
output_file_path=output_file_path,
output_write_option=output_write_option,
csv_writer=output.players_season_totals_to_csv,
encoder=BasketballReferenceJSONEncoder,
json_options=json_options,
)
| 19,028
|
def array_shuffle(x,axis = 0, random_state = 2020):
"""
    Shuffle a multidimensional array along an arbitrary axis.
    :param x: ndarray
    :param axis: axis along which to shuffle
    :return: the shuffled array
"""
new_index = list(range(x.shape[axis]))
random.seed(random_state)
random.shuffle(new_index)
    x_new = np.transpose(x, ([axis] + [i for i in range(len(x.shape)) if i != axis]))
x_new = x_new[new_index][:]
new_dim = list(np.array(range(axis))+1)+[0]+list(np.array(range(len(x.shape)-axis-1))+axis+1)
x_new = np.transpose(x_new, tuple(new_dim))
return x_new
| 19,029
|
def cat(out_media_fp, l_in_media_fp):
"""
Args:
out_media_fp(str): Output Media File Path
l_in_media_fp(list): List of Media File Path
Returns:
return_code(int):
"""
ref_vcodec = get_video_codec(l_in_media_fp[0])
ref_acodec = get_audio_codec(l_in_media_fp[0])
ref_vscale = get_video_scale(l_in_media_fp[0])
for f in l_in_media_fp:
if ref_vcodec != get_video_codec(f):
logger.error('Video Codecs are different.')
return -1
if ref_acodec != get_audio_codec(f):
logger.error('Audio Codecs are different.')
return -1
if ref_vscale != get_video_scale(f):
logger.error('Video Scales are different.')
return -1
ffmpeg = FFmpeg.FFmpeg()
ffmpeg.set_output_file(out_media_fp)
ffmpeg.set_input_format('concat')
ffmpeg.set_video_encoder('copy')
ffmpeg.set_audio_encoder('copy')
ffmpeg.set_safe(0)
try:
fpath = tempfile.mkstemp()[1]
with open(fpath, 'w') as fp:
for media_fp in l_in_media_fp:
fp.write('file \'{}\'\n'.format(os.path.abspath(media_fp)))
ffmpeg.add_input_file(fpath)
with ffmpeg.build().run() as proc:
out, err = proc.communicate()
logger.error(err.decode("utf-8"))
os.remove(fpath)
return proc.returncode
except subprocess.CalledProcessError:
logger.error('FFmpeg failed.')
| 19,030
|
def get_recent_activity_rows(chase_driver):
"""Return the 25 most recent CC transactions, plus any pending
transactions.
Returns:
A list of lists containing the columns of the Chase transaction list.
"""
_goto_link(chase_driver, "See activity")
time.sleep(10)
rows = chase_driver.find_elements_by_css_selector("tr.summary")
trans_list = []
for row in rows:
tds = row.find_elements_by_tag_name('td')
tds = tds[1:] # skip the link in first cell
trans_list.append([td.text for td in tds])
return trans_list
| 19,031
|
def test_forecast_url_good():
"""
    Test that a valid station id passed to get_noaa_forecast_url returns a forecast url
"""
assert get_noaa_forecast_url(44099) is not None
| 19,032
|
def evaluate(test_loader, model, test_img_num):
"""
Evaluate.
:param test_loader: DataLoader for test data
:param model: model
"""
# Make sure it's in eval mode
model.eval()
# Lists to store detected and true boxes, labels, scores
det_boxes = list()
det_labels = list()
det_scores = list()
true_boxes = list()
true_labels = list()
true_difficulties = list() # it is necessary to know which objects are 'difficult', see 'calculate_mAP' in utils.py
with torch.no_grad():
f1_max = 0
ap_max = 0
ar_max = 0
for min_score in [0.1]:
for max_overlap in [0.8]:
# Batches
f1 = 0
ap = 0
ar = 0
images_num = 0
for i, (images, boxes, labels) in enumerate(tqdm(test_loader, desc='Evaluating')):
if i < test_img_num:
images = images.to(device) # (N, 3, 300, 300)
print(images)
print(images.size)
# Forward prop.
predicted_locs, predicted_scores = model(images)
# Detect objects in SSD output
det_boxes_batch, det_labels_batch, det_scores_batch = model.detect_objects(predicted_locs, predicted_scores,
min_score=min_score, max_overlap=max_overlap,
top_k=200, original_image = images, max_OCR_overlap=0.2, max_OCR_pixel=245)
                        # Evaluation MUST be at min_score=0.01, max_overlap=0.45, top_k=200 for fair comparison with the paper's results and other repos
# Store this batch's results for mAP calculation
boxes = [b.to(device) for b in boxes]
labels = [l.to(device) for l in labels]
det_boxes.extend(det_boxes_batch)
det_labels.extend(det_labels_batch)
det_scores.extend(det_scores_batch)
true_boxes.extend(boxes)
true_labels.extend(labels)
f1_0, ap_0, ar_0 = calc_f1(det_boxes_batch[0], boxes[0], iou_thresh=0.5)
# print()
# print("F1: ", f1_0)
# print("AP: ", ap_0)
# print("AR: ", ar_0)
f1 += f1_0
ap += ap_0
ar += ar_0
images_num += 1
if f1 / images_num > f1_max:
f1_max = f1/ images_num
f1_max_par = [min_score, max_overlap]
print('f1 max:' ,f1_max)
print('f1 max par:', f1_max_par )
if ap / images_num > ap_max:
ap_max = ap/ images_num
ap_max_par = [min_score, max_overlap]
print('ap max:' ,ap_max)
print('ap max par:', ap_max_par )
if ar / images_num > ar_max:
ar_max = ar/ images_num
ar_max_par = [min_score, max_overlap]
print('ar max:' ,ar_max)
print('ar max par:', ar_max_par )
# Calculate mAP
# APs, mAP = calculate_mAP(det_boxes, det_labels, det_scores, true_boxes, true_labels)
# print("Final F1: ", f1 / images_num)
# print("Final AP: ", ap / images_num)
# print("Final AR: ", ar / images_num)
# Print AP for each class
# pp.pprint(APs)
#print('\nMean Average Precision (mAP): %.3f' % mAP)
| 19,033
|
def loglikelihood(x, mean, var, pi):
"""
    Equation (9.28): log-likelihood of a Gaussian mixture
"""
lkh = []
for mean_k, var_k, pi_k in zip(mean, var, pi):
lkh.append(pi_k * gaussian_pdf(x, mean_k, var_k))
return np.sum(np.log(np.sum(lkh, 0)))
| 19,034
|
def calc_E_ST_GJ(E_star_ST):
"""基準一次エネルギー消費量(GJ/年)の計算 (2)
Args:
E_star_ST(float): 基準一次エネルギー消費量(J/年)
Returns:
float: 基準一次エネルギー消費量(GJ/年)
"""
# 小数点以下一位未満の端数があるときはこれを切り上げる
return ceil(E_star_ST / 100) / 10
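# Worked example of the rounding (added): calc_E_ST_GJ(73985.0)
# -> ceil(739.85) / 10 -> 74.0, i.e. any remainder below 0.1 GJ is rounded up.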
| 19,035
|
def next_line(grd_file):
"""
next_line
Function returns the next line in the file
that is not a blank line, unless the line is
'', which is a typical EOF marker.
"""
done = False
while not done:
line = grd_file.readline()
if line == '':
return line, False
elif line.strip():
return line, True
| 19,036
|
def look_for(room, position, main, sub=None):
"""
:type room: rooms.room_mind.RoomMind
"""
if not room.look_at:
raise ValueError("Invalid room argument")
if position.pos:
position = position.pos
if sub:
return _.find(room.look_at(LOOK_FLAGS, position),
lambda f: f.color == main_to_flag_primary[main] and
f.secondaryColor == sub_to_flag_secondary[sub])
else:
flag_def = flag_definitions[main]
if not flag_def:
# TODO: This is a hack because a common pattern is
# look_for(room, pos, flags.MAIN_DESTRUCT, flags.structure_type_to_flag_sub[structure_type])
# if there is no flag for a given structure, sub will be undefined, and thus this side will be called
# and not the above branch.
return []
return _.find(room.look_at(LOOK_FLAGS, position),
lambda f: f.color == flag_def[0] and f.secondaryColor == flag_def[1])
| 19,037
|
def test_measure_simple(mcp):
"""
Asserts that the result of a measurement is a set of three bytes, and that no more than 3 bytes are returned
"""
desiredMeasurements = 1
measuredData = mcp['device'].measure()
assert_equal(len(measuredData), 3)
| 19,038
|
def test_searchparam_type_date_period_le(store: FHIRStore, index_resources):
"""Handle date search parameters on FHIR data type "period"
The date format is the standard XML format, though other formats may be supported
prefix le: the range below the search value intersects (i.e. overlaps)
with the range of the target value or the range of the search
value fully contains the range of the target value
"""
result = store.search("Encounter", query_string="date=le2015-01-17T16:15:00")
assert result.total == 1
result = store.search("Encounter", query_string="date=le2015-01-17")
assert result.total == 1
result = store.search("Encounter", query_string="date=le2015-01-17T15:15:00")
assert_empty_bundle(result)
| 19,039
|
def grayScaleHist(image):
"""
    Convert a color image to grayscale and show the histogram of the grayscale image.
"""
gray_scale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow('Grayscale Image', gray_scale)
hist = cv2.calcHist([gray_scale],[0], None, [256], [0,256])
_ = plt.figure()
_ = plt.title('Grayscale Histogram')
_ = plt.xlabel('Bins from 0 - 255')
_ = plt.ylabel('# of pixels')
_ = plt.plot(hist)
_ = plt.xlim([0,256])
plt.show()
cv2.waitKey(0)
| 19,040
|
def get_proto(proto):
"""
Returns a protocol number (in the /etc/protocols sense, e.g. 6 for
TCP) for the given input value. For the protocols that have
PROTO_xxx constants defined, this can be provided textually and
case-insensitively, otherwise the provided value gets converted to
an integer and returned.
Returns None if this conversion failed.
"""
protos = {
"ICMP": PROTO_ICMP,
"ICMP6": PROTO_ICMP6,
"SCTP": PROTO_SCTP,
"TCP": PROTO_TCP,
"UDP": PROTO_UDP,
}
try:
return protos[proto.upper()]
except (KeyError, AttributeError):
pass
try:
return int(proto)
except ValueError:
pass
return None
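# Illustrative behaviour (added), assuming the PROTO_* constants carry the
# usual /etc/protocols numbers (e.g. PROTO_TCP == 6):
# get_proto("tcp") -> PROTO_TCP
# get_proto("17")  -> 17
# get_proto("foo") -> None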
| 19,041
|
def f5_update_policy_cookie_command(client: Client, policy_md5: str, cookie_id: str,
cookie_name: str,
perform_staging: bool, parameter_type: str,
enforcement_type: str,
attack_signatures_check: bool) -> CommandResults:
"""
Update a given cookie of a specific policy
Args:
client (Client): f5 client.
policy_md5 (str): MD5 hash of the policy.
cookie_id (str): ID of the cookie.
cookie_name (str): The new cookie name to add.
perform_staging (bool): Indicates if the user wishes the new file type to be at staging.
parameter_type (str): Type of the new parameter.
enforcement_type (str): Enforcement type.
attack_signatures_check (bool): Should attack signatures be checked. If enforcement type
is set to 'enforce', this field will not get any value.
"""
result = client.update_policy_cookie(policy_md5, cookie_id, cookie_name, perform_staging,
parameter_type, enforcement_type, attack_signatures_check)
outputs, headers = build_output(OBJECT_FIELDS, result)
readable_output = tableToMarkdown('f5 data for updating cookie:',
outputs, headers, removeNull=True)
command_results = CommandResults(
outputs_prefix='f5.Cookies',
outputs_key_field='id',
readable_output=readable_output,
outputs=remove_empty_elements(outputs),
raw_response=result
)
return command_results
| 19,042
|
def test_value_error():
""" Test value error """
with pytest.raises(ValueError):
snell_angle(0, 0, 0)
| 19,043
|
def ping():
"""
Determine if the container is working and healthy. In this sample container, we declare
it healthy if we can load the model successfully.
:return:
"""
health = False
try:
health = model is not None # You can insert a health check here
except:
pass
status = 200 if health else 404
return flask.Response(response='\n', status=status, mimetype='application/json')
| 19,044
|
def random(
shape,
density=0.01,
random_state=None,
data_rvs=None,
format='coo'
):
""" Generate a random sparse multidimensional array
Parameters
----------
shape: Tuple[int]
Shape of the array
density: float, optional
Density of the generated array.
random_state : Union[numpy.random.RandomState, int], optional
Random number generator or random seed. If not given, the
singleton numpy.random will be used. This random state will be used
for sampling the sparsity structure, but not necessarily for sampling
the values of the structurally nonzero entries of the matrix.
data_rvs : Callable
Data generation callback. Must accept one single parameter: number of
:code:`nnz` elements, and return one single NumPy array of exactly
that length.
format: str
The format to return the output array in.
Returns
-------
SparseArray
The generated random matrix.
See Also
--------
:obj:`scipy.sparse.rand`
Equivalent Scipy function.
:obj:`numpy.random.rand`
Similar Numpy function.
Examples
--------
>>> from sparse import random
>>> from scipy import stats
>>> rvs = lambda x: stats.poisson(25, loc=10).rvs(x, random_state=np.random.RandomState(1))
>>> s = random((2, 3, 4), density=0.25, random_state=np.random.RandomState(1), data_rvs=rvs)
>>> s.todense() # doctest: +NORMALIZE_WHITESPACE
array([[[ 0, 0, 0, 0],
[ 0, 34, 0, 0],
[33, 34, 0, 29]],
<BLANKLINE>
[[30, 0, 0, 34],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0]]])
"""
# Copied, in large part, from scipy.sparse.random
# See https://github.com/scipy/scipy/blob/master/LICENSE.txt
from .coo import COO
elements = np.prod(shape)
nnz = int(elements * density)
if random_state is None:
random_state = np.random
elif isinstance(random_state, Integral):
random_state = np.random.RandomState(random_state)
if data_rvs is None:
data_rvs = random_state.rand
# Use the algorithm from python's random.sample for k < mn/3.
if elements < 3 * nnz:
ind = random_state.choice(elements, size=nnz, replace=False)
else:
ind = np.empty(nnz, dtype=np.min_scalar_type(elements - 1))
selected = set()
for i in range(nnz):
j = random_state.randint(elements)
while j in selected:
j = random_state.randint(elements)
selected.add(j)
ind[i] = j
data = data_rvs(nnz)
ar = COO(ind[None, :], data, shape=nnz).reshape(shape)
return ar.asformat(format)
| 19,045
|
def file_md5_is_valid(fasta_file: Path, checksum: str) -> bool:
"""
Checks if the FASTA file matches the MD5 checksum argument.
Returns True if it matches and False otherwise.
:param fasta_file: Path object for the FASTA file.
:param checksum: MD5 checksum string.
:return: boolean indicating if the file validates.
"""
md5_hash = hashlib.md5()
with fasta_file.open(mode="rb") as fh:
# Read in small chunks to avoid memory overflow with large files.
while chunk := fh.read(8192):
md5_hash.update(chunk)
return md5_hash.hexdigest() == checksum
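# Usage sketch (added; the file name and checksum are placeholders):
fasta = Path("sequences.fasta")  # hypothetical path
expected_md5 = "9e107d9d372bb6826bd81d3542a419d6"  # hypothetical checksum
if fasta.exists() and not file_md5_is_valid(fasta, expected_md5):
    raise ValueError("FASTA file failed MD5 validation")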
| 19,046
|
def combine_to_int(values):
"""Combine several byte values to an integer"""
multibyte_value = 0
for byte_id, byte in enumerate(values):
multibyte_value += 2**(4 * byte_id) * byte
return multibyte_value
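# Note (added): the weight step is 2**4, so each element contributes one hex
# digit (4 bits), least significant first:
assert combine_to_int([0x1, 0x2, 0x3]) == 0x321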
| 19,047
|
def zip_images(image_dir, out_dir, threshold=700_000, factor=0.75):
"""Shrink and rotate images and then put them into a zip file."""
os.makedirs(out_dir, exist_ok=True)
manifest = out_dir / (out_dir.name + '_manifest.csv')
with open(manifest, 'w') as out_file:
writer = csv.writer(out_file)
writer.writerow(['name'])
for src in tqdm(image_dir.iterdir()):
writer.writerow([src.name])
dst = out_dir / src.name
copy(src, dst)
# There has to be a better way
while dst.stat().st_size > threshold:
image = Image.open(dst)
image = image.resize((
int(image.size[0] * factor),
int(image.size[1] * factor)))
image.save(dst)
| 19,048
|
def loss_fn(x, results, is_valtest=False, **kwargs):
"""
Loss weight (MCAE):
- sni: snippet reconstruction loss
- seg: segment reconstruction loss
- cont: smooth regularization
- reg: sparsity regularization
- con: constrastive loss
- cls: auxilliary classification loss <not used for MCAE-MP>
Loss weight (joint):
- skcon: contrastive loss on the concatenated representation of all joints
- skcls: auxilliary classification loss
"""
default_lsw = dict.fromkeys(
[
'sni', 'seg', 'cont', 'reg', 'con', 'skcon', 'skcls'
], 1.0)
loss_weights = kwargs.get('loss_weights', default_lsw)
losses = {}
mcae_losses = []
sk_pres = results['sk_pres']
sk_lgts = results['sk_lgts']
sk_y = kwargs.get('y', None)
if 'mcae' in results.keys():
mcae_results = results['mcae']
for r in mcae_results:
mcae_losses.append(
mcae_loss(r['x'], r, loss_weights=loss_weights, is_valtest=is_valtest))
for key in loss_weights.keys():
losses[key] = 0
if key in mcae_losses[0][0].keys():
for i in range(len(mcae_results)):
losses[key] += mcae_losses[i][0][key]
else:
losses.pop(key)
elif 'mcae_3d' in results.keys():
r = results['mcae_3d']
mcae_loss_ = mcae_loss(r['x'], r, loss_weights=loss_weights, is_valtest=is_valtest)[0]
for key in loss_weights.keys():
losses[key] = 0
if key in mcae_loss_.keys():
losses[key] += mcae_loss_[key]
else:
losses.pop(key)
if loss_weights.get('skcon', 0) > 0 and not is_valtest:
B = sk_pres.shape[0]
_L = int(B/2)
tau = 0.1
trj_pres = sk_pres.reshape(B, -1)
ori, aug = trj_pres.split(_L, 0)
dist_grid = 1 - cosine_distance(ori, aug)
dist_grid_exp = torch.exp(dist_grid/tau)
losses['skcon'] = -torch.log(
torch.diag(dist_grid_exp) / dist_grid_exp.sum(1)).mean()
if loss_weights.get('skcls', 0) > 0:
losses['skcls'] = F.nll_loss(F.log_softmax(sk_lgts, -1), sk_y)
return losses, default_lsw
| 19,049
|
def _as_uint32(x: int) -> QVariant:
"""Convert the given int to an uint32 for DBus."""
variant = QVariant(x)
successful = variant.convert(QVariant.UInt)
assert successful
return variant
| 19,050
|
def svcs_tang_u(Xcp,Ycp,Zcp,gamma_t,R,m,Xcyl,Ycyl,Zcyl,ntheta=180, Ground=False):
"""
Computes the velocity field for nCyl*nr cylinders, extending along z:
nCyl: number of main cylinders
nr : number of concentric cylinders within a main cylinder
INPUTS:
        Xcp,Ycp,Zcp: cartesian coordinates of control points where the velocity field is to be computed
gamma_t: array of size (nCyl,nr), distribution of gamma for each cylinder as function of radius
R : array of size (nCyl,nr),
m : array of size (nCyl,nr),
Xcyl,Ycyl,Zcyl: array of size nCyl) giving the center of the rotor
Ground: boolean, True if ground effect is to be accounted for
All inputs (except Ground) should be numpy arrays
"""
Xcp=np.asarray(Xcp)
Ycp=np.asarray(Ycp)
Zcp=np.asarray(Zcp)
ux = np.zeros(Xcp.shape)
uy = np.zeros(Xcp.shape)
uz = np.zeros(Xcp.shape)
nCyl,nr = R.shape
print('Tang. (skewed) ',end='')
for i in np.arange(nCyl):
Xcp0,Ycp0,Zcp0=Xcp-Xcyl[i],Ycp-Ycyl[i],Zcp-Zcyl[i]
if Ground:
YcpMirror = Ycp0+2*Ycyl[i]
Ylist = [Ycp0,YcpMirror]
else:
Ylist = [Ycp0]
for iy,Y in enumerate(Ylist):
for j in np.arange(nr):
if iy==0:
print('.',end='')
else:
print('m',end='')
if np.abs(gamma_t[i,j]) > 0:
ux1,uy1,uz1 = svc_tang_u(Xcp0,Y,Zcp0,gamma_t[i,j],R[i,j],m[i,j],ntheta=ntheta,polar_out=False)
ux = ux + ux1
uy = uy + uy1
uz = uz + uz1
print('')
return ux,uy,uz
| 19,051
|
def np_to_o3d_images(images):
"""Convert numpy image list to open3d image list
Parameters
----------
images : list[numpy.ndarray]
    Returns
    -------
    o3d_images : list[open3d.open3d.geometry.Image]
"""
o3d_images = []
for image in images:
image = np_to_o3d_image(image)
o3d_images.append(image)
return o3d_images
| 19,052
|
def compute_mse(y_true, y_pred):
"""ignore zero terms prior to comparing the mse"""
mask = np.nonzero(y_true)
mse = mean_squared_error(y_true[mask], y_pred[mask])
return mse
| 19,053
|
def test_enable_caching_specific(configure_caching):
"""
Check that using enable_caching for a specific identifier works.
"""
identifier = 'some_ident'
with configure_caching({'default_enabled': False}):
with enable_caching(identifier=identifier):
assert get_use_cache(identifier=identifier)
| 19,054
|
def image2d(math_engine, batch_len, batch_width, height, width, channels, dtype="float32"):
"""Creates a blob with two-dimensional multi-channel images.
:param neoml.MathEngine.MathEngine math_engine: the math engine that works with this blob.
:param batch_len: the **BatchLength** dimension of the new blob.
:type batch_len: int, > 0
:param batch_width: the **BatchWidth** dimension of the new blob.
:type batch_width: int, > 0
:param height: the image height.
:type height: int, > 0
:param width: the image width.
:type width: int, > 0
:param channels: the number of channels in the image format.
:type channels: int, > 0
:param dtype: the type of data in the blob.
:type dtype: str, {"float32", "int32"}, default="float32"
"""
if dtype != "float32" and dtype != "int32":
raise ValueError('The `dtype` must be one of {`float32`, `int32`}.')
if batch_len < 1:
raise ValueError('The `batch_len` must be > 0.')
if batch_width < 1:
raise ValueError('The `batch_width` must be > 0.')
if height < 1:
raise ValueError('The `height` must be > 0.')
if width < 1:
raise ValueError('The `width` must be > 0.')
if channels < 1:
raise ValueError('The `channels` must be > 0.')
shape = np.array((batch_len, batch_width, 1, height, width, 1, channels), dtype=np.int32, copy=False)
return Blob(PythonWrapper.tensor(math_engine._internal, shape, dtype))
| 19,055
|
def error_response(error, message):
"""
returns error response
"""
data = {
"status": "error",
"error": error,
"message": message
}
return data
| 19,056
|
def graph_to_text(
graph: MultiDiGraph, quoting: bool = True, verbose: bool = True
) -> str:
"""Turns a graph into
its text representation.
Parameters
----------
graph : MultiDiGraph
Graph to text.
quoting : bool
If true, quotes will be added.
verbose : bool
If true, a progress bar will be displayed.
Examples
--------
>>> import cfpq_data
>>> g = cfpq_data.labeled_cycle_graph(2, edge_label="a", verbose=False)
>>> cfpq_data.graph_to_text(g, verbose=False)
"'0' 'a' '1'\\n'1' 'a' '0'\\n"
>>> cfpq_data.graph_to_text(g, quoting=False, verbose=False)
'0 a 1\\n1 a 0\\n'
Returns
-------
text : str
Graph text representation.
"""
text = ""
for u, v, edge_labels in tqdm(
graph.edges(data=True), disable=not verbose, desc="Generation..."
):
if len(edge_labels.values()) > 0:
for label in edge_labels.values():
if quoting:
text += f"'{u}' '{label}' '{v}'\n"
else:
text += f"{u} {label} {v}\n"
else:
if quoting:
text += f"'{u}' '{v}'\n"
else:
text += f"{u} {v}\n"
return text
| 19,057
|
def test_has_valid_dir_structure():
"""Check if the specified dir structure is valid"""
def recurse_contents(contents):
if contents is None:
return None
else:
for key, value in contents.items():
assert(isinstance(key, str))
if value is None:
return None
elif "dir" in value:
recurse_contents(value["dir"])
elif "file" in value:
assert(value["file"] is None or isinstance(value["file"], str) or callable(value["file"]))
if callable(value["file"]):
generator = value["file"]
assert(isinstance(generator("test"), str))
else:
raise Exception("""
Every entry in the directory structure must be
either a directory or a file.
""")
recurse_contents(skeleton.dir_structure)
| 19,058
|
def create_user(username, password, firstname, lastname):
"""Create an user."""
pwd_hash = pwd_context.encrypt(password)
with db_conn.cursor() as cur:
cur.execute("""
INSERT INTO AdminUser (username, password, firstname, lastname)
VALUES (%s, %s, %s, %s)""", (username, pwd_hash, firstname, lastname))
db_conn.commit()
| 19,059
|
def getFBA(fba):
"""AC factory.
    reads a file object and creates a dictionary for easy insertion
into a postgresdatabase. Uses Ohlbergs routines to read the files (ACfile)
"""
word = fba.getSpectrumHead()
while word is not None:
stw = fba.stw
mech = fba.Type(word)
datadict = {
'stw': stw,
'mech_type': mech,
}
return datadict
raise EOFError
| 19,060
|
def logtimestamp():
"""
    returns a formatted datetime string with the current year, DOY, and UT
"""
return DT.datetime.utcnow().strftime("%Y-%j-%H:%M:%S")
| 19,061
|
def get_most_common_non_ascii_char(file_path: str) -> str:
"""Return first most common non ascii char"""
with open(file_path, encoding="raw_unicode_escape") as f:
non_ascii = {}
for line in f:
for char in line:
if not char.isascii():
if char in non_ascii:
non_ascii[char] += 1
else:
non_ascii[char] = 1
if non_ascii:
return max(non_ascii, key=non_ascii.get)
else:
return "No non ascii chars in the file"
| 19,062
|
def compute_noise_from_target_epsilon(
target_epsilon,
target_delta,
epochs,
batch_size,
dataset_size,
alphas=None,
approx_ratio=0.01,
):
"""
Takes a target epsilon (eps) and some hyperparameters.
Returns a noise scale that gives an epsilon in [0.99 eps, eps].
The approximation ratio can be tuned.
If alphas is None, we'll explore orders.
"""
steps = compute_steps(epochs, batch_size, dataset_size)
sampling_rate = batch_size / dataset_size
if alphas is None:
alphas = ALPHAS
def get_eps(noise):
rdp = privacy_analysis.compute_rdp(sampling_rate, noise, steps, alphas)
epsilon, order = privacy_analysis.get_privacy_spent(
alphas, rdp, delta=target_delta
)
return epsilon
# Binary search bounds
noise_min = MIN_NOISE
noise_max = MAX_NOISE
# Start with the smallest epsilon possible with reasonable noise
candidate_noise = noise_max
candidate_eps = get_eps(candidate_noise)
if candidate_eps > target_epsilon:
raise ("Cannot reach target eps. Try to increase MAX_NOISE.")
# Search up to approx ratio
while (
candidate_eps < (1 - approx_ratio) * target_epsilon
or candidate_eps > target_epsilon
):
if candidate_eps < (1 - approx_ratio) * target_epsilon:
noise_max = candidate_noise
else:
noise_min = candidate_noise
candidate_noise = (noise_max + noise_min) / 2
candidate_eps = get_eps(candidate_noise)
print("Use noise {} for epsilon {}".format(candidate_noise, candidate_eps))
return candidate_noise
| 19,063
|
def cart2pol(x, y):
"""
author : Dr. Schaeffer
"""
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return(rho, phi)
| 19,064
|
def set_seed(seed: int) -> None:
"""
Seeds various random generators.
Args:
seed: Seed to use.
"""
random.seed(seed)
np.random.seed(seed)
    torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
| 19,065
|
def requires_site(site):
"""Skip test based on where it is being run"""
skip_it = bool(site != SITE)
return pytest.mark.skipif(skip_it,
reason='SITE is not %s.' % site)
| 19,066
|
def set_error_redirect(request, error_redirect):
"""
Convenience method to set the Location redirected
to after an error. Should be used at the top of
views.
    You could call set_error_redirect on the inertia
object directly but you need to check that the
inertia object is actually there.
"""
if hasattr(request, "inertia"):
request.inertia.set_error_redirect(error_redirect)
| 19,067
|
def handle_error(e):
"""
Handle errors, formatting them as JSON if requested
"""
error_type = type(e).__name__
message = str(e)
trace = None
description = None
status_code = 500
if isinstance(e, werkzeug.exceptions.HTTPException):
status_code = e.code
description = e.description
if app.debug:
trace = traceback.format_exc()
if request_wants_json():
details = {
'message': message,
'type': error_type,
}
if description is not None:
details['description'] = description
if trace is not None:
details['trace'] = trace.split('\n')
return flask.jsonify({'error': details}), status_code
else:
message = message.replace('\\n', '<br />')
if isinstance(e, digits.frameworks.errors.NetworkVisualizationError):
trace = message
message = ''
return flask.render_template('error.html',
title=error_type,
message=message,
description=description,
trace=trace,
), status_code
| 19,068
|
def iou(bbox_1, bbox_2):
"""Computes intersection over union between two bounding boxes.
Parameters
----------
bbox_1 : np.ndarray
First bounding box, of the form (x_min, y_min, x_max, y_max).
bbox_2 : np.ndarray
Second bounding box, of the form (x_min, y_min, x_max, y_max).
Returns
-------
float
Intersection over union value between both bounding boxes.
"""
x_min = np.maximum(bbox_1[0], bbox_2[0])
y_min = np.maximum(bbox_1[1], bbox_2[1])
x_max = np.minimum(bbox_1[2], bbox_2[2])
y_max = np.minimum(bbox_1[3], bbox_2[3])
width = np.maximum(0.0, x_max - x_min)
height = np.maximum(0.0, y_max - y_min)
intersection = width * height
return (
intersection
) / (
(bbox_1[2] - bbox_1[0]) * (bbox_1[3] - bbox_1[1])
+ (bbox_2[2] - bbox_2[0]) * (bbox_2[3] - bbox_2[1])
- intersection
)
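# Worked example (added), two unit-area boxes overlapping in a 0.5 x 0.5 corner:
# iou(np.array([0.0, 0.0, 1.0, 1.0]), np.array([0.5, 0.5, 1.5, 1.5]))
#   -> 0.25 / (1.0 + 1.0 - 0.25) ~= 0.1429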
| 19,069
|
def _create_directory_structure_if_necessary(folders):
""" Ensure basic file structure in project. """
site_folder = folders['site']
if not env.exists(site_folder):
# base deployment folder
env.run('mkdir -p {site_folder}'.format(site_folder=site_folder,))
# set linux user group.
env.sudo('chown :{group} {site_folder}'.format(group=LINUXGROUP, site_folder=site_folder))
        # set folder privileges - 6*** sets the setuid/setgid bits, so subfolders and files
        # will inherit the parent folder's group and user.
# 770 means read, write and execute for folder is enabled for owner and group.
env.run('chmod 6770 {site_folder}'.format(site_folder=site_folder,))
# Make folders for static files, virtual environment and so on.
# -p flag means that folder is only created if it doesn't exist,
# and parent directories are also created if needed.
env.run('mkdir -p {folder_paths}'.format(folder_paths=' '.join(folders.values())))
| 19,070
|
def GetIntensityArray(videofile, threshold, scale_percent):
"""Finds pixel coordinates within a videofile (.tif, .mp4) for pixels
that are above a brightness threshold, then accumulates the
brightness event intensities for each coordinate,
outputting it as a 2-D array in the same size as the video frames
Input:
-videofile: file containing an image stack of fluorescent events
-threshold: minimum brightness for detection
-scale_percent: helps resize image for faster computing speeds
Output: 2-d Array of accumulated intensity values for each pixel above
a calculated brightness threshold in the video"""
# Reading video file and convert to grayscale
ret, img = cv2.imreadmulti(videofile, flags=cv2.IMREAD_GRAYSCALE)
# Setting Resizing Dimensions
width = int(img[0].shape[1] * scale_percent / 100)
height = int(img[0].shape[0] * scale_percent / 100)
dim = (width, height)
img_resized = cv2.resize(img[0], dim, interpolation=cv2.INTER_AREA)
# Creating empty array to add intensity values to
int_array = np.zeros(np.shape(img_resized))
for frame in range(len(img)):
# Resize Frame
frame_resized = cv2.resize(img[frame],
dim, interpolation=cv2.INTER_AREA)
intensity = GetIntensityValues(frame_resized, threshold)
        if len(np.where(intensity >= 1)[0]) > 0:
            # Get coordinates of the pixels with detected events
            row, col = np.where(intensity >= 1)
            for i in range(len(row)):
                # Add each event's intensity to int_array at its (row, col) location
                int_array[row[i], col[i]] += intensity[row[i], col[i]]
else:
pass
return int_array
| 19,071
|
def test_unicode_handling(username, password):
"""
With a unicode string for the password, set and verify the
Authorization header.
"""
header_dict = {}
authorizer = BasicAuthorizer(username, password)
authorizer.set_authorization_header(header_dict)
assert header_dict["Authorization"][:6] == "Basic "
decoded = base64.b64decode(header_dict["Authorization"][6:].encode("utf-8")).decode(
"utf-8"
)
assert decoded == f"{username}:{password}"
| 19,072
|
def _check_data(handler, data):
"""Check the data."""
if 'latitude' not in data or 'longitude' not in data:
handler.write_text("Latitude and longitude not specified.",
HTTP_UNPROCESSABLE_ENTITY)
_LOGGER.error("Latitude and longitude not specified.")
return False
if 'device' not in data:
handler.write_text("Device id not specified.",
HTTP_UNPROCESSABLE_ENTITY)
_LOGGER.error("Device id not specified.")
return False
if 'id' not in data:
handler.write_text("Location id not specified.",
HTTP_UNPROCESSABLE_ENTITY)
_LOGGER.error("Location id not specified.")
return False
if 'trigger' not in data:
handler.write_text("Trigger is not specified.",
HTTP_UNPROCESSABLE_ENTITY)
_LOGGER.error("Trigger is not specified.")
return False
return True
| 19,073
|
def get_soup(page_url):
""" Returns BeautifulSoup object of the url provided """
try:
req = requests.get(page_url)
except Exception:
print('Failed to establish a connection with the website')
return
if req.status_code == 404:
print('Page not found')
return
content = req.content
soup = BeautifulSoup(content, 'html.parser')
return soup
| 19,074
|
def check_energybal(ec_dataset, timeseries=None, dailyaverage=False, monthlyaverage=False, monthly_cumulative=False):
"""
:param ec_dataset: the full dataset from the EC tower
:param timeseries: a series of datetimes
:return: a plot of the energy balance closure over time for the Eddy Covariance tower.
"""
# if not dailyaverage:
# Rn = ec_dataset['NETRAD']
# H = ec_dataset['H']
# LE = ec_dataset['LE']
# closure_check(ec_dataset, Rn, H, LE, timeseries)
# ===== daily averaging =====
if dailyaverage:
timeseries = timeseries.values
Rn = ec_dataset['NETRAD'].values
H = ec_dataset['H'].values
LE = ec_dataset['LE'].values
# indexed_datetimes = pd.DataFrame(pd.DatetimeIndex(timeseries))
        # recreate a dataframe of the variables you want to time average on a daily timestep
halfhour_data = pd.DataFrame({'timeseries':timeseries, 'Rn':Rn, 'LE':LE, 'H':H})
# resample the dataframe by making the timeseries column into the index and using .resample('d') for day
halfhour_data = halfhour_data.set_index(pd.DatetimeIndex(halfhour_data['timeseries']))
daily_time = halfhour_data.resample('d').mean()
# Get the values of the datetime index as an array
timeseries_daily = daily_time.index.values
# Net Radiation
Rn_av = daily_time['Rn']
# Heat
H_av = daily_time['H']
LE_av = daily_time['LE']
closure_check(ec_dataset, Rn_av, H_av, LE_av, timeseries_daily, daily_average=True)
# ===== monthly averaging =====
elif monthlyaverage:
timeseries = timeseries.values
Rn = ec_dataset['NETRAD'].values
H = ec_dataset['H'].values
LE = ec_dataset['LE'].values
# indexed_datetimes = pd.DataFrame(pd.DatetimeIndex(timeseries))
# recreate a dataframe of the variables you want to time average on a monthly timestep
halfhour_data = pd.DataFrame({'timeseries': timeseries, 'Rn': Rn, 'LE': LE, 'H': H})
# resample the dataframe by making the timeseries column into the index and using .resample('d') for day
halfhour_data = halfhour_data.set_index(pd.DatetimeIndex(halfhour_data['timeseries']))
monthly_time = halfhour_data.resample('m').mean()
# Get the values of the datetime index as an array
timeseries_daily = monthly_time.index.values
# Net Radiation
Rn_av = monthly_time['Rn']
# Heat
H_av = monthly_time['H']
LE_av = monthly_time['LE']
# closure_check(ec_dataset, Rn_av, H_av, LE_av, timeseries_daily, daily_average=True)
# ===== cumulative monthly =====
elif monthly_cumulative:
timeseries = timeseries.tolist()
# print 'timeseries\n', timeseries
Rn = ec_dataset['NETRAD'].values
H = ec_dataset['H'].values
LE = ec_dataset['LE'].values
# indexed_datetimes = pd.DataFrame(pd.DatetimeIndex(timeseries))
# recreate a dataframe of the variables you want to time average on a monthly timestep
halfhour_data = pd.DataFrame({'timeseries': timeseries, 'Rn': Rn, 'LE': LE, 'H': H})
# set the timeseries column to the index so groupby function can group by year and month of the index.
halfhour_data = halfhour_data.set_index(pd.DatetimeIndex(halfhour_data['timeseries']))
halfhour_data['mmh20'] = halfhour_data['LE'] * 7.962e-4
monthly_cumulative = halfhour_data.groupby([lambda x: x.year, lambda x: x.month]).sum()
print 'monthly cumulative \n', monthly_cumulative
print 'cum index', monthly_cumulative.index
# last_month = (halfhour_data.index[-1].year, halfhour_data.index[-1].month)
# last_date = date(last_month[0], last_month[1], 1)
# monthly_time_series = []
# for year in monthly_cumulative.index.levels[0]:
# for month in monthly_cumulative.index.levels[1]:
# # print year, month, 1
# year_month = date(year, month, 1)
# if year_month <= last_date:
# monthly_time_series.append(year_month)
# # TODO - NEEDS TO STOP AT 2018 04
monthly_list = monthly_time_parse(timeseries)
# print 'the monthly cumulatives \n', monthly_cumulative.index.values.tolist
# print 'the time series \n', monthly_time_series
print 'length of the month', len(monthly_list)
        print 'length of the monthly cumulatives', len(monthly_cumulative.LE)
# try plotting
plt.plot(monthly_list, monthly_cumulative.mmh20)
plt.scatter(monthly_list, monthly_cumulative.mmh20)
plt.show()
# # Get the values of the datetime index as an array
# timeseries_daily = monthly_time.index.values
#
# # Net Radiation
# Rn_av = monthly_time['Rn']
#
# # Heat
# H_av = monthly_time['H']
# LE_av = monthly_time['LE']
| 19,075
|
def foreign_key_constraint_sql(table):
"""Return the SQL to add foreign key constraints to a given table"""
sql = ''
fk_names = list(table.foreign_keys.keys())
for fk_name in sorted(fk_names):
foreign_key = table.foreign_keys[fk_name]
sql += "FOREIGN KEY({fn}) REFERENCES {tn}({kc}), ".format(
fn=foreign_key.from_col,
tn=foreign_key.to_table.name,
kc=foreign_key.to_col
)
return sql
| 19,076
|
def expandDimConst(term: AST.PPTerm,
ntId: int) -> Optional[AST.PPTerm]:
"""
Expand dimension constant to integer constants (Required for fold zeros)
"""
nt = ASTUtils.getNthNT(term, ntId)
if type(nt.sort) != AST.PPDimConst:
return None
subTerm = AST.PPIntConst(nt.sort.value)
termExpanded = ReprUtils.replaceNthNT(term, ntId, subTerm)
return termExpanded
| 19,077
|
def _fit_amplitude_scipy(counts, background, model, optimizer='Brent'):
"""
Fit amplitude using scipy.optimize.
Parameters
----------
counts : `~numpy.ndarray`
Slice of count map.
background : `~numpy.ndarray`
Slice of background map.
model : `~numpy.ndarray`
Model template to fit.
    optimizer : str
        Optimization method passed to `scipy.optimize.minimize_scalar`.
Returns
-------
amplitude : float
Fitted flux amplitude.
niter : int
Number of function evaluations needed for the fit.
"""
from scipy.optimize import minimize_scalar
args = (counts, background, model)
amplitude_min, amplitude_max = _amplitude_bounds_cython(counts, background, model)
try:
result = minimize_scalar(f_cash, bracket=(amplitude_min, amplitude_max),
args=args, method=optimizer, tol=10)
return result.x, result.nfev
except ValueError:
result = minimize_scalar(f_cash, args=args, method=optimizer, tol=0.1)
return result.x, result.nfev
| 19,078
|
def account_key__sign(data, key_pem=None, key_pem_filepath=None):
"""
This routine will use crypto/certbot if available.
If not, openssl is used via subprocesses
    :param data: (required) the data to sign
    :param key_pem: (required) the RSA Key in PEM format
:param key_pem_filepath: (optional) the filepath to a PEM encoded RSA account key file.
"""
log.info("account_key__sign >")
if openssl_crypto:
pkey = openssl_crypto.load_privatekey(openssl_crypto.FILETYPE_PEM, key_pem)
if PY3:
if not isinstance(data, bytes):
data = data.encode()
signature = pkey.to_cryptography_key().sign(
data,
cryptography.hazmat.primitives.asymmetric.padding.PKCS1v15(),
cryptography.hazmat.primitives.hashes.SHA256(),
)
return signature
log.debug(".account_key__sign > openssl fallback")
_tmpfile = None
try:
if key_pem_filepath is None:
_tmpfile = new_pem_tempfile(key_pem)
key_pem_filepath = _tmpfile.name
with psutil.Popen(
[openssl_path, "dgst", "-sha256", "-sign", key_pem_filepath],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
) as proc:
if PY3:
if not isinstance(data, bytes):
data = data.encode()
signature, err = proc.communicate(data)
if proc.returncode != 0:
raise IOError("account_key__sign\n{0}".format(err))
return signature
finally:
if _tmpfile:
_tmpfile.close()
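# Hypothetical usage sketch: sign an arbitrary payload with an ACME account key
# stored on disk. The filename is an assumption, and the call relies on the
# module-level globals used above (openssl_crypto, or openssl_path/psutil for
# the openssl fallback).
if __name__ == "__main__":
    with open("account_key.pem", "rb") as fh:
        _account_pem = fh.read()
    _sig = account_key__sign(b"payload-to-sign", key_pem=_account_pem)
    print("signature length:", len(_sig))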
| 19,079
|
def get_world_paths() -> list:
"""
Returns a list of paths to the worlds on the server.
"""
server_dir = Path(__file__).resolve().parents[1]
world_paths = []
for p in server_dir.iterdir():
        if p.is_dir() and (p / "level.dat").is_file():
world_paths.append(p.absolute())
return world_paths
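# Hypothetical usage sketch: print the name of every world folder found under
# the server directory.
if __name__ == "__main__":
    for world in get_world_paths():
        print(world.name)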
| 19,080
|
def xml_ff_bond_force(data, prm, ff_prefix):
"""
Add Bond Force into Element (data)
Parameters
----------
data : xml.etree.ElementTree.Element
prm : OpenMM.AmberPrmtop
ff_prefix : string
"""
bnd_k0 = prm._raw_data["BOND_FORCE_CONSTANT"]
bnd_r0 = prm._raw_data["BOND_EQUIL_VALUE"]
prm_atom_type = prm._raw_data['AMBER_ATOM_TYPE']
forceConstConversionFactor = (kilocalorie_per_mole/(angstrom*angstrom)
).conversion_factor_to(kilojoule_per_mole/(nanometer*nanometer))
lengthConversionFactor = angstrom.conversion_factor_to(nanometer)
ff_bnd_frc = ET.SubElement(data, 'HarmonicBondForce')
raw_data = prm._raw_data["BONDS_WITHOUT_HYDROGEN"] \
+ prm._raw_data["BONDS_INC_HYDROGEN"]
typ_list = []
for ii in range(0, len(raw_data), 3):
ibnd = int(raw_data[ii])//3
jbnd = int(raw_data[ii+1])//3
ityp = int(raw_data[ii+2])-1
r0 = "%10.6f" % (float(bnd_r0[ityp])*lengthConversionFactor)
# amber (k (x-x0)^2) ---> openmm (0.5 k' (x-x0)^2) : k' = 2k
k0 = "%20.6f" % (float(bnd_k0[ityp])*forceConstConversionFactor*2.0)
        if ityp not in typ_list:
typ_list.append(ityp)
ff_bnd = ET.SubElement(ff_bnd_frc, 'Bond')
itypeName = prm_atom_type[ibnd].upper()
jtypeName = prm_atom_type[jbnd].upper()
if itypeName > jtypeName:
(itypeName, jtypeName) = (jtypeName, itypeName)
ff_bnd.set('type1', ff_prefix+"-"+itypeName)
ff_bnd.set('type2', ff_prefix+"-"+jtypeName)
ff_bnd.set('length', r0.strip())
ff_bnd.set('k', k0.strip())
| 19,081
|
def derivative_p(α_L, α_G, ρ_G, v_L, v_G): # (1)
"""
    Calculates the pressure spatial derivative to be plugged into the expression
    for pressure at the next spatial step (see the first equation of the model).
    It returns the value of the pressure spatial derivative at the current time
    step and, hence, takes as arguments the volume fractions, velocities, and
    gas density at the current spatial step.
Args:
α_L (float) - liquid phase volume fraction. Can assume any value
from 0 to 1.
α_G (float) - gaseous phase volume fraction. Can assume any value
from 0 to 1.
ρ_G (float) - gaseous phase density. Can assume any positive value.
v_L (float) - liquid phase velocity. Can assume either positive or
negative values.
v_G (float) - gaseous phase velocity. Can assume any positive value.
Returns:
float: the return value (pressure derivative at the current spatial
step). Can assume any value from negative infinity to 0.
"""
derivative_p = (-1)*(ρ_L*α_L + ρ_G*α_G) \
* ( g + (2*f/D) * (α_L*v_L + α_G*v_G)**2 ) # line continuation operator
return(derivative_p)
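# Hypothetical worked example. ρ_L, g, f and D are module-level constants in the
# original model; the values below are illustrative assumptions only (water
# density, gravity, a friction factor, and a 6-inch pipe diameter).
if __name__ == "__main__":
    ρ_L, g, f, D = 1000.0, 9.81, 0.005, 0.1524
    print(derivative_p(α_L=0.6, α_G=0.4, ρ_G=1.2, v_L=1.5, v_G=3.0))
    # prints a negative value: pressure decreases along the flow direction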
| 19,082
|
def write_populate_schemas(writer):
"""
Writes out a __SCHEMAS dict which contains all RecordSchemas by their full name. Used by get_schema_type
:param writer:
:return:
"""
writer.write('\n__SCHEMAS = dict((n.fullname.lstrip("."), n) for n in six.itervalues(__NAMES.names))\n')
| 19,083
|
def fit_sigmoid(colors, a=0.05):
"""Fits a sigmoid to raw contact temperature readings from the ContactPose dataset. This function is copied from that repo"""
idx = colors > 0
ci = colors[idx]
x1 = min(ci) # Find two points
y1 = a
x2 = max(ci)
y2 = 1-a
lna = np.log((1 - y1) / y1)
lnb = np.log((1 - y2) / y2)
k = (lnb - lna) / (x1 - x2)
mu = (x2*lna - x1*lnb) / (lna - lnb)
ci = np.exp(k * (ci-mu)) / (1 + np.exp(k * (ci-mu))) # Apply the sigmoid
colors[idx] = ci
return colors
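# Hypothetical usage sketch: squash raw (non-negative) contact readings into
# [0, 1]; zero entries are left untouched by the fitted sigmoid.
if __name__ == "__main__":
    import numpy as np
    raw = np.array([0.0, 0.1, 0.4, 0.7, 1.2])
    print(fit_sigmoid(raw.copy()))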
| 19,084
|
def deprecated(func):
"""Decorator for reporting deprecated function calls
Use this decorator sparingly, because we'll be charged if we make too many Rollbar notifications
"""
@wraps(func)
def wrapped(*args, **kwargs):
# try to get a request, may not always succeed
request = get_current_request()
# notify a maximum of once per function per request/session
if request:
if DEPRECATED_ROLLBAR_NOTIFIED not in request.session:
deprecated_notifications = {}
request.session[DEPRECATED_ROLLBAR_NOTIFIED] = deprecated_notifications
deprecated_notifications = request.session[DEPRECATED_ROLLBAR_NOTIFIED]
key = '%s' % func
# first get it
already_notified = deprecated_notifications.get(key, False)
# then mark it
deprecated_notifications[key] = True
else:
already_notified = False
if not already_notified:
rollbar.report_message('Deprecated function call warning: %s' % func, 'warning', request)
return func(*args, **kwargs)
return wrapped
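# Hypothetical usage sketch: mark an old helper so that calls are reported to
# Rollbar (at most once per request/session). The function below is made up
# purely for illustration.
@deprecated
def legacy_total(prices):
    return sum(prices)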
| 19,085
|
def read_file(path_file: path) -> str:
"""
    Reads the content of the file at path_file.
    :param path_file: path of the file to read
    :return: the file's content as a string
"""
content = None
with open(path_file, 'r') as file:
content = file.read()
return content
| 19,086
|
def absorptionCoefficient_Voigt(Components=None,SourceTables=None,partitionFunction=PYTIPS,
Environment=None,OmegaRange=None,OmegaStep=None,OmegaWing=None,
IntensityThreshold=DefaultIntensityThreshold,
OmegaWingHW=DefaultOmegaWingHW,
ParameterBindings=DefaultParameterBindings,
EnvironmentDependencyBindings=DefaultEnvironmentDependencyBindings,
GammaL='gamma_air', HITRAN_units=True, LineShift=True,
File=None, Format=None, OmegaGrid=None):
"""
INPUT PARAMETERS:
Components: list of tuples [(M,I,D)], where
M - HITRAN molecule number,
I - HITRAN isotopologue number,
D - abundance (optional)
SourceTables: list of tables from which to calculate cross-section (optional)
partitionFunction: pointer to partition function (default is PYTIPS) (optional)
Environment: dictionary containing thermodynamic parameters.
'p' - pressure in atmospheres,
'T' - temperature in Kelvin
Default={'p':1.,'T':296.}
OmegaRange: wavenumber range to consider.
OmegaStep: wavenumber step to consider.
OmegaWing: absolute wing for calculating a lineshape (in cm-1)
IntensityThreshold: threshold for intensities
OmegaWingHW: relative wing for calculating a lineshape (in halfwidths)
GammaL: specifies broadening parameter ('gamma_air' or 'gamma_self')
HITRAN_units: use cm2/molecule (True) or cm-1 (False) for absorption coefficient
File: write output to file (if specified)
    Format: c-format of file output (reflects the number of significant digits in OmegaStep)
OUTPUT PARAMETERS:
Omegas: wavenumber grid with respect to parameters OmegaRange and OmegaStep
Xsect: absorption coefficient calculated on the grid
---
DESCRIPTION:
Calculate absorption coefficient using Voigt profile.
Absorption coefficient is calculated at arbitrary temperature and pressure.
    The user can vary a wide range of parameters to control the calculation
    (such as OmegaRange, OmegaStep, OmegaWing, OmegaWingHW, IntensityThreshold).
    The choice of these parameters depends on the properties of a particular linelist.
    The default values are a rough guess that gives decent precision (on average)
    for a reasonable amount of CPU time. To increase calculation accuracy,
    the user should adjust them by trial and error.
---
EXAMPLE OF USAGE:
nu,coef = absorptionCoefficient_Voigt(((2,1),),'co2',OmegaStep=0.01,
HITRAN_units=False,GammaL='gamma_self')
---
"""
# warn user about too large omega step
    if OmegaStep>0.1: warn('Too large omega step: possible accuracy decline')
# "bug" with 1-element list
Components = listOfTuples(Components)
SourceTables = listOfTuples(SourceTables)
# determine final input values
Components,SourceTables,Environment,OmegaRange,OmegaStep,OmegaWing,\
IntensityThreshold,Format = \
getDefaultValuesForXsect(Components,SourceTables,Environment,OmegaRange,
OmegaStep,OmegaWing,IntensityThreshold,Format)
# get uniform linespace for cross-section
#number_of_points = (OmegaRange[1]-OmegaRange[0])/OmegaStep + 1
#Omegas = linspace(OmegaRange[0],OmegaRange[1],number_of_points)
if OmegaGrid is not None:
Omegas = npsort(OmegaGrid)
else:
Omegas = arange(OmegaRange[0],OmegaRange[1],OmegaStep)
number_of_points = len(Omegas)
Xsect = zeros(number_of_points)
# reference temperature and pressure
Tref = __FloatType__(296.) # K
pref = __FloatType__(1.) # atm
# actual temperature and pressure
T = Environment['T'] # K
p = Environment['p'] # atm
# create dictionary from Components
ABUNDANCES = {}
NATURAL_ABUNDANCES = {}
for Component in Components:
M = Component[0]
I = Component[1]
if len(Component) >= 3:
ni = Component[2]
else:
try:
ni = ISO[(M,I)][ISO_INDEX['abundance']]
except KeyError:
raise Exception('cannot find component M,I = %d,%d.' % (M,I))
ABUNDANCES[(M,I)] = ni
NATURAL_ABUNDANCES[(M,I)] = ISO[(M,I)][ISO_INDEX['abundance']]
# precalculation of volume concentration
if HITRAN_units:
factor = __FloatType__(1.0)
else:
factor = volumeConcentration(p,T)
# SourceTables contain multiple tables
for TableName in SourceTables:
# get line centers
nline = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
# loop through line centers (single stream)
for RowID in range(nline):
# get basic line parameters (lower level)
LineCenterDB = LOCAL_TABLE_CACHE[TableName]['data']['nu'][RowID]
LineIntensityDB = LOCAL_TABLE_CACHE[TableName]['data']['sw'][RowID]
LowerStateEnergyDB = LOCAL_TABLE_CACHE[TableName]['data']['elower'][RowID]
MoleculeNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['molec_id'][RowID]
IsoNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['local_iso_id'][RowID]
#Gamma0DB = LOCAL_TABLE_CACHE[TableName]['data']['gamma_air'][RowID]
#Gamma0DB = LOCAL_TABLE_CACHE[TableName]['data']['gamma_self'][RowID]
Gamma0DB = LOCAL_TABLE_CACHE[TableName]['data'][GammaL][RowID]
TempRatioPowerDB = LOCAL_TABLE_CACHE[TableName]['data']['n_air'][RowID]
#TempRatioPowerDB = 1.0 # for planar molecules
if LineShift:
Shift0DB = LOCAL_TABLE_CACHE[TableName]['data']['delta_air'][RowID]
else:
Shift0DB = 0
# filter by molecule and isotopologue
if (MoleculeNumberDB,IsoNumberDB) not in ABUNDANCES: continue
# partition functions for T and Tref
# TODO: optimize
SigmaT = partitionFunction(MoleculeNumberDB,IsoNumberDB,T)
SigmaTref = partitionFunction(MoleculeNumberDB,IsoNumberDB,Tref)
# get all environment dependences from voigt parameters
# intensity
LineIntensity = EnvironmentDependency_Intensity(LineIntensityDB,T,Tref,SigmaT,SigmaTref,
LowerStateEnergyDB,LineCenterDB)
            # FILTER by LineIntensity: compare it with IntensityThreshold
# TODO: apply wing narrowing instead of filtering, this would be more appropriate
if LineIntensity < IntensityThreshold: continue
# doppler broadening coefficient (GammaD)
# V1 >>>
#GammaDDB = cSqrtLn2*LineCenterDB/cc*sqrt(2*cBolts*T/molecularMass(MoleculeNumberDB,IsoNumberDB))
#GammaD = EnvironmentDependency_GammaD(GammaDDB,T,Tref)
# V2 >>>
cMassMol = 1.66053873e-27 # hapi
#cMassMol = 1.6605402e-27 # converter
m = molecularMass(MoleculeNumberDB,IsoNumberDB) * cMassMol * 1000
GammaD = sqrt(2*cBolts*T*log(2)/m/cc**2)*LineCenterDB
# lorentz broadening coefficient
Gamma0 = EnvironmentDependency_Gamma0(Gamma0DB,T,Tref,p,pref,TempRatioPowerDB)
# get final wing of the line according to Gamma0, OmegaWingHW and OmegaWing
# XXX min or max?
OmegaWingF = max(OmegaWing,OmegaWingHW*Gamma0,OmegaWingHW*GammaD)
# shift coefficient
Shift0 = Shift0DB*p/pref
# XXX other parameter (such as Delta0, Delta2, anuVC etc.) will be included in HTP version
#PROFILE_VOIGT(sg0,GamD,Gam0,sg)
# sg0 : Unperturbed line position in cm-1 (Input).
# GamD : Doppler HWHM in cm-1 (Input)
# Gam0 : Speed-averaged line-width in cm-1 (Input).
# sg : Current WaveNumber of the Computation in cm-1 (Input).
# XXX time?
BoundIndexLower = bisect(Omegas,LineCenterDB-OmegaWingF)
BoundIndexUpper = bisect(Omegas,LineCenterDB+OmegaWingF)
lineshape_vals = PROFILE_VOIGT(LineCenterDB+Shift0,GammaD,Gamma0,Omegas[BoundIndexLower:BoundIndexUpper])[0]
Xsect[BoundIndexLower:BoundIndexUpper] += factor / NATURAL_ABUNDANCES[(MoleculeNumberDB,IsoNumberDB)] * \
ABUNDANCES[(MoleculeNumberDB,IsoNumberDB)] * \
LineIntensity * lineshape_vals
if File: save_to_file(File,Format,Omegas,Xsect)
return Omegas,Xsect
| 19,087
|
def stellar_mags_scatter_cube_pair(file_pair, min_relative_flux=0.5, save=False):
"""Return the scatter in stellar colours within a star datacube pair."""
hdulist_pair = [pf.open(path, 'update') for path in file_pair]
flux = np.vstack(
[hdulist[0].data for hdulist in hdulist_pair])
noise = np.sqrt(np.vstack(
[hdulist['VARIANCE'].data for hdulist in hdulist_pair]))
wavelength = np.hstack(
[get_coords(hdulist[0].header, 3) for hdulist in hdulist_pair])
smoothed_flux = flux.copy()
smoothed_flux[~np.isfinite(smoothed_flux)] = 0.0
smoothed_flux = median_filter(smoothed_flux, (201, 1, 1))
image = np.sum(smoothed_flux, 0)
keep = (image >= (min_relative_flux * np.max(image)))
flux = flux[:, keep]
noise = noise[:, keep]
mags = []
for flux_i, noise_i in zip(flux.T, noise.T):
mags_i = measure_mags(flux_i, noise_i, wavelength)
mags.append([mags_i['g'], mags_i['r']])
mags = np.array(mags)
colour = mags[:, 0] - mags[:, 1]
scatter = np.std(colour)
if save:
for hdulist in hdulist_pair:
hdulist[0].header['COLORSTD'] = (
scatter, 'Scatter in g-r within cubes')
hdulist.flush()
for hdulist in hdulist_pair:
hdulist.close()
return scatter
| 19,088
|
def get_data_filename(relative_path):
"""Get the full path to one of the reference files shipped for testing
In the source distribution, these files are in ``examples/*/``,
but on installation, they're moved to somewhere in the user's python
site-packages directory.
Parameters
----------
relative_path : str
Name of the file to load, with respect to the yank egg folder which
is typically located at something like
``~/anaconda/lib/python3.6/site-packages/yank-*.egg/examples/``
Returns
-------
fn : str
Resource Filename
"""
fn = resource_filename('yank', relative_path)
if not os.path.exists(fn):
raise ValueError("Sorry! {} does not exist. If you just added it, you'll have to re-install".format(fn))
return fn
| 19,089
|
def is_valid_webhook_request(webhook_token: str, request_body: str, webhook_signature_header: str) -> bool:
"""This method verifies that requests to your Webhook URL are genuine and from Buycoins.
Args:
webhook_token: your webhook token
request_body: the body of the request
webhook_signature_header: the X-Webhook-Signature header from BuyCoins
Returns:
a Boolean stating whether the request is valid or not
"""
hmac_request_body = hmac.new(webhook_token.encode(), request_body.encode(), hashlib.sha1)
return hmac.compare_digest(hmac_request_body.hexdigest(), webhook_signature_header)
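# Hypothetical usage sketch: the token, body and header below are made-up
# placeholders; in production the signature header comes from BuyCoins' request.
if __name__ == "__main__":
    import hashlib
    import hmac
    _token = "my-webhook-token"
    _body = '{"event": "coins.incoming"}'
    _header = hmac.new(_token.encode(), _body.encode(), hashlib.sha1).hexdigest()
    print(is_valid_webhook_request(_token, _body, _header))  # True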
| 19,090
|
def logsigsoftmax(logits):
"""
Computes sigsoftmax from the paper - https://arxiv.org/pdf/1805.10829.pdf
"""
max_values = torch.max(logits, 1, keepdim=True)[0]
exp_logits_sigmoided = torch.exp(logits - max_values) * torch.sigmoid(logits)
sum_exp_logits_sigmoided = exp_logits_sigmoided.sum(1, keepdim=True)
log_probs = logits - max_values + F.logsigmoid(logits) - torch.log(sum_exp_logits_sigmoided)
return log_probs
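# Hypothetical usage sketch: like log_softmax, exponentiating the output gives
# rows that sum to one.
if __name__ == "__main__":
    import torch
    _logits = torch.randn(2, 5)
    print(logsigsoftmax(_logits).exp().sum(dim=1))  # ~1.0 per row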
| 19,091
|
def multi(dispatch_fn):
"""Initialise function as a multimethod"""
def _inner(*args, **kwargs):
return _inner.__multi__.get(
dispatch_fn(*args, **kwargs),
_inner.__multi_default__
)(*args, **kwargs)
_inner.__multi__ = {}
_inner.__multi_default__ = lambda *args, **kwargs: None # Default default
return _inner
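# Hypothetical usage sketch: dispatch on a computed key. Handlers are registered
# by writing into __multi__ directly, since no register helper ships with this
# snippet; the shapes below are made up for illustration.
if __name__ == "__main__":
    @multi
    def area(shape):
        return shape["type"]

    area.__multi__["circle"] = lambda shape: 3.14159 * shape["r"] ** 2
    area.__multi__["square"] = lambda shape: shape["side"] ** 2
    print(area({"type": "circle", "r": 2}))   # ~12.566
    print(area({"type": "triangle"}))         # None (falls back to the default)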
| 19,092
|
def choose_key(somemap, default=0, prompt="choose", input=input, error=default_error,
lines=LINES, columns=COLUMNS):
"""Select a key from a mapping.
Returns the key selected.
"""
keytype = type(print_menu_map(somemap, lines=lines, columns=columns))
while 1:
try:
userinput = get_input(prompt, default, input)
except EOFError:
return default
if not userinput:
return default
try:
idx = keytype(userinput)
except ValueError:
error("Not a valid entry. Please try again.")
continue
if idx not in somemap:
error("Not a valid selection. Please try again.")
continue
return idx
| 19,093
|
def delete_nodes(ctx, name, node_names, force):
"""Delete node(s) in a cluster that uses native Kubernetes provider."""
try:
restore_session(ctx)
client = ctx.obj['client']
cluster = Cluster(client)
result = cluster.delete_nodes(ctx.obj['profiles'].get('vdc_in_use'),
name, node_names, force)
stdout(result, ctx)
except Exception as e:
stderr(e, ctx)
| 19,094
|
def test_validate_err():
"""An erroneous Thing Description raises error on validation."""
update_funcs = [
lambda x: x.update({"properties": [1, 2, 3]}) or x,
lambda x: x.update({"actions": "hello-interactions"}) or x,
lambda x: x.update({"events": {"overheating": {"forms": 0.5}}}) or x,
lambda x: x.update({"events": {"Invalid Name": {}}}) or x,
lambda x: x.update({"events": {100: {"label": "Invalid Name"}}}) or x
]
for update_func in update_funcs:
td_err = update_func(copy.deepcopy(TD_EXAMPLE))
with pytest.raises(InvalidDescription):
ThingDescription.validate(doc=td_err)
| 19,095
|
def set_api_key(token_name, api_key, m=None):
"""Sets an API key as an environment variable.
Args:
token_name (str): The token name.
api_key (str): The API key.
        m (ipyleaflet.Map | folium.Map, optional): A Map instance. Defaults to None.
"""
os.environ[token_name] = api_key
if m is not None:
m.api_keys[token_name] = api_key
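# Hypothetical usage sketch: the token name and key value are placeholders.
if __name__ == "__main__":
    import os
    set_api_key("PLANET_API_KEY", "not-a-real-key")
    print(os.environ["PLANET_API_KEY"])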
| 19,096
|
def test_dist(ctx):
"""Test both sdist and bdist wheel and install."""
test_sdist(ctx)
test_bdist_wheel(ctx)
| 19,097
|
def test_pds4_orbnum_generated_list(self):
"""Test inclusion of ORBNUM in kernel list.
Test ORBNUM file generation with automatic plan generation, when the
plan is not provided by the user.
"""
post_setup(self)
config = "../config/maven.xml"
shutil.copy(
"../data/misc/orbnum/maven_orb_rec_210101_210401_v1.orb", "misc/orbnum/"
)
main(config, faucet=self.faucet, silent=self.silent, log=True)
| 19,098
|
def _ValidateFieldValues(field_values,
metadata):
"""Checks that the given sequence of field values is valid.
Args:
field_values: A sequence of values for a particular metric's fields.
metadata: MetricMetadata for the metric. The order of fields in the metadata
should match the field values.
Raises:
ValueError: If the number of field values doesn't match the number
of fields the metric has.
"""
  # Field values are always arranged in the order in which the respective
  # fields are defined in the metadata.
if len(field_values) != len(metadata.fields_defs):
raise ValueError(
"Value for metric %s has %d field values, yet the metric "
"was defined to have %d fields." %
        (metadata.metric_name, len(field_values), len(metadata.fields_defs)))
| 19,099
|