| content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
|---|---|---|
from io import StringIO
from xml.sax.saxutils import XMLGenerator
def unparse(input_dict, output=None, encoding='utf-8', **kwargs):
"""Emit an XML document for the given `input_dict` (reverse of `parse`).
The resulting XML document is returned as a string, but if `output` (a
file-like object) is specified, it is written there instead.
Dictionary keys prefixed with `attr_prefix` (default=`'@'`) are interpreted
as XML node attributes, whereas keys equal to `cdata_key`
(default=`'#text'`) are treated as character data.
The `pretty` parameter (default=`False`) enables pretty-printing. In this
mode, lines are terminated with `'\n'` and indented with `'\t'`, but this
can be customized with the `newl` and `indent` parameters.
"""
((key, value),) = input_dict.items()
must_return = False
    if output is None:
output = StringIO()
must_return = True
content_handler = XMLGenerator(output, encoding)
content_handler.startDocument()
_emit(key, value, content_handler, **kwargs)
content_handler.endDocument()
if must_return:
value = output.getvalue()
try: # pragma no cover
value = value.decode(encoding)
except AttributeError: # pragma no cover
pass
return value
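# Hedged usage sketch: assumes the rest of the xmltodict-style module (the
# `_emit` helper and its default keyword handling) is present, so the call is
# shown as a comment rather than executed here.
# unparse({'root': {'@id': '1', '#text': 'hello'}})
# -> '<?xml version="1.0" encoding="utf-8"?>\n<root id="1">hello</root>'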
|
31cd6225144fcd1296105a66d2318c6d1a22bcca
| 3,648,900
|
import os
def create_callbacks(model, data, ARGS):
"""Create keras custom callback with checkpoint and logging"""
# Create callbacks
if not os.path.exists(ARGS.out_directory):
os.makedirs(ARGS.out_directory)
# log to Model/log.txt as specified by ARGS.out_directory
checkpoint_cb = ModelCheckpoint(filepath=ARGS.out_directory + '/weights.{epoch:03d}.hdf5',
verbose=2, save_best_only=True)
earlystopping_cb = EarlyStopping(monitor='val_loss', patience=3)
# Use builtin logger instead of LogEval
csv_cb = CSVLogger(f'{ARGS.out_directory}/log.txt', append=False, separator='\t')
# custom_callback = LogEval(f'{ARGS.out_directory}/log.txt', model, data, ARGS, interval=1, extlog=True)
callback_list = [checkpoint_cb, earlystopping_cb, csv_cb]
return callback_list
|
10719e27298d41c2a4362d4bf190b3c2b4e606d1
| 3,648,901
|
import os
import urllib.request
def fetch_or_use_cached(temp_dir, file_name, url):
# type: (str, str, str) -> str
"""
Check for a cached copy of the indicated file in our temp directory.
If a copy doesn't exist, download the file.
Arg:
temp_dir: Local temporary dir
file_name: Name of the file within the temp dir, not including the temp
dir path
url: Full URL from which to download the file, including remote file
name, which can be different from file_name
Returns the path of the cached file.
"""
if not os.path.exists(temp_dir):
os.mkdir(temp_dir)
cached_filename = "{}/{}".format(temp_dir, file_name)
if not os.path.exists(cached_filename):
print("Downloading {} to {}".format(url, cached_filename))
urllib.request.urlretrieve(url, cached_filename)
return cached_filename
|
29dfcf983a35c277c15f290f044a07ecee05dd0f
| 3,648,902
|
def to_bin(val):
"""
Receive int and return a string in binary. Padded by 32 bits considering 2's complement for negative values
"""
COMMON_DIGITS = 32
val_str = "{:b}".format(val) # Count '-' in negative case
padded_len = len(val_str) + ((COMMON_DIGITS - (len(val_str) % COMMON_DIGITS)) % COMMON_DIGITS)
if val < 0:
val_2_complement = val & ((1 << padded_len) - 1)
final_val_str = "{:b}".format(val_2_complement)
else:
final_val_str = "0" * (padded_len - len(val_str)) + val_str
return(final_val_str)
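# Usage sketch, assuming to_bin() above is in scope:
print(to_bin(5))    # '00000000000000000000000000000101' (padded to 32 bits)
print(to_bin(-1))   # '11111111111111111111111111111111' (two's complement)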
|
819d1c0a9d387f6ad1635f0fe0e2ab98b3ca17b0
| 3,648,903
|
def PSingle (refLamb2, lamb2, qflux, qsigma, uflux, usigma, err, nterm=2):
""" Fit RM, EVPA0 to Q, U flux measurements
Also does error analysis
    Returns array of fitted parameters, errors for each and Chi Squares of fit
refLamb2 = Reference lambda^2 for fit (m^2)
lamb2 = Array of lambda^2 for fit (m^2)
qflux = Array of Q fluxes (Jy) same dim as lamb2
qsigma = Array of Q errors (Jy) same dim as lamb2
uflux = Array of U fluxes (Jy) same dim as lamb2
usigma = Array of U errors (Jy) same dim as lamb2
err = Obit error stack
nterm = Number of coefficients to fit (1 or 2)
"""
################################################################
#
nlamb2 = len(lamb2)
ret = Obit.RMFitSingle(nlamb2, nterm, refLamb2, lamb2,
qflux, qsigma, uflux, usigma, err.me)
OErr.printErr(err)
OErr.printErrMsg(err,"Fitting failed")
return ret
# end PSingle
|
29c3fd75203317265cccc804b1114b5436fd12bc
| 3,648,904
|
from typing import List
import numpy as np
def exec_waveform_function(wf_func: str, t: np.ndarray, pulse_info: dict) -> np.ndarray:
"""
Returns the result of the pulse's waveform function.
If the wf_func is defined outside quantify-scheduler then the
wf_func is dynamically loaded and executed using
:func:`~quantify_scheduler.helpers.waveforms.exec_custom_waveform_function`.
Parameters
----------
wf_func
The custom waveform function path.
t
The linear timespace.
pulse_info
The dictionary containing pulse information.
Returns
-------
:
Returns the computed waveform.
"""
whitelist: List[str] = ["square", "ramp", "soft_square", "drag"]
fn_name: str = wf_func.split(".")[-1]
waveform: np.ndarray = []
if wf_func.startswith("quantify_scheduler.waveforms") and fn_name in whitelist:
if fn_name == "square":
waveform = waveforms.square(t=t, amp=pulse_info["amp"])
elif fn_name == "ramp":
waveform = waveforms.ramp(t=t, amp=pulse_info["amp"])
elif fn_name == "soft_square":
waveform = waveforms.soft_square(t=t, amp=pulse_info["amp"])
elif fn_name == "drag":
waveform = waveforms.drag(
t=t,
G_amp=pulse_info["G_amp"],
D_amp=pulse_info["D_amp"],
duration=pulse_info["duration"],
nr_sigma=pulse_info["nr_sigma"],
phase=pulse_info["phase"],
)
else:
waveform = exec_custom_waveform_function(wf_func, t, pulse_info)
return waveform
|
29c44de1cc94f6d63e41fccbbef5c23b67870b4d
| 3,648,905
|
def generate_straight_pipeline():
""" Simple linear pipeline """
node_scaling = PrimaryNode('scaling')
node_ridge = SecondaryNode('ridge', nodes_from=[node_scaling])
node_linear = SecondaryNode('linear', nodes_from=[node_ridge])
pipeline = Pipeline(node_linear)
return pipeline
|
2ef1d8137aeb100f6216d6a853fe22953758faf3
| 3,648,906
|
def get_socialnetwork_image_path(instance, filename):
"""
Builds a dynamic path for SocialNetwork images. This method takes an
    instance and builds the path following this pattern:
/simplesite/socialnetwork/PAGE_SLUG/slugified-path.ext
"""
return '{0}/{1}/{2}/{3}'.format(instance._meta.app_label,
str(instance._meta.model_name),
str(instance.slug),
get_slugified_file_name(filename)
)
|
b54e53f0c2a79b3b4e6d4d496d6a85264fffcef1
| 3,648,907
|
def openReadBytesFile(path: str):
"""
    Open a binary file in read-only mode.
    :param path: file path
    :return: file object (IO)
"""
return openFile(path, "rb")
|
72fd2be5264a27a2c5c328cb7a8a4e818d799447
| 3,648,908
|
import datetime
def diff_time(a: datetime.time, b: datetime.time):
"""
a-b in seconds
"""
return 3600 * (a.hour -b.hour) + 60*(a.minute-b.minute) + (a.second-b.second) + (a.microsecond-b.microsecond)/1000000
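# Usage sketch, assuming diff_time() above is in scope:
t1 = datetime.time(10, 30, 15)
t2 = datetime.time(10, 29, 45)
print(diff_time(t1, t2))  # 30.0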
|
e0557d3d3e1e9e1184d7ea7a84665813e7d32760
| 3,648,909
|
def _create_pseudo_names(tensors, prefix):
"""Creates pseudo {input | output} names for subclassed Models.
Warning: this function should only be used to define default
    names for `Metrics` and `SavedModel`. No other use cases should
rely on a `Model`'s input or output names.
Example with dict:
`{'a': [x1, x2], 'b': x3}` becomes:
`['a_1', 'a_2', 'b']`
Example with list:
`[x, y]` becomes:
`['output_1', 'output_2']`
Args:
tensors: `Model`'s outputs or inputs.
prefix: 'output_' for outputs, 'input_' for inputs.
Returns:
Flattened list of pseudo names.
"""
def one_index(ele):
# Start with "output_1" instead of "output_0".
if isinstance(ele, int):
return ele + 1
return ele
flat_paths = list(nest.yield_flat_paths(tensors))
flat_paths = nest.map_structure(one_index, flat_paths)
names = []
for path in flat_paths:
if not path:
name = prefix + '1' # Single output.
else:
name = '_'.join(str(p) for p in path)
if isinstance(path[0], int):
name = prefix + name
names.append(name)
return names
|
5e4ee64026e9eaa8aa70dab85d8dcf0ad0b6d89f
| 3,648,910
|
import sqlite3
import numpy as np
def search_for_breakpoint(db_name, ids):
"""
    Function will retrieve the ID of the last calculated grid node to continue an interrupted grid calculation.
:param db_name: str;
:param ids: numpy.array; list of grid node ids to calculate in this batch
:return: int; grid node from which start the calculation
"""
conn = sqlite3.connect(db_name, detect_types=sqlite3.PARSE_DECLTYPES)
conn.row_factory = lambda cursor, row: row[0]
cursor = conn.cursor()
sql = f"SELECT last_index FROM auxiliary"
last_idx = np.array(cursor.execute(sql).fetchall())
if last_idx.size == 0:
return 0
elif last_idx[0] in ids:
return np.where(last_idx[0] == ids)[0][0]
else:
raise ValueError('IDs of already calculated objects do not correspond to the generated ID. Breakpoint cannot '
'be generated.')
conn.close()
|
3354fcd505de9aefae5f0b4448e1ada7eab0a092
| 3,648,911
|
import os
def reddit_client_secret() -> str:
"""Client secret of the reddit app."""
value = os.getenv("REDDIT_CLIENT_SECRET")
if not value:
raise ValueError("REDDIT_CLIENT_SECRET environment variable not set")
return value
|
dfddbb4b7306b9638b68f3b75721471a82118a64
| 3,648,912
|
def rgetattr(obj, attr):
"""
Get named attribute from an object, i.e. getattr(obj, 'a.a') is
equivalent to ``obj.a.a''.
- obj: object
- attr: attribute name(s)
>>> class A: pass
>>> a = A()
>>> a.a = A()
>>> a.a.a = 1
>>> rgetattr(a, 'a.a')
1
>>> rgetattr(a, 'a.c')
Traceback (most recent call last):
...
AttributeError: 'A' object has no attribute 'c'
"""
attrs = attr.split(".")
obj = getattr(obj, attrs[0])
for name in attrs[1:]:
obj = getattr(obj, name)
return obj
|
5fb58634c4ba910d0a20753c04addf667614a07f
| 3,648,913
|
def lambda1_plus_lambda2(lambda1, lambda2):
"""Return the sum of the primary objects tidal deformability and the
secondary objects tidal deformability
"""
return lambda1 + lambda2
|
4ac3ef51bb66861b06b16cec564f0773c7692775
| 3,648,914
|
import os
def __create_resource_management_client():
"""
Create a ResourceManagementClient object using the subscription ID from environment variables
"""
subscription_id = os.environ.get("AZURE_SUBSCRIPTION_ID", None)
if subscription_id is None:
return None
return ResourceManagementClient(
credential=__create_service_principal_credentials(),
subscription_id=subscription_id
)
|
99ced5240542eb41ff33f4ea88c1b170e5e0a9bd
| 3,648,915
|
def create_cut_sht(stockOutline,array,features,partSpacing,margin):
""" """
numParts = len(array)
basePlanes = generate_base_planes_from_array(array)
targetPlanes = create_cut_sht_targets(stockOutline,array,margin,partSpacing)
    if targetPlanes is None:
return None
else:
# converts GH branch to python list for a set of features
features = [item for item in features.Branches]
cut_sht = []
for i in range(numParts):
objects = [array[i]]
for item in features[i]:
objects.append(item)
cutPart = reorient_objects(objects,basePlanes[i],targetPlanes[i])
cut_sht.append(cutPart)
return cut_sht
|
12d8f56a7b38b06cd89d86fdbf0096f5c8d6e869
| 3,648,916
|
def unicode_is_ascii(u_string):
"""Determine if unicode string only contains ASCII characters.
:param str u_string: unicode string to check. Must be unicode
and not Python 2 `str`.
:rtype: bool
"""
assert isinstance(u_string, str)
try:
u_string.encode('ascii')
return True
except UnicodeEncodeError:
return False
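# Usage sketch, assuming unicode_is_ascii() above is in scope:
print(unicode_is_ascii("hello"))   # True
print(unicode_is_ascii("héllo"))   # False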
|
2a742c7334d68fe0bf6b546fb79bf00a338355f9
| 3,648,917
|
def duplicate_item(api_key: str, board_id: str, item_id: str, *args, **kwargs):
"""Duplicate an item.
Parameters
api_key : `str`
The monday.com v2 API user key.
board_id : `str`
The board's unique identifier.
item_id : `str`
The item's unique identifier.
args : `tuple`
The list of item return fields.
kwargs : `dict`
Optional arguments for item.
Returns
data : `dict`
A monday.com item in dictionary form.
Return Fields
assets : `list[moncli.entities.Asset]`
The item's assets/files.
board : `moncli.entities.Board`
The board that contains this item.
column_values : `list[moncli.entities.ColumnValue]`
The item's column values.
created_at : `str`
The item's create date.
creator : `moncli.entities.User`
The item's creator.
creator_id : `str`
The item's unique identifier.
group : `moncli.entities.Group`
The group that contains this item.
id : `str`
The item's unique identifier.
name : `str`
The item's name.
state : `str`
The board's state (all / active / archived / deleted)
subscriber : `moncli.entities.User`
The pulse's subscribers.
updated_at : `str`
The item's last update date.
updates : `moncli.entities.Update`
The item's updates.
Optional Arguments
with_updates : `bool`
Duplicate with the item's updates.
"""
kwargs = {
'board_id': gql.IntValue(board_id),
'item_id': gql.IntValue(item_id)
}
return execute_query(api_key, query_name=DUPLICATE_ITEM, operation_type=gql.OperationType.MUTATION, fields=args, arguments=kwargs)
|
9e24952a2443b4bcf40d2ae5e3e9d65b8485fece
| 3,648,918
|
import re
def hdparm_secure_erase(disk_name, se_option):
"""
Secure erase using hdparm tool
:param disk_name: disk to be erased
:param se_option: secure erase option
:return: a dict includes SE command exitcode and SE message
"""
# enhance_se = ARG_LIST.e
log_file = disk_name.split("/")[-1] + ".log" # log file for sdx will be sdx.log
log = open(log_file, "a+")
if se_option:
hdparm_option = "--" + se_option
else:
hdparm_option = "--security-erase" # Default is security erase
# Hdparm SE Step1: check disk status
#
# Secure Erase supported output example
# Security:
# Master password revision code = 65534
# supported
# not enabled
# not locked
# not frozen
# not expired: security count
# supported: enhanced erase
# 2min for SECURITY ERASE UNIT. 2min for ENHANCED SECURITY ERASE UNIT.
# Checksum: correct
#
# except for "supported" and "enabled", other items should have "not" before them
if hdparm_option == "--security-erase":
pattern_se_support = re.compile(r'[\s\S]*(?!not)[\s]*supported'
r'[\s]*[\s\S]*enabled[\s]*not[\s]'
r'*locked[\s]*not[\s]*frozen[\s]*not[\s]*expired[\s\S]*')
else:
pattern_se_support = re.compile(r'[\s\S]*(?!not)[\s]*supported[\s]*[\s\S]*enabled[\s]*not'
r'[\s]*locked[\s]*not[\s]*frozen[\s]*not[\s]*expired[\s\S]*'
r'supported: enhanced erase[\s\S]*')
hdparm_check_drive_status(pattern_se_support, disk_name, log)
# TODO: add section to unlocked a disk
# Hdparm SE Step2: set password
command = ["hdparm", "--verbose", "--user-master", "u",
"--security-set-pass", SE_PASSWORD, disk_name]
assert robust_check_call(command, log)["exit_code"] == 0, \
"Failed to set password for disk " + disk_name
# Hdparm SE Step3: confirm disk is ready for secure erase
# both "supported" and "enabled" should have no "not" before them
# other items should still have "not" before them
pattern_se_enabled = re.compile(r'[\s\S]*(?!not)[\s]*supported[\s]*(?!not)[\s]*enabled[\s]*not'
r'[\s]*locked[\s]*not[\s]*frozen[\s]*not[\s]*expired[\s\S]*')
hdparm_check_drive_status(pattern_se_enabled, disk_name, log)
log.close()
# Hdparm SE step4: run secure erase command
command = ["hdparm", "--verbose", "--user-master", "u", hdparm_option, SE_PASSWORD, disk_name]
return secure_erase_base(disk_name, command)
|
7232abd8caaa2cbf52ebf2e7e852c81475ec2ca2
| 3,648,919
|
def compute_loss(retriever_logits, retriever_correct, reader_logits,
reader_correct):
"""Compute loss."""
# []
retriever_loss = marginal_log_loss(retriever_logits, retriever_correct)
# []
reader_loss = marginal_log_loss(
tf.reshape(reader_logits, [-1]), tf.reshape(reader_correct, [-1]))
# []
any_retrieved_correct = tf.reduce_any(retriever_correct)
any_reader_correct = tf.reduce_any(reader_correct)
retriever_loss *= tf.cast(any_retrieved_correct, tf.float32)
reader_loss *= tf.cast(any_reader_correct, tf.float32)
loss = retriever_loss + reader_loss
tf.summary.scalar("num_read_correct",
tf.reduce_sum(tf.cast(reader_correct, tf.int32)))
tf.summary.scalar("reader_loss", tf.reduce_mean(reader_loss))
tf.summary.scalar("retrieval_loss", tf.reduce_mean(retriever_loss))
# []
loss = tf.reduce_mean(loss)
return loss
|
2576191d23a303e9d045cb7c8bbeccbd49b22b43
| 3,648,920
|
import os
def is_using_git():
"""True if git checkout is used."""
return os.path.exists(os.path.join(REPO_ROOT, '.git', 'objects'))
|
23df0df4a97c52f8576ba5fc1437695601c94cc2
| 3,648,921
|
def index() -> render_template:
"""
    The main part of the code that is run when the user visits the address.
    Variables:
covid_data: This is a dictionary of the data returned from the API request.
local_last7days_cases: The number of local cases in the last 7 days.
national_last7days_cases: The number of national cases in the last 7 days.
current_hospital_cases: The number of current hospital cases.
total_deaths: The number of total deaths in The UK.
news: A list of all the news.
update_name: The name of the scheduled update.
update_interval: The time the event will take place.
repeat: Whether the update will repeat.
updating_covid: Whether the update will update the covid data.
updating_news: Whether the update will update the news.
news_to_delete: The title of the news that is to be deleted.
update_to_delete: The title of the update that is to be deleted.
Returns:
A rendered template with the data.
"""
s.run(blocking=False) # stops the scheduler from blocking the server from running
covid_data = covid_API_request()
(local_last7days_cases,
national_last7days_cases,
current_hospital_cases,
total_deaths) = process_covid_data(covid_data)
news = update_news()
update_name = request.args.get("two")
if update_name: # checks if an update has been scheduled
update_interval = request.args.get("update")
repeat = request.args.get("repeat")
updating_covid = request.args.get("covid-data")
updating_news = request.args.get("news")
schedule_covid_updates(update_interval, update_name, repeat, updating_covid, updating_news)
if request.args.get("notif"): # checks if news has been deleted
news_to_delete = request.args.get("notif")
delete_news(news_to_delete)
if request.args.get("update_item"): # checks if an update has been deleted
update_to_delete = request.args.get("update_item")
delete_update(update_to_delete, True)
return render_template('index.html',
title=(title),
news_articles=news,
updates=update,
location=(city),
local_7day_infections=(local_last7days_cases),
nation_location=("United Kingdom"),
national_7day_infections=(national_last7days_cases),
hospital_cases=(f"Hospital Cases: {current_hospital_cases}"),
deaths_total=(f"Total Deaths: {total_deaths}"))
|
d9357f29c9329c901e8389497435ead319841242
| 3,648,922
|
def r(x):
"""
Cartesian radius of a point 'x' in 3D space
Parameters
----------
x : (3,) array_like
1D vector containing the (x, y, z) coordinates of a point.
Returns
-------
r : float
Radius of point 'x' relative to origin of coordinate system
"""
return np.sqrt((x[0]**2) + (x[1]**2) + (x[2]**2))
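# Usage sketch; assumes numpy is imported as np at module level:
import numpy as np
print(r([3.0, 4.0, 0.0]))  # 5.0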
|
3729f91a6671c17bc9fda7eebb9809d316a0d714
| 3,648,923
|
def solve(*args):
"""
Crunch the numbers; solve the problem.
solve(IM A, IM b) -> IM
solve(DM A, DM b) -> DM
solve(SX A, SX b) -> SX
solve(MX A, MX b) -> MX
solve(IM A, IM b, str lsolver, dict opts) -> IM
solve(DM A, DM b, str lsolver, dict opts) -> DM
solve(SX A, SX b, str lsolver, dict opts) -> SX
solve(MX A, MX b, str lsolver, dict opts) -> MX
"""
return _casadi.solve(*args)
|
8866fba2efa51e7117d1d39fd7d2b7a259209c66
| 3,648,924
|
def NonNegativeInteger(num):
"""
Ensures that the number is non negative
"""
if num < 0:
raise SmiNetValidationError("A non-negative integer is required")
return num
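# Usage sketch; SmiNetValidationError is assumed to be defined elsewhere in this module:
print(NonNegativeInteger(3))  # 3
# NonNegativeInteger(-1) raises SmiNetValidationError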
|
dc5241e8dd7dbd07c5887c35a790ec4eab2593f0
| 3,648,925
|
def to_cartesian(r, ang):
"""Returns the cartesian coordinates of a polar point."""
x = r * np.cos(ang)
y = r * np.sin(ang)
return x, y
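# Usage sketch; assumes numpy is imported as np at module level:
import numpy as np
x, y = to_cartesian(1.0, np.pi / 2)
print(round(x, 6), round(y, 6))  # 0.0 1.0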
|
bc4e2e21c42b31a7a45185e58fb20a7b4a4b52e4
| 3,648,926
|
def _get_filtered_partially_learnt_topic_summaries(
topic_summaries, topic_ids):
"""Returns a list of summaries of the partially learnt topic ids and the ids
of topics that are no longer present.
Args:
topic_summaries: list(TopicSummary). The list of topic
summary domain objects to be filtered.
topic_ids: list(str). The ids of the topic corresponding to
the topic summary domain objects.
Returns:
tuple. A 2-tuple whose elements are as follows:
- list(TopicSummary). A filtered list with the summary domain
objects of the partially_learnt topics.
- list(str). The ids of the topics that are no longer present.
"""
nonexistent_partially_learnt_topic_ids = []
filtered_partially_learnt_topic_summaries = []
topic_rights = topic_fetchers.get_multi_topic_rights(topic_ids)
for index, topic_summary in enumerate(topic_summaries):
if topic_summary is None:
nonexistent_partially_learnt_topic_ids.append(topic_ids[index])
else:
topic_id = topic_summary.id
if not topic_rights[index].topic_is_published:
nonexistent_partially_learnt_topic_ids.append(topic_id)
else:
filtered_partially_learnt_topic_summaries.append(topic_summary)
return (
filtered_partially_learnt_topic_summaries,
nonexistent_partially_learnt_topic_ids)
|
c977966381dea0b1b91e904c3f9ec4823d26b006
| 3,648,927
|
def build_bar_chart_with_two_bars_per_label(series1, series2, series1_label, series2_label, series1_labels,
series2_labels,
title, x_axis_label, y_axis_label, output_file_name):
"""
This function builds a bar chart that has two bars per label.
:param series1: a list of values containing the data for the first series
:param series2: a list of values containing the data for the second series
:param series1_label: a label to be shown in the legend for the first series
:param series2_label: a label to be shown in the legend for the second series
:param series1_labels: a list of labels for the first series
:param series2_labels: a list of labels for the second series
:param title: string value of the title of the bar chart
:param x_axis_label: the label to show on the x axis
:param y_axis_label: the label to show on the y axis
:param output_file_name: the name and path of the file where the figure is to be exported to
:return: string path of the image that has been saved of the figure
"""
index_series1 = np.arange(len(series1_labels))
index_series2 = np.arange(len(series2_labels))
fig, ax = plt.subplots()
ax.bar(x=index_series1 - 0.4, height=series1, width=0.4, bottom=0, align='center', label=series1_label)
ax.bar(x=index_series2, height=series2, width=0.4, bottom=0, align='center', label=series2_label)
ax.set_xlabel(x_axis_label, fontsize=10)
ax.set_ylabel(y_axis_label, fontsize=10)
ax.set_xticks(index_series1)
ax.set_xticklabels(series1_labels, fontsize=10, rotation=30)
ax.set_title(title)
ax.legend(loc='upper right', frameon=True)
plt.show()
# fig.savefig(output_file_name, dpi=300, bbox_inches='tight')
# return '../{}'.format(output_file_name)
return "{}".format(write_to_image_file(fig, output_file_name, False, 300))
|
f43c4e525f0a2dd07b815883753974e1aa2e08cf
| 3,648,928
|
def calculateDescent():
"""
Calculate descent timestep
"""
global descentTime
global tod
descentTime = myEndTime
line = len(originalTrajectory)
for segment in reversed(originalTrajectory):
flInit = int(segment[SEGMENT_LEVEL_INIT])
flEnd = int(segment[SEGMENT_LEVEL_END])
status = segment[STATUS]
        if flInit == flEnd and status == '2':
            stop = True
            for i in range(1, 4):
                flInitAux = int(originalTrajectory[line - i][SEGMENT_LEVEL_INIT])
                flEndAux = int(originalTrajectory[line - i][SEGMENT_LEVEL_END])
                statAux = originalTrajectory[line - i][STATUS]
                if not (flInitAux == flEndAux and statAux == '2'):
                    stop = False
                    break
            if stop:
                break
        else:
            descentTime -= TIME_STEP
        line -= 1
tod = {}
tod['LAT'] = originalTrajectory[line][SEGMENT_LAT_INIT]
tod['LON'] = originalTrajectory[line][SEGMENT_LON_INIT]
tod['ALT'] = originalTrajectory[line][SEGMENT_LEVEL_INIT]
logger(myLogFile,rankMsg,LOG_STD,'Descending starts at time '+str(descentTime)+' [s]')
return line
|
11c04e49ef63f7f51cb874bf19cd245c40f0f6f4
| 3,648,929
|
def update_tutorial(request,pk):
"""View function for updating tutorial """
tutorial = get_object_or_404(Tutorial, pk=pk)
form = TutorialForm(request.POST or None, request.FILES or None, instance=tutorial)
if form.is_valid():
form.save()
messages.success(request=request, message="Congratulations! Tutorial has been updated.")
return redirect(to="dashboard")
context={
"form":form,
}
return render(request=request, context=context, template_name="dashboard/dashboard_addtutorialseries.html")
|
06fe827f26537fe376e79bc95d2ab04f879b971a
| 3,648,930
|
def get_transform_dest_array(output_size):
"""
Returns a destination array of the desired size. This is also used to define the
order of points necessary for cv2.getPerspectiveTransform: the order can change, but
it must remain consistent between these two arrays.
:param output_size: The size to make the output image ((width, height) tuple)
:return: The destination array, suitable to feed into cv2.getPerspectiveTransform
"""
bottom_right = [output_size[0] - 1, output_size[1] - 1]
bottom_left = [0, output_size[1] - 1]
top_left = [0, 0]
top_right = [output_size[0] - 1, 0]
return np.array(
[bottom_right, bottom_left, top_left, top_right],
dtype="float32")
|
84f092b5f263f3dd65ea9dfb18890454666e982d
| 3,648,931
|
from urllib.request import urlopen
def fetch(url):
    """
    Fetch the web page at the URL given by the `url` argument.
    The page encoding is taken from the Content-Type header.
    Returns the HTML as a str.
    """
    f = urlopen(url)
    # Get the encoding from the HTTP headers (fall back to utf-8 if it is not specified).
    encoding = f.info().get_content_charset(failobj="utf-8")
    html = f.read().decode(encoding)  # Decode the bytes to str using the detected encoding.
return html
|
31b69019f35e983a7a6c9d60b4367502b6540c56
| 3,648,932
|
import grp
import os
import subprocess
def _is_industrial_user():
"""Checking if industrial user is trying to use relion_it.."""
if not grp:
# We're not on a linux/unix system, therefore not at Diamond
return False
not_allowed = ["m10_valid_users", "m10_staff", "m08_valid_users", "m08_staff"]
uid = os.getegid()
fedid = grp.getgrgid(uid)[0]
groups = str(subprocess.check_output(["groups", str(fedid)]))
return any(group in groups for group in not_allowed)
|
8b462431a96b25b7fc9a456807bfcd087a799651
| 3,648,933
|
def get_rounded_coordinates(point):
"""Helper to round coordinates for use in permalinks"""
return str(round(point.x, COORDINATE_ROUND)) + '%2C' + str(round(point.y, COORDINATE_ROUND))
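# Hedged usage sketch: COORDINATE_ROUND is defined elsewhere in the module;
# a value of 5 and a simple namedtuple point are assumed here for illustration.
from collections import namedtuple
Point = namedtuple('Point', ['x', 'y'])
COORDINATE_ROUND = 5  # assumed precision for this sketch
print(get_rounded_coordinates(Point(12.3456789, -7.654321)))  # 12.34568%2C-7.65432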
|
a707864e4b62a91e609b3674bdfe0de7fdddf154
| 3,648,934
|
def rgb_to_hls(image: np.ndarray, eps: float = 1e-8) -> np.ndarray:
"""Convert a RGB image to HLS. Image data is assumed to be in the range
of [0.0, 1.0].
Args:
image (np.ndarray[B, 3, H, W]):
RGB image to be converted to HLS.
eps (float):
Epsilon value to avoid div by zero.
Returns:
hls (np.ndarray[B, 3, H, W]):
HLS version of the image.
"""
return cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
|
841379110bd273a7a6239e3598656c46acbde583
| 3,648,935
|
def array_max_dynamic_range(arr):
"""
Returns an array scaled to a minimum value of 0 and a maximum value of 1.
"""
finite_arr = arr[np.isfinite(arr)]
low = np.nanmin(finite_arr)
high = np.nanmax(finite_arr)
return (arr - low)/(high - low)
|
b2182c43dea2981b3759119cf1381a82a9e168b1
| 3,648,936
|
def production(*args):
"""Creates a production rule or list of rules from the input.
Supports two kinds of input:
A parsed string of form "S->ABC" where S is a single character, and
ABC is a string of characters. S is the input symbol, ABC is the output
symbols.
Neither S nor ABC can be any of the characters "-", ">" for obvious
reasons.
A tuple of type (S, Seq, ...) where S is the symbol of some hashable
type and seq is an finite iterable representing the output symbols.
Naturally if you don't want to use characters/strings to represent
symbols then you'll typically need to use the second form.
You can pass multiple inputs to generate multiple production rules,
in that case the result is a list of rules, not a single rule.
If you pass multiple inputs the symbol must differ since a simple
L-System only supports one production rule per symbol.
Example:
>>> production("F->Ab[]")
('F', ['A', 'b', '[', ']'])
>>> production("F->Ab[]", ("P", "bAz"), (1, (0,1)))
[('F', ['A', 'b', '[', ']']), ('P', ['b', 'A', 'z']), (1, [0, 1])]
"""
if len(args) < 1:
raise ValueError("missing arguments")
res = []
for a in args:
if issubclass(str, type(a)):
parts = a.split(sep="->", maxsplit=1)
if len(parts) < 2:
raise ValueError("couldn't parse invalid string \"{}\"".format(a))
res.append((parts[0], list(parts[1])))
elif issubclass(tuple, type(a)):
s, to, *vals = a
res.append((s, list(to)))
else:
raise TypeError("sorry don't know what to do with " + str(type(a)))
if len(res) == 1:
return res[0]
return res
|
bcb3e415a283f654ab65e0656a3c7e3912eeb53b
| 3,648,937
|
def _unpack_compute(input_place, num, axis):
"""Unpack a tensor into `num` tensors along axis dimension."""
input_shape = get_shape(input_place)
for index, _ in enumerate(input_shape):
input_shape[index] = input_shape[index] if index != axis else 1
output_shape_list = [input_shape for i in range(num)]
offset = 0
out_tensor_list = []
for i, t_shape in enumerate(output_shape_list):
out_tensor = tvm.compute(
t_shape,
lambda *index, t_shape=t_shape:
input_place(*_index_offset(t_shape, axis, offset, *index)),
name='tensor' + str(i))
out_tensor_list.append(out_tensor)
offset = offset + 1
return tuple(out_tensor_list)
|
89972e932d3c0bf3b5cbc548633dd2b2a6173c85
| 3,648,938
|
def flatten(items):
    """Convert a sequence of nested lists to a single flat list.
    Non-list items (including tuples and dicts) are appended as-is.
    """
result = []
for item in items:
if isinstance(item, list):
result += flatten(item)
else:
result.append(item)
return result
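# Usage sketch, assuming flatten() above is in scope:
print(flatten([1, [2, [3, 4]], [5]]))  # [1, 2, 3, 4, 5]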
|
d44e3391f791dfd2ec9b323c37c510a415bb23bf
| 3,648,939
|
from typing import Dict
def _datum_to_cap(datum: Dict) -> float:
"""Cap value of a datum."""
return _cap_str_to_mln_float(datum["cap"])
|
4554cb021f077e3b69495a6266a2596a968ee79d
| 3,648,940
|
def add_eval_to_game(game: chess.pgn.Game, engine: chess.engine.SimpleEngine, analysis_time: float,
should_re_add_analysis: bool = False) -> chess.pgn.Game:
"""
MODIFIES "game" IN PLACE
"""
current_move = game
while len(current_move.variations):
if "eval" in current_move.comment and not should_re_add_analysis:
continue
score, actual_eval = get_score(current_move.board(), engine, analysis_time=analysis_time)
current_move.comment += f'[%eval {score}]'
if current_move.eval().pov(chess.WHITE) != actual_eval:
# assert not rounding error
assert abs(current_move.eval().pov(chess.WHITE).score() - actual_eval.score()) == 1, \
f"eval's not equal, not rounding error: {current_move.eval().pov(chess.WHITE)} != {actual_eval}"
current_move = current_move.variations[0]
return game
|
72c677fc9f71cafca6af5b86cca69d896547835d
| 3,648,941
|
def MC_no(a,b,N,pi,mp):
""" Monte Carlo simulation drawn from beta distribution for the uninsured agents
Args:
N (integer): number of draws
a (integer): parameter
b (integer): parameter
Returns:
(numpy float): Monte Carlo integration that computes expected utility for given gamma and premium
"""
x = np.random.beta(a,b,N)
return np.mean(utility(mp['y']-x,mp))
|
7910a1894839eaac89af9df61a3f64fbb1eaf933
| 3,648,942
|
def get_conflicting_types(type, typedef_dict):
    """Finds typedefs defined in the same class that conflict. The general algorithm
    is: find a type definition that is identical to `type` but stored under a
    different key. If the type definition comes from a different
    class, neglect it. This is a pretty slow function for large dictionaries."""
conflicting_types = []
if type in typedef_dict:
typedef = typedef_dict[type] # Look for an identical typedef mapped under a different key.
for key, value in typedef_dict.items():
if((typedef == value) and (type != key) and (type.rpartition("::")[0] == key.rpartition("::")[0])):
conflicting_types.append(key)
return conflicting_types
|
5270ddfbf8a1f887de7ea9fcf2dcd32511ce6a32
| 3,648,943
|
from typing import Tuple
def extract_entity_type_and_name_from_uri(uri: str) -> Tuple[str, str]:
"""
    Extract the entity's type and name from its URI.
    :param uri: e.g. http://www.kg.com/kg/ontoligies/ifa#Firm/百度
:return: ('Firm', '百度')
"""
name_separator = uri.rfind('/')
type_separator = uri.rfind('#')
return uri[type_separator + 1: name_separator], uri[name_separator + 1:]
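# Usage sketch mirroring the docstring example:
uri = 'http://www.kg.com/kg/ontoligies/ifa#Firm/百度'
print(extract_entity_type_and_name_from_uri(uri))  # ('Firm', '百度')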
|
a70b1fdb5490f029cc6a88bee53eee048731a709
| 3,648,944
|
import os
import typing
def resolve_raw_resource_description(
raw_rd: GenericRawRD, root_path: os.PathLike, nodes_module: typing.Any
) -> GenericResolvedNode:
"""resolve all uris and sources"""
rd = UriNodeTransformer(root_path=root_path).transform(raw_rd)
rd = SourceNodeTransformer().transform(rd)
rd = RawNodeTypeTransformer(nodes_module).transform(rd)
return rd
|
63d9643a2b957d43155912365eed91616802cd8f
| 3,648,945
|
def ret_dict() -> dict:
    """
    Returns
    -------
    dict
        An empty dictionary.
    """
    return {}
|
79a29f69f5d0389d266f917500d25d696415c25a
| 3,648,946
|
def load_rokdoc_well_markers(infile):
"""
Function to load well markers exported from RokDoc in ASCII format.
"""
with open(infile, 'r') as fd:
buf = fd.readlines()
marker = []
well = []
md = []
tvdkb = []
twt = []
tvdss = []
x = []
y = []
for line in buf[5:]:
c1, c2, c3, c4, c5 = line.split("'")
c6, c7, c8, c9, c10, c11 = c5.strip().split()
marker.append(c2)
well.append(c4)
md.append(float(c6))
tvdkb.append(float(c7))
twt.append(float(c8))
tvdss.append(float(c9))
x.append(float(c10))
y.append(float(c11))
markers = {}
for each in list(set(well)):
markers[each] = {}
for i in range(len(marker)):
cur_well = well[i]
cur_marker = marker[i]
cur_md = md[i]
cur_tvdkb = tvdkb[i]
cur_tvdss = tvdss[i]
cur_twt = twt[i]
cur_x = x[i]
cur_y = y[i]
markers[cur_well][cur_marker] = {'md': cur_md, 'tvdkb': cur_tvdkb,
'tvdss': cur_tvdss, 'twt': cur_twt,
'x': cur_x, 'y': cur_y}
return markers
|
f3a781accdd84ff2f5aff12e59aeff05aa428d6a
| 3,648,947
|
def get_fees():
"""
Returns all information related to fees configured for the institution.
:returns: String containing xml or an lxml element.
"""
return get_anonymous('getFees')
|
17d16c65d8aefa5989f0c371ed2db2527691ccf9
| 3,648,948
|
import torch
import torch.nn.functional as F
from typing import Tuple
def resample_uv_to_bbox(
predictor_output: DensePoseChartPredictorOutput,
labels: torch.Tensor,
box_xywh_abs: Tuple[int, int, int, int],
) -> torch.Tensor:
"""
Resamples U and V coordinate estimates for the given bounding box
Args:
predictor_output (DensePoseChartPredictorOutput): DensePose predictor
output to be resampled
labels (tensor [H, W] of uint8): labels obtained by resampling segmentation
outputs for the given bounding box
box_xywh_abs (tuple of 4 int): bounding box that corresponds to predictor outputs
Return:
Resampled U and V coordinates - a tensor [2, H, W] of float
"""
x, y, w, h = box_xywh_abs
w = max(int(w), 1)
h = max(int(h), 1)
u_bbox = F.interpolate(predictor_output.u, (h, w), mode="bilinear", align_corners=False)
v_bbox = F.interpolate(predictor_output.v, (h, w), mode="bilinear", align_corners=False)
uv = torch.zeros([2, h, w], dtype=torch.float32, device=predictor_output.u.device)
for part_id in range(1, u_bbox.size(1)):
uv[0][labels == part_id] = u_bbox[0, part_id][labels == part_id]
uv[1][labels == part_id] = v_bbox[0, part_id][labels == part_id]
return uv
|
655fa330a0fb68d0a6f94084b1a4fde2e1368792
| 3,648,949
|
def run(cmd, capture_output=True):
"""
Run command locally with current user privileges
:returns: command output on success
:raises: LocalExecutionFailed if command failed"""
try:
LOG.debug("Running '%s' locally", cmd)
return api.local(cmd, capture=capture_output)
except (SystemExit, env.abort_exception) as e:
LOG.debug("Command '%s' failed with '%s'", cmd, e.message)
raise LocalExecutionFailed(e.message, e.code)
|
6022961e2be94527a9fe6a0f3fe8b2418b263c78
| 3,648,950
|
from typing import List
def get_error_code(output: int,
program: List[int]
) -> int:
"""
Determine what pair of inputs, "noun" and "verb", produces the output.
The inputs should be provided to the program by replacing the values
at addresses 1 and 2. The value placed in address 1 is called the "noun",
and the value placed in address 2 is called the "verb".
It returns the error code: 100 * noun + verb
Implementation options:
- By brute force, looping twice over 0-99
- Looping over the noun linearly, and using binary search for the verb,
since all the values of the program are integers, and therefore
positive (IMPLEMENTED)
- Optimize the possible value intervals for both noun and verb checking
the possible min and max outputs for each pair
"""
# Reset the memory
program = program.copy()
# Linear loop over the noun
for noun in range(0, 100):
program[1] = noun
# Binary search over the verb
verb = binary_search_code(program, output)
# Return the code if found
if verb != -1:
return (100 * noun + verb)
raise ValueError('Code not found!')
|
a3ff93557217197c3988b1f2ffde0a114ec6de81
| 3,648,951
|
def page(page_id):
"""Gets one page from the database."""
page = Page.objects.get(id=page_id)
return render_template('page.html', page=page)
|
011d0d96564e328674c9e919eed3647c41ebb0a4
| 3,648,952
|
def compute_Csigma_from_alphaandC(TT,minT,alphaT,CT,ibrav=4):
"""
    This function calculates the difference between the constant stress heat capacity
    :math:`C_{\sigma}` and the constant strain heat capacity :math:`C_{\epsilon}`
    from the volume *V* (obtained from the input lattice parameters *minT*), the thermal
    expansion tensor *alphaT* and the elastic constant tensor *CT*, all as functions
    of temperature. This is essentially the anisotropic equivalent of the equation
    :math:`C_p - C_v = T V beta^2 B_0` for the isotropic case (volume only)
    and it avoids a further numerical derivation to obtain :math:`C_{\sigma}`.
    It is however more complex in the anisotropic case, since *minT*, *alphaT* and
    in particular the elastic constant tensor *CT* must in principle be known
    including their temperature dependence.
.. Warning::
Still very experimental...
"""
CT = CT / RY_KBAR
Csigma = np.zeros(len(TT))
for i in range(1,len(TT)):
V = compute_volume(minT[i],ibrav)
for l in range(0,6):
for m in range(0,6):
temp = alphaT[i,l] * CT[l,m] * alphaT[i,m]
Csigma[i] = V * TT[i] * temp # this is C_sigma-C_epsilon at a given T
return Csigma
|
66159810aeeadd4614f66b6c7bc43a11a5ebf28d
| 3,648,953
|
def services(request):
"""
"""
context = {}
services = Service.objects.filter(active=True, hidden=False)
context["services"] = services
context["services_nav"] = True
return render(request, "services.html", context)
|
45792f2032236a74f8edd2141bf10a2dd7b6c075
| 3,648,954
|
def _create_presigned_url(method, object_name, duration_in_seconds=600):
"""
Create presigned S3 URL
"""
s3_client = boto3.client('s3',
endpoint_url=CONFIG.get('s3', 'url'),
aws_access_key_id=CONFIG.get('s3', 'access_key_id'),
aws_secret_access_key=CONFIG.get('s3', 'secret_access_key'))
if method == 'get':
try:
response = s3_client.generate_presigned_url('get_object',
Params={'Bucket':CONFIG.get('s3', 'bucket'), 'Key': object_name},
ExpiresIn=duration_in_seconds)
except Exception:
logger.critical('Unable to generate presigned url for get')
return None
else:
try:
response = s3_client.generate_presigned_url('put_object',
Params={'Bucket':CONFIG.get('s3', 'bucket'), 'Key':object_name},
ExpiresIn=duration_in_seconds,
HttpMethod='PUT')
except Exception:
logger.critical('Unable to generate presigned url for put')
return None
return response
|
f285d90058d3f6d450e82917d883f97100b78889
| 3,648,955
|
def read_data(model_parameters, ARGS):
"""Read the data from provided paths and assign it into lists"""
data = pd.read_pickle(ARGS.path_data)
y = pd.read_pickle(ARGS.path_target)['target'].values
data_output = [data['codes'].values]
if model_parameters.numeric_size:
data_output.append(data['numerics'].values)
if model_parameters.use_time:
data_output.append(data['to_event'].values)
return (data_output, y)
|
66f89d87b22579f3d06a1d8f0faf0db4bc0fd13d
| 3,648,956
|
def _is_src(file):
""" Returns true if the file is a source file
Bazel allows for headers in the srcs attributes, we need to filter them out.
Args:
file (File): The file to check.
"""
if file.extension in ["c", "cc", "cpp", "cxx", "C", "c++", "C++"] and \
file.is_source:
return True
return False
|
b0466073d4d1b05c5cab37946fb6ca8432dc752d
| 3,648,957
|
def constructResponseObject(responsePassed):
"""
    constructs an Error response object, even if the response passed in is None
"""
    if responsePassed is not None:
temp_resp = Response()
temp_resp.status_code = responsePassed.status_code or 404
if((temp_resp.status_code >= 200) and (temp_resp.status_code < 300)):
temp_resp.status_code = 404
temp_resp.reason = 'Bad Request'
details = 'UnexpectedError'
temp_resp.headers = {'Content-Type': 'text/html', 'Warning': details}
else:
temp_resp.reason = responsePassed.reason or 'Bad Request'
details = responsePassed.content or 'UnexpectedError'
temp_resp.headers = {'Content-Type': 'text/html', 'WWW-Authenticate': details}
else:
temp_resp = Response()
temp_resp.reason = 'Bad Request'
temp_resp.status_code = 404
details = 'UnexpectedError'
temp_resp.headers = {'Content-Type': 'text/html', 'WWW-Authenticate': details}
return temp_resp
|
e5f5aa0f87db30598e85fe66e8bf3062eac4388c
| 3,648,958
|
def calculate_signal_strength(rssi):
# type: (int) -> int
"""Calculate the signal strength of access point."""
signal_strength = 0
if rssi >= -50:
signal_strength = 100
else:
signal_strength = 2 * (rssi + 100)
return signal_strength
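# Usage sketch, assuming calculate_signal_strength() above is in scope:
print(calculate_signal_strength(-45))  # 100 (rssi >= -50)
print(calculate_signal_strength(-70))  # 60  (2 * (-70 + 100))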
|
d5a0955446e0fe0548639ddd1a849f7e7901c36b
| 3,648,959
|
from datetime import datetime, timezone
async def verify_email(token: str, auth: AuthJWT = Depends()):
"""Verify the user's email with the supplied token"""
# Manually assign the token value
auth._token = token # pylint: disable=protected-access
user = await User.by_email(auth.get_jwt_subject())
if user.email_confirmed_at is not None:
raise HTTPException(400, "Email is already verified")
if user.disabled:
raise HTTPException(400, "Your account is disabled")
user.email_confirmed_at = datetime.now(tz=timezone.utc)
await user.save()
return Response(status_code=200)
|
4e6eebd22b6206fa20f9c03230c09b470c414740
| 3,648,960
|
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up a National Weather Service entry."""
hass_data = hass.data.setdefault(DOMAIN, {})
station = entry.data[CONF_STATION]
radar = Nexrad(station)
radar_update = Debouncer(
hass, _LOGGER, cooldown=60, immediate=True, function=radar.update
)
await radar_update.async_call()
_LOGGER.debug("layers: %s", radar.layers)
if radar.layers is None:
raise ConfigEntryNotReady
hass_data[entry.entry_id] = {"radar": radar, "radar_update": radar_update}
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
return True
|
eb05efea751b2dc739157e394a7c892788a3bf3f
| 3,648,961
|
def lookAtThisMethod(
first_parameter,
second_paramter=None,
third_parameter=32,
fourth_parameter="a short string as default argument",
**kwargs
):
"""The point of this is see how it reformats parameters
It might be fun to see what goes on
Here I guess it should respect this spacing, since we are in a comment.
We are done!
"""
return kwargs["whatever"](
first_parameter * third_parameter,
second_paramter,
fourth_parameter,
"extra string because I want to",
)
|
8dab028b40184bb7cf686c524d5abd452cee2bc3
| 3,648,962
|
from typing import Sequence
from typing import Callable
from typing import List
from typing import Set
def data_incremental_benchmark(
benchmark_instance: GenericCLScenario,
experience_size: int,
shuffle: bool = False,
drop_last: bool = False,
split_streams: Sequence[str] = ("train",),
custom_split_strategy: Callable[
[ClassificationExperience], Sequence[AvalancheDataset]
] = None,
experience_factory: Callable[
[ClassificationStream, int], ClassificationExperience
] = None,
):
"""
High-level benchmark generator for a Data Incremental setup.
This generator accepts an existing benchmark instance and returns a version
of it in which experiences have been split in order to produce a
Data Incremental stream.
In its base form this generator will split train experiences in experiences
of a fixed, configurable, size. The split can be also performed on other
streams (like the test one) if needed.
The `custom_split_strategy` parameter can be used if a more specific
splitting is required.
Beware that experience splitting is NOT executed in a lazy way. This
means that the splitting process takes place immediately. Consider
optimizing the split process for speed when using a custom splitting
strategy.
Please note that each mini-experience will have a task labels field
equal to the one of the originating experience.
The `complete_test_set_only` field of the resulting benchmark instance
will be `True` only if the same field of original benchmark instance is
`True` and if the resulting test stream contains exactly one experience.
:param benchmark_instance: The benchmark to split.
:param experience_size: The size of the experience, as an int. Ignored
if `custom_split_strategy` is used.
:param shuffle: If True, experiences will be split by first shuffling
instances in each experience. This will use the default PyTorch
random number generator at its current state. Defaults to False.
Ignored if `custom_split_strategy` is used.
:param drop_last: If True, if the last experience doesn't contain
`experience_size` instances, then the last experience will be dropped.
Defaults to False. Ignored if `custom_split_strategy` is used.
:param split_streams: The list of streams to split. By default only the
"train" stream will be split.
:param custom_split_strategy: A function that implements a custom splitting
strategy. The function must accept an experience and return a list
of datasets each describing an experience. Defaults to None, which means
that the standard splitting strategy will be used (which creates
experiences of size `experience_size`).
    A good starting point for understanding the mechanism is to look at the
implementation of the standard splitting function
:func:`fixed_size_experience_split_strategy`.
:param experience_factory: The experience factory.
Defaults to :class:`GenericExperience`.
:return: The Data Incremental benchmark instance.
"""
split_strategy = custom_split_strategy
if split_strategy is None:
split_strategy = partial(
fixed_size_experience_split_strategy,
experience_size,
shuffle,
drop_last,
)
stream_definitions: TStreamsUserDict = dict(
benchmark_instance.stream_definitions
)
for stream_name in split_streams:
if stream_name not in stream_definitions:
raise ValueError(
f"Stream {stream_name} could not be found in the "
f"benchmark instance"
)
stream = getattr(benchmark_instance, f"{stream_name}_stream")
split_datasets: List[AvalancheDataset] = []
split_task_labels: List[Set[int]] = []
exp: ClassificationExperience
for exp in stream:
experiences = split_strategy(exp)
split_datasets += experiences
for _ in range(len(experiences)):
split_task_labels.append(set(exp.task_labels))
stream_def = StreamUserDef(
split_datasets,
split_task_labels,
stream_definitions[stream_name].origin_dataset,
False,
)
stream_definitions[stream_name] = stream_def
complete_test_set_only = (
benchmark_instance.complete_test_set_only
and len(stream_definitions["test"].exps_data) == 1
)
return GenericCLScenario(
stream_definitions=stream_definitions,
complete_test_set_only=complete_test_set_only,
experience_factory=experience_factory,
)
|
e24756245b3d6b5126d32fb66541e4cd23a993c2
| 3,648,963
|
import typing
def generate_doc_from_endpoints(
routes: typing.List[tornado.web.URLSpec],
*,
api_base_url,
description,
api_version,
title,
contact,
schemes,
security_definitions,
security
):
"""Generate doc based on routes"""
from tornado_swagger.model import export_swagger_models # pylint: disable=C0415
swagger_spec = {
"openapi": "3.0.0",
"info": {
"title": title,
"description": _clean_description(description),
"version": api_version,
},
"basePath": api_base_url,
"schemes": schemes,
"components": {
"schemas": export_swagger_models(),
},
"paths": _extract_paths(routes),
}
if contact:
swagger_spec["info"]["contact"] = {"name": contact}
if security_definitions:
swagger_spec["securityDefinitions"] = security_definitions
if security:
swagger_spec["security"] = security
return swagger_spec
|
943a3c7a8bdd71bce92089c9dd89a7c262124dc0
| 3,648,964
|
def _filter_builds(build: Build) -> bool:
"""
Determine if build should be filtered.
:param build: Build to check.
    :return: True if the build's display name starts with "!".
"""
if build.display_name.startswith("!"):
return True
return False
|
c3bbc91595752b92b77034afcdd35d4a0b70f737
| 3,648,965
|
def load_transformers(model_name, skip_model=False):
"""Loads transformers config, tokenizer, and model."""
config = AutoConfig.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(
model_name,
add_prefix_space=True,
additional_special_tokens=('[T]', '[P]'),
)
model = AutoModelForMaskedLM.from_pretrained(model_name, config=config)
return config, tokenizer, model
|
49b8809745ba70b2a2a8ccd6063bbe2ea3acbae0
| 3,648,966
|
def build_test_data(data):
"""
Generates various features needed to predict
the class of the news.
Input: DataFrame
Returns Array of generated features.
"""
data = process(data)
generators = [
CountFeatureGenerator,
TfidfFeatureGenerator,
Word2VecFeatureGenerator,
SentimentFeatureGenerator,
ReadabilityFeatureGenerator
]
# Class generators one by one to generate features
features = [feature for generator in generators for feature in generator(data)]
print("Total number of raw features: {}".format(len(features)))
# Stack and return the features
return np.hstack(features)
|
a4bd3af16deff190471ffbd6028cb47b314d498f
| 3,648,967
|
def set_password_for_sub_account(account_id, password):
"""
Create a message to set the password for a given sub-account.
:param account_id: Integer representing the ID of the account
:param password: String representing the password for the sub-account
:return: Message (dict)
"""
data = sanitize(account_id=account_id, password=password)
msg = message(method=ACCOUNT_GET_SUB_ACCOUNTS)
params = {"sid": data["account_id"], "password": data["password"]}
return add_params_to_message(params, msg)
|
02887e73bd11c551f472c033e256882206e9042a
| 3,648,968
|
def post(req, api):
"""
Append a story to our rpg
Input:
content: string
Output:
string
"""
api.debug(req.body['content'])
return 'Success'
|
008313afcd838a525268959118cfc19b3c23535e
| 3,648,969
|
import random
def generate_batch(n, batch_size):
""" Generates a set of batch indices
Args:
n: total number of samples in set
batch_size: size of batch
Returns:
batch_index: a list of length batch_size containing randomly sampled indices
"""
    batch_index = random.sample(range(n), batch_size)
return batch_index
|
a8fe7d9356b30824210c89e2defa4b6bae697ffd
| 3,648,970
|
def solve(si, y, infile):
"""Conducts the solution step, based on the dopri5 integrator in scipy
:param si: the simulation info object
:type si: SimInfo
:param y: the solution vector
:type y: np.ndarray
:param infile: the imported infile module
:type infile: imported module
"""
n = ode(f_n).set_integrator('dopri5')
    n.set_initial_value(y0_n(si), si.timer.t0.magnitude)
n.set_f_params(si)
th = ode(f_th).set_integrator('dopri5', nsteps=infile.nsteps)
th.set_initial_value(y0_th(si), si.timer.t0.magnitude)
th.set_f_params(si)
while (n.successful() and
n.t < si.timer.tf.magnitude and
th.t < si.timer.tf.magnitude):
si.timer.advance_one_timestep()
si.db.record_all()
n.integrate(si.timer.current_time().magnitude)
update_n(n.t, n.y, si)
th.integrate(si.timer.current_time().magnitude)
update_th(th.t, n.y, th.y, si)
return si.y
|
079e97394befb39d6c65dc0c8a7eb2d57cf37920
| 3,648,971
|
def base(request, format=None):
"""Informational version endpoint."""
message = f"Welcome to {VERSION} of the Cannlytics API. Available endpoints:\n\n"
for endpoint in ENDPOINTS:
message += f"{endpoint}\n"
return Response({ "message": message}, content_type="application/json")
|
51d8deaa6b5fda2b69fc8674e0d9d927d910c0ba
| 3,648,972
|
def discover(discover_system: bool = True) -> Discovery:
"""
Discover Reliably capabilities from this extension.
"""
logger.info("Discovering capabilities from chaostoolkit-reliably")
discovery = initialize_discovery_result(
"chaostoolkit-reliably", __version__, "reliably"
)
discovery["activities"].extend(load_exported_activities())
return discovery
|
78bb7bcb086d08099f5585c3e27452647ecb8d64
| 3,648,973
|
from collections import Counter
def frequent_word(message: str) -> str:
"""get frequent word."""
words = Counter(message.split())
result = max(words, key=words.get)
print(result)
return result
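# Usage sketch, assuming frequent_word() above is in scope:
frequent_word("to be or not to be")  # prints and returns 'to'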
|
86af88287a8874d824b96e1a96e430555db64f2e
| 3,648,974
|
def parse_bjobs_nodes(output):
"""Parse and return the bjobs command run with
options to obtain node list, i.e. with `-w`.
This function parses and returns the nodes of
a job in a list with the duplicates removed.
:param output: output of the `bjobs -w` command
:type output: str
:return: compute nodes of the allocation or job
:rtype: list of str
"""
nodes = []
lines = output.split("\n")
nodes_str = lines[1].split()[5]
nodes = nodes_str.split(":")
return list(dict.fromkeys(nodes))
|
a582307d0d869d2dbde454928571246320cb6e31
| 3,648,975
|
def find_nearest_array(array, array_comparison, tol = 1e-4):
"""
Find nearest array
@ In, array, array-like, the array to compare from
@ In, array_comparison, array-like, the array to compare to
@ In, tol, float, the tolerance
"""
array_comparison = np.asarray(array_comparison)
indeces = np.zeros(len(array), dtype=bool)
notFound = np.zeros(len(array), dtype=bool)
for val in array_comparison:
idx, diff = find_nearest(array, val)
rel = (np.abs(diff / val)) if val != 0 else np.abs(val)
if rel <= tol:
indeces[idx] = True
else:
notFound[idx] = True
return indeces, not np.any(notFound)
|
5c15cb58d50eef03ae7bcffed18bc60587fec0fc
| 3,648,976
|
import os
def create_bar_filled_line_fusion_chart(fname, frame_data, chart_dir=''):
"""Create the bar filled line fusion chart from window data"""
path_to_image = os.path.join(chart_dir, ChartType.BAR_FLINE_FUSION.value, "%s.png" % fname)
if not os.path.exists(path_to_image):
fig_obj, ax_fline_obj = plt.subplots()
time_series = convert_to_list(frame_data['Time'])
trading_volume = convert_to_list(frame_data['Trade Volume'])
high_prices = convert_to_list(frame_data['Trade High'])
low_prices = convert_to_list(frame_data['Trade Low'])
mean_prices = ((np.array(high_prices) + np.array(low_prices)) / 2).tolist()
transformed_time_series = list(range(len(time_series)))
ax_bar_obj = draw_fusion_bar_chart(ax_fline_obj, transformed_time_series, trading_volume)
ax_fline_obj.plot(transformed_time_series, high_prices, color='green', linewidth=0.1)
ax_fline_obj.plot(transformed_time_series, low_prices, color='red', linewidth=0.1)
ax_fline_obj.fill_between(transformed_time_series, high_prices, mean_prices, color='green')
ax_fline_obj.fill_between(transformed_time_series, mean_prices, low_prices, color='red')
format_and_save_chart(path_to_image, fig_obj, ax_fline_obj, ax_bar_obj)
return decode_img(path_to_image)
|
8fb33cb18fe919447e29c1b931e400f11ca4d771
| 3,648,977
|
from datetime import datetime
def create_block_statistics_on_addition(
block_hash: str,
block_hash_parent: str,
chain_name: str,
deploy_cost_total: int,
deploy_count: int,
deploy_gas_price_avg: int,
era_id: int,
height: int,
is_switch_block: bool,
network: str,
size_bytes: str,
state_root_hash: str,
status: int,
timestamp: datetime,
proposer: str,
) -> BlockStatistics:
"""Returns a domain object instance: BlockStatistics.
"""
return BlockStatistics(
block_hash = block_hash,
block_hash_parent = block_hash_parent,
chain_name = chain_name,
deploy_cost_total = deploy_cost_total,
deploy_count = deploy_count,
deploy_gas_price_avg = deploy_gas_price_avg,
era_id = era_id,
height = height,
is_switch_block = is_switch_block,
network = network,
size_bytes = size_bytes,
state_root_hash = state_root_hash,
status = status,
timestamp = timestamp,
proposer = proposer,
)
|
921e7045ff0df080a3769d0e62753e0e5a13e4af
| 3,648,978
|
import argparse
def get_arg():
"""解析参数"""
parser = argparse.ArgumentParser(prog='prcdns', description='google dns proxy.')
parser.add_argument('--debug', help='debug model,default NO', default=False)
parser.add_argument('-l', '--listen', help='listening IP,default 0.0.0.0', default='0.0.0.0')
parser.add_argument('-p', '--port', help='listening Port,default 3535', default=3535)
parser.add_argument('-r', '--proxy', help='Used For Query Google DNS,default direct', default=None)
parser.add_argument('-ip', '--myip', help='IP location', default=None)
return parser.parse_args()
|
21be51cbace8714dd10d7c18a4f2e5bcddcfc1da
| 3,648,979
|
def text(title='Text Request', label='', parent=None, **kwargs):
"""
Quick and easy access for getting text input. You do not have to have a
QApplication instance, as this will look for one.
:return: str, or None
"""
# -- Ensure we have a QApplication instance
q_app = qApp()
# -- Get the text
name, ok = Qt.QtWidgets.QInputDialog.getText(
parent,
title,
label,
**kwargs
)
if not ok:
return None
return name
|
c3d0c4fab15f6882fea614f5eec252738abc3e1c
| 3,648,980
|
def get_key_score(chroma_vector, keys, key_index):
"""Returns the score of an approximated key, given the index of the key weights to try out"""
chroma_vector = np.rot90(chroma_vector,3)
chroma_vector = chroma_vector[0,:]
key_vector = keys[key_index,:]
score = np.dot(key_vector,chroma_vector)
return score
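# Hedged usage sketch: score a C-major hypothesis against a 12x1 chroma column
# vector. The profile values are the commonly cited Krumhansl-Kessler major-key
# weights, used here purely as illustrative data.
def _example_key_score():
    major_profile = np.array([6.35, 2.23, 3.48, 2.33, 4.38, 4.09,
                              2.52, 5.19, 2.39, 3.66, 2.29, 2.88])
    keys = np.stack([np.roll(major_profile, i) for i in range(12)])  # one row per key
    chroma = np.zeros((12, 1))
    chroma[[0, 4, 7], 0] = 1.0  # energy on C, E and G only
    return get_key_score(chroma, keys, key_index=0)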
|
b41d4f3af4d621ba46b8786a5c906c470454fcc1
| 3,648,981
|
def app():
"""Create the test application."""
return flask_app
|
01fbd44671a342be38560bc4c5b089a55214caf3
| 3,648,982
|
def coord_for(n, a=0, b=1):
"""Function that takes 3 parameters or arguments, listed above, and returns a list of the interval division coordinates."""
a=float(a)
b=float(b)
coords = []
inc = (b-a)/ n
for x in range(n+1):
coords.append(a+inc*x)
return coords
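# Worked example: dividing [0, 1] into four equal subintervals yields five coordinates.
if __name__ == "__main__":
    print(coord_for(4))  # expected: [0.0, 0.25, 0.5, 0.75, 1.0]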
|
57e12200dcc113786c9deeb4865d7906d74c763f
| 3,648,983
|
import numpy as np
def find_indeces_vector(transect_lons, transect_lats, model_lons, model_lats,
tols={
'NEMO': {'tol_lon': 0.104, 'tol_lat': 0.0388},
'GEM2.5': {'tol_lon': 0.016, 'tol_lat': 0.012},
}):
"""Find all indeces for the given vector
:arg transect_lons: Longitude of point 1.
:type lon1: float or :py:class:`numpy.ndarray`
:arg transect_lats: Latitude of point 1.
:type lat1: float or :py:class:`numpy.ndarray`
:arg model_lons: Longitude of point 2.
:type lon2: float or :py:class:`numpy.ndarray`
:arg model_lats: Latitude of point 2.
:type lat2: float or :py:class:`numpy.ndarray`
:returns: vector of i and j indices associated with the input lons and lats
:rtype: float or :py:class:`numpy.ndarray`
"""
transect_i = np.array([])
transect_j = np.array([])
for k in range(0,len(transect_lons)):
i, j = find_closest_model_point(transect_lons[k], transect_lats[k], model_lons, model_lats,tols=tols)
try:
transect_i = np.append(transect_i, int(i))
transect_j = np.append(transect_j, int(j))
        except (TypeError, ValueError):  # no valid model point found
transect_i = np.append(transect_i, np.nan)
transect_j = np.append(transect_j, np.nan)
return transect_i, transect_j
|
93ad7a8cd16e154069618e606293e64ee3266501
| 3,648,984
|
import tensorflow as tf
def _make_prediction_ops(features, hparams, mode, num_output_classes):
"""Returns (predictions, predictions_for_loss)."""
del hparams, mode
logits = tf.layers.dense(
features, num_output_classes, name='logits')
confidences = tf.nn.softmax(logits)
confidence_of_max_prediction = tf.reduce_max(confidences, axis=-1)
predicted_index = tf.argmax(confidences, axis=-1)
predictions = {
'label': predicted_index,
'logits': logits,
'confidences': confidences,
'confidence_of_max_prediction': confidence_of_max_prediction
}
predictions_for_loss = logits
return predictions, predictions_for_loss
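# Hedged usage sketch (TF1-style graph code, matching the API used above): build the
# prediction heads for 64-dimensional feature vectors and 10 output classes. The
# placeholder shape is illustrative only.
def _example_prediction_ops():
    features = tf.placeholder(tf.float32, shape=[None, 64])
    predictions, predictions_for_loss = _make_prediction_ops(
        features, hparams=None, mode=None, num_output_classes=10)
    return predictions['label'], predictions_for_loss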
|
0a58ab8753a39b3e9da67dfc7988383585e6562e
| 3,648,985
|
import hail as hl
from bokeh.models import CategoricalColorMapper, HoverTool
from bokeh.plotting import figure
def manhattan_loadings(
iteration,
gtf,
loadings,
title=None,
size=4,
hover_fields=None,
collect_all=False,
n_divisions=500,
):
"""modify hail manhattan plot"""
palette = [
'#1f77b4',
'#ff7f0e',
'#2ca02c',
'#d62728',
'#9467bd',
'#8c564b',
'#e377c2',
'#7f7f7f',
'#bcbd22',
'#17becf',
]
# add gene names, p-values, and locus info
loadings = loadings.annotate(gene_names=gtf[loadings.locus].gene_name)
pvals = hl.abs(loadings.loadings[iteration])
locus = loadings.locus
if hover_fields is None:
hover_fields = {}
hover_fields['locus'] = hl.str(locus)
hover_fields['gene'] = hl.str(loadings.gene_names)
source_pd = (
hl.plot.plots._collect_scatter_plot_data( # pylint: disable=protected-access
('_global_locus', locus.global_position()),
('_pval', pvals),
fields=hover_fields,
n_divisions=None if collect_all else n_divisions,
)
)
source_pd['p_value'] = source_pd['_pval']
source_pd['_contig'] = [locus.split(':')[0] for locus in source_pd['locus']]
observed_contigs = set(source_pd['_contig'])
ref = locus.dtype.reference_genome
observed_contigs = [
contig for contig in ref.contigs.copy() if contig in observed_contigs
]
contig_ticks = [
ref._contig_global_position(contig) # pylint: disable=protected-access
+ ref.contig_length(contig) // 2
for contig in observed_contigs
]
color_mapper = CategoricalColorMapper(
factors=ref.contigs, palette=palette[:2] * int((len(ref.contigs) + 1) / 2)
)
p = figure(
title=title, x_axis_label='Chromosome', y_axis_label='Loadings', width=1000
)
(
p,
_,
legend,
_,
_,
_,
) = hl.plot.plots._get_scatter_plot_elements( # pylint: disable=protected-access
p,
source_pd,
x_col='_global_locus',
y_col='_pval',
label_cols=['_contig'],
colors={'_contig': color_mapper},
size=size,
)
legend.visible = False
p.xaxis.ticker = contig_ticks
p.xaxis.major_label_overrides = dict(zip(contig_ticks, observed_contigs))
p.select_one(HoverTool).tooltips = [
t for t in p.select_one(HoverTool).tooltips if not t[0].startswith('_')
]
return p
|
5f99c1f5a16ee35c056ef019870d2d89a31ba988
| 3,648,986
|
def preprocess_point(p, C):
"""Preprocess a single point (a clip).
WARN: NAN-preserving
Arguments:
p {ndarray} -- shape = (variable, C.joint_n, C.joint_d)
C {DDNetConfig} -- A Config object
Returns:
ndarray, ndarray -- X0, X1 to input to the net
"""
assert p.shape[1:] == (C.joint_n, C.joint_d)
p = zoom(p,target_l=C.frame_l,joints_num=C.joint_n,joints_dim=C.joint_d)
    # interpolate to the right number of frames
assert p.shape == (C.frame_l, C.joint_n, C.joint_d)
M = get_CG(p, C)
return M, p
|
a1f2c1eda877562439c0b194490a6ac50df6bd81
| 3,648,987
|
def link_to_existing_user_by_email_if_backend_is_trusted(backend, details, user=None, *args, **kwargs):
"""Return user entry with same email address as one returned on details."""
if user or not _is_trusted_email_backend(backend):
return
email = details.get('email')
if email:
# try to link accounts registered with the same email address,
# only if it's a single object. AuthException is raised if multiple
# objects are returned
try:
return {'user': EmailAddress.objects.get(email=email).user}
except MultipleObjectsReturned:
            raise AuthException(backend, 'Not unique email address.')
except ObjectDoesNotExist:
pass
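# Hedged configuration sketch: wire this step into python-social-auth's pipeline in
# Django settings. The dotted path "myapp.pipeline" is hypothetical; the surrounding
# entries are the usual social-core defaults.
SOCIAL_AUTH_PIPELINE = (
    'social_core.pipeline.social_auth.social_details',
    'social_core.pipeline.social_auth.social_uid',
    'social_core.pipeline.social_auth.auth_allowed',
    'social_core.pipeline.social_auth.social_user',
    'myapp.pipeline.link_to_existing_user_by_email_if_backend_is_trusted',
    'social_core.pipeline.user.create_user',
    'social_core.pipeline.social_auth.associate_user',
    'social_core.pipeline.social_auth.load_extra_data',
    'social_core.pipeline.user.user_details',
)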
|
58f2363030ae0b8f8d3533dedbae6c7af304136c
| 3,648,988
|
import os
import glob
from collections import OrderedDict
import ipdb
import pandas as pd
import xarray as xr
from dask.diagnostics import ProgressBar
def _monthlyfile(yr, path, ppath, force, layer, ANfn, latmax, latmin, lonmin, lonmax):
    """
    Process the monthly data into an annual file
    args:
        yr: int
            year
        path: str
            dir to do the work in
        ppath: str
            processed path
        layer: str
            name of the data variable/layer to extract
        ANfn: str
            file name of the annual netCDF file to create
        latmax, latmin, lonmin, lonmax: float
            bounding box used to subset the data
    """
# ========== get the web address ==========
address = "ftp://anon-ftp.ceda.ac.uk/neodc/esacci/fire/data/burned_area/MODIS/pixel/v5.1/compressed/%d/" % yr
    # ========== list to hold the file names ==========
ptnames = []
# ========== process the mask ==========
def _maskmaker(ds_tomask):
print("starting the 2018 mask")
ipdb.set_trace()
# dates = datefixer(yr, 12, 31)
maskfn = "/media/ubuntu/Seagate Backup Plus Drive/Data51/BurntArea/esacci/FRI/esacci_landseamask.nc"
# change the values
with ProgressBar():
ds_mask = (ds_tomask != -2).astype("float32").sum(dim="time").compute()
# +++++ ds_mask has 1 for land 0 for water +++++
# that is then summed over time
# ========== create a date ==========
dates = datefixer(2018, 12, 31)
        # ========== find places with a few good values ==========
ds_mask = (ds_mask>=5).astype("float32")
ds_mask = ds_mask.where(ds_mask == 1.0).rename({layer:"mask"}).expand_dims({"time":dates["CFTime"]})
# ===== fix the time =====
# ds_mask["time"] = dates["CFTime"]
ds_mask.time.attrs["calendar"] = dates["calendar"]
ds_mask.time.attrs["units"] = dates["units"]
# da_mask = (da_mask == -2).mean(dim="time")
# da_mask = attrs_fixer(da_mask, dates)
# ========== Setup the metadata ==========
ds_mask.attrs = GlobalAttributes(maskfn)
# layers = OrderedDict()
# layers["mask"] = da_mask
# ========== create the dataset ==========
ds_mask = tempNCmaker(ds_mask, maskfn, "mask", chunks={"latitude":1000, 'longitude': 1000}, pro = "%d mask"% yr)
return ds_mask
# ========== loop over the month ==========
for mn in range(1, 13):
print(yr, mn, pd.Timestamp.now())
# ========== Create the file name and check if they need to get downloaded ==========
fnA = "%d%02d01-ESACCI-L3S_FIRE-BA-MODIS-AREA_4-fv5.1.tar.gz" % (yr, mn)
fnE = "%d%02d01-ESACCI-L3S_FIRE-BA-MODIS-AREA_3-fv5.1.tar.gz" % (yr, mn)
for fn in [fnA, fnE]:
filefetcher(fn, address, path)
# ========== Make the file name and see if it already exists ==========
ftout = path+"tmp/%d%02d01_tmp_%s.nc" %(yr, mn, layer)
if os.path.isfile(ftout):
ds_testing = xr.open_dataset(ftout)
ptnames.append(ftout)
print(ds_testing[layer].shape)
continue
# ========== open the components ==========
fn_XR = glob.glob(path+"tmp/%d%02d*-%s.tif" % (yr, mn, layer))
renm = {"band":"time","x":"longitude", "y":"latitude"}
da_ls = [xr.open_rasterio(fnr).rename(renm).sel(
dict(
latitude=slice(latmax, latmin),
longitude=slice(lonmin, lonmax))) for fnr in fn_XR]
# Address tiny rounding errors in the data
da_ls[0]["latitude"] = da_ls[1].latitude.values
# ========== Merge into a single dataset ==========
ds_out = xr.Dataset({layer:xr.concat(da_ls, dim="longitude").chunk({"longitude":1000})})#.sortby("latitude", ascending=False)#.transpose("latitude")
date = pd.Timestamp("%d-%02d-01" % (yr, mn))
ds_out["time"] = [date]
        # ========== Save a temporary netCDF file ==========
ds_out = tempNCmaker(ds_out, ftout, layer, chunks={'longitude': 1000}, skip=False)
# ========== append the save name ==========
ptnames.append(ftout)
# ========== Build annual dataset ==========
da = xr.open_mfdataset(
ptnames, concat_dim="time",
chunks={"time":1, "latitude":1000, 'longitude': 1000})[layer]
da = da.reindex(latitude=list(reversed(da.latitude)))
if yr == 2018:
# Copy the data
da_mask = _maskmaker(da.copy())
ipdb.set_trace()
# ========== mask it away ==========
da_bl = da.where( da > 0)
    # ========== Aggregate and finalise the da ==========
dates = datefixer(yr, 12, 31)
da_out = da_bl.sum("time")
da_out = da_out.where(da_out<= 0, 1).rename()
da_out = attrs_fixer(da_out, dates)
# ========== Setup the metadata ==========
global_attrs = GlobalAttributes(ANfn)
layers = OrderedDict()
layers["BA"] = da_out
# ========== create the dataset ==========
ds = xr.Dataset(layers, attrs= global_attrs)
    # ========== save the annual burnt area dataset ==========
ds = tempNCmaker(ds, ANfn, "BA", chunks={"latitude":1000, 'longitude': 1000}, pro = "%d Burnt Area"% yr)
# ========== return the dataset ==========
return ds
|
8633f65b08052aa3f8a6998a45af7ed964413638
| 3,648,989
|
import cv2
def get_global_threshold(image_gray, threshold_value=130):
    """ Apply a global threshold to the image and return a binary (black/white) image object.
        A single cut-off value (threshold_value) is used across the whole image:
        pixels brighter than the threshold become white, the rest become black.
        The input must be a 2-D grayscale image.
    :param image_gray: 2-D grayscale image
    :param threshold_value: cut-off value applied to the whole image
    :return: binary image produced by the global threshold
    """
copy = image_gray.copy() # copy the image to be processed
_, binary_image = cv2.threshold(copy, threshold_value, 255, cv2.THRESH_BINARY)
return binary_image
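# Hedged usage sketch: read an image in grayscale (the file name is hypothetical)
# and binarise it with the default cut-off of 130.
def _example_threshold(path="sample.png"):
    gray = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    return get_global_threshold(gray, threshold_value=130)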
|
03c344a40c5a84027d790ddf404106efe29716e9
| 3,648,990
|
import torch
def get_batch(src_gen, trgt_gen, batch_size=10):
"""
    Return a batch of batch_size source/target spirals, as in get_rotated_src_target_spirals.
    Args:
        src_gen: generator whose generate() yields a source spiral
        trgt_gen: generator whose generate() yields a target spiral
        batch_size (int): number of samples in the batch
    Return:
        [torch.tensor, torch.tensor]: src and target batches
"""
batch_points = [src_gen.generate() for _ in range(batch_size)]
batch_targets = [trgt_gen.generate() for _ in range(batch_size)]
batch_points = [to_torch_tensor(i) for i in batch_points]
batch_targets = [to_torch_tensor(i) for i in batch_targets]
return torch.cat(batch_points), torch.cat(batch_targets)
|
6e0e4549c12fb252c54496f7947e5787970b64eb
| 3,648,991
|
import numpy as np
def compute_crop_parameters(image_size, bbox, image_center=None):
"""
Computes the principal point and scaling factor for focal length given a square
bounding box crop of an image.
These intrinsic parameters are used to preserve the original principal point even
after cropping the image.
Args:
image_size (int or array): Size of image, either length of longer dimension or
(N, H, C).
bbox: Square bounding box in xyxy (4,).
image_center: Center of projection/principal point (2,).
Returns:
principal_point: Coordinates in NDC using Pytorch3D convention with (1, 1)
as upper-left (2,).
crop_scale (float): Scaling factor for focal length.
"""
bbox = np.array(bbox)
b = max(bbox[2:] - bbox[:2])
if isinstance(image_size, int):
h = w = image_size
else:
h, w, *c = image_size
image_size = max(image_size)
if image_center is None:
image_center = np.array([w / 2, h / 2])
bbox_center = (bbox[:2] + bbox[2:]) / 2
crop_scale = b / image_size
principal_point = 2 * (bbox_center - image_center) / b
return principal_point, crop_scale
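# Worked example: a 100x100 crop of a 480x640x3 image whose bounding-box centre
# (450, 250) sits away from the image centre (320, 240), so the principal point
# offset is non-zero and the focal length scales by 100 / 640.
if __name__ == "__main__":
    pp, scale = compute_crop_parameters((480, 640, 3), bbox=[400, 200, 500, 300])
    print(pp, scale)  # expected: [2.6, 0.2] and 0.15625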
|
18ca5822bf86fb01ff8652fc9239ce6ae4d2801f
| 3,648,992
|
import tensorflow as tf
def input_fn_builder(input_file, seq_length, num_labels, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([num_labels], tf.int64),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
def _cast_features(features):
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in features:
t = features[name]
if t.dtype == tf.int64:
t = tf.cast(t, tf.int32)
features[name] = t
return features
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
features = tf.parse_single_example(record, name_to_features)
return _cast_features(features)
def file_based_input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.data.experimental.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
def serving_input_receiver_fn():
"""An input_fn that expects a serialized tf.Example."""
serialized_tf_example = tf.placeholder(
dtype=tf.string,
name='input_example_tensor')
receiver_tensors = {'examples': serialized_tf_example}
features = tf.parse_example(serialized_tf_example, name_to_features)
features = _cast_features(features)
return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)
if input_file is not None:
return file_based_input_fn
else:
return serving_input_receiver_fn
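# Hedged usage sketch: build a training input_fn for a 128-token, 5-label task and
# hand it to a TF1 Estimator. The file name "train.tfrecord" and the externally
# constructed `estimator` are illustrative assumptions.
def _example_train(estimator):
    train_input_fn = input_fn_builder(
        input_file="train.tfrecord",
        seq_length=128,
        num_labels=5,
        is_training=True,
        drop_remainder=True)
    estimator.train(input_fn=train_input_fn, max_steps=1000)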
|
ad82ded8691561eb8f60b645497200cd36d94a21
| 3,648,993
|
def get_user_by_username(username):
"""Return User by username"""
try:
return User.objects.get(username=username)
except User.DoesNotExist:
return None
|
b6c676d22c7ef586392b20a072d2239c2dfce7e6
| 3,648,994
|
def get_xyz_to_rgb_matrix(name):
"""
    Compute the XYZ-to-RGB matrix for the named colourspace.
    Wrapped in this helper so that D65-based coefficients can be returned for DCI-P3.
"""
if name != "DCI-P3":
xyz_to_rgb_matrix = RGB_COLOURSPACES[name].XYZ_to_RGB_matrix
else:
rgb_to_xyz_matrix\
= calc_rgb_to_xyz_matrix(RGB_COLOURSPACES[DCI_P3].primaries,
xy_to_XYZ(ILLUMINANTS[CMFS_NAME]['D65']))
xyz_to_rgb_matrix = linalg.inv(rgb_to_xyz_matrix)
return xyz_to_rgb_matrix
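# Hedged usage sketch: fetch the matrix for a non-DCI-P3 colourspace and convert one
# XYZ triplet. The "ITU-R BT.709" key and the XYZ sample are assumptions about the
# installed colour-science data, not part of the original snippet.
def _example_xyz_to_rgb():
    mtx = get_xyz_to_rgb_matrix("ITU-R BT.709")
    return mtx.dot([0.2065, 0.1220, 0.0514])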
|
007b71f52af4e23ada073c712a840e05c0ac33a5
| 3,648,995
|
import numpy as np
def find_bordering_snapnums(
snap_times_gyr,
dGyr=.005,
tmin=None,
tmax=None):
""" """
## handle default maximum time
tmax = snap_times_gyr[-1] if tmax is None else tmax
## handle default minimum time
if tmin is None:
tmin = snap_times_gyr[0]
## remove dGyr so that tmin is included in arange below
elif tmin - dGyr > 0:
tmin = tmin-dGyr
## create list of times, -1e-9 to avoid landing exactly on a snapshot number
times_gyr = np.arange(tmax,tmin,-dGyr)[::-1]-1e-9
inds_next = np.argmax((times_gyr - snap_times_gyr[:,None]) < 0 ,axis=0)
inds_prev = inds_next-1
return (
times_gyr,
np.array(list(zip(inds_prev,inds_next))),
np.array(list(zip(snap_times_gyr[inds_prev],snap_times_gyr[inds_next]))))
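# Hedged usage example: snapshots spaced 0.1 Gyr apart, asking for interpolation
# times between 0.25 and 0.45 Gyr at the default 5 Myr spacing.
if __name__ == "__main__":
    snap_times = np.arange(0.0, 1.0, 0.1)
    times, index_pairs, time_pairs = find_bordering_snapnums(
        snap_times, dGyr=0.005, tmin=0.25, tmax=0.45)
    print(times.shape, index_pairs.shape, time_pairs.shape)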
|
23a58ecce4036d9b7a6a91991de237dd30b87129
| 3,648,996
|
from scipy.special import comb  # assumed import; any binomial-coefficient comb() works here
def maxIterationComb(N,k,l):
"""
title::
maxIterationComb
description::
Compute N!/k!l!(N-k-l)! (max iterations).
attributes::
N
Number of targets (graph size)
k
Number of human patrollers
l
Number of drones
returns::
Resulting maximum iterations (integer).
author::
Elizabeth Bondi (ebondi@g.harvard.edu)
Hoon Oh, Haifeng Xu, Kai Wang
disclaimer::
This source code is provided "as is" and without warranties as to
performance or merchantability. The author and/or distributors of
this source code may have made statements about this source code.
Any such statements do not constitute warranties and shall not be
relied on by the user in deciding whether to use this source code.
This source code is provided without any express or implied warranties
whatsoever. Because of the diversity of conditions and hardware under
which this source code may be used, no warranty of fitness for a
particular purpose is offered. The user is advised to test the source
code thoroughly before relying on it. The user must assume the entire
risk of using the source code.
"""
return int(comb(N,k)*comb(N-k,l))
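# Worked example: 10 targets, 2 human patrollers and 1 drone give
# 10! / (2! * 1! * 7!) = comb(10, 2) * comb(8, 1) = 360 iterations.
if __name__ == "__main__":
    print(maxIterationComb(10, 2, 1))  # expected: 360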
|
d0019a1498a50f3733474fb0212627d1b061484d
| 3,648,997
|
def create_project_details_list (project):
"""makes a projects details section for the html
Parameters
----------
project: HeatRecovery
A HeatRecovery object thats run function has been called
Returns
-------
dict
with values used by summary
"""
try:
costs = '${:,.0f}'.format(project.get_NPV_costs())
except ValueError:
costs = project.get_NPV_costs()
try:
benefits = '${:,.0f}'.format(project.get_NPV_benefits())
except ValueError:
benefits = project.get_NPV_benefits()
try:
net_benefits = '${:,.0f}'.format(project.get_NPV_net_benefit())
except ValueError:
net_benefits = project.get_NPV_net_benefit()
try:
BC = '{:,.1f}'.format(project.get_BC_ratio())
except ValueError:
BC = project.get_BC_ratio()
try:
source = "<a href='" + \
project.comp_specs['link'] + "'> link </a>"
    except Exception:
source = "unknown"
try:
notes = project.comp_specs['notes']
    except Exception:
notes = "N/a"
try:
potential_hr = '{:,.0f} gallons'.format(float(
project.comp_specs[
'proposed gallons diesel offset']))
except ValueError:
potential_hr =\
str(project.comp_specs[
'proposed gallons diesel offset'])
try:
dist = \
'{:,.0f} ft'.format(\
float(project.comp_specs['total feet piping needed']))
except ValueError:
dist = 'Unknown'
#~ print dist
return [
{'words':'Capital cost',
'value': costs},
{'words':'Lifetime energy cost savings',
'value': benefits},
{'words':'Net lifetime savings',
'value': net_benefits},
{'words':'Benefit-cost ratio',
'value': BC},
{'words':'Est. potential annual heating fuel gallons displaced',
'value': potential_hr},
{'words':'Number of buildings to be connected',
'value': str(project.comp_specs['estimate buildings to heat'])},
{'words':'Round-trip distance of piping',
'value': dist},
{'words':'Source',
'value': source},
{'words':'Notes',
'value': notes},
]
|
74c68b0592939cc819091ac2d30bee44b455f27b
| 3,648,998
|
import numpy as np
def compute_cluster_top_objects_by_distance(precomputed_distances,
max_top_number=10,
object_clusters=None):
"""
Compute the most representative objects for each cluster
using the precomputed_distances.
Parameters
----------
precomputed_distances : np.array
array of shape (n_topics, n_objects) -
a matrix of pairwise distances: distance from ith cluster centroid to the jth object
max_top_number : int
maximum number of top objects of cluster (resulting number can be less than it)
(Default value = 10)
    object_clusters : np.array
        array of shape n_objects - precomputed clusters for objects
        (Default value = None)
    Returns
    -------
    clusters_top_objects : list of list of indexes
        the most representative object indices for each cluster
    """ # noqa: W291
# prediction for objects
if object_clusters is None:
object_clusters = predict_cluster_by_precomputed_distances(precomputed_distances)
# transformation from list to dict
clusters = transform_cluster_objects_list_to_dict(object_clusters)
n_topics = precomputed_distances.shape[0]
clusters_top_objects = []
for cluster_label in range(n_topics):
# cluster is empty
if cluster_label not in clusters.keys():
clusters_top_objects.append([])
continue
cluster_objects = np.array(clusters[cluster_label])
cluster_objects_to_center_distances = (
precomputed_distances[cluster_label][cluster_objects]
)
if max_top_number >= cluster_objects.shape[0]:
# cluster is too small; grab all objects
indexes_of_top_objects = np.arange(0, cluster_objects.shape[0])
else:
# filter by distance with partition
indexes_of_top_objects = np.argpartition(
cluster_objects_to_center_distances,
kth=max_top_number
)[:max_top_number]
distances_of_top_objects = cluster_objects_to_center_distances[indexes_of_top_objects]
top_objects = cluster_objects[indexes_of_top_objects]
# sorted partitioned array
indexes_of_top_objects_sorted_by_distance = np.argsort(distances_of_top_objects)
sorted_top_objects = top_objects[indexes_of_top_objects_sorted_by_distance]
clusters_top_objects.append(sorted_top_objects.tolist())
return clusters_top_objects
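# Hedged illustration of the selection step above: np.argpartition pre-selects the
# `max_top_number` closest objects in O(n), and np.argsort then orders only that
# small subset. The toy centroid-to-object distances below are made up.
def _example_top_objects_for_one_cluster(max_top_number=3):
    distances = np.array([0.9, 0.1, 0.5, 0.3, 0.7, 0.2])
    top_idx = np.argpartition(distances, kth=max_top_number)[:max_top_number]
    return top_idx[np.argsort(distances[top_idx])]  # expected: [1, 5, 3]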
|
2ff29d6b59d2db3d9e169a44c0addf42d9abea9b
| 3,648,999
|