| content | id |
|---|---|
def test_case_insensitive_dict_upper_get_not_existent(key):
"""
Test case insensitive lookup (non existent keys) in CaseInsensitiveDict with upper() as normalizing function
"""
from masci_tools.util.case_insensitive_dict import CaseInsensitiveDict
d = CaseInsensitiveDict(TEST_DICT, upper=True)
assert key not in d
with pytest.raises(KeyError):
val = d[key]
| 13,100
|
def _GenerateElementInfo(impl_path, names):
"""Generates the data a group needs to load sub elements.
Args:
impl_path: The file path to the command implementation for this group.
names: [str], The names of the sub groups or commands found in the group.
Raises:
LayoutException: if there is a command or group with an illegal name.
Returns:
    {str: [str]}, A mapping from name to a list of paths that implement that
command or group. There can be multiple paths because a command or group
could be implemented in both python and yaml (for different release tracks).
"""
elements = {}
for name in names:
if re.search('[A-Z]', name):
raise LayoutException(
'Commands and groups cannot have capital letters: {0}.'.format(name))
cli_name = name[:-5] if name.endswith('.yaml') else name
sub_path = os.path.join(impl_path, name)
existing = elements.setdefault(cli_name, [])
existing.append(sub_path)
return elements
| 13,101
|
def locate_dir(instrument, mode=None):
"""Locate the instrument specific directory for a reference file.
The mode=None test case is disabled because it mysteriously causes these tests to
fail when running the runtests script:
ERROR: test_throughput_lookup_generation (crds.tests.test_synphot_lookup_generator.TestSynphotLookupGenerator)
FAIL: Doctest: crds.tests.test_bad_files.dt_bad_references_fast_mode
FAIL: Doctest: crds.tests.test_bad_files.dt_bad_rules_jwst_getreferences_warning
FAIL: Doctest: crds.tests.test_certify.certify_recursive
FAIL: Doctest: crds.tests.test_certify.certify_table_comparison_context
FAIL: Doctest: crds.tests.test_heavy_client.dt_getreferences_ignore_cache
FAIL: Doctest: crds.tests.test_list.dt_list_cached_references
FAIL: Doctest: crds.tests.test_synphot_hst.dt_synphot_core_integration_test
FAIL: Doctest: crds.tests.test_synphot_hst.dt_synphot_core_integration_test
XXXX TODO: Enable the mode=None test case and resolve the ensuing test failures in other modules.
>> locate_dir('wfi', None) # doctest: +ELLIPSIS
'.../references/roman/wfi'
>>> locate_dir('wfi', 'instrument') # doctest: +ELLIPSIS
'.../references/roman/wfi'
>>> locate_dir('wfi', 'flat') # doctest: +ELLIPSIS
'.../references/roman'
>>> locate_dir('wfi', 'other') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
AssertionError: Invalid CRDS cache subdirectory mode = 'other'
"""
if mode is None:
mode = config.get_crds_ref_subdir_mode(observatory="roman")
else:
config.check_crds_ref_subdir_mode(mode)
crds_refpath = config.get_crds_refpath("roman")
if mode == "instrument": # use simple names inside CRDS cache.
rootdir = os.path.join(crds_refpath, instrument.lower())
if not os.path.exists(rootdir):
if config.writable_cache_or_verbose("Skipping making instrument directory link for", repr(instrument)):
utils.ensure_dir_exists(rootdir + "/locate_dir.fits")
elif mode == "flat": # use original flat cache structure, all instruments in same directory.
rootdir = crds_refpath
else:
raise ValueError("Unhandled reference file location mode " + repr(mode))
return rootdir
| 13,102
|
def auto_grad():
    """
    Compute the gradient 4x of the function y = 2 * x.T * x with respect to the column vector x
    :return:
    """
    x = nd.arange(4).reshape((4, 1))
    logger.info("autograd array:")
    logger.info(x)
    # Call attach_grad to allocate the memory needed to store the gradient
    x.attach_grad()
    logger.info("autograd.is_training():")
    logger.info(autograd.is_training())
    # Call record to ask MXNet to record the computations involved in computing the gradient.
    with autograd.record():
        y = 2 * nd.dot(x.T, x)
        logger.info(autograd.is_training())
        logger.info(y)
    # Call backward to compute the gradient automatically
    y.backward()
    logger.info("autograd gradient:")
    logger.info(x.grad)
| 13,103
|
def test_odl_rsp_retrieval_no_rsp(capfd, classifier, rsp_name):
"""
Test if classifier raises RuntimeError for RSP non-existing in ODL
Pass: if `RuntimeError` is raised
if 'operation-failed' string is in captured error log
    Fail: if the above conditions fail
"""
with pytest.raises(RuntimeError):
classifier._fetch_rsp_first_hop_from_odl(rsp_name)
_, err = capfd.readouterr()
assert 'operation-failed' in err
| 13,104
|
def pas(al, ap, bl,bp):
""" Postion-angle from spherical coordinates.
:param al: longitude of point A in radians.
:type al: float
:param ap: latitude of point A in radians.
:type ap: float
:param bl: longitude of point B in radians.
:type bl: float
:param bp: latitude of point B in radians.
:type bp: float
:returns: position angle of B with respect to A in radians (float).
.. seealso:: |MANUAL| page 145
"""
return _sofa.iauPas(float(al), float(ap), float(bl), float(bp))
| 13,105
|
def get_messy_items_for_training(mod_factor=5):
"""
Fetch a subset of `FacilityListItem` objects that have been parsed and are
not in an error state.
Arguments:
mod_factor -- Used to partition a subset of `FacilityListItem` records. The
larger the value, the fewer records will be contained in the
subset.
Returns:
A dictionary. The key is the `FacilityListItem` ID. The value is a
dictionary of clean field values keyed by field name (country, name,
address). A "clean" value is one which has been passed through the `clean`
function.
"""
facility_list_item_set = FacilityListItem.objects.exclude(
Q(status=FacilityListItem.UPLOADED)
| Q(status=FacilityListItem.ERROR)
| Q(status=FacilityListItem.ERROR_PARSING)
| Q(status=FacilityListItem.ERROR_GEOCODING)
| Q(status=FacilityListItem.ERROR_MATCHING)
).extra(
select={'country': 'country_code'}).values(
'id', 'country', 'name', 'address')
records = [record for (i, record) in enumerate(facility_list_item_set)
if i % mod_factor == 0]
return {str(i['id']): {k: clean(i[k]) for k in i if k != 'id'}
for i in records}
| 13,106
|
def test_roc_curve_display_plotting(
pyplot,
response_method,
data_binary,
with_sample_weight,
drop_intermediate,
with_strings,
constructor_name,
default_name,
):
"""Check the overall plotting behaviour."""
X, y = data_binary
pos_label = None
if with_strings:
y = np.array(["c", "b"])[y]
pos_label = "c"
if with_sample_weight:
rng = np.random.RandomState(42)
sample_weight = rng.randint(1, 4, size=(X.shape[0]))
else:
sample_weight = None
lr = LogisticRegression()
lr.fit(X, y)
y_pred = getattr(lr, response_method)(X)
y_pred = y_pred if y_pred.ndim == 1 else y_pred[:, 1]
if constructor_name == "from_estimator":
display = RocCurveDisplay.from_estimator(
lr,
X,
y,
sample_weight=sample_weight,
drop_intermediate=drop_intermediate,
pos_label=pos_label,
alpha=0.8,
)
else:
display = RocCurveDisplay.from_predictions(
y,
y_pred,
sample_weight=sample_weight,
drop_intermediate=drop_intermediate,
pos_label=pos_label,
alpha=0.8,
)
fpr, tpr, _ = roc_curve(
y,
y_pred,
sample_weight=sample_weight,
drop_intermediate=drop_intermediate,
pos_label=pos_label,
)
assert_allclose(display.roc_auc, auc(fpr, tpr))
assert_allclose(display.fpr, fpr)
assert_allclose(display.tpr, tpr)
assert display.estimator_name == default_name
    import matplotlib as mpl  # noqa
assert isinstance(display.line_, mpl.lines.Line2D)
assert display.line_.get_alpha() == 0.8
assert isinstance(display.ax_, mpl.axes.Axes)
assert isinstance(display.figure_, mpl.figure.Figure)
expected_label = f"{default_name} (AUC = {display.roc_auc:.2f})"
assert display.line_.get_label() == expected_label
expected_pos_label = 1 if pos_label is None else pos_label
expected_ylabel = f"True Positive Rate (Positive label: {expected_pos_label})"
expected_xlabel = f"False Positive Rate (Positive label: {expected_pos_label})"
assert display.ax_.get_ylabel() == expected_ylabel
assert display.ax_.get_xlabel() == expected_xlabel
| 13,107
|
def tanh_squared(x: np.ndarray, margin: float, loss_at_margin: float = 0.95):
"""Returns a sigmoidal shaping loss based on Hafner & Reidmiller (2011).
Args:
x: A numpy array representing the error.
margin: Margin parameter, a positive `float`.
loss_at_margin: The loss when `l2_norm(x) == margin`. A `float` between 0
and 1.
Returns:
Shaping loss, a `float` bounded in the half-open interval [0, 1).
Raises:
ValueError: If the value of `margin` or `loss_at_margin` is invalid.
"""
if not margin > 0:
raise ValueError("`margin` must be positive.")
if not 0.0 < loss_at_margin < 1.0:
raise ValueError("`loss_at_margin` must be between 0 and 1.")
error = np.linalg.norm(x)
    # Compute the weight such that at the margin tanh(w * margin)**2 == loss_at_margin
w = np.arctanh(np.sqrt(loss_at_margin)) / margin
s = np.tanh(w * error)
return s * s
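# A small numerical check added for illustration (not from the original source):
# when the error norm equals `margin`, the shaping loss should equal `loss_at_margin`.
import numpy as np

_margin = 0.5
_loss = tanh_squared(np.array([_margin, 0.0]), margin=_margin, loss_at_margin=0.95)
assert np.isclose(_loss, 0.95)            # ||x|| == margin, so loss == loss_at_margin
assert 0.0 <= tanh_squared(np.array([1.0]), margin=_margin) < 1.0   # bounded in [0, 1)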
| 13,108
|
def test_fill(machine_tiny):
"""
    Test filling of the caching machine
"""
with patch('trivial_tools.storage.caching_instance.datetime') as fake:
fake.now.return_value = datetime(2019, 10, 31, 18, 6, 0)
assert machine_tiny.total() == 0
machine_tiny.set('key_1', 'value')
assert machine_tiny.total() == 1
machine_tiny.set('key_1', 'value')
assert machine_tiny.total() == 1
machine_tiny.set('key_2', 'value')
assert machine_tiny.total() == 2
machine_tiny.set('key_3', 'value')
assert machine_tiny.total() == 3
machine_tiny.set('key_4', 'value')
assert machine_tiny.total() == 4
machine_tiny.set('key_5', 'value')
assert machine_tiny.total() == 4
machine_tiny.clear()
assert machine_tiny.total() == 0
| 13,109
|
def _inv_Jacobian_2D(J, detJ):
""" manually invert 2x2 jacobians J in place """
tmp = J[:, 1, 1, :] / detJ
J[:, 0, 1, :] = -J[:, 0, 1, :] / detJ
J[:, 1, 0, :] = -J[:, 1, 0, :] / detJ
J[:, 1, 1, :] = J[:, 0, 0, :] / detJ
J[:, 0, 0, :] = tmp
return J
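# A sanity-check sketch added for illustration, assuming J is laid out as
# (n_elems, 2, 2, n_points) to match the J[:, i, j, :] indexing above.
import numpy as np

J = np.random.rand(5, 2, 2, 7) + np.eye(2)[None, :, :, None]   # well-conditioned 2x2 blocks
detJ = J[:, 0, 0, :] * J[:, 1, 1, :] - J[:, 0, 1, :] * J[:, 1, 0, :]
J_orig = J.copy()
J_inv = _inv_Jacobian_2D(J, detJ)                               # J is overwritten in place
product = np.einsum('nijp,njkp->nikp', J_inv, J_orig)           # J^-1 @ J per block
assert np.allclose(product, np.eye(2)[None, :, :, None])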
| 13,110
|
def ratio_error_acc(y_true, y_pred, epsilon, threshold):
"""
Calculate the ratio error accuracy with the threshold.
:param y_true:
:param y_pred:
:param epsilon:
:param threshold:
:return:
"""
ratio_1 = keras.layers.Lambda(lambda x: (x[0] + x[2]) / (x[1] + x[2]))([y_true, y_pred, epsilon])
ratio_2 = keras.layers.Lambda(lambda x: (x[0] + x[2]) / (x[1] + x[2]))([y_pred, y_true, epsilon])
ratio = K.maximum(ratio_1, ratio_2)
mask = K.cast(K.less(ratio, threshold), dtype="float32")
return K.mean(mask)
| 13,111
|
def error_embed(ctx: context.ApplicationContext, title: str, description: str, author: bool = True) -> discord.Embed:
"""Make a basic error message embed."""
return make_embed(
ctx=ctx,
title=title if title else "Error:",
description=description,
color=discord.Color.red(),
author=author,
)
| 13,112
|
def load_configuration() -> Dict[str, Any]:
"""
Return dict from TOML formatted string or file.
Returns:
The dict configuration.
"""
default_config = """
[key_bindings]
AUTOCLEAR = "c"
CANCEL = "esc"
ENTER = "enter"
FILTER = ["F4", "\\\\"]
FOLLOW_ROW = "F"
HELP = ["F1", "?"]
MOVE_DOWN = ["down", "j"]
MOVE_DOWN_STEP = "J"
MOVE_END = "end"
MOVE_HOME = "home"
MOVE_LEFT = ["left", "h"]
MOVE_RIGHT = ["right", "l"]
MOVE_UP = ["up", "k"]
MOVE_UP_STEP = "K"
NEXT_SORT = ["p", ">"]
PREVIOUS_SORT = "<"
PRIORITY_DOWN = ["F8", "d", "]"]
PRIORITY_UP = ["F7", "u", "["]
QUIT = ["F10", "q"]
REMOVE_ASK = ["del", "F9"]
RETRY = "r"
RETRY_ALL = "R"
REVERSE_SORT = "I"
SEARCH = ["F3", "/"]
SELECT_SORT = "F6"
SETUP = "F2"
TOGGLE_EXPAND_COLLAPSE = "x"
TOGGLE_EXPAND_COLLAPSE_ALL = "X"
TOGGLE_RESUME_PAUSE = "space"
TOGGLE_RESUME_PAUSE_ALL = "P"
TOGGLE_SELECT = "s"
UN_SELECT_ALL = "U"
ADD_DOWNLOADS = "a"
[colors]
BRIGHT_HELP = "CYAN BOLD BLACK"
FOCUSED_HEADER = "BLACK NORMAL CYAN"
FOCUSED_ROW = "BLACK NORMAL CYAN"
HEADER = "BLACK NORMAL GREEN"
METADATA = "WHITE UNDERLINE BLACK"
SIDE_COLUMN_FOCUSED_ROW = "BLACK NORMAL CYAN"
SIDE_COLUMN_HEADER = "BLACK NORMAL GREEN"
SIDE_COLUMN_ROW = "WHITE NORMAL BLACK"
STATUS_ACTIVE = "CYAN NORMAL BLACK"
STATUS_COMPLETE = "GREEN NORMAL BLACK"
STATUS_ERROR = "RED BOLD BLACK"
STATUS_PAUSED = "YELLOW NORMAL BLACK"
STATUS_WAITING = "WHITE BOLD BLACK"
"""
config_dict = {}
config_dict["DEFAULT"] = toml.loads(default_config)
# Check for configuration file
config_file_path = Path(user_config_dir("aria2p")) / "config.toml"
if config_file_path.exists():
try:
config_dict["USER"] = toml.load(config_file_path)
except Exception as error: # noqa: W0703 (too broad exception)
logger.error(f"Failed to load configuration file: {error}")
else:
# Write initial configuration file if it does not exist
config_file_path.parent.mkdir(parents=True, exist_ok=True)
with config_file_path.open("w") as fd:
fd.write(textwrap.dedent(default_config).lstrip("\n"))
return config_dict
| 13,113
|
def test_angle_wrapping():
"""
ensure angle wrapping works properly
"""
data = survey.get_scale_height_data(track = 'CrF', wrap_at_180 = True)
data2 = survey.get_scale_height_data(track = 'CrF', wrap_at_180 = False)
assert np.allclose(data["INTEN"], data2["INTEN"], equal_nan = True)
| 13,114
|
def type_weapon(stage, bin, data=None):
"""Weapon"""
    if data is None:
return 1
if stage == 1:
return (str(data),'')
try:
v = int(data)
if 0 > v or v > 255:
raise
except:
raise PyMSError('Parameter',"Invalid Weapon value '%s', it must be 1 for ground attack or not 1 for air attack." % data)
return v
| 13,115
|
def show_home_menu_edit_msg(update, context):
"""
Edit current message with the home menu
"""
keyboard = get_home_menu_buttons()
text = 'I am your Terra Node Bot. 🤖\nClick *MY NODES* to get information about the Terra Nodes you monitor!'
query = update.callback_query
query.edit_message_text(text,
reply_markup=ReplyKeyboardMarkup(keyboard, resize_keyboard=True),
parse_mode='markdown')
| 13,116
|
def to_square_feet(square_metres):
"""Convert metres^2 to ft^2"""
return square_metres * 10.7639
| 13,117
|
def middle_name_handler(update: Update, context: CallbackContext) -> str:
"""Get and save patronymic of user. Send hello with full name."""
u = User.get_user(update, context)
name = (f'{context.user_data[LAST_NAME]} {context.user_data[FIRST_NAME]} '
f'{context.user_data[MIDDLE_NAME]}')
context.bot.send_message(
chat_id=u.user_id,
text=static_text.HELLO_FULL_NAME.format(name=name)
)
update.message.reply_text(
text=static_text.ASK_GENDER,
parse_mode=ParseMode.HTML,
reply_markup=keyboard_utils.get_keyboard_for_gender()
)
return GENDER
| 13,118
|
def import_dynamic_data(database_path, dynamic_data):
"""
    Import dynamic data. Currently designed for use with a parsed dictionary of dynamic data with station number and timestamp as key
:return:
"""
#Connect to database
conn = lite.connect(database_path)
with conn:
cur = conn.cursor()
for station_no in range(1,103):
if station_no != 50:
try:
station_index = dynamic_data[station_no]
# print("loop1", station_index)
#print('\n')
# print(station_no)
cur.execute("INSERT OR IGNORE INTO Dynamic_Data(Station_number, Timestamp, Last_update, Weekday, Status, Bike_stands, Available_bike_stands, Available_bikes) VALUES(?, ?, ?, ?, ?, ?, ?, ?)",
(station_no, station_index['time_stamp'], station_index['last_update'],station_index['week_day'], station_index['status'], station_index['bike_stands'],station_index['available_bike_stands'],
station_index['available_bikes']))
#cur.execute("INSERT OR IGNORE INTO Dynamic_Data(Station_number) VALUES(?)", (station_no))
except:
print("The dataset is missing station number " + str(station_no))
conn.commit() #Redundant here
cur.close()
conn.close()
| 13,119
|
def load_enzyme_reaction_relation():
"""
    Gets all reactions and their respective catalyzing enzymes and populates the table enzyme_reaction_organism for
    E. coli.
Returns:
None
"""
data_df = pd.read_csv(ENZYME_GENES_DATA_FILE, sep=',')
organism = Organism.query.filter_by(name='E. coli').first()
id = 1
for row in data_df.index:
if data_df.loc[row, 'isoenzyme'] != 'SUCOASa' and data_df.loc[row, 'isoenzyme'] != 'SUCOASb':
enzyme = Enzyme.query.filter_by(isoenzyme=data_df.loc[row, 'isoenzyme']).first()
reaction = Reaction.query.filter_by(acronym=data_df.loc[row, 'reaction_acronym']).first()
enzyme_reaction_organism = EnzymeReactionOrganism(id=id,
enzyme_id=enzyme.id,
reaction_id=reaction.id,
organism_id=organism.id)
db.session.add(enzyme_reaction_organism)
id += 1
db.session.commit()
| 13,120
|
def test_skipgram():
""" Test skip-gram with naiveSoftmaxLossAndGradient """
dataset, dummy_vectors, dummy_tokens = getDummyObjects()
print("==== Gradient check for skip-gram with naiveSoftmaxLossAndGradient ====")
gradcheck_naive(lambda vec: word2vec_sgd_wrapper(
skipgram, dummy_tokens, vec, dataset, 5, naiveSoftmaxLossAndGradient),
dummy_vectors, "naiveSoftmaxLossAndGradient Gradient")
grad_tests_softmax(skipgram, dummy_tokens, dummy_vectors, dataset)
print("==== Gradient check for skip-gram with negSamplingLossAndGradient ====")
gradcheck_naive(lambda vec: word2vec_sgd_wrapper(
skipgram, dummy_tokens, vec, dataset, 5, negSamplingLossAndGradient),
dummy_vectors, "negSamplingLossAndGradient Gradient")
grad_tests_negsamp(skipgram, dummy_tokens, dummy_vectors, dataset, negSamplingLossAndGradient)
| 13,121
|
def submit_barcodes(barcodes):
"""
Submits a set of {release1: barcode1, release2:barcode2}
Must call auth(user, pass) first
"""
query = mbxml.make_barcode_request(barcodes)
return _do_mb_post("release", query)
| 13,122
|
def get_idf_dict(arr, tokenizer, nthreads=4):
"""
Returns mapping from word piece index to its inverse document frequency.
Args:
- :param: `arr` (list of str) : sentences to process.
- :param: `tokenizer` : a BERT tokenizer corresponds to `model`.
- :param: `nthreads` (int) : number of CPU threads to use
"""
idf_count = Counter()
num_docs = len(arr)
process_partial = partial(process, tokenizer=tokenizer)
with Pool(nthreads) as p:
idf_count.update(chain.from_iterable(p.map(process_partial, arr)))
idf_dict = defaultdict(lambda: log((num_docs + 1) / (1)))
idf_dict.update({idx: log((num_docs + 1) / (c + 1)) for (idx, c) in idf_count.items()})
return idf_dict
| 13,123
|
def load_id_json_file(json_path):
"""
load the JSON file and get the data inside
all this function does is to call json.load(f)
inside a with statement
Args:
json_path (str): where the target JSON file is
Return:
ID list (list): all the data found in the file
"""
with open(json_path, 'r') as f:
return json.load(f)
| 13,124
|
def check_github_scopes(exc: ResponseError) -> str:
"""
Parse github3 ResponseError headers for the correct scopes and return a
warning if the user is missing.
@param exc: The exception to process
@returns: The formatted exception string
"""
user_warning = ""
has_wrong_status_code = exc.response.status_code not in (403, 404)
if has_wrong_status_code:
return user_warning
token_scopes = get_oauth_scopes(exc.response)
# Gist resource won't return X-Accepted-OAuth-Scopes for some reason, so this
# string might be `None`; we discard the empty string if so.
accepted_scopes = exc.response.headers.get("X-Accepted-OAuth-Scopes") or ""
accepted_scopes = set(accepted_scopes.split(", "))
accepted_scopes.discard("")
request_url = urlparse(exc.response.url)
if not accepted_scopes and request_url.path == "/gists":
accepted_scopes = {"gist"}
missing_scopes = accepted_scopes.difference(token_scopes)
if missing_scopes:
user_warning = f"Your token may be missing the following scopes: {', '.join(missing_scopes)}\n"
# This assumes we're not on enterprise and 'api.github.com' == request_url.hostname
user_warning += (
"Visit Settings > Developer settings > Personal access tokens to add them."
)
return user_warning
| 13,125
|
def _add_threat_intel_subparser(subparsers):
"""Add Threat Intel subparser: manage.py threat-intel [subcommand]"""
usage = 'manage.py threat-intel [subcommand]'
description = """
StreamAlertCLI v{}
Enable, configure StreamAlert Threat Intelligence feature.
Available Subcommands:
manage.py threat-intel enable Enable the Threat Intelligence feature in Rule Processor
Optional Arguments:
--dynamodb-table The DynamoDB table name which stores IOC(s).
Examples:
manage.py threat-intel enable
manage.py threat-intel enable --dynamodb-table my_ioc_table
""".format(version)
threat_intel_parser = _generate_subparser(subparsers, 'threat-intel', usage, description)
threat_intel_parser.add_argument(
'subcommand', choices=['enable'], help=ARGPARSE_SUPPRESS
)
threat_intel_parser.add_argument(
'--dynamodb-table',
help=ARGPARSE_SUPPRESS
)
| 13,126
|
def user_login():
    """
    # Page display settings
    :return: use the session information received from the frontend to display different pages
    """
    # Get parameters
    name = session.get("name")
    if name is not None:
        return jsonify(errno=RET.OK, errmsg="True", data={"name": name})
    else:
        return jsonify(errno=RET.SESSIONERR, errmsg="user not logged in")
| 13,127
|
def sample_conditional(node: gtsam.GaussianConditional, N: int, parents: list = [], sample: dict = {}):
"""Sample from conditional """
# every node ~ exp(0.5*|R x + S p - d|^2)
# calculate mean as inv(R)*(d - S p)
d = node.d()
n = len(d)
rhs = d.reshape(n, 1)
if len(parents) > 0:
rhs = rhs - node.S() @ np.vstack([sample[p] for p in parents])
# sample from conditional Gaussian
invR = np.linalg.inv(node.R())
return invR @ (rhs + np.random.normal(size=(n, N)))
| 13,128
|
def _liftover_data_path(data_type: str, version: str) -> str:
"""
Paths to liftover gnomAD Table.
:param data_type: One of `exomes` or `genomes`
:param version: One of the release versions of gnomAD on GRCh37
:return: Path to chosen Table
"""
return f"gs://gnomad-public-requester-pays/release/{version}/liftover_grch38/ht/{data_type}/gnomad.{data_type}.r{version}.sites.liftover_grch38.ht"
| 13,129
|
def valueinfo_to_tensor(vi):
"""Creates an all-zeroes numpy tensor from a ValueInfoProto."""
dims = [x.dim_value for x in vi.type.tensor_type.shape.dim]
return np.zeros(
dims, dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[vi.type.tensor_type.elem_type]
)
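# A usage sketch added for illustration; the helper assumes a standard ONNX
# ValueInfoProto such as one built with onnx.helper.
import onnx

vi = onnx.helper.make_tensor_value_info("x", onnx.TensorProto.FLOAT, [2, 3])
t = valueinfo_to_tensor(vi)
print(t.shape, t.dtype)   # (2, 3) float32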
| 13,130
|
def signup_email():
"""Create a new account using data encoded in the POST body.
Expects the following form data:
first_name: E.g. 'Taylor'
last_name: E.g. 'Swift'
email: E.g. 'tswift@gmail.com'
password: E.g. 'iknewyouweretrouble'
Responds with the session cookie via the `set-cookie` header on success.
Send the associated cookie for all subsequent API requests that accept
user authentication.
"""
# Prevent a CSRF attack from replacing a logged-in user's account with
# a new account with known credentials
current_user = view_helpers.get_current_user()
if current_user:
return api_util.jsonify({'message': 'A user is already logged in.'})
params = flask.request.form.copy()
# Don't log the password
password = params.pop('password', None)
rmclogger.log_event(
rmclogger.LOG_CATEGORY_API,
rmclogger.LOG_EVENT_SIGNUP, {
'params': params,
'type': rmclogger.LOGIN_TYPE_STRING_EMAIL,
},
)
first_name = params.get('first_name')
last_name = params.get('last_name')
email = params.get('email')
if not first_name:
raise api_util.ApiBadRequestError('Must provide first name.')
if not last_name:
raise api_util.ApiBadRequestError('Must provide last name.')
if not email:
raise api_util.ApiBadRequestError('Must provide email.')
if not password:
raise api_util.ApiBadRequestError('Must provide password.')
try:
user = m.User.create_new_user_from_email(
first_name, last_name, email, password)
except m.User.UserCreationError as e:
raise api_util.ApiBadRequestError(e.message)
view_helpers.login_as_user(user)
return api_util.jsonify({
'message': 'Created and logged in user %s' % user.name
})
| 13,131
|
def calculate_multi_rmse(regressor, n_task):
"""
    Method which calculates the root mean squared error value for a trained model
Using regressor attributes
Return RMSE metrics as dict for train and test datasets
:param regressor: trained regression model object
:param n_task:
:type regressor: TrainedModel, TrainedModelDNN, TrainedModelCV
:return: rmse metrics
:rtype: dict
"""
# calculate mse metric
test_mse_tmp = mean_squared_error(
regressor.y_test.values[:, n_task],
regressor.predict_classes['test'][:, n_task]
)
train_mse_tmp = mean_squared_error(
regressor.y_train.values[:, n_task],
regressor.predict_classes['train'][:, n_task]
)
# convert mse to rmse
return {
(str(n_task), 'train', 'RMSE'): train_mse_tmp ** 0.5,
(str(n_task), 'test', 'RMSE'): test_mse_tmp ** 0.5,
}
| 13,132
|
def detr_predict(model, image, thresh=0.95):
"""
Function used to preprocess the image, feed it into the detr model, and prepare the output draw bounding boxes.
Outputs are thresholded.
Related functions: detr_load, draw_boxes in coco.py
Args:
model -- the detr model from detr_load()
image -- Array the original image from openCV [width, height, channels]
Returns:
boxes -- Torch tensor of coordinates of the top left and bottom right of the bounding box ordered as [(x1, y1, x2, y2)]
labels -- Torch tensor of index labels for each bounding box [<label indices>]
scores -- Torch tensor of class confidence scores for each bounding box [<class scores>]. For COCO, expects 91 different classes
"""
def box_cxcywh_to_xyxy(x):
# Converts bounding boxes to (x1, y1, x2, y2) coordinates of top left and bottom right corners
        # from (center_x, center_y, width, height)
x_c, y_c, w, h = x.unbind(1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
(x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=1)
def rescale_bboxes(out_bbox, size):
# Scale the bounding boxes to the image size
img_w, img_h = size
b = box_cxcywh_to_xyxy(out_bbox)
b = b * torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32)
return b
# Preprocess image
transform = T.Compose([
T.ToPILImage(),
T.Resize(800),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
t_image = transform(image).unsqueeze(0)
# output is a dict containing "pred_logits" of [batch_size x num_queries x (num_classes + 1)]
# and "pred_boxes" of shape (center_x, center_y, height, width) normalized to be between [0, 1]
output = model(t_image)
# Scale the class probabilities to add up to 1
probas = output['pred_logits'].softmax(-1)[0,:,:-1]
# Create outputs
boxes = rescale_bboxes(output['pred_boxes'][0], (image.shape[1], image.shape[0])).detach()
labels = probas.max(-1).indices
conf = probas.max(-1).values.detach()
### Threshold scores
conf = conf.detach()
keep = conf > thresh
# Filter out scores, boxes, and labels using threshold
conf = conf[keep]
boxes = boxes.detach()[keep]
labels = labels.detach()[keep]
return boxes, labels, conf
| 13,133
|
def extract_colors_from_static_themes(ids, **kw):
"""Extract and store colors from existing static themes."""
log.info('Extracting static themes colors %d-%d [%d].', ids[0], ids[-1],
len(ids))
addons = Addon.objects.filter(id__in=ids)
extracted = []
for addon in addons:
first_preview = addon.current_previews.first()
if first_preview and not first_preview.colors:
colors = extract_colors_from_image(first_preview.thumbnail_path)
addon.current_previews.update(colors=colors)
extracted.append(addon.pk)
if extracted:
index_addons.delay(extracted)
| 13,134
|
def docker() -> DockerAPI:
"""Mock DockerAPI."""
images = [MagicMock(tags=["ghcr.io/home-assistant/amd64-hassio-supervisor:latest"])]
with patch("docker.DockerClient", return_value=MagicMock()), patch(
"supervisor.docker.DockerAPI.images", return_value=MagicMock()
), patch("supervisor.docker.DockerAPI.containers", return_value=MagicMock()), patch(
"supervisor.docker.DockerAPI.api", return_value=MagicMock()
), patch(
"supervisor.docker.DockerAPI.images.list", return_value=images
), patch(
"supervisor.docker.DockerAPI.info",
return_value=MagicMock(),
), patch(
"supervisor.docker.DockerConfig",
return_value=MagicMock(),
):
docker_obj = DockerAPI()
docker_obj.info.logging = "journald"
docker_obj.info.storage = "overlay2"
docker_obj.info.version = "1.0.0"
docker_obj.config.registries = {}
yield docker_obj
| 13,135
|
def comp_fill_factor(self):
"""Compute the fill factor of the winding"""
if self.winding is None:
return 0
else:
(Nrad, Ntan) = self.winding.get_dim_wind()
S_slot_wind = self.slot.comp_surface_wind()
S_wind_act = (
self.winding.conductor.comp_surface_active()
* self.winding.Ntcoil
* Nrad
* Ntan
)
return S_wind_act / S_slot_wind
| 13,136
|
def idewpt(vp):
"""
Calculate the dew point given the vapor pressure
Args:
vp - array of vapor pressure values in [Pa]
Returns:
dewpt - array same size as vp of the calculated
dew point temperature [C] (see Dingman 2002).
"""
# ensure that vp is a numpy array
vp = np.array(vp)
# take the log and convert to kPa
vp = np.log(vp/float(1000))
    # calculate the dew point temperature
Td = (vp + 0.4926) / (0.0708 - 0.00421*vp)
return Td
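# A worked example added for illustration: 611 Pa is roughly the saturation
# vapor pressure at 0 degC, so its dew point should come out near 0 degC.
print(idewpt([611.0, 1000.0]))   # approximately [0.0, 6.96] degC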
| 13,137
|
def _hexify(num):
"""
Converts and formats to hexadecimal
"""
num = "%x" % num
if len(num) % 2:
num = '0'+num
return num.decode('hex')
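# A usage sketch added for illustration: integers are packed into big-endian
# bytes, left-padded to a whole number of hex digit pairs.
assert _hexify(255) == b'\xff'
assert _hexify(4095) == b'\x0f\xff'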
| 13,138
|
def syn_ucbpe(num_workers, gp, acq_optimiser, anc_data):
""" Returns a recommendation via UCB-PE in the synchronous setting. """
# Define some internal functions.
beta_th = _get_ucb_beta_th(gp.input_dim, anc_data.t)
# 1. An LCB for the function
def _ucbpe_lcb(x):
""" An LCB for GP-UCB-PE. """
mu, sigma = gp.eval(x, uncert_form='std')
return mu - beta_th * sigma
# 2. A modified UCB for the function using hallucinated observations
def _ucbpe_2ucb(x):
""" An LCB for GP-UCB-PE. """
mu, sigma = gp.eval(x, uncert_form='std')
return mu + 2 * beta_th * sigma
# 3. UCB-PE acquisition for the 2nd point in the batch and so on.
def _ucbpe_acq(x, yt_dot, halluc_pts):
""" Acquisition for GP-UCB-PE. """
_, halluc_stds = gp.eval_with_hallucinated_observations(x, halluc_pts,
uncert_form='std')
return (_ucbpe_2ucb(x) > yt_dot).astype(np.double) * halluc_stds
# Now the algorithm
yt_dot_arg = _optimise_acquisition(_ucbpe_lcb, acq_optimiser, anc_data)
yt_dot = _ucbpe_lcb(yt_dot_arg.reshape((-1, gp.input_dim)))
recommendations = [asy_ucb(gp, acq_optimiser, anc_data)]
for _ in range(1, num_workers):
curr_acq = lambda x: _ucbpe_acq(x, yt_dot, np.array(recommendations))
new_rec = _optimise_acquisition(curr_acq, acq_optimiser, anc_data)
recommendations.append(new_rec)
return recommendations
| 13,139
|
def get_reward(intervention, state, time):
"""Compute the reward based on the observed state and choosen intervention."""
A_1, A_2, A_3 = 60, 500, 60
C_1, C_2, C_3, C_4 = 25, 20, 30, 40
discount = 4.0 / 365
cost = (
A_1 * state.asymptomatic_humans
+ A_2 * state.symptomatic_humans
+ A_3 * state.mosquito_population
)
cost += 0.5 * (
C_1 * intervention.updates["treated_bednet_use"] ** 2
+ C_2 * intervention.updates["condom_use"] ** 2
+ C_3 * intervention.updates["treatment_of_infected"] ** 2
+ C_4 * intervention.updates["indoor_spray_use"] ** 2
)
return -cost * np.exp(-discount * time)
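# A usage sketch added for illustration: SimpleNamespace stands in for the real
# simulator state/intervention objects; only the attribute names read by
# get_reward above are assumed.
from types import SimpleNamespace

_state = SimpleNamespace(asymptomatic_humans=100, symptomatic_humans=10, mosquito_population=1000)
_intervention = SimpleNamespace(updates={
    "treated_bednet_use": 0.5,
    "condom_use": 0.3,
    "treatment_of_infected": 0.2,
    "indoor_spray_use": 0.1,
})
print(get_reward(_intervention, _state, time=30))   # negative cost, discounted over 30 days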
| 13,140
|
def array_of_floats(f):
"""Read an entire file of text as a list of floating-point numbers."""
words = f.read().split()
return [builtin_float(x) for x in words]
| 13,141
|
def add_to_command_file(properties_path, command):
"""
This method writes the commands to a text file in the output folder
"""
commands_path = os.path.dirname(properties_path) + '/commands_' + timestamp + '.txt'
with open(commands_path, 'a+') as commands:
commands.write(command + '\n')
| 13,142
|
def change_currency():
""" Change user's currency """
form = CurrencyForm()
if form.validate_on_submit():
currency = form.rate.data
redirected = redirect(url_for('cashtrack.overview'))
redirected.set_cookie('filter', currency)
symbol = rates[currency]['symbol']
flash(f'Currency has been changed to {currency} ({symbol})', 'success')
return redirected
return rnd_tmp('currency.html', form=form, rates=rates)
| 13,143
|
def get_host_checks():
"""
Returns lxc configuration checks.
"""
import lxc
out = subprocess.check_output('lxc-checkconfig', shell=True)
response = []
if out:
for line in out.splitlines():
response.append(line.decode('utf-8'))
info = {
'checks': response,
}
return jsonify(info)
| 13,144
|
def remplissage_contenu(engine, article) -> None:
"""
Permet de remplir le texte des articles la base de données.
:param article : instance de la classe Surlignage.
:param engine : instance de connexion à la base de donnée.
"""
pbar = tqdm(range(len(article.url_surlignage)), colour='green', desc='Remplissage contenu')
with Session(bind=engine) as session:
for element in range(len(article.url_surlignage)):
insert_contenu(session, element, article)
pbar.update(1)
pbar.refresh()
| 13,145
|
def test_4():
"""
Detect the date columns in naukri_com.csv
Example date present - '2019-07-06 09:20:22 +0000'
"""
table = pandas.read_csv('data_for_tests/naukri_com.csv')
result = date_detection.detect(table)
print(result)
expected_result = '''{'Crawl Timestamp': {'type': <ColumnTypes.CONSINTENT: 1>, 'day_first': False}}'''
assert(expected_result == str(result))
| 13,146
|
def q2_1(df: pd.DataFrame) -> int:
    """
    Finds # of entries (rows) in df
    """
    # df.shape[0] gives the row count; the original df.size[0] would raise,
    # since DataFrame.size is a plain integer
    return df.shape[0]
| 13,147
|
def is_shell(command: str) -> bool:
"""Check if command is shell."""
return command.startswith(get_shell())
| 13,148
|
def parse_args():
"""set and check parameters."""
parser = argparse.ArgumentParser(description="bert process")
parser.add_argument("--pipeline_path", type=str, default="./config/fat_deepffm.pipeline", help="SDK infer pipeline")
parser.add_argument("--data_dir", type=str, default="../data/input/Criteo_bin/",
help="Dataset contain batch_spare batch_label batch_dense")
args_opt = parser.parse_args()
return args_opt
| 13,149
|
def board_init():
"""
Initializes board with all available values 1-9 for each cell
"""
board = [[[i for i in range(1,n+1)] for j in range(n)] for k in range(n)]
return board
| 13,150
|
def gen_int_lists(num):
"""
Generate num list strategies of integers
"""
return [
s.lists(s.integers(), max_size=100)
for _ in range(num)
]
| 13,151
|
def is_blob(bucket: str, file:str):
""" checking if it's a blob """
client = storage.Client()
blob = client.get_bucket(bucket).get_blob(file)
return hasattr(blob, 'exists') and callable(getattr(blob, 'exists'))
| 13,152
|
def _validate_train_test_split(X_train, X_test, y_train, y_test):
"""Check that data shapes are consistent with proper split."""
assert X_train.shape[0] == y_train.shape[0]
assert X_test.shape[0] == y_test.shape[0]
if X_train.ndim > 1:
assert X_train.shape[1] == X_test.shape[1]
if y_train.ndim > 1:
assert y_train.shape[1] == y_test.shape[1]
| 13,153
|
def rotate(q, p):
"""
Rotation of vectors in p by quaternions in q
Format: The last dimension contains the quaternion components which
are ordered as (i,j,k,w), i.e. real component last.
The other dimensions follow the default broadcasting rules.
"""
iw = 3
ii = 0
ij = 1
ik = 2
qi = q[...,ii]
qj = q[...,ij]
qk = q[...,ik]
qw = q[...,iw]
pi = p[...,ii]
pj = p[...,ij]
pk = p[...,ik]
# FIXME: This part does not export to the onnx model, i.e. the shape
# of the tensors will be hardcoded according to the input during
# the export. Not a problem though.
shape = tuple(np.maximum(pi.shape, qi.shape))
tmp = q.new_empty(shape + (4,))
out = q.new_empty(shape + (3,))
# Compute tmp = q*p, identifying p with a purly imaginary quaternion.
tmp[...,iw] = - qi*pi - qj*pj - qk*pk
tmp[...,ii] = qw*pi + qj*pk - qk*pj
tmp[...,ij] = qw*pj - qi*pk + qk*pi
tmp[...,ik] = qw*pk + qi*pj - qj*pi
# Compute tmp*q^-1.
out[...,ii] = -tmp[...,iw]*qi + tmp[...,ii]*qw - tmp[...,ij]*qk + tmp[...,ik]*qj
out[...,ij] = -tmp[...,iw]*qj + tmp[...,ii]*qk + tmp[...,ij]*qw - tmp[...,ik]*qi
out[...,ik] = -tmp[...,iw]*qk - tmp[...,ii]*qj + tmp[...,ij]*qi + tmp[...,ik]*qw
return out
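# A usage sketch added for illustration: rotate the x unit vector by 90 degrees
# about the z axis. Components are ordered (i, j, k, w), real part last, as above.
import math
import torch

q = torch.tensor([0.0, 0.0, math.sin(math.pi / 4), math.cos(math.pi / 4)])
p = torch.tensor([1.0, 0.0, 0.0])
print(rotate(q, p))   # approximately tensor([0., 1., 0.])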
| 13,154
|
def _is_whitelisted(name: str, doc_obj: Union['Module', 'Class']):
"""
Returns `True` if `name` (relative or absolute refname) is
contained in some module's __pdoc__ with a truish value.
"""
refname = doc_obj.refname + '.' + name
module = doc_obj.module
while module:
qualname = refname[len(module.refname) + 1:]
if module.__pdoc__.get(qualname) or module.__pdoc__.get(refname):
return True
module = module.supermodule
return False
| 13,155
|
def permute_bond_indices(atomtype_vector):
"""
Permutes the set of bond indices of a molecule according to the complete set of valid molecular permutation cycles
atomtype_vector: array-like
A vector of the number of each atoms, the length is the total number of atoms.
An A3B8C system would be [3, 8, 1]
    Returns many sets of permuted bond indices, the number of which is equal to the number of cycles
"""
natoms = sum(atomtype_vector)
bond_indices = generate_bond_indices(natoms)
cycles_by_atom = molecular_cycles(atomtype_vector)
bond_indice_permutations = [] # interatomic distance matrix permutations
for atom in cycles_by_atom:
for cycle in atom:
tmp_bond_indices = copy.deepcopy(bond_indices) # need a deep copy, list of lists
for subcycle in cycle:
for i, bond in enumerate(tmp_bond_indices):
tmp_bond_indices[i] = permute_bond(bond, subcycle)
bond_indice_permutations.append(tmp_bond_indices)
return bond_indice_permutations
| 13,156
|
def get_cell_integer_param(device_resources,
cell_data,
name,
force_format=None):
"""
Retrieves definition and decodes value of an integer cell parameter. The
function can optionally force a specific encoding format if needed.
"""
# Get the parameter definition to determine its type
param = device_resources.get_parameter_definition(cell_data.cell_type,
name)
    # Force the format if requested by substituting the parameter
# definition object.
if not param.is_integer_like() and force_format is not None:
if force_format != param.string_format:
param = ParameterDefinition(
name=name,
string_format=force_format,
default_value=cell_data.attributes[name])
# Decode
return param.decode_integer(cell_data.attributes[name])
| 13,157
|
def remove(config, device_ids: tuple):
""" Removes USB devices from the config """
uev = config.uev
uev.remove_devices(device_ids=device_ids)
| 13,158
|
def get_md5(filename):
""" Calculates the MD5 sum of the passed file
Args:
filename (str): File to hash
Returns:
str: MD5 hash of file
"""
import hashlib
# Size of buffer in bytes
BUF_SIZE = 65536
md5 = hashlib.md5()
# Read the file in 64 kB blocks
with open(filename, "rb") as f:
while True:
data = f.read(BUF_SIZE)
if not data:
break
md5.update(data)
return md5.hexdigest()
| 13,159
|
def get_py_path(pem_path):
"""Returns the .py filepath used to generate the given .pem path, which may
or may not exist.
Some test files (notably those in verify_certificate_chain_unittest/ have a
"generate-XXX.py" script that builds the "XXX.pem" file. Build the path to
the corresponding "generate-XXX.py" (which may or may not exist)."""
file_name = os.path.basename(pem_path)
file_name_no_extension = os.path.splitext(file_name)[0]
py_file_name = 'generate-' + file_name_no_extension + '.py'
return os.path.join(os.path.dirname(pem_path), py_file_name)
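# A usage sketch added for illustration (the path is hypothetical):
print(get_py_path('/tests/verify_certificate_chain_unittest/expired.pem'))
# -> /tests/verify_certificate_chain_unittest/generate-expired.py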
| 13,160
|
def is_recurrent(sequence):
"""
Returns true if the given sequence is recurrent (elements can exist more than once), otherwise returns false.
Example
---------
>>> sequence = [1,2,3,4,5]
>>> ps.is_recurrent(sequence)
False
>>> sequence = [1,1,2,2,3]
>>> ps.is_recurrent(sequence)
True
"""
element_counts = get_element_counts(sequence)
truths = [count > 1 for element, count in element_counts.items()]
if True in truths:
return True
return False
| 13,161
|
def flex_stack(items, dim=0):
    """
    Stack a list of items along `dim`, dispatching on the element type.
    """
    if len(items) < 1:
        raise ValueError("items is empty")
    if len(set([type(item) for item in items])) != 1:
        raise TypeError("items are not of the same type")
    if isinstance(items[0], list):
        return items
    elif isinstance(items[0], torch.Tensor):
        return torch.stack(items, dim=dim)
    elif isinstance(items[0], np.ndarray):
        return np.stack(items, axis=dim)
    else:
        raise TypeError(f"Unrecognized type {type(items[0])}")
| 13,162
|
def download_image_data(gpx_file,
padding,
square,
min_lat,
min_long,
max_lat,
max_long,
cache_dir):
"""
Download satellite imagery from USGS
Args:
        gpx_file: (str) A file containing one or more tracks to use to determine the area of terrain to model
padding: (float) Padding to add around the GPX track, in miles
min_lat (float) Southern boundary of the region to model
min_long (float) Eastern boundary of the region to model
max_lat (float) Northern boundary of the region to model
max_long (float) Western boundary of the region to model
cache_dir (str) Directory to download the files to
"""
log = GetLogger()
# Determine the bounds of the output
if gpx_file:
log.info("Parsing GPX file")
gpx = GPXFile(gpx_file)
try:
min_lat, min_long, max_lat, max_long = gpx.GetBounds(padding, square)
except ApplicationError as ex:
log.error(ex)
return False
if None in (min_lat, min_long, max_lat, max_long):
raise InvalidArgumentError("You must specify an area to download")
log.info(f"Requested boundaries top(max_lat)={max_lat} left(min_long)={min_long} bottom(min_lat)={min_lat} right(max_long)={max_long}")
# Get the image data
cache_dir = Path(cache_dir)
image_filename = Path(get_cropped_image_filename(max_lat, min_long, min_lat, max_long))
try:
get_image_data(image_filename, min_lat, min_long, max_lat, max_long, cache_dir)
except ApplicationError as ex:
log.error(ex)
return False
log.passed("Successfully downloaded images")
return True
| 13,163
|
def find_nearest_values(array, value):
"""Find indexes of the two nearest values of an array to a given value
Parameters
----------
array (numpy.ndarray) : array
value (float) : value
Returns
-------
idx1 (int) : index of nearest value in the array
idx2 (int) : index of second nearest value in the array
"""
# index of nearest value in the array
idx1 = (np.abs(array-value)).argmin()
# check if value is bigger or smaller than nearest value
if array[idx1] >= value:
idx2 = idx1 - 1
else:
idx2 = idx1 + 1
return idx1, idx2
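# A usage sketch added for illustration: the two indexes bracket the requested value.
import numpy as np

grid = np.array([0.0, 1.0, 2.0, 3.0])
print(find_nearest_values(grid, 1.3))   # (1, 2): 1.0 is nearest, 2.0 second nearest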
| 13,164
|
def channel_info(channel_id):
    """
    Get Slack channel info
    """
    info = slack_client.api_call("channels.info", channel=channel_id)
    if info:
        return info['channel']
    return None
| 13,165
|
def uninstall_trap(dut, feature_name, trap_id):
"""
Uninstall trap by disabling feature and set always_enable to false
Args:
dut (SonicHost): The target device
feature_name (str): feature name corresponding to the trap
trap_id (str): trap id
"""
disable_feature_entry(dut, feature_name)
configure_always_enabled_for_trap(dut, trap_id, "false")
| 13,166
|
def gen_pdf(outfile, asset_ids, width, prefix):
"""Generate a PDF with sheets of QR codes.
Args:
outfile: absolute filepath to write the PDF to.
asset_ids: list of lists of asset IDs. Each sublist forms a single page, and
should contain exactly enough codes to fill the page.
width: width to pad the asset ID to.
prefix: string prefix to prepend to the asset ID in the QR code. Not
included in the printed ID under the QR code.
"""
pdf = fpdf.FPDF()
with tempfile.TemporaryDirectory() as tmpdir:
for page_ids in asset_ids:
pdf.add_page()
for idx, asset_id in enumerate(page_ids):
add_qrcode_to_pdf(pdf, asset_id, width, prefix, tmpdir, idx)
pdf.output(outfile, 'F')
| 13,167
|
def log(message: str) -> Callable:
"""Returns a decorator to log info a message before function call.
Parameters
----------
message : str
message to log before function call
"""
def decorator(function: Callable) -> Callable:
@wraps(function)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
logging.info(message)
return function(*args, **kwargs)
return wrapper
return decorator
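# A usage sketch added for illustration: the message is emitted at INFO level
# immediately before each call to the decorated function.
import logging

logging.basicConfig(level=logging.INFO)

@log("adding two numbers")
def add(a: int, b: int) -> int:
    return a + b

add(2, 3)   # logs "adding two numbers", then returns 5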
| 13,168
|
def client(identity: PrivateIdentity) -> Client:
"""Client for easy access to iov42 platform."""
return Client(PLATFORM_URL, identity)
| 13,169
|
def init_weights(module, init='orthogonal'):
"""Initialize all the weights and biases of a model.
:param module: any nn.Module or nn.Sequential
:param init: type of initialize, see dict below.
:returns: same module with initialized weights
:rtype: type(module)
"""
if init is None: # Base case, no change to default.
return module
init_dict = {
'xavier_uniform': nn.init.xavier_uniform_,
'xavier_normal': nn.init.xavier_normal_,
'orthogonal': nn.init.orthogonal_,
'kaiming_normal': nn.init.kaiming_normal_,
'kaiming_uniform': nn.init.kaiming_uniform_,
}
for m in module.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
# print("initializing {} with {} init.".format(m, init))
init_dict[init](m.weight)
if hasattr(m, 'bias') and m.bias is not None:
# print("initial bias from ", m, " with zeros")
nn.init.constant_(m.bias, 0.0)
elif isinstance(m, (nn.Sequential, nn.ModuleList, nn.ModuleDict)):
for mod in m:
init_weights(mod, init)
return module
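# A usage sketch added for illustration: orthogonal initialisation applied to a
# small MLP; Linear weights are re-initialised and biases zeroed.
import torch.nn as nn

mlp = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
mlp = init_weights(mlp, init='orthogonal')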
| 13,170
|
def run_security_tests(*, image: str):
"""Run the security tests"""
temp_dir = CWD
if os.environ.get("GITHUB_ACTIONS") == "true":
if os.environ.get("RUNNER_TEMP"):
# Update the temp_dir if a temporary directory is indicated by the
# environment
temp_dir = Path(str(os.environ.get("RUNNER_TEMP"))).absolute()
else:
LOG.warning(
"Unable to determine the context due to inconsistent environment variables, falling back to %s",
str(temp_dir),
)
tag = image.split(":")[-1]
file_name = tag + ".tar"
image_file = temp_dir.joinpath(file_name)
raw_image = CLIENT.images.get(image).save(named=True)
with open(image_file, "wb") as file:
for chunk in raw_image:
file.write(chunk)
working_dir = "/tmp/"
volumes = {temp_dir: {"bind": working_dir, "mode": "ro"}}
num_tests_ran = 0
scanner = "aquasec/trivy:latest"
# Provide information about low priority vulnerabilities
command = (
"--quiet image --timeout 10m0s --exit-code 0 --severity "
+ ",".join(LOW_PRIORITY_VULNS)
+ " --format json --light --input "
+ working_dir
+ file_name
)
opinionated_docker_run(
image=scanner,
command=command,
working_dir=working_dir,
volumes=volumes,
)
num_tests_ran += 1
# Ensure no unacceptable vulnerabilities exist in the image
command = (
"--quiet image --timeout 10m0s --exit-code 1 --severity "
+ ",".join(UNACCEPTABLE_VULNS)
+ " --format json --light --input "
+ working_dir
+ file_name
)
opinionated_docker_run(
image=scanner,
command=command,
working_dir=working_dir,
volumes=volumes,
)
num_tests_ran += 1
# Cleanup the image file
image_file.unlink()
| 13,171
|
def run(puzzle_class, start_position=None, output_stream=sys.stdout,
settings=None):
"""Given a `Puzzle` subclass instance, find all solutions."""
if settings is None:
settings = Settings(start_position=start_position)
elif start_position:
settings.start_position = start_position
solve(puzzle_class, output_stream, settings)
| 13,172
|
def main(argv: Optional[Sequence[str]] = None) -> None:
"""Run VROOM command line interface."""
_main(sys.argv if argv is None else argv)
| 13,173
|
def getsoundchanges(reflex, root): # requires two ipastrings as input
"""
Takes a modern-day L1 word and its reconstructed form and returns \
a table of sound changes.
:param reflex: a modern-day L1-word
:type reflex: str
:param root: a reconstructed proto-L1 word
:type root: str
:return: table of sound changes
:rtype: pandas.core.frame.DataFrame
:Example:
>>> from loanpy import reconstructor as rc
>>> rc.getsoundchanges("ɟɒloɡ", "jɑlkɑ")
+---+--------+------+
| # | reflex | root |
+---+--------+------+
| 0 | #0 | 0 |
+---+--------+------+
| 1 | #ɟ | j |
+---+--------+------+
| 2 | ɒ | ɑ |
+---+--------+------+
| 3 | l | lk |
+---+--------+------+
| 4 | o | ɑ |
+---+--------+------+
| 5 | ɡ# | 0 |
+---+--------+------+
"""
reflex = ipa2clusters(reflex)
root = ipa2clusters(root)
reflex[0], reflex[-1] = "#" + reflex[0], reflex[-1] + "#"
reflex, root = ["#0"] + reflex, ["0"] + root
if reflex[1][1:] in vow and root[1] in cns:
root = root[1:]
elif reflex[1][1:] in cns and root[1] in vow:
reflex = reflex[1:]
    diff = abs(len(root) - len(reflex))  # e.g. ["a","b"], ["c","d","e","f","g"] -> ["a","b","0#"], ["c","d","efg"]
if len(reflex) < len(root):
reflex += ["0#"]
root = root[:-diff] + ["".join(root[-diff:])]
elif len(reflex) > len(root):
root += ["0"]
reflex = reflex[:-diff] + ["".join(reflex[-diff:])]
else:
reflex, root = reflex + ["0#"], root + ["0"]
return pd.DataFrame({"reflex": reflex, "root": root})
| 13,174
|
def compute_normals(filename, datatype='cell'):
"""
Given a file, this method computes the surface normals of the mesh stored
in the file. It allows to compute the normals of the cells or of the points.
    The normal computed at a point is the interpolation of the cell normals of
    the cells adjacent to the point.
:param str filename: the name of the file to parse in order to extract
the geometry information.
:param str datatype: indicate if the normals have to be computed for the
points or the cells. The allowed values are: 'cell', 'point'. Default
value is 'cell'.
:return: the array that contains the normals.
:rtype: numpy.ndarray
"""
points, cells = FileHandler(filename).get_geometry(get_cells=True)
normals = np.array(
[normalize(normal(*points[cell][0:3])) for cell in cells])
if datatype == 'point':
normals_cell = np.empty((points.shape[0], 3))
for i_point in np.arange(points.shape[0]):
cell_adiacent = [cells.index(c) for c in cells if i_point in c]
normals_cell[i_point] = normalize(
np.mean(normals[cell_adiacent], axis=0))
normals = normals_cell
return normals
| 13,175
|
def distribution_of_feature_box_plot(dfData,
tplFigSize = (10,5),
intFontSize = 22,
strName = "Test",
boolSave = False):
"""
Parameters
#---------
dfData Pandas.DataFrame containing data
tplFigSize tuple telling the size of the figure for the plot.
intFontSize Integer for scaling size of font of text in the plot.
strName String giving the plot a name.
    boolSave Boolean determining if the plots get saved or not.
Description
#----------
Uses pandas builtin function for computing boxplot
"""
"""
for j in dfData.columns:
lstTimeBoolDT = ["datetime64[ns]", "bool", "timedelta[ns]"]
if(dfData.dtypes[j].name in lstTimeBoolDT):
dfData.drop(labels = j, axis = 1)
"""
if isinstance(dfData, pd.Series):
box_plot(dfData, tplFigSize, intFontSize, strName, boolSave)
elif isinstance(dfData, pd.DataFrame):
for j in dfData.columns:
box_plot(dfData[j], tplFigSize, intFontSize, strName, boolSave)
else:
raise ValueError("No correct Dateformat given")
return
| 13,176
|
def bounce():
"""
The ball has VX as x velocity and 0 as y velocity. Each bounce reduces
y velocity to REDUCE of itself.
"""
vertical_velocity = 0
while ball.x <= window.width:
ball.move(VX, vertical_velocity)
vertical_velocity += GRAVITY
if ball.y >= window.height-ball.height and vertical_velocity > 0:
vertical_velocity *= -REDUCE
pause(DELAY)
| 13,177
|
def build_layers_url(
layers: List[str], *, size: Optional[LayerImageSize] = None
) -> str:
"""Convenience method to make the server-side-rendering URL of the provided layer URLs.
Parameters
-----------
layers: List[:class:`str`]
The image urls, in ascending order of Zone ID's
size: Optional[:class:`LayerImageSize`]
The desired size for the render. If one is not supplied, it defaults to `LayerImageSize.SIZE_600`.
"""
size_str = str(size or LayerImageSize.SIZE_600)[-3:]
joined = ",".join(quote(layer) for layer in layers)
return f"https://impress-2020.openneo.net/api/outfitImage?size={size_str}&layerUrls={joined}"
| 13,178
|
def SyncBatchNorm(*args, **kwargs):
"""In cpu environment nn.SyncBatchNorm does not have kernel so use nn.BatchNorm2D instead"""
if paddle.get_device() == 'cpu':
return nn.BatchNorm2D(*args, **kwargs)
else:
return nn.SyncBatchNorm(*args, **kwargs)
| 13,179
|
def data_context_connectivity_context_connectivity_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_burst_size_get(uuid, local_id): # noqa: E501
"""data_context_connectivity_context_connectivity_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_peak_burst_size_get
returns tapi.common.CapacityValue # noqa: E501
:param uuid: Id of connectivity-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:rtype: TapiCommonCapacityValue
"""
return 'do some magic!'
| 13,180
|
def test_color():
"""
Text-buttons colors
"""
vq_button = vq.Button.text("foo")
| 13,181
|
def parse_path_length(path):
"""
parse path length
"""
matched_tmp = re.findall(r"(S\d+)", path)
return len(matched_tmp)
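# A usage sketch added for illustration (the path string is hypothetical):
print(parse_path_length("S1->S2->S10"))   # 3, one per "S<number>" hop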
| 13,182
|
def check_permisions(request, allowed_groups):
""" Return permissions."""
try:
profile = request.user.id
print('User', profile, allowed_groups)
is_allowed = True
except Exception:
return False
else:
return is_allowed
| 13,183
|
def find_planets_around_stars(stars_df, campaign_no, max_planets=None,
provenance=["EVEREST", "K2SFF", "K2"],
log_file='summary_log.csv', interactive=False):
"""
Find one or more planet transits given input time, raw flux and corrected
flux series. Save the results to the log files and display plots of
light curves and folded light curves from TLS.
Parameters
----------
stars_df: str, pandas.DataFrame, list of str or int, tuple of str or int
String file name of a CSV file containing a column named `'EPIC'`.
The first line in the file must be column header and the second line
is ignored. Alternatively, ``stars_df`` can be a `~pandas.DataFrame`
containing a column named `'EPIC'`. Other options are a list or tuple
of string or integer EPIC IDs of the stars around which to search for
transits.
campaign_no: int, str
Campaign number or "sequence number". Can be a wildcard character `'*'`
to indicate that all campaigns should be searched.
max_planets: int
Maximum number of planets to find.
    provenance: str or list of str
        One or more provenance names in the MAST archive, e.g., ``'K2'``, ``'EVEREST'``,
``'K2SFF'``.
log_file: str
File name of the summary log file to which all planet search results
will be logged.
interactive: bool
Indicates whether or not to draw figures on screen. When ``interactive``
is `False`, figures are saved to files in the ``'Graphs/tls/'`` and
``'Graphs/lc/'`` sub-directories. Figure's file names will be
constructed using the following pattern:
``star_id + '_' + provenance + '_tls.log'``
``star_id + '_' + provenance + '_lc.log'``
"""
os.makedirs('logs/', exist_ok=True)
os.makedirs('Graphs', exist_ok=True)
os.makedirs('Graphs/tls/', exist_ok=True)
os.makedirs('Graphs/lc/', exist_ok=True)
sequence_name = str(campaign_no).strip()
if isinstance(stars_df, str):
stars_df = pd.read_csv(stars_df, header=[0], skiprows=[1])
if isinstance(stars_df, (list, tuple)):
stars_df = pd.DataFrame({'EPIC': stars_df})
elif not isinstance(stars_df, pd.DataFrame):
raise TypeError("'stars_df' must be either a Pandas DataFrame or a "
"string file name.")
if 'EPIC' not in stars_df.columns.values:
raise ValueError("Input list of stars does not have an 'EPIC' column.")
catalog = stars_df['EPIC'].unique().tolist()
if isinstance(provenance, str):
provenance = [provenance]
summary_log = create_summary_log(log_file)
plotno = 1
for star_id in catalog:
for prov in provenance:
try:
data_files = get_mast_file_list(
str(star_id), prov, sequence_name
)
# DEBUG:
# In order to bypass MAST and use already locally downloaded
# data files uncomment lines below and the 'glob' import
# at the beginning of the module:
# data_files = glob.glob(
# '/.../mastDownload/K2/*/*{:s}*{:s}*.fits'
# .format(prov, star_id)
# )
except Exception as e:
print("There was an issue retrieving the files for {} in "
"the {} data set.".format(star_id, prov))
print("Reported error: '{}'".format(e.args[0]))
continue
try:
time, raw_flux, cor_flux = get_lc(data_files[0], prov)
if time.size < 10:
print("WARNING: Too few data to find transit.")
continue
except ValueError:
continue
find_planets(
time, raw_flux, cor_flux, campaign_no=sequence_name,
star_id=star_id, provenance=prov, summary_log=summary_log,
max_planets=max_planets, plotno=plotno, interactive=interactive
)
summary_log.close()
| 13,184
|
def kanji2digit(s):
"""
    Convert kanji numerals from 1 to 99 into Arabic numerals.
"""
k2d = lambda m, i: _kanjitable[m.group(i)]
s = _re_kanjijiu1.sub(lambda m: k2d(m,1) + k2d(m,2), s)
s = _re_kanjijiu2.sub(lambda m: u'1' + k2d(m,1), s)
s = _re_kanji.sub(lambda m: k2d(m,1), s)
s = s.replace(u'十', u'10')
return s
| 13,185
|
def type_right(wait=0):
"""
Press Right key
Args:
wait (int, optional): Wait interval (ms) after task
"""
kb.send('right')
delay = max(0, wait or config.DEFAULT_SLEEP_AFTER_ACTION)
sleep(delay)
| 13,186
|
def calc_rdm_segment(t, c, segment_id_beg, segment_id_end, segment_id, ph_index_beg, segment_ph_cnt, debug=0):
"""
Function to calculate radiometry (rdm)
Input:
t - time or delta_time of ATL03, for a given gt num
c - classification of ATL03 for a given gt num
ensure that no nans exist
segment_id_beg - segment_id_beg from ATL08
segment_id_end - segment_id_end from ATL08
segment_id - segment_id from ATL03 geolocation/
ph_index_beg - ph_index_beg from ATL03 geolocation/
segment_ph_cnt - segment_ph_cnt from ATL03 geolocation/
debug - val != 0 enables print statements if segments
do not match from 03 to 08 (see caveats)
Output:
n_shots_unique - total number of unique ttg per ATL08 100m bin
rdm_ground - rdm of ground photons (c==1)
rdm_veg - rdm of veg photons (c==2)
rdm_canopy - rdm of canopy photons (c==3)
Example:
n_shots_unique, rdm_ground, rdm_veg, rdm_canopy = \
calc_rdm(t, c, segment_id_beg, segment_id_end, ph_index_beg, segment_ph_cnt, debug=0)
Caveats:
Ensure that no nans exist in classification c
rdm_ground/veg/canopy and n_shots_unique are floating point
b/c it's possible to have no overlap in 03 and 08 data, in
which case the radiometry value is NaN; this is implemented by
initializing rdm vectors are NaN. Thus, they are floating-point-
valued.
        This function can handle cases where 03/08 do not totally overlap,
or when there is no overlap. That said, one should proceed with
caution knowing 03 and 08 do not overlap at all. NaN values are
initialized in rdm vectors based on these cases.
"""
if np.isnan(c).sum() > 0 and debug:
print('warning: NaN values found in c')
rdm_ground = np.full(segment_id_beg.shape, np.nan)
rdm_veg = np.full(segment_id_beg.shape, np.nan)
rdm_canopy = np.full(segment_id_beg.shape, np.nan)
n_shots_unique = np.full(segment_id_beg.shape, np.nan)
n_id = len(segment_id)
for s in range(len(segment_id_beg)):
# _, k0 = iu.getClosest(segment_id, [segment_id_beg[s]])
# _, k1 = iu.getClosest(segment_id, [segment_id_end[s]])
_, k0 = getClosest(segment_id, [segment_id_beg[s]])
_, k1 = getClosest(segment_id, [segment_id_end[s]])
k0, k1 = int(k0), int(k1)
warn = False
b_edge = False
if segment_id[k0] < segment_id_beg[s]:
# left side incomplete
# cm.pause('beg')
k = k0
while segment_id[k] < segment_id_beg[s]:
k += 1
if k >= n_id:
b_edge = True
break
elif segment_id[k0] > segment_id_beg[s]:
# print('warning: 03 seg id beg %d > 08 seg id beg %d' % (segment_id[k0], segment_id_beg[s]))
warn = True
# else:
# equal, totally fine
# if segment_id[k1] != segment_id_end[s]:
if segment_id[k1] > segment_id_end[s]:
# right side incomplete
# cm.pause('end')
k = k1
while segment_id[k] > segment_id_end[s]:
k -= 1
if k < 0:
b_edge = True
break
elif segment_id[k1] < segment_id_end[s]:
# print('warning: 03 seg id beg %d < 08 seg id beg %d' % (segment_id[k0], segment_id_beg[s]))
warn = True
# else:
# equal, totally fine
if b_edge and debug:
# 08 segment is entirely outside of 03 segment data
print('outside')
print('03: [%d, %d]' % (segment_id[k0], segment_id[k1]))
print('08: [%d, %d]' % (segment_id_beg[s], segment_id_end[s]))
# cm.pause()
input('enter to continue')
continue
if warn and debug:
print('partial')
print('03: [%d, %d]' % (segment_id[k0], segment_id[k1]))
print('08: [%d, %d]' % (segment_id_beg[s], segment_id_end[s]))
# cm.pause()
input('enter to continue')
i0, i1 = ph_index_beg[k0], ph_index_beg[k1] + segment_ph_cnt[k1] - 1
t_seg = t[i0:i1+1] # inclusive index
c_seg = c[i0:i1+1]
        n_shots_total_uq = len(np.unique(t_seg))
        if n_shots_total_uq == 0:
            # No photons overlap this 08 segment; leave its NaN initialization.
            continue
n_shots_ground = (c_seg == 1).sum()
n_shots_veg = (c_seg == 2).sum()
n_shots_canopy = (c_seg == 3).sum()
n_shots_unique[s] = n_shots_total_uq
rdm_ground[s] = float(n_shots_ground / n_shots_total_uq)
rdm_veg[s] = float(n_shots_veg / n_shots_total_uq)
rdm_canopy[s] = float(n_shots_canopy / n_shots_total_uq)
return n_shots_unique, rdm_ground, rdm_veg, rdm_canopy
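
# --- Hedged usage sketch for calc_rdm_segment with tiny synthetic inputs ---
# The arrays are made up for illustration; real inputs come from ATL03
# heights/geolocation and ATL08 land_segments. It assumes the getClosest
# helper used above returns (closest value, index).
import numpy as np

segment_id = np.array([100, 101, 102])         # three ATL03 geolocation segments
ph_index_beg = np.array([0, 2, 4])             # assumed 0-based, matching the direct indexing above
segment_ph_cnt = np.array([2, 2, 2])
t = np.array([0.1, 0.1, 0.2, 0.2, 0.3, 0.3])   # photon shot times (3 unique shots)
c = np.array([1, 2, 1, 3, 2, 1])               # 1=ground, 2=veg, 3=canopy

segment_id_beg = np.array([100])               # one ATL08 bin spanning all three segments
segment_id_end = np.array([102])

n_shots, rdm_g, rdm_v, rdm_c = calc_rdm_segment(
    t, c, segment_id_beg, segment_id_end, segment_id,
    ph_index_beg, segment_ph_cnt, debug=0)
# 3 ground, 2 veg, 1 canopy photon over 3 unique shots ->
# n_shots == [3.], rdm_g == [1.], rdm_v == [0.667], rdm_c == [0.333]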
| 13,187
|
def load_featurizer(pretrained_local_path):
"""Load pretrained model."""
return CNN_tf("vgg", pretrained_local_path)
| 13,188
|
def create_zipfile(dir_to_zip, savepath=''):
"""Create a zip file from all the files under 'dir_to_zip'.
The output zip file will be saved to savepath.
If savepath ends with '.zip', then the output zip file will be
saved AS 'savepath'. Necessary tree subdirectories are created automatically.
Else, savepath is assumed to be a directory path,
hence the output zip file will be saved TO 'savepath'
directory. Necessary tree subdirectories are created automatically.
    :return: absolute path of the saved zip file
"""
save_cwd = os.getcwd()
dir_to_zip = os.path.abspath(dir_to_zip)
    if dir_to_zip in os.path.split(savepath)[0]:
        raise ValueError(
            'To avoid recursion, the resultant "savepath" should not be located inside "dir_to_zip"',
            dict(dir_to_zip=dir_to_zip, savepath=savepath))
parent_dir, dir_name = os.path.split(dir_to_zip)
os.chdir(parent_dir)
if savepath:
if savepath.endswith('.zip'):
create_path(savepath, stop_depth=1)
else:
create_path(savepath, stop_depth=0)
savepath = os.path.join(savepath, dir_name + '.zip')
else:
savepath = dir_to_zip + '.zip'
pwd_length = len(os.getcwd())
with zipfile.ZipFile(savepath, "w", compression=zipfile.ZIP_DEFLATED) as zf:
for dirpath, dirnames, filenames in os.walk(dir_to_zip):
for name in sorted(dirnames):
path = os.path.normpath(os.path.join(dirpath, name))
zf.write(path, path[pwd_length + 1:])
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zf.write(path, path[pwd_length + 1:])
os.chdir(save_cwd)
return os.path.abspath(savepath)
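
# --- Hedged usage sketch for create_zipfile ---
# Builds a throwaway directory and zips it. Assumes the os/zipfile imports and
# the create_path helper used above are available; paths are hypothetical.
import os
import tempfile

src = tempfile.mkdtemp(prefix='dir_to_zip_')
with open(os.path.join(src, 'hello.txt'), 'w') as fh:
    fh.write('hello')

print(create_zipfile(src))                  # saved next to src as <src>.zip
dst = os.path.join(tempfile.mkdtemp(prefix='zips_'), 'backup.zip')
print(create_zipfile(src, dst))             # saved AS the explicit .zip path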
| 13,189
|
def make_cmdclass(basecmd):
"""Decorate setuptools commands."""
base_run = basecmd.run
def new_run(self):
from templateflow.conf import setup_home
setup_home()
base_run(self)
basecmd.run = new_run
return basecmd
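
# Hedged sketch of how make_cmdclass would typically be wired into a setup.py
# so that templateflow's setup_home() runs before install/develop. The package
# name and the particular commands decorated here are assumptions.
from setuptools import setup
from setuptools.command.develop import develop
from setuptools.command.install import install

setup(
    name='example-package',                 # hypothetical
    cmdclass={
        'develop': make_cmdclass(develop),
        'install': make_cmdclass(install),
    },
)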
| 13,190
|
def rmse(predictions, targets):
"""Compute root mean squared error"""
rmse = np.sqrt(((predictions - targets) ** 2).mean())
return rmse
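
# Quick sanity check for rmse (values are illustrative).
import numpy as np

preds = np.array([2.0, 4.0, 6.0])
targets = np.array([1.0, 4.0, 8.0])
print(rmse(preds, targets))   # sqrt((1 + 0 + 4) / 3) ≈ 1.291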
| 13,191
|
def format_timedelta(value, time_format=None):
""" formats a datetime.timedelta with the given format.
Code copied from Django as explained in
http://stackoverflow.com/a/30339105/932593
"""
if time_format is None:
time_format = "{days} days, {hours2}:{minutes2}:{seconds2}"
if hasattr(value, 'seconds'):
seconds = value.seconds + value.days * 24 * 3600
else:
seconds = int(value)
seconds_total = seconds
minutes = int(math.floor(seconds / 60))
minutes_total = minutes
seconds -= minutes * 60
hours = int(math.floor(minutes / 60))
hours_total = hours
minutes -= hours * 60
days = int(math.floor(hours / 24))
days_total = days
hours -= days * 24
years = int(math.floor(days / 365))
years_total = years
days -= years * 365
return time_format.format(**{
'seconds': seconds,
'seconds2': str(seconds).zfill(2),
'minutes': minutes,
'minutes2': str(minutes).zfill(2),
'hours': hours,
'hours2': str(hours).zfill(2),
'days': days,
'years': years,
'seconds_total': seconds_total,
'minutes_total': minutes_total,
'hours_total': hours_total,
'days_total': days_total,
'years_total': years_total,
})
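
# Hedged usage sketch for format_timedelta (outputs shown as comments).
import datetime

delta = datetime.timedelta(days=1, hours=2, minutes=3, seconds=4)
print(format_timedelta(delta))                                            # '1 days, 02:03:04'
print(format_timedelta(3661, "{hours_total}h {minutes2}m {seconds2}s"))   # '1h 01m 01s'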
| 13,192
|
def categorical_log_likelihood(probs: chex.Array, labels: chex.Array):
"""Computes joint log likelihood based on probs and labels."""
num_data, unused_num_classes = probs.shape
assert len(labels) == num_data
assigned_probs = probs[jnp.arange(num_data), jnp.squeeze(labels)]
return jnp.sum(jnp.log(assigned_probs))
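
# Hedged usage sketch for categorical_log_likelihood with a tiny batch.
import jax.numpy as jnp

probs = jnp.array([[0.7, 0.3],
                   [0.2, 0.8]])
labels = jnp.array([[0], [1]])    # shape (num_data, 1); squeezed inside the function
print(categorical_log_likelihood(probs, labels))   # log(0.7) + log(0.8) ≈ -0.580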
| 13,193
|
def ask_name(question: str = "What is your name?") -> str:
"""Ask for the users name."""
return input(question)
| 13,194
|
def p_path_primary_2(p):
"""
path_primary : bracketed_path
"""
# | not_path_negated_property_set # not implemented atm
p[0] = p[1]
| 13,195
|
def _maxcut(g: Graph, values: Sequence[int]) -> float:
"""
cut by given values $$\pm 1$$ on each vertex as a list
:param g:
:param values:
:return:
"""
cost = 0
for e in g.edges:
cost += g[e[0]][e[1]].get("weight", 1.0) / 2 * (1 - values[e[0]] * values[e[1]])
return cost
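
# Hedged usage sketch for _maxcut on a 4-cycle, assuming Graph above is a
# networkx-style graph (iterable g.edges, g[u][v] edge-attribute dicts).
import networkx as nx

g = nx.cycle_graph(4)                 # unweighted edges default to weight 1.0
print(_maxcut(g, [1, -1, 1, -1]))     # alternating assignment cuts all 4 edges -> 4.0
print(_maxcut(g, [1, 1, -1, -1]))     # cuts edges (1, 2) and (0, 3) only -> 2.0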
| 13,196
|
def main():
"""Execute main."""
cli.add_command(deploy)
cli.add_command(configure)
cli()
| 13,197
|
def cp_als(X, rank, random_state=None, init='randn', **options):
"""Fits CP Decomposition using Alternating Least Squares (ALS).
Parameters
----------
X : (I_1, ..., I_N) array_like
A tensor with ``X.ndim >= 3``.
rank : integer
The `rank` sets the number of components to be computed.
random_state : integer, ``RandomState``, or ``None``, optional (default ``None``)
If integer, sets the seed of the random number generator;
If RandomState instance, random_state is the random number generator;
If None, use the RandomState instance used by ``numpy.random``.
init : str, or KTensor, optional (default ``'randn'``).
Specifies initial guess for KTensor factor matrices.
If ``'randn'``, Gaussian random numbers are used to initialize.
If ``'rand'``, uniform random numbers are used to initialize.
If KTensor instance, a copy is made to initialize the optimization.
options : dict, specifying fitting options.
tol : float, optional (default ``tol=1E-5``)
Stopping tolerance for reconstruction error.
max_iter : integer, optional (default ``max_iter = 500``)
Maximum number of iterations to perform before exiting.
min_iter : integer, optional (default ``min_iter = 1``)
Minimum number of iterations to perform before exiting.
max_time : integer, optional (default ``max_time = np.inf``)
Maximum computational time before exiting.
verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)
Display progress.
Returns
-------
result : FitResult instance
Object which holds the fitted results. It provides the factor matrices
in form of a KTensor, ``result.factors``.
Notes
-----
Alternating Least Squares (ALS) is a very old and reliable method for
fitting CP decompositions. This is likely a good first algorithm to try.
References
----------
Kolda, T. G. & Bader, B. W.
"Tensor Decompositions and Applications."
SIAM Rev. 51 (2009): 455-500
http://epubs.siam.org/doi/pdf/10.1137/07070111X
Comon, Pierre & Xavier Luciani & Andre De Almeida.
"Tensor decompositions, alternating least squares and other tales."
Journal of chemometrics 23 (2009): 393-405.
http://onlinelibrary.wiley.com/doi/10.1002/cem.1236/abstract
Examples
--------
```
import tensortools as tt
I, J, K, R = 20, 20, 20, 4
X = tt.randn_tensor(I, J, K, rank=R)
tt.cp_als(X, rank=R)
```
"""
# Check inputs.
optim_utils._check_cpd_inputs(X, rank)
# Initialize problem.
U, normX = optim_utils._get_initial_ktensor(init, X, rank, random_state)
result = FitResult(U, 'CP_ALS', **options)
# Main optimization loop.
while result.still_optimizing:
# Iterate over each tensor mode.
for n in range(X.ndim):
# i) Normalize factors to prevent singularities.
U.rebalance()
# ii) Compute the N-1 gram matrices.
components = [U[j] for j in range(X.ndim) if j != n]
grams = sci.multiply.reduce([sci.dot(u.T, u) for u in components])
# iii) Compute Khatri-Rao product.
kr = khatri_rao(components)
# iv) Form normal equations and solve via Cholesky
c = linalg.cho_factor(grams, overwrite_a=False)
p = unfold(X, n).dot(kr)
U[n] = linalg.cho_solve(c, p.T, overwrite_b=False).T
# U[n] = linalg.solve(grams, unfold(X, n).dot(kr).T).T
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update the optimization result, checks for convergence.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Compute objective function
# grams *= U[-1].T.dot(U[-1])
# obj = np.sqrt(np.sum(grams) - 2*sci.sum(p*U[-1]) + normX**2) / normX
obj = linalg.norm(U.full() - X) / normX
# Update result
result.update(obj)
# Finalize and return the optimization result.
return result.finalize()
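
# Hedged sketch of the mode-n normal equations solved in step iv) above,
# written with plain NumPy to show what the Cholesky solve computes. Shapes,
# the seed, and the Khatri-Rao ordering are illustrative assumptions.
import numpy as np

rng = np.random.default_rng(0)
I, J, K, R = 5, 6, 7, 3
A, B, C = (rng.standard_normal((d, R)) for d in (I, J, K))
X = np.einsum('ir,jr,kr->ijk', A, B, C)                # exact rank-R tensor

# Mode-0 update: solve  A_new @ (B^T B * C^T C) = X_(0) @ KR(B, C)
kr = np.einsum('jr,kr->jkr', B, C).reshape(J * K, R)   # Khatri-Rao product
grams = (B.T @ B) * (C.T @ C)                          # Hadamard product of Gram matrices
A_new = np.linalg.solve(grams, (X.reshape(I, J * K) @ kr).T).T
print(np.allclose(A_new, A))                           # True: recovers the mode-0 factor exactly here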
| 13,198
|
def load_station_enu(
station_name,
start_date=None,
end_date=None,
download_if_missing=True,
force_download=False,
zero_by="mean",
to_cm=True,
):
"""Loads one gps station's ENU data since start_date until end_date as a dataframe
Args:
station_name (str): 4 Letter name of GPS station
See http://geodesy.unr.edu/NGLStationPages/gpsnetmap/GPSNetMap.html for map
start_date (datetime or str): Optional. cutoff for beginning of GPS data
end_date (datetime or str): Optional. cut off for end of GPS data
        download_if_missing (bool): default True. Download the station data if no local copy exists.
        force_download (bool): default False. Delete any local copy and re-download.
        zero_by (str): 'mean' or 'start'. Reference the ENU series to its mean
            or to the mean of its first ~10 epochs. Default 'mean'.
        to_cm (bool): default True. Convert the ENU columns from meters to centimeters.
    """
# start_date, end_date = _parse_dates(start_date, end_date)
if zero_by not in ("start", "mean"):
raise ValueError("'zero_by' must be either 'start' or 'mean'")
station_name = station_name.upper()
gps_data_file = os.path.join(GPS_DIR, GPS_FILE.format(station=station_name))
if force_download:
try:
os.remove(gps_data_file)
logger.info(f"force removed {gps_data_file}")
except FileNotFoundError:
pass
if not os.path.exists(gps_data_file):
if download_if_missing:
logger.info(f"Downloading {station_name} to {gps_data_file}")
download_station_data(station_name)
else:
            raise ValueError(
                f"{gps_data_file} does not exist and download_if_missing = False"
            )
df = pd.read_csv(gps_data_file, header=0, sep=r"\s+", engine="c")
clean_df = _clean_gps_df(df, start_date, end_date)
if to_cm:
# logger.info("Converting %s GPS to cm" % station_name)
clean_df[["east", "north", "up"]] = 100 * clean_df[["east", "north", "up"]]
if zero_by.lower() == "mean":
mean_val = clean_df[["east", "north", "up"]].mean()
# enu_zeroed = clean_df[["east", "north", "up"]] - mean_val
clean_df[["east", "north", "up"]] -= mean_val
elif zero_by.lower() == "start":
start_val = clean_df[["east", "north", "up"]].iloc[:10].mean()
# enu_zeroed = clean_df[["east", "north", "up"]] - start_val
clean_df[["east", "north", "up"]] -= start_val
# Finally, make the 'date' column a DateIndex
return clean_df.set_index("date")
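
# Hedged usage sketch for load_station_enu. 'TXKM' is an example 4-letter
# station ID (assumed to exist in the Nevada Geodetic Laboratory listing);
# the first call downloads the data file into GPS_DIR.
enu = load_station_enu(
    "txkm",                      # case-insensitive; upper-cased internally
    start_date="2018-01-01",
    end_date="2019-01-01",
    zero_by="start",             # reference the series to its first ~10 epochs
)
print(enu[["east", "north", "up"]].head())   # in centimeters, since to_cm=True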
| 13,199
|