| content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
|---|---|---|
def compute_wolfe_gap(point_x, objective_function, feasible_region):
"""Compute the Wolfe gap given a point."""
grad = objective_function.evaluate_grad(point_x.cartesian_coordinates)
v = feasible_region.lp_oracle(grad)
wolfe_gap = grad.dot(point_x.cartesian_coordinates - v)
return wolfe_gap
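For reference, the value computed here is the Frank-Wolfe (duality) gap g(x) = grad f(x) . (x - v), where v is the output of the linear-minimization oracle; it is non-negative and bounds the suboptimality of x. The `point_x`, `objective_function`, and `feasible_region` objects are project-specific, so the sketch below is only a hedged, self-contained NumPy illustration of the same quantity over the probability simplex.
import numpy as np

def simplex_lp_oracle(grad):
    # Linear minimization over the probability simplex: the minimizer is the
    # vertex (standard basis vector) at the coordinate with the smallest gradient entry.
    v = np.zeros_like(grad)
    v[np.argmin(grad)] = 1.0
    return v

def wolfe_gap_simplex(x, b):
    # Objective f(x) = 0.5 * ||x - b||^2, so grad f(x) = x - b.
    grad = x - b
    v = simplex_lp_oracle(grad)
    return float(grad.dot(x - v))

print(wolfe_gap_simplex(np.array([0.25, 0.25, 0.5]), np.array([0.0, 1.0, 0.0])))  # 0.875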
|
f2b09a232063599aa7525a70e6d3a0d8bafb57e7
| 3,646,300
|
async def get(ip, community, oid, port=161, timeout=DEFAULT_TIMEOUT):
# type: (str, str, str, int, int) -> PyType
"""
Delegates to :py:func:`~puresnmp.aio.api.raw.get` but returns simple Python
types.
See the "raw" equivalent for detailed documentation & examples.
"""
raw_value = await raw.get(ip, community, oid, port, timeout=timeout)
return raw_value.pythonize()
|
6682d9877ac4d5b287088fd17d626011b95b6c31
| 3,646,301
|
def preprocess_image(image, params):
"""Preprocess image tensor.
Args:
image: tensor, input image with shape
[cur_batch_size, height, width, depth].
params: dict, user passed parameters.
Returns:
Preprocessed image tensor with shape
[cur_batch_size, height, width, depth].
"""
func_name = "preprocess_image"
# Convert from [0, 255] -> [-1.0, 1.0] floats.
image = tf.cast(x=image, dtype=tf.float32) * (2. / 255) - 1.0
print_obj(func_name, "image", image)
return image
|
4e5a563610c2ecdcd29fa5c077025100625b767b
| 3,646,302
|
def get_vlim(xarr: xr.DataArray, alpha: float) -> dict:
"""Get vmin, vmax using mean and std."""
mean = xarr.mean()
std = xarr.std()
return {"vmin": max(0., mean - alpha * std), "vmax": mean + alpha * std}
|
4f6c87f290ab23db56fe67f700136a49e2b52363
| 3,646,303
|
def count_consumed_symbols(e):
"""Count how many symbols are consumed from each sequence by a single sequence diff entry."""
op = e.op
if op == DiffOp.ADDRANGE:
return (0, len(e.valuelist))
elif op == DiffOp.REMOVERANGE:
return (e.length, 0)
elif op == DiffOp.PATCH:
return (1, 1)
else:
raise NBDiffFormatError("Invalid op '{}'".format(op))
|
63a3d97840fae49a7ff3279e10e553d82dfcf801
| 3,646,304
|
def maha_dist_sq(cols, center, cov):
"""Calculate squared Mahalanobis distance of all observations (rows in the
vectors contained in the list cols) from the center vector with respect to
the covariance matrix cov"""
n = len(cols[0])
p = len(cols)
assert len(center) == p
# observation matrix
obs = flex.double(flex.grid(n, p))
for i, col in enumerate(cols):
obs.matrix_paste_column_in_place(col, i)
d2 = maha_dist_sq_cpp(obs, flex.double(center), cov)
return d2
|
9e54a7f49ed3b977b351007991f3fe263306a20a
| 3,646,305
|
def get_form_target():
"""
Returns the target URL for the comment form submission view.
"""
if get_comment_app_name() != DEFAULT_COMMENTS_APP and hasattr(get_comment_app(), "get_form_target"):
return get_comment_app().get_form_target()
else:
return urlresolvers.reverse("comments.views.comments.post_comment")
|
d7e6ad126a35109d589d7f2734a4bd3e56df748f
| 3,646,306
|
def secret_page(username=None, password=None):
"""
Returns the HTML for the page visited after the user has logged-in.
"""
if username is None or password is None:
raise ValueError("You need to pass both username and password!")
return _wrapper("""
<h1> Welcome, {username}! </h1>
<p> <small> Pst! I know your password is
<span class="spoilers"> {password}</span>.
</small>
</p>
""".format(username=escape(username.capitalize()),
password=escape(password)))
# ==== Edit username and pw in secret.py ====
|
3bd81f30f0bf63290c6ee24cf3bccb7090fd406c
| 3,646,307
|
import collections
def user(username):
""" displays a single user """
all_badgers = loads(r_server.get('all_badgers'))
this_badger = all_badgers[username]
this_badger_sorted = collections.OrderedDict(sorted(this_badger.items(), reverse=True))
days = days_in_a_row(this_badger)
kwargs = {'badgers': { username: this_badger_sorted }, 'days': days }
return render_template('index.html', **kwargs)
|
27cf03175184cc839a64d931aa3477a0196c24aa
| 3,646,308
|
import numpy
def ifft(a, axis):
"""
Fourier transformation from grid to image space, along a given axis.
(inverse Fourier transform)
:param a: numpy array, 1D or 2D (`uv` grid to transform)
:param axis: int; axes over which to calculate
:return: numpy array (an image in `lm` coordinate space)
"""
return numpy.fft.fftshift(
numpy.fft.ifft(numpy.fft.ifftshift(a, axis), axis=axis), axis
)
|
3a96d6b615c8da63deaeca5e98a4f82f18fec8dd
| 3,646,309
|
import random
def transitions_and_masks_to_proposals(t1,
t2,
m1,
m2,
max_samples=10,
max_ccs=6):
"""
assumes set-based s and a... so shape should be (n_components, *component_shape)
Takes two transitions with their masks, and combines them
using connected-component relabeling to form proposals
Returns a list of tuples of ((s1, a1, s2) proposal, disconnected_component_idxs).
"""
sa1, s21 = t1
sa2, s22 = t2
# get_dcs_from_mask should return a set of tuples of indices, inc. the empty tuple
    # where the subgraph represented by each tuple is disconnected from the rest of
    # the graph. Note that mask should be square, so columns corresp. to action idxs are
# dummy columns.
#
# E.g., if mask is [[1,0,0,0],[0,1,0,0],[0,0,1,1],[0,0,1,1]],
# this function should return:
# set([ (,), (0,), (1,), (0,1), (2, 3), (0, 2, 3), (1, 2, 3), (0, 1, 2, 3) ])
dc1 = get_dcs_from_mask(m1, max_ccs)
dc2 = get_dcs_from_mask(m2, max_ccs)
# get shared connected components in random order
shared_dc = list(dc1.intersection(dc2))
random.shuffle(shared_dc)
# subsample shared_dc down to max_samples
if len(shared_dc) > max_samples:
shared_dc = shared_dc[:max_samples]
all_idxs = set(range(len(sa1)))
res = []
for dc in shared_dc:
not_dc = list(all_idxs - set(dc))
dc = list(dc) # (0, 2)
proposed_sa = np.zeros_like(sa1)
proposed_s2 = np.zeros_like(sa1)
proposed_sa[dc] = sa1[dc]
proposed_sa[not_dc] = sa2[not_dc]
proposed_s2[dc] = s21[dc]
proposed_s2[not_dc] = s22[not_dc]
proposed_t = (proposed_sa, proposed_s2)
res.append((proposed_t, tuple(dc)))
return res
|
146b937e7a46d6d051b10f900574378874535932
| 3,646,310
|
import os
from pathlib import Path
import logging
def make_manifest(root: AnyPath) -> FileManifest:
"""
Returns the file manifest for the given directory.
"""
manifest = {}
for (dirpath, dirnames, filenames) in os.walk(root):
dirnames[:] = sorted(dirnames)
for filename in sorted(filenames):
path = Path(dirpath) / filename
logging.info(f"PATH {path}")
st_info = path.lstat()
rel_path = os.fspath(path.relative_to(root))
manifest[rel_path] = FileMetadata(
size=st_info.st_size, digest=make_digest(path, st_info.st_mode),
)
return manifest
|
8bd85903615f6da14686599425ea824ae9846955
| 3,646,311
|
import re
import requests
import boto.exception
import boto.regioninfo
import boto.s3
try:  # Python 3
    from urllib.parse import urlparse
except ImportError:  # Python 2
    from urlparse import urlparse
def exists(url):
"""Check based on protocol if url exists."""
parsed_url = urlparse(url)
if parsed_url.scheme == "":
raise RuntimeError("Invalid url: %s" % url)
if parsed_url.scheme in ('http', 'https'):
r = requests.head(url, verify=False)
if r.status_code == 200:
return True
elif r.status_code == 404:
return False
else:
r.raise_for_status()
elif parsed_url.scheme in ('s3', 's3s'):
s3_eps = boto.regioninfo.load_regions()['s3']
region = None
for r, e in list(s3_eps.items()):
if re.search(e, parsed_url.netloc):
region = r
break
if region is None:
raise RuntimeError("Failed to find region for endpoint %s." %
parsed_url.netloc)
conn = boto.s3.connect_to_region(region,
aws_access_key_id=parsed_url.username,
aws_secret_access_key=parsed_url.password)
match = re.search(r'/(.*?)/(.*)$', parsed_url.path)
if not match:
raise RuntimeError("Failed to parse bucket & key from %s." %
parsed_url.path)
bn, kn = match.groups()
try:
bucket = conn.get_bucket(bn)
except boto.exception.S3ResponseError as e:
if e.status == 404:
return False
else:
raise
key = bucket.get_key(kn)
if key is None:
return False
else:
return True
else:
raise NotImplementedError("Failed to check existence of %s url." %
parsed_url.scheme)
|
bb91fd5fb93ec6441125a1aa4874ad6d7f103535
| 3,646,312
|
def inverse_chirality_symbol(symbol):
"""
    Inverts a chirality symbol, e.g., the 'R' character to 'S', or 'NS' to 'NR'.
    Note that chiral double bond symbols ('E' and 'Z') must not be inverted (they are not mirror images of each other).
Args:
symbol (str): The chirality symbol.
Returns:
str: The inverse chirality symbol.
Raises:
InputError: If ``symbol`` could not be recognized.
"""
inversion_dict = {'R': 'S', 'S': 'R', 'NR': 'NS', 'NS': 'NR', 'E': 'E', 'Z': 'Z'}
if symbol not in list(inversion_dict.keys()):
raise InputError(f"Recognized chirality symbols are 'R', 'S', 'NR', 'NS', 'E', and 'Z', got {symbol}.")
return inversion_dict[symbol]
|
e87fae6ad9169efac0b3c95f53dfb92e0c450909
| 3,646,313
|
def simplify_columns(df):
"""
Simplify column labels for use as snake_case database fields.
All columns will be re-labeled by:
* Replacing all non-alphanumeric characters with spaces.
* Forcing all letters to be lower case.
* Compacting internal whitespace to a single " ".
* Stripping leading and trailing whitespace.
* Replacing all remaining whitespace with underscores.
Args:
df (pandas.DataFrame): The DataFrame to clean.
Returns:
pandas.DataFrame: The cleaned DataFrame.
Todo:
Update docstring.
"""
df.columns = (
df.columns.
str.replace(r'[^0-9a-zA-Z]+', ' ', regex=True).
str.strip().
str.lower().
str.replace(r'\s+', ' ', regex=True).
str.replace(' ', '_')
)
return df
|
9ee85c1a9f4aa97f1e3760db8a7dccf32c288802
| 3,646,314
|
from typing import Optional
def delete(
request: HttpRequest,
wid: Optional[int] = None,
workflow: Optional[Workflow] = None,
) -> JsonResponse:
"""Delete a workflow."""
if request.method == 'POST':
# Log the event
Log.objects.register(
request.user,
Log.WORKFLOW_DELETE,
None,
{
'id': workflow.id,
'name': workflow.name})
# Nuke the logs pointing to the workflow
for litem in workflow.logs.all():
litem.workflow = None
litem.save()
# Perform the delete operation
workflow.delete()
# In this case, the form is valid anyway
return JsonResponse({'html_redirect': reverse('home')})
return JsonResponse({
'html_form': render_to_string(
'workflow/includes/partial_workflow_delete.html',
{'workflow': workflow},
request=request),
})
|
07b6de0d66a5101660f1bf4aa37abe4be71568ff
| 3,646,315
|
def at_threshold(FPR, TPR, parameter, threshold):
"""
False positive rate (FPR) and True positive rate (TPR) at the selected threshold.
:param FPR: False positive rates of given receiver operating characteristic (ROC) curve
:param TPR: True positive rate of given receiver operating characteristic (ROC) curve
:param parameter: possible thresholds
:param threshold: selected threshold
"""
index = np.argmin(np.abs(parameter - threshold))
FPR_at_threshold = FPR[index]
TPR_at_threshold = TPR[index]
return FPR_at_threshold, TPR_at_threshold
|
d66edc0e43a18a5fdf8b6d216e4130aef8a7b17b
| 3,646,316
|
def _check_kl_estimator(estimator_fn, distribution_fn, num_samples=10000,
rtol=1e-1, atol=1e-3, grad_rtol=2e-1, grad_atol=1e-1):
"""Compares the estimator_fn output and gradient to exact KL."""
rng_key = jax.random.PRNGKey(0)
def expected_kl(params):
distribution_a = distribution_fn(**params[0])
distribution_b = distribution_fn(**params[1])
return distribution_a.kl_divergence(distribution_b)
def estimate_kl(params):
distribution_a = distribution_fn(**params[0])
distribution_b = distribution_fn(**params[1])
return estimator_fn(distribution_a, distribution_b, rng_key=rng_key,
num_samples=num_samples)
params = (
dict(loc=0.0, scale=1.0),
dict(loc=0.1, scale=1.0),
)
expected_value, expected_grad = jax.value_and_grad(expected_kl)(params)
value, grad = jax.value_and_grad(estimate_kl)(params)
np.testing.assert_allclose(expected_value, value, rtol=rtol, atol=atol)
chex.assert_tree_all_close(expected_grad, grad, rtol=grad_rtol,
atol=grad_atol)
|
b4e34f35f6531f795c8621fee2082993c3b518bd
| 3,646,317
|
def relative_bias(simu, reco, relative_scaling_method='s1'):
"""
Compute the relative bias of a reconstructed variable as
`median(reco-simu)/relative_scaling(simu, reco)`
Parameters
----------
simu: `numpy.ndarray`
reco: `numpy.ndarray`
relative_scaling_method: str
see `ctaplot.ana.relative_scaling`
    Returns
    -------
    float
        The relative bias, median((reco - simu) / relative_scaling(simu, reco)).
    """
assert len(reco) == len(simu)
if len(simu) == 0:
return 0
return np.median((reco - simu) / relative_scaling(simu, reco, method=relative_scaling_method))
|
1bc611b1ea135d593bc9b8c83a02a50eeaf18a7e
| 3,646,318
|
from typing import Dict
from typing import Any
def addon_config() -> Dict[str, Any]:
"""Sample addon config."""
return {
"package-name": "djangocms-blog",
"installed-apps": [
"filer",
"easy_thumbnails",
"aldryn_apphooks_config",
"parler",
"taggit",
"taggit_autosuggest",
"meta",
"djangocms_blog",
"sortedm2m",
],
"settings": {
"META_SITE_PROTOCOL": "https",
"META_USE_SITES": True,
"MIDDLEWARE": ["django.middleware.gzip.GZipMiddleware"],
},
"urls": [["", "djangocms_blog.taggit_urls"]],
"message": "Please check documentation to complete the setup",
}
|
f4266735ef2f0809e5802abed54dfde4c1cbd708
| 3,646,319
|
def join_mutations_regions(
out_path: str, sample1_id: int, sample2_id: int, mutations_file: File, regions_file: File
) -> File:
"""
    Join mutations and regions together to compute an allele frequency.
"""
def iter_mut_points(muts):
for pos, count in muts:
yield pos, "mut", count
def iter_region_points(regions):
for start, end, depth in regions:
yield start - 0.5, "region", depth
def iter_allele_freqs(points):
denom = 0
for pos, kind, count in points:
if kind == "region":
denom = count
elif kind == "mut":
yield pos, count, denom, count / denom
points1 = iter_mut_points(read_mutations(mutations_file))
points2 = iter_region_points(read_regions(regions_file))
points = iter_merge(points1, points2)
allele_freqs = iter_allele_freqs(points)
allele_freqs_path = f"{out_path}/allele_freqs/{sample1_id}_{sample2_id}.allele_freqs"
return write_allele_freqs(allele_freqs_path, allele_freqs)
|
4d712a914e2f4c221df982fbd3352eb4f572ad11
| 3,646,320
|
def credibility_interval(post, alpha=1.):
"""Calculate bayesian credibility interval.
Parameters:
-----------
post : array_like
The posterior sample over which to calculate the bayesian credibility
interval.
    alpha : float, optional
        Width of the interval in standard deviations (e.g. alpha=1 yields the
        ~68% credibility interval).
Returns:
--------
med : float
Median of the posterior.
low : float
Lower part of the credibility interval.
up : float
Upper part of the credibility interval.
"""
z = erf(alpha/sp.sqrt(2))
lower_percentile = 100 * (1 - z) / 2
upper_percentile = 100 * (1 + z) / 2
low, med, up = sp.percentile(
post, [lower_percentile, 50, upper_percentile]
)
return med, low, up
|
b31009918324980ba2ffc53a1f29af1f4e421f95
| 3,646,321
|
def svn_ra_invoke_replay_revstart_callback(*args):
"""
svn_ra_invoke_replay_revstart_callback(svn_ra_replay_revstart_callback_t _obj, svn_revnum_t revision,
void replay_baton, svn_delta_editor_t editor,
void edit_baton, apr_hash_t rev_props,
apr_pool_t pool) -> svn_error_t
"""
return apply(_ra.svn_ra_invoke_replay_revstart_callback, args)
|
4c792a16d6dcdbb588062f1f47b3caed84bbd610
| 3,646,322
|
import click
def tree(ctx, rootpage):
"""Export metadata of a page tree."""
if not rootpage:
click.serror("No root page selected via --entity!")
return 1
outname = getattr(ctx.obj.outfile, 'name', None)
with api.context() as cf:
results = []
try:
#page = content.ConfluencePage(cf, rootpage, expand='metadata.labels,metadata.properties')
#results.append(page.json)
pagetree = cf.walk(rootpage, depth_1st=True,
expand='metadata.labels,metadata.properties,version')
for depth, data in pagetree:
data.update(dict(depth=depth))
results.append(data)
except api.ERRORS as cause:
# Just log and otherwise ignore any errors
api.diagnostics(cause)
else:
ctx.obj.log.info('Got {} results.'.format(len(results)))
if results:
print_result(ctx, results)
|
d055d8dc5fc5a3a267500362ca89b6e895d9d50f
| 3,646,323
|
import math
def to_half_life(days):
"""
    Return the decay constant [1/s] from the half-life length [days]
    """
    s = days * 3600 * 24
    return -math.log(1 / 2) / s
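A quick, hedged sanity check of the conversion, which is just the standard decay relation lambda = ln(2) / t_half expressed per second:
lam = to_half_life(1)                           # decay constant for a one-day half-life
print(lam)                                      # ~8.02e-06 per second
print(math.isclose(lam, math.log(2) / 86400))   # True, since -log(1/2) == log(2)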
|
af7724dfb9442bf1f5e931df5dd39b31d0e78091
| 3,646,324
|
import struct
def Send (dst_ip, data, sequence=0, spoof_source=False, dst_port=MDNS_PORT, src_port=MDNS_PORT, dns_name=TEST_QUERY):
"""
Send one packet of MDNS with data.
:param dst_ip: IP as string.
:param data: Data as bytes/string.
:param sequence: Number to use for sequence. Int.
:param spoof_source: Default:False. Set as IP for spoofing.
:param dst_port: ....
:param src_port: ...
:param dns_name: DNS name to put in the MDNS request.
    :return: always True!!!
"""
payload = ""
payload += "\x00" # TransID is 2 bytes. Using one for sequence.
payload += struct.pack('B', sequence)
payload += "\x00\x00" # Stndrt qry
payload += "\x00\x01" # 1 questions
payload += "\x00\x00" # 0 ans RRs
payload += "\x00\x00" # 0 authority RRs
payload += "\x00\x00" # 0 additional RRs
# Start of query:
payload += struct.pack('B', len(dns_name)) # Length? -> YES it is!
payload += dns_name # name
payload += "\x00" # Query Terminator
payload += "\x00\x0c" # PTR request
payload += "\x00\x01" # class IN
if spoof_source is False:
pkt = IP(
dst = dst_ip
# src = "1.1.1.1"
) / UDP(
sport = src_port,
dport = dst_port
) / Raw(
load = payload
)
else:
pkt = IP(
dst = dst_ip,
src = spoof_source
) / UDP(
sport = src_port,
dport = dst_port
) / Raw(
load = data
)
send(pkt)
return True
|
9541c71d52dcbaa09ffba1aa1bf4d4d422d66ed6
| 3,646,325
|
from units.models import Unit
def accreds_logs_list(request):
"""Display the list of accreds"""
main_unit = Unit.objects.get(pk=settings.ROOT_UNIT_PK)
main_unit.set_rights_can_select(lambda unit: Accreditation.static_rights_can('LIST', request.user, unit))
main_unit.set_rights_can_edit(lambda unit: Accreditation.static_rights_can('CREATE', request.user, unit))
main_unit.check_if_can_use_hidden(request.user)
if request.GET.get('upk'):
update_current_unit(request, request.GET.get('upk'))
return render(request, 'units/accreds/logs_list.html', {'main_unit': main_unit})
|
41961a3cd4f351d13ae5132cfb37e83be7050cc5
| 3,646,326
|
from typing import Counter
def build_dict(file_name, max_vocab_size):
"""
reads a list of sentences from a file and returns
- a dictionary which maps the most frequent words to indices and
- a table which maps indices to the most frequent words
"""
word_freq = Counter()
with open(file_name) as file:
for line in file:
word_freq.update(line.split())
if max_vocab_size <= 0:
max_vocab_size = len(word_freq)
words, _ = zip(*word_freq.most_common(max_vocab_size))
# ID of pad_string must be 0
words = [pad_string, unk_string] + list(words)
word2ID = {w:i for i,w in enumerate(words)}
return word2ID, words
|
ec2067e1fbf8d0f6845024ae69f8531c1f776348
| 3,646,327
|
import functools
import logging
def from_net(func):
"""
    Decorator for functions that collect data for similarity computations. It ignores the data-fetch mode
    configured in env, forces network-only data collection, and restores the previous data-fetch mode
    once the whole task is finished.
    :param func: a function that applies similarity and performs data collection
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
        # temporarily save g_data_fetch_mode from the env settings
fetch_mode = ABuEnv.g_data_fetch_mode
        # force the data-fetch mode to network-only
ABuEnv.g_data_fetch_mode = ABuEnv.EMarketDataFetchMode.E_DATA_FETCH_FORCE_NET
if fetch_mode != ABuEnv.EMarketDataFetchMode.E_DATA_FETCH_FORCE_NET:
            # warn if the original setting was not already the forced-network mode
logging.warning('data from net!!!')
result = func(*args, **kwargs)
        # restore the previous g_data_fetch_mode
ABuEnv.g_data_fetch_mode = fetch_mode
return result
return wrapper
|
68c3a9302d83cf0e02a74c104fd2b5894b85020a
| 3,646,328
|
def create_index(connection, table_name, index):
"""Create index.
Args:
connection: pyodbc.connect() object, Connection to use when running Sql
table_name: string, Table name including db schema (ex: my_schema.my_table)
index: string, Column name of index (can put multiple columns comma delimited if desired)
Returns:
        cursor object, Results of the call to pyodbc.connect().cursor().execute(query)
"""
cursor = connection.cursor()
table_split = table_name.split('.')
table = table_split[-1]
if len(table_split) > 1:
use_db = "USE {0}; ".format(table_split[0])
run_sql(connection, use_db)
if index is not None:
idx_name = table + '_idx'
sql = "SELECT name FROM sys.indexes where name = '{0}' and object_id = OBJECT_ID('{1}')".format(idx_name, table)
log.debug("SQL to run: " + sql)
try:
exists = sql_get_query_data(connection, sql)
val = exists.fetchone()[0]
if val != idx_name:
ddl2 = 'CREATE INDEX {0} ON {1}({2});'.format(idx_name, table_name, index)
try:
cursor.execute(ddl2.encode('utf-8'))
connection.commit()
except UnicodeDecodeError:
cursor.execute(ddl2)
connection.commit()
except TypeError:
log.info("Index does not exist, will attempt to create it")
ddl2 = 'CREATE INDEX {0} ON {1}({2});'.format(idx_name, table_name, index)
try:
cursor.execute(ddl2.encode('utf-8'))
connection.commit()
except UnicodeDecodeError:
cursor.execute(ddl2)
connection.commit()
return cursor
|
e52ccedda6dc097f58a2d0c826ea2e6010623c4e
| 3,646,329
|
def embedding_lookup(ids, params):
"""
Returns the embeddings lookups.
The difference of this function to TensorFlow's function is that this
function expects the ids as the first argument and the parameters as the
second; while, in TensorFlow, is the other way around.
:param ids: the ids
:type ids: tf.Tensor
:param params: the parameters
:type params: tf.Tensor
:return: the lookup
:rtype: tf.Tensor
"""
return tf.nn.embedding_lookup(params, ids)
|
ef85f95cfa5d2a426616ee9203707877ae202051
| 3,646,330
|
def in_auto_mode(conx: Connection) -> bool:
"""Determine whether the controller is in AUTO or one of the MANUAL
modes.
Wraps the Karel IN_AUTO_MODE routine.
NOTE: this method is moderately expensive, as it executes a Karel
program on the controller.
:returns: True if the controller is in AUTO mode
:rtype: bool
"""
ret = exec_karel_prg(conx, prg_name='dmh_autom')
if not ret[JSON_SUCCESS]:
raise DominhException("Select_TPE error: " + ret[JSON_REASON])
return ret['in_auto_mode']
|
c2819344130a1562fab5a9ece177f8b400a15fbc
| 3,646,331
|
def pref(pref_name, default=None):
"""Return a preference value.
Since this uses CFPreferencesCopyAppValue, Preferences can be defined
several places. Precedence is:
- MCX
- /var/root/Library/Preferences/com.github.salopensource.sal.plist
- /Library/Preferences/com.github.salopensource.sal.plist
- default_prefs defined here.
"""
default_prefs = {
'ServerURL': 'http://sal',
'osquery_launchd': 'com.facebook.osqueryd.plist',
'SkipFacts': [],
'SyncScripts': True,
'BasicAuth': True,
'GetGrains': False,
'GetOhai': False,
'LastRunWasOffline': False,
'SendOfflineReport': False,
}
pref_value = CFPreferencesCopyAppValue(pref_name, BUNDLE_ID)
if pref_value is None and default:
pref_value = default
elif pref_value is None and pref_name in default_prefs:
pref_value = default_prefs.get(pref_name)
# we're using a default value. We'll write it out to
# /Library/Preferences/<BUNDLE_ID>.plist for admin
# discoverability
set_pref(pref_name, pref_value)
if isinstance(pref_value, NSDate):
# convert NSDate/CFDates to strings
pref_value = str(pref_value)
return pref_value
|
10102f3dde316e473d5943fee059f729d6e9454c
| 3,646,332
|
def tRange(tStart, tStop, *, timedelta=300):
"""
Generate datetime list between tStart and tStop with fixed timedelta.
Parameters
----------
tStart: datetime
start time.
tStop: datetime
stop time.
Keywords
--------
timedelta: int
time delta in seconds (default: 300).
Returns
-------
tList: list
datetime between tStart and tStop with fixed timedelta.
Examples
--------
>>> import datetime as dt
    >>> tList = tRange(dt.datetime(2011, 1, 1), dt.datetime(2011, 1, 2),
    ...                timedelta=3600 * 12)
>>> tList
[datetime.datetime(2011, 1, 1, 0, 0), datetime.datetime(2011, 1, 1, 12, 0),
datetime.datetime(2011, 1, 2, 0, 0)]
History
-------
2020-02-25 First version.
"""
nTimedelta = int((tStop - tStart) / dt.timedelta(seconds=timedelta)) + 1
tList = [tStart + dt.timedelta(seconds=timedelta * i)
for i in range(0, nTimedelta)
if tStart + dt.timedelta(seconds=timedelta * i) <= tStop]
return tList
|
4dec7a624bcd2b349d361831993b8108e99725a8
| 3,646,333
|
import numpy
def TransformInversePoints(T,points):
"""Transforms a Nxk array of points by the inverse of an affine matrix"""
kminus = T.shape[1]-1
return numpy.dot(points-numpy.tile(T[0:kminus,kminus],(len(points),1)),T[0:kminus,0:kminus])
|
7e04a741c6ad0ec08e40ab393a703a1878ef784a
| 3,646,334
|
def act_func(act):
"""function that can choose activation function
Args:
act: (str) activation function name
Returns:
corresponding Pytorch activation function
"""
return nn.ModuleDict([
['relu', nn.ReLU(inplace=True)],
['leaky_relu', nn.LeakyReLU(negative_slope=0.01, inplace=True)],
['selu', nn.SELU(inplace=True)]
])[act]
|
ffd0e6f2ec3ea419c4c3fbb618e4734d59420826
| 3,646,335
|
def ajax_get_hashtags():
"""Flask Ajax Get Hashtag Route."""
f = request.args.get('f', 0, type=int)
t = request.args.get('t', 0, type=int)
hashtags_list = get_hashtags()
try:
if t == 0:
return jsonify(dict(hashtags_list[f:]))
elif t > len(hashtags_list):
return jsonify(dict(hashtags_list[f:]))
else:
return jsonify(dict(hashtags_list[f:t]))
except:
return False
|
3c9249a5fefb93d422c6e2c4be56394711bf1d7a
| 3,646,336
|
def extract_pdf_information(pdf_path):
""" Print and return pdf information
"""
# read binary
with open(pdf_path, 'rb') as f:
pdf = PdfFileReader(f)
information = pdf.getDocumentInfo()
number_of_pages = pdf.getNumPages()
txt = f"""
Information about {pdf_path}:
Author: {information.author}
Creator: {information.creator}
Producer: {information.producer}
Subject: {information.subject}
Title: {information.title}
Number of pages: {number_of_pages}
"""
print(txt)
return information
|
bec3667aba872f8e7bf53da09a9fb1905bcf5eec
| 3,646,337
|
def normalize_string(subject: str) -> str:
"""Deprecated function alias"""
    logger.warning("normalize_string is deprecated")
return string_to_title(subject)
|
6531a6e7211c61d8439bfa8ddc0e609c35b8b6f3
| 3,646,338
|
import inspect
def get_default_args(func):
"""
Return dict for parameter name and default value.
Parameters
----------
func : Callable
A function to get parameter name and default value.
Returns
-------
Dict
Parameter name and default value.
Examples
--------
>>> def test_func(a: int, b: str = "c") -> int:
... return a+1
>>> get_default_args(test_func)
{'b': 'c'}
>>> def test_func2(a: int = 1, b="c") -> int:
... return a+1
>>> get_default_args(test_func2)
{'a': 1, 'b': 'c'}
"""
signature = inspect.signature(func)
return {
k: v.default
for k, v in signature.parameters.items()
if v.default is not inspect.Parameter.empty
}
|
dcc75dceae1385868866d668aa021584547190df
| 3,646,339
|
def sec_to_time(seconds):
"""Transform seconds into a formatted time string.
Parameters
-----------
seconds : int
Seconds to be transformed.
Returns
-----------
time : string
A well formatted time string.
"""
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return "%02d:%02d:%02d" % (h, m, s)
|
59fcfe2f53d11ea7daac736b59b5eaeb72172dba
| 3,646,340
|
def power_oos(dmap_object, Y):
"""
Performs out-of-sample extension to calculate the values of the diffusion coordinates at each given point using the power-like method.
Parameters
----------
dmap_object : DiffusionMap object
Diffusion map upon which to perform the out-of-sample extension.
Y : array-like, shape (n_query, n_features)
Data for which to perform the out-of-sample extension.
Returns
-------
phi : numpy array, shape (n_query, n_eigenvectors)
Transformed value of the given values.
"""
m = int(Y.shape[0])
k_yx, y_bandwidths = dmap_object.local_kernel.compute(Y, return_bandwidths=True) # Evaluate on ref points
yy_right_norm_vec = dmap_object._make_right_norm_vec(k_yx, y_bandwidths)[1]
k_yy_diag = dmap_object.local_kernel.kernel_fxn(0, dmap_object.epsilon_fitted)
data_full = np.vstack([dmap_object.local_kernel.data, Y])
k_full = sps.hstack([k_yx, sps.eye(m) * k_yy_diag])
right_norm_full = np.hstack([dmap_object.right_norm_vec, yy_right_norm_vec])
weights = dmap_object._compute_weights(data_full)
P = dmap_object._left_normalize(dmap_object._right_normalize(k_full, right_norm_full, weights))
L = dmap_object._build_generator(P, dmap_object.epsilon_fitted, y_bandwidths)
L_yx = L[:, :-m]
L_yy = np.array(L[:, -m:].diagonal())
adj_evals = dmap_object.evals - L_yy.reshape(-1, 1)
dot_part = np.array(L_yx.dot(dmap_object.dmap))
return (1. / adj_evals) * dot_part
|
4de7d75324cd05a7d1ada0e8f6e724ecd551930c
| 3,646,341
|
def detect_face_landmarks(image, face_rect=None):
"""
detect face landmarks,
if face_rect is None, the face_rect is the same size as image
-> object
:param image:
:param face_rect: where the face is
"""
    if face_rect is None:
        # dlib.rectangle takes (left, top, right, bottom); image.shape is (rows, cols),
        # so the full-image rectangle is (0, 0, width, height).
        face_rect = dlib.rectangle(0, 0, image.shape[1], image.shape[0])
return _detect_face_landmarks(image, face_rect)
|
70c299ae2ce98409e2359e11fa9def0d35e7554f
| 3,646,342
|
from typing import Iterable
from typing import Mapping
def ensure_iterable(obj):
"""Ensure ``obj`` is either a sequential iterable object that is not a
string type.
1. If ``obj`` is :const:`None` return an empty :class:`tuple`.
2. If ``obj`` is an instance of :class:`str`, :class:`bytes`, or :class:`Mapping`,
or not :class:`Iterable` return a list containing ``obj``
3. Return ``obj``
Parameters
----------
obj : object
The object to ensure iterability of
Returns
-------
:class:`Sequence`
        Returns either ``obj`` or a wrapper around ``obj``
"""
if obj is None:
return tuple()
if not isinstance(obj, Iterable) or isinstance(obj, basestring) or isinstance(obj, Mapping):
return [obj]
return obj
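Illustrative calls, shown under the assumption of a Python 3 environment where the Python 2 name `basestring` has been aliased to `str` (the isinstance check above is written in Python 2 style):
basestring = str  # Python 3 shim for the Python 2-style check above

print(ensure_iterable(None))        # ()
print(ensure_iterable("abc"))       # ['abc']    strings are wrapped, not iterated
print(ensure_iterable({"a": 1}))    # [{'a': 1}] mappings are wrapped as well
print(ensure_iterable([1, 2, 3]))   # [1, 2, 3]  already a sequence, returned unchanged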
|
56c2db3d87c5927b1f2dbb51b64e7be73956d2b8
| 3,646,343
|
def test_dist(**kwargs):
"""
Test Distance
"""
a = np.random.random((2, 3))
d = ahrs.utils.metrics.euclidean(a[0], a[1])
result = np.allclose(d, np.linalg.norm(a[0] - a[1]))
return result
|
46a9343fda3445fe0f07bfbb41fc321e6572e4a7
| 3,646,344
|
def get_pca(coords):
"""
Parameters
-----------
coords : 2D np.array of points
Returns
---------
new_coords : 2D np.array of points
keeps original number of dimension as input coords
variance_ratio : tuple
"""
pca = PCA(n_components=3)
# pca.fit(coords)
# new_coords = pca.transform(coords)
new_coords = pca.fit_transform(coords)
return new_coords, pca.explained_variance_ratio_
|
a0bce6a7c4b50139502cbdedc6f0f456f21d26b6
| 3,646,345
|
def get_form_info(email):
"""Gets all existing application form info from the database."""
user_id = get_user_id(email)
if not user_id:
return (False, "Invalid user ID. Please contact the organizers.")
query = """
SELECT * FROM applications WHERE user_id = %s AND application_year = %s
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, [user_id, app_year.year + "0000"])
application = cursor.fetchone()
query = """
SELECT * FROM members WHERE user_id = %s
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, [user_id])
member = cursor.fetchone()
query = """
SELECT * FROM diet WHERE user_id = %s
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, [user_id])
diet = cursor.fetchall()
query = """
SELECT * FROM race WHERE user_id = %s
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, [user_id])
race = cursor.fetchall()
validationForm = ValidationForm()
validationForm.fill(application, member)
return (FormInfo(application, member, diet, race), validationForm)
|
f612b83aeb63ff138cc637dc04446ce59f6ecc6b
| 3,646,346
|
import logging
def getLog():
"""simple wrapper around basic logger"""
return logging
|
b51942d2ed02f9ea7faf0a626715ec07e1677c88
| 3,646,347
|
from datetime import datetime
def _date(defval, t):
"""
    Supported formats:
        Unix timestamp
        date string in yyyy-mm-dd format
        date string in yyyy/mm/dd format
        date string in yyyymmdd format
    If any of the year/month/day parts is 0, it will be converted to 1
"""
if t is None:
return defval
if isinstance(t, (int, float)):
return datetime.fromtimestamp(t).strftime('%Y-%m-%d %H:%M:%S')
lt = len(t)
if lt < 8:
return defval
if lt == 8:
format_str = '%Y%m%d'
else:
t = t.replace('/', '-')
format_str = '%Y-%m-%d %H:%M:%S'
if lt > 19:
format_str += '.%f'
try:
return str(datetime.strptime(t, format_str))
except:
return defval
|
e8a1121da89d9dc46bdce5d1b8c70ec973909abb
| 3,646,348
|
import math
def compute_lat_long_distance(point1, point2):
"""
Compute the distance between two records that have fields 'lat' and 'lon'.
See details and reference implementation at http://andrew.hedges.name/experiments/haversine/
:param point1: a record with { 'lat', 'lon' }
:param point2: a record with { 'lat', 'lon' }
:return:
"""
lat1 = degree_to_rad(point1['lat'])
lat2 = degree_to_rad(point2['lat'])
lon1 = degree_to_rad(point1['lon'])
lon2 = degree_to_rad(point2['lon'])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = math.sin(dlat / 2) * math.sin(dlat / 2) + \
math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) * math.sin(dlon / 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
earth_radius = 3961 # Use 6373 for km
d = earth_radius * c # In miles
return round(d, 3)
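The helper `degree_to_rad` is not part of this snippet; assuming it is equivalent to `math.radians`, the function can be exercised as below (coordinates are illustrative; the result is the great-circle distance in miles):
def degree_to_rad(deg):
    # assumed helper: plain degree-to-radian conversion
    return math.radians(deg)

empire_state = {'lat': 40.7486, 'lon': -73.9864}
statue_of_liberty = {'lat': 40.6892, 'lon': -74.0445}
print(compute_lat_long_distance(empire_state, statue_of_liberty))  # roughly 5 miles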
|
8058df18106636a0bc6c1f7471f912e07e61ae21
| 3,646,349
|
import logging
from math import ceil
def entropy_analysis(data_df):
"""
Masked Shannon entropy analysis for sequences
Parameters
----------
data_df: pandas.DataFrame
merged Pandas dataframe
Returns
-------
H_list: list
entropy values for all positions
null_freq_list: list
masked percentage for all positions
"""
seq_list = data_df['sequence'].values.tolist()
base_set = set([])
for seq in seq_list:
base_set.update(set(seq))
H_list = []
null_freq_list = []
STEP = ceil(len(seq_list[0]) / 10)
for base_idx in range(len(seq_list[0])):
if base_idx % STEP == 0:
logging.info('Entropy analysis in progress: {}% completed.'.format(10 * base_idx // STEP))
H, null_freq = base_entropy_masked(seq_list, base_set, base_idx)
        H_list.append(H)
null_freq_list.append(null_freq)
logging.info('Entropy analysis in progress: DONE.')
return H_list, null_freq_list
|
8b1d887f2c39b39a833c13780864bd47d0d8d648
| 3,646,350
|
import re
def get_requirements(filename):
"""
Helper function to read the list of requirements from a file
"""
dependency_links = []
with open(filename) as requirements_file:
requirements = requirements_file.read().strip('\n').splitlines()
requirements = [req for req in requirements if not req.startswith('#')]
for i, req in enumerate(requirements):
if ':' in req:
match_obj = re.match(r"git\+(?:https|ssh|http):.*#egg=(.*)-(.*)", req)
assert match_obj, "Cannot make sense of url {}".format(req)
requirements[i] = "{req}=={ver}".format(req=match_obj.group(1), ver=match_obj.group(2))
dependency_links.append(req)
return requirements, dependency_links
|
292d45ab8e7f8523734326869bb1dd05c6f395f1
| 3,646,351
|
def nigam_and_jennings_response(acc, dt, periods, xi):
"""
Implementation of the response spectrum calculation from Nigam and Jennings (1968).
Ref: Nigam, N. C., Jennings, P. C. (1968) Digital calculation of response spectra from strong-motion earthquake
records. National Science Foundation.
:param acc: acceleration in m/s2
:param periods: response periods of interest
:param dt: time step of the acceleration time series
:param xi: critical damping factor
:return: response displacement, response velocity, response acceleration
"""
acc = -np.array(acc, dtype=np.float)
periods = np.array(periods, dtype=np.float)
if periods[0] == 0:
s = 1
else:
s = 0
w = 6.2831853 / periods[s:]
dt = np.float(dt)
xi = np.float(xi)
# implement: delta_t should be less than period / 20
a, b = compute_a_and_b(xi, w, dt)
resp_u = np.zeros([len(periods), len(acc)], dtype=np.float)
resp_v = np.zeros([len(periods), len(acc)], dtype=np.float)
for i in range(len(acc) - 1): # possibly speed up using scipy.signal.lfilter
# x_i+1 = A cross (u, v) + B cross (acc_i, acc_i+1) # Eq 2.7a
resp_u[s:, i + 1] = (a[0][0] * resp_u[s:, i] + a[0][1] * resp_v[s:, i] + b[0][0] * acc[i] + b[0][1] * acc[i + 1])
resp_v[s:, i + 1] = (a[1][0] * resp_u[s:, i] + a[1][1] * resp_v[s:, i] + b[1][0] * acc[i] + b[1][1] * acc[i + 1])
w2 = w ** 2
if s:
sdof_acc = np.zeros_like(resp_u, dtype=np.float)
sdof_acc[s:] = -2 * xi * w[:, np.newaxis] * resp_v[s:] - w2[:, np.newaxis] * resp_u[s:]
sdof_acc[0] = acc
else:
sdof_acc = -2 * xi * w[:, np.newaxis] * resp_v[s:] - w2[:, np.newaxis] * resp_u[s:]
return resp_u, resp_v, sdof_acc
|
4e9853b660d85d12701bafe9e328bc91499df73a
| 3,646,352
|
def binary_hamiltonian(op, nqubits, qubits1, qubits2, weights, device=None):
"""Generates tt-tensor classical Ising model Hamiltonian (two-qubit interaction terms in a single basis).
Hamiltonian of the form:
H = sum_i omega_i sigma_ind1(i) sigma_ind2(i)
    where omega_i are the Hamiltonian weights, sigma is the operator specified by op, and ind1(i), ind2(i) are
    the qubit indices given by the i-th entries of qubits1 and qubits2.
Parameters
----------
op : tt-tensor, single-qubit operator to encode MaxCut graph
nqubits : int, number of qubits (vertices) to encode in MaxCut problem
qubits1 : List/tensor of ints, qubit indices
qubits2 : List/tensor of ints, qubit indices
weights : List/tensor of real floats, graph weights
Returns
-------
Hamiltonian encoding specified classical Ising model graph.
"""
H, inds_min, inds_max = [], minimum(qubits1, qubits2), maximum(qubits1, qubits2)
for i in range(0, len(qubits1)):
#H = tt_matrix_sum(H, _two_qubit_interaction(op, op, inds_min[i], inds_max[i], weights[i], nqubits))
H = tt_matrix_sum(H, _two_qubit_interaction(op, op, inds_min[i], inds_max[i], weights[i], nqubits), device=device)
return [*H]
|
50360d50123c44719a8875a59d02e913cd95f2ad
| 3,646,353
|
def map_entry(entry, fields):
"""
Retrieve the entry from the given fields and replace it if it should
have a different name within the database.
:param entry: is one of the followings:
- invalid field name
- command (i.g. $eq)
- valid field with no attribute name
- valid field with an attribute name to use instead
"""
field = fields.get(entry)
if isinstance(field, ListField) and isinstance(field.inner, EmbeddedField):
fields = field.inner.embedded_document_cls.schema.fields
elif isinstance(field, EmbeddedField):
fields = field.embedded_document_cls.schema.fields
return getattr(field, 'attribute', None) or entry, fields
|
05d392f3ab387381b0f114db05834d642350d817
| 3,646,354
|
def seqlogo_hairpin(N, target='none', ligand='theo', pam=None):
"""
Randomize the stem linking the aptamer to the sgRNA and the parts of the
sgRNA that were the most conserved after being randomized in previous
screens. Specifically, I identified these conserved positions by looking
at a sequence logo of the relatively few (≈20) clones I sequenced from my
previous screen. The theory behind this strategy is that positions with
a clear preference for some nucleotides over others are more likely to be
important for sensor activity.
In this case, the previous screen was ``mhf`` and the sequence logo showed
that all three positions in the ruler that were randomized had a preference
for a non-native nucleotide. (In fact, the preference was for C in all
three cases.) The ``mhf`` screen kept two positions in the ruler fixed,
but since these positions were flanked by important-seeming positions on
both sides, I decided to randomize the whole ruler this time.
I am also randomizing the stem (often called a communication module) that
connects the aptamer to the sgRNA. The ``N`` parameter dictates how long
this stem should be, in base pairs, not counting any base pairs that are
implicitly included with the aptamer. (Note: I realized that including one
base pair on the end of the aptamer domain makes simulating the whole
construct easier, so all the new aptamers include one base pair like that.
But the theophylline aptamer predates this realization, so it doesn't.)
Parameters
----------
N: int
The length of the communication module, in base pairs. Recommended
values are 3 and 4.
"""
# Make sure the length of the communication module makes sense.
if N < 0:
raise ValueError('qh: N must be >= 0')
# Base this library on the optimized sgRNA described by Dang et al.
sgrna = on(pam=pam, target=target)
# Randomize the entire ruler.
sgrna['ruler'].seq = 'GU' + 'N' * (len(sgrna['ruler']) - 2)
# Randomize the communication module.
sgrna['hairpin/5'].seq = N * 'N'
sgrna['hairpin/3'].seq = N * 'N'
# Insert the aptamer above the communication module.
sgrna['hairpin/o'].attachment_sites = 0,4
sgrna.attach(aptamer(ligand), 'hairpin/o', 0, 'hairpin/o', 4)
return sgrna
|
a6be46325d80a23e5afa820b042fa4c878370e45
| 3,646,355
|
from typing import Dict
from typing import Any
def azure_firewall_ip_group_list_command(client: AzureFirewallClient, args: Dict[str, Any]) -> CommandResults:
"""
List IP groups in resource group or subscription.
Args:
client (AzureFirewallClient): Azure Firewall API client.
args (dict): Command arguments from XSOAR.
Returns:
CommandResults: outputs, readable outputs and raw response for XSOAR.
"""
resource = args.get('resource')
limit = arg_to_number(args.get('limit') or '50')
page = arg_to_number(args.get('page') or '1')
validate_pagination_arguments(limit, page)
readable_message = get_pagination_readable_message(header='IP Group List:',
limit=limit, page=page)
start_offset = (page - 1) * limit
end_offset = start_offset + limit
complete_requests = False
total_response = {'value': []}
response = client.azure_firewall_ip_group_list_request(resource=resource)
while not complete_requests:
total_response['value'].extend(response.get('value'))
if len(total_response['value']) >= end_offset or not response.get('nextLink'):
complete_requests = True
else:
response = client.azure_firewall_ip_group_list_request(resource=resource,
next_link=response.get('nextLink'))
return generate_ip_group_command_output(total_response.get('value')[start_offset: end_offset],
readable_header=readable_message)
|
c52108af9903f952adf316b11098f726d7280153
| 3,646,356
|
def plot_heatmap(df, title=""):
"""
Plotly heatmap wrapper
:param df: pd.DataFrame
:param title: str
"""
fig = go.Figure(
data=go.Heatmap(z=df.values, x=df.columns, y=df.index, colorscale="RdBu")
)
fig.update_layout(template=_TEMPLATE, title=title, legend_orientation="h")
return fig
|
46c3d362bdbe742b54ad09a56f4638ef1497bcc2
| 3,646,357
|
import os
def normalise_dir_pattern(repo_dir, d):
"""
if d is a relative path, prepend the repo_dir to it
"""
if not d.startswith(repo_dir):
return os.path.join(repo_dir, d)
else:
return d
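Two illustrative calls (added; the behaviour follows directly from the prefix check):
print(normalise_dir_pattern('/srv/repo', 'mirrors/debian'))     # '/srv/repo/mirrors/debian'
print(normalise_dir_pattern('/srv/repo', '/srv/repo/mirrors'))  # '/srv/repo/mirrors' (already prefixed, unchanged)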
|
ea240eda35f7c85e652f78f5e80eb3ac16ce4e98
| 3,646,358
|
import tempfile
import os
import subprocess
import shutil
def patch(diff, orig_file, filename, request=None):
"""Apply a diff to a file.
    This delegates out to ``patch`` because no one except Larry Wall knows how
to patch.
Args:
diff (bytes):
The contents of the diff to apply.
orig_file (bytes):
The contents of the original file.
filename (unicode):
The name of the file being patched.
request (django.http.HttpRequest, optional):
The HTTP request, for use in logging.
Returns:
bytes:
The contents of the patched file.
Raises:
reviewboard.diffutils.errors.PatchError:
An error occurred when trying to apply the patch.
"""
log_timer = log_timed('Patching file %s' % filename, request=request)
if not diff.strip():
# Someone uploaded an unchanged file. Return the one we're patching.
return orig_file
# Prepare the temporary directory if none is available
tempdir = tempfile.mkdtemp(prefix='reviewboard.')
try:
orig_file = convert_line_endings(orig_file)
diff = convert_line_endings(diff)
(fd, oldfile) = tempfile.mkstemp(dir=tempdir)
f = os.fdopen(fd, 'w+b')
f.write(orig_file)
f.close()
newfile = '%s-new' % oldfile
process = subprocess.Popen(['patch', '-o', newfile, oldfile],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=tempdir)
with controlled_subprocess('patch', process) as p:
stdout, stderr = p.communicate(diff)
failure = p.returncode
try:
with open(newfile, 'rb') as f:
new_file = f.read()
except Exception:
new_file = None
if failure:
rejects_file = '%s.rej' % newfile
try:
with open(rejects_file, 'rb') as f:
rejects = f.read()
except Exception:
rejects = None
error_output = force_text(stderr.strip() or stdout.strip())
# Munge the output to show the filename instead of
# randomly-generated tempdir locations.
base_filename = os.path.basename(filename)
error_output = (
error_output
.replace(rejects_file, '%s.rej' % base_filename)
.replace(oldfile, base_filename)
)
raise PatchError(filename=filename,
error_output=error_output,
orig_file=orig_file,
new_file=new_file,
diff=diff,
rejects=rejects)
return new_file
finally:
shutil.rmtree(tempdir)
log_timer.done()
|
2d2481fec0151c74ad8c7993fb21bfeef60ee30b
| 3,646,359
|
def shift_transactions_forward(index, tindex, file, pos, opos):
"""Copy transactions forward in the data file
This might be done as part of a recovery effort
"""
# Cache a bunch of methods
seek=file.seek
read=file.read
write=file.write
index_get=index.get
# Initialize,
pv=z64
p1=opos
p2=pos
offset=p2-p1
# Copy the data in two stages. In the packing stage,
# we skip records that are non-current or that are for
# unreferenced objects. We also skip undone transactions.
#
# After the packing stage, we copy everything but undone
# transactions, however, we have to update various back pointers.
# We have to have the storage lock in the second phase to keep
# data from being changed while we're copying.
pnv=None
while 1:
# Read the transaction record
seek(pos)
h=read(TRANS_HDR_LEN)
if len(h) < TRANS_HDR_LEN: break
tid, stl, status, ul, dl, el = unpack(TRANS_HDR,h)
status = as_text(status)
if status=='c': break # Oops. we found a checkpoint flag.
tl=u64(stl)
tpos=pos
tend=tpos+tl
otpos=opos # start pos of output trans
thl=ul+dl+el
h2=read(thl)
if len(h2) != thl:
raise PackError(opos)
# write out the transaction record
seek(opos)
write(h)
write(h2)
thl=TRANS_HDR_LEN+thl
pos=tpos+thl
opos=otpos+thl
while pos < tend:
# Read the data records for this transaction
seek(pos)
h=read(DATA_HDR_LEN)
oid,serial,sprev,stloc,vlen,splen = unpack(DATA_HDR, h)
assert not vlen
plen=u64(splen)
dlen=DATA_HDR_LEN+(plen or 8)
tindex[oid]=opos
if plen: p=read(plen)
else:
p=read(8)
p=u64(p)
if p >= p2: p=p-offset
elif p >= p1:
# Ick, we're in trouble. Let's bail
# to the index and hope for the best
p=index_get(oid, 0)
p=p64(p)
# WRITE
seek(opos)
sprev=p64(index_get(oid, 0))
write(pack(DATA_HDR,
oid, serial, sprev, p64(otpos), 0, splen))
write(p)
opos=opos+dlen
pos=pos+dlen
# skip the (intentionally redundant) transaction length
pos=pos+8
if status != 'u':
index.update(tindex) # Record the position
tindex.clear()
write(stl)
opos=opos+8
return opos
|
c19009c15a04b4a55389b584fad1744ebde03187
| 3,646,360
|
def draw_disturbances(seed, shocks_cov, num_periods, num_draws):
"""Creates desired number of draws of a multivariate standard normal distribution."""
# Set seed
np.random.seed(seed)
# Input parameters of the distribution
mean = [0, 0, 0]
shocks_cov_matrix = np.zeros((3, 3), float)
np.fill_diagonal(shocks_cov_matrix, shocks_cov)
# Create draws from the standard normal distribution
draws = np.random.multivariate_normal(
mean, shocks_cov_matrix, (num_periods, num_draws)
)
# Return function output
return draws
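A hedged usage note: with a length-3 `shocks_cov`, the draws come back with shape (num_periods, num_draws, 3), one three-dimensional shock vector per period and draw. The numpy import below is the one the snippet itself assumes.
import numpy as np

draws = draw_disturbances(seed=42, shocks_cov=[1.0, 0.5, 0.25], num_periods=40, num_draws=200)
print(draws.shape)   # (40, 200, 3)
print(draws.mean())  # close to 0, since the mean vector is [0, 0, 0]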
|
d467a1d5fde3eb32debca2711597ef24dc117aaa
| 3,646,361
|
import argparse
import datetime
import asyncio
def main():
"""Entrypoint function."""
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--username',
help='Hydro Quebec username')
parser.add_argument('-p', '--password',
help='Password')
parser.add_argument('-j', '--json', action='store_true',
default=False, help='Json output')
parser.add_argument('-i', '--influxdb', action='store_true',
default=False, help='InfluxDb output')
parser.add_argument('-c', '--contract',
default=None, help='Contract number')
parser.add_argument('-l', '--list-contracts', action='store_true',
default=False, help='List all your contracts')
parser.add_argument('-H', '--hourly', action='store_true',
default=False, help='Show yesterday hourly consumption')
parser.add_argument('-t', '--timeout',
default=REQUESTS_TIMEOUT, help='Request timeout')
parser.add_argument('-V', '--version', action='store_true',
default=False, help='Show version')
raw_group = parser.add_argument_group('Detailled-energy raw download option')
raw_group.add_argument('--detailled-energy', action='store_true',
default=False, help='Get raw json output download')
raw_group.add_argument('--start-date',
default=(datetime.datetime.now(HQ_TIMEZONE) -
datetime.timedelta(days=1)).strftime("%Y-%m-%d"),
help='Start date for detailled-output')
raw_group.add_argument('--end-date',
default=datetime.datetime.now(HQ_TIMEZONE).strftime("%Y-%m-%d"),
help="End date for detailled-output")
args = parser.parse_args()
if args.version:
print(VERSION)
return 0
if not args.username or not args.password:
parser.print_usage()
print("pyhydroquebec: error: the following arguments are required: "
"-u/--username, -p/--password")
return 3
client = HydroQuebecClient(args.username, args.password, args.timeout)
loop = asyncio.get_event_loop()
if args.detailled_energy is False:
async_func = client.fetch_data()
else:
start_date = datetime.datetime.strptime(args.start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(args.end_date, '%Y-%m-%d')
async_func = client.fetch_data_detailled_energy_use(start_date,
end_date)
try:
fut = asyncio.wait([async_func])
loop.run_until_complete(fut)
except BaseException as exp:
print(exp)
return 1
finally:
close_fut = asyncio.wait([client.close_session()])
loop.run_until_complete(close_fut)
if not client.get_data():
return 2
if args.list_contracts:
print("Contracts: {}".format(", ".join(client.get_contracts())))
elif args.influxdb:
output_influx(client.get_data(args.contract))
elif args.json or args.detailled_energy:
output_json(client.get_data(args.contract))
else:
output_text(args.username, client.get_data(args.contract), args.hourly)
return 0
|
9a50f0944688236cd8786896b9c41c012ef3dca9
| 3,646,362
|
def wheel(pos):
"""Generate rainbow colors across 0-255 positions."""
if pos>1280:
pos = 1280
if pos <= 255:
r = 255-pos
g = 0
b = 255
else:
pos = pos-256
if pos <= 255:
r = 0
g = pos
b = 255
else:
pos = pos-256
if pos <= 255:
r = 0
g = 255
b = 255-pos
else:
pos = pos-256
if pos <= 255:
r = pos
g = 255
b = 0
else:
pos = pos-256
if pos <= 255:
r = 255
g = 255-pos
b = 0
return (r, g, b)
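A few sample positions along the 0-1279 ramp, added for illustration (the tuples follow the branches above):
print(wheel(0))     # (255, 0, 255)  start of the ramp
print(wheel(255))   # (0, 0, 255)
print(wheel(640))   # (0, 255, 127)
print(wheel(1279))  # (255, 0, 0)    end of the ramp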
|
765df4262ce3b04fb8b06f9256ca51670e2f5bfb
| 3,646,363
|
def get_physical_connectivity(port):
"""Get local_link_information from specified port.
@param port a port object
@return lli a list of following dict
{"switch_id": "MAC_of_switch", "port_id": "1/1/0/1",
"switch_info": "switch_name"}
"""
# TODO(yushiro) replace following characters to constant value
binding_profile = port['binding:profile']
lli = binding_profile.get("local_link_information", {})
is_all_specified = True if lli else False
for i in lli:
if not (i.get('switch_id') and i.get('port_id') and
i.get('switch_info')):
is_all_specified = False
if is_all_specified:
return lli
LOG.error(_LE("Some physical network param is missing:%s"), lli)
raise ml2_exc.MechanismDriverError(method="get_physical_connectivity")
|
cddd8c1191d9e73c55bdd5eecb6086773c3d1a86
| 3,646,364
|
def optimize_profile(diff_matrix, x_points, dc_init, exp_norm_profiles,
display_result=True, labels=None):
"""
Fit the diffusion matrix
Parameters
----------
diff_matrix : tuple
tuple of (eigenvalues, eigenvectors) in reduced basis (dim n-1)
x_points : 1-D array_like
spatial coordinates
dc_init : array
concentration difference between endmembers
exp_norm_profiles : list of arrays
profiles to be fitted, of length the nb of experiments, with n
profiles for each experiment. Profiles are normalized, that is, an
estimation of the estimated mean concentration should be substracted.
"""
n_comp = len(dc_init[0]) - 1
n_exp = len(x_points)
def cost_function(coeffs, x_points, dc_init, exp_norm_profiles):
n_comp = len(dc_init[0]) - 1
diag = coeffs[:n_comp]
n_exp = len(x_points)
P = np.matrix(coeffs[n_comp: n_comp + n_comp**2].reshape((n_comp,
n_comp)))
adjust_cmeans = coeffs[n_comp + n_comp**2:
n_comp + n_comp**2 +
(n_comp) * n_exp].reshape((n_exp, n_comp))
adjust_dc = coeffs[n_comp + n_comp**2 + (n_comp) * n_exp:
n_comp + n_comp**2 +
2 * (n_comp) * n_exp].reshape((n_exp, n_comp))
errors = np.array([])
for i in range(n_exp):
dc_corr = np.copy(dc_init[i])
dc_corr[:-1] -= adjust_dc[i]
profile_corr = np.copy(exp_norm_profiles[i])
profile_corr[:-1, :] -= adjust_cmeans[i][:, None]
error = evolve_profile((diag, P), x_points[i], dc_corr, profile_corr, plot=False)
errors = np.concatenate((errors, error))
return errors
diag, P = diff_matrix
coeffs = np.concatenate((diag, np.array(P).ravel(),
np.zeros(2 * n_exp * n_comp)))
res = optimize.leastsq(cost_function, coeffs,
args=(x_points, dc_init, exp_norm_profiles),
full_output=True, factor=10)[0]
diags, eigvecs, shifts = res[:n_comp], \
res[n_comp: n_comp + n_comp**2].reshape((n_comp, n_comp)), \
res[n_comp + n_comp**2:].reshape((2, n_exp, n_comp))
if display_result:
for i in range(n_exp):
dc_corr = np.copy(dc_init[i])
dc_corr[:-1] -= shifts[1, i]
prof_corr = np.copy(exp_norm_profiles[i])
prof_corr[:-1] -= shifts[0, i][:, None]
_ = evolve_profile((diags, eigvecs), x_points[i], dc_corr,
exp_norm_profiles=prof_corr, labels=labels)
return diags, eigvecs, shifts
|
f2550f6fe4cb267559676d30ef0156ce528178cf
| 3,646,365
|
def getargsfromdoc(obj):
"""Get arguments from object doc"""
if obj.__doc__ is not None:
return getargsfromtext(obj.__doc__, obj.__name__)
|
d49510388be36a60259683f4560b1d01fe9f9bf6
| 3,646,366
|
def nms(dets, thresh):
"""Dispatch to either CPU or GPU NMS implementations.\
Accept dets as tensor"""
return pth_nms(dets, thresh)
|
e6dbe7b44e1975c080e58d02d6e07ef22b2d3711
| 3,646,367
|
import subprocess
def get_disk_usage():
"""
Handle determining disk usage on this VM
"""
disk = {}
# Get the amount of general disk space used
cmd_out = subprocess.getstatusoutput('df -h | grep "/dev/xvda1"')[1]
cmd_parts = cmd_out.split()
disk["gen_disk_used"] = cmd_parts[2]
disk["gen_disk_total"] = cmd_parts[3]
disk["gen_disk_percent"] = cmd_parts[4]
# Get the amount of Docker disk space used
cmd_out = subprocess.getstatusoutput('df -h | grep "tmpfs"')[1]
cmd_parts = cmd_out.split()
disk["docker_disk_used"] = cmd_parts[2]
disk["docker_disk_total"] = cmd_parts[3]
disk["docker_disk_percent"] = cmd_parts[4]
return disk
|
e4f65e1c652a74086c111a3c80d5f6c9db94a66e
| 3,646,368
|
import argparse
def generate_arg_parser():
"""
    Parse command-line arguments (database path and trained-model path) for the webapp.
    :return: a tuple of (parsed arguments, the argparse parser)
"""
project_path = get_project_path()
# load data
default_db_path = "".join([project_path, "/data/DisasterResponseDataBase.db"])
default_model_path = "".join([str(project_path), "/models/dr_trained_model.lzma"])
parser = argparse.ArgumentParser(
description="Load data from database, load model, and run the webapp."
)
parser.add_argument(
"--db_file",
action="store",
dest="db_file",
type=str,
default=default_db_path,
help="Path to disaster response database",
)
parser.add_argument(
"--model_file",
action="store",
dest="model_file",
type=str,
default=default_model_path,
help="path to store trained machine leaning model.",
)
return parser.parse_args(), parser
|
c8e654da2edcd241e5d9e2c2414909ffd0e40f0c
| 3,646,369
|
def QFont_from_Font(font):
""" Convert the given Enaml Font into a QFont.
Parameters
----------
font : Font
The Enaml Font object.
Returns
-------
result : QFont
The QFont instance for the given Enaml font.
"""
qfont = QFont(font.family, font.pointsize, font.weight)
qfont.setStyle(FONT_STYLES[font.style])
qfont.setCapitalization(FONT_CAPS[font.caps])
qfont.setStretch(FONT_STRETCH[font.stretch])
return qfont
|
bb62daf4d46315a7a55135894dc78e1d2898fee2
| 3,646,370
|
from collections import OrderedDict
def _find_in_iterable_case_insensitive(iterable, name):
"""
Return the value matching ``name``, case insensitive, from an iterable.
"""
iterable = list(OrderedDict.fromkeys([k for k in iterable]))
iterupper = [k.upper() for k in iterable]
try:
match = iterable[iterupper.index(name.upper())]
except (ValueError, AttributeError):
match = None
return match
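Illustrative behaviour (added): the first case-insensitive match is returned with its original casing, and a missing or None name yields None.
print(_find_in_iterable_case_insensitive(['Foo', 'BAR', 'baz'], 'bar'))  # 'BAR'
print(_find_in_iterable_case_insensitive(['Foo', 'BAR'], 'missing'))     # None (ValueError is caught)
print(_find_in_iterable_case_insensitive(['Foo', 'BAR'], None))          # None (AttributeError is caught)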
|
548c951b08fb07251fda1b8918282462c8d0351a
| 3,646,371
|
import os
import logging
def RunInSeparateProcess(fn, *args):
"""Runs a function in a separate process.
Note: Only boolean return values are supported.
@type fn: callable
@param fn: Function to be called
@rtype: bool
@return: Function's result
"""
pid = os.fork()
if pid == 0:
# Child process
try:
# In case the function uses temporary files
utils_wrapper.ResetTempfileModule()
# Call function
result = int(bool(fn(*args)))
assert result in (0, 1)
except: # pylint: disable=W0702
logging.exception("Error while calling function in separate process")
# 0 and 1 are reserved for the return value
result = 33
os._exit(result) # pylint: disable=W0212
# Parent process
# Avoid zombies and check exit code
(_, status) = os.waitpid(pid, 0)
if os.WIFSIGNALED(status):
exitcode = None
signum = os.WTERMSIG(status)
else:
exitcode = os.WEXITSTATUS(status)
signum = None
if not (exitcode in (0, 1) and signum is None):
raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
(exitcode, signum))
return bool(exitcode)
|
4770f6091d7eb6d96b5fdb2bb983da7be68b7e59
| 3,646,372
|
def predict_all_points(data, order, coefficients):
"""
:param data: input data to create least squares prediction of order(order) of
:param order: order for least squares prediction
:param coefficients: coefficients of LPC
:return: returns estimation of entire data set. Will be of length (len(data) - order)
"""
predicted_set = np.zeros((1, len(data) - order))
index = 0
for i in np.arange(order, len(data)):
        y = data[i - order:i]
        predicted_set[0][index] = np.sum(np.multiply(y, -coefficients))
index += 1
return predicted_set[0]
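# Hedged usage sketch: a toy signal and made-up LPC coefficients, purely to show the
# expected shapes; the coefficients are not fitted to the data here.
if __name__ == "__main__":
    signal = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    lpc_coeffs = np.array([-1.5, 0.5])   # hypothetical order-2 coefficients
    preds = predict_all_points(signal, order=2, coefficients=lpc_coeffs)
    print(preds.shape)                   # (4,) == len(signal) - order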
|
4725c735241f439bf986743cafdee0e995373966
| 3,646,373
|
def _unpack(msg, decode=True):
"""Unpack and decode a FETCHed message dictionary."""
if 'UID' in msg and 'BODY[]' in msg:
uid = msg['UID']
body = msg['BODY[]']
if decode:
idate = msg.get('INTERNALDATE', None)
flags = msg.get('FLAGS', ())
return (uid, IMAP4Message(body, uid, idate, flags))
else:
return (uid, body)
return (None, None)
|
5c027dcd54d29f6d95647b66ad2d28998866dc3c
| 3,646,374
|
import logging
import cv2
from PIL import Image
def video_in(filename=INPUTPATH):
    """Reads a video file, displays it frame by frame, and stores every frame as a
    PNG image under images/ for later processing.
    Returns a string reporting the frame count."""
#create video capture object
cap = cv2.VideoCapture(filename)
name = filename.split('/')[-1].split('.')[0]
i=0
if (cap.isOpened()==False):
logging.error('Error opening video stream or file')
while(cap.isOpened()):
#capture frame-by-frame
ret, frame = cap.read()
if ret == True:
i=i+1
cv2.imshow('Frame', frame)
Image.fromarray(frame).save(f"images/{name}_{i}.png")
# Press Q on keyboard to exit
if cv2.waitKey(25) & 0xFF == ord('q'):
break
# Break the loop
        else:
            break
    # Release the capture handle and close any preview windows before returning
    cap.release()
    cv2.destroyAllWindows()
    return f'Frame count of {name}: {i}'
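# Hedged usage sketch: "videos/sample.mp4" is a hypothetical path, and an existing
# "images/" directory is assumed since the function writes its PNG frames there.
if __name__ == "__main__":
    print(video_in("videos/sample.mp4"))   # e.g. "Frame count of sample: 120"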
|
cb82d7c6865c3bfe5f3f52f9cb7adc55a8d2e002
| 3,646,375
|
from typing import List
def convert_all_timestamps(results: List[ResponseResult]) -> List[ResponseResult]:
"""Replace all date/time info with datetime objects, where possible"""
results = [convert_generic_timestamps(result) for result in results]
results = [convert_observation_timestamps(result) for result in results]
return results
|
f81121fcd387626a2baa0ecfb342d3381f6def7f
| 3,646,376
|
def convert(s):
""" Take full markdown string and swap all math spans with img.
"""
matches = find_inline_equations(s) + find_display_equations(s)
for match in matches:
full = match[0]
latex = match[1]
img = makeimg(latex)
s = s.replace(full, img)
return s
|
684a6be3812aad8b602631c45af407ca878f9453
| 3,646,377
|
from os import path
from json import JSONDecodeError, load
def file_parser(input_file: str = 'stocks.json') -> dict:
"""Reads the input file and loads the file as dictionary.
Args:
input_file: Takes the input file name as an argument.
Returns:
dict:
            Loaded JSON content as a dictionary.
"""
if path.isfile(input_file):
with open(input_file) as stock_file:
try:
return load(fp=stock_file)
        except JSONDecodeError:
            # 'prefix' is assumed to be a project-local helper that renders a log-level tag
            print(f"\033[31m{prefix(level='ERROR')}Unable to load stocks.json.\033[00m")
|
c4383ab4037595aeaa2aca2bc9ab77de1777a4fe
| 3,646,378
|
def _amplify_ep(text):
"""
check for added emphasis resulting from exclamation points (up to 4 of them)
"""
ep_count = text.count("!")
if ep_count > 4:
ep_count = 4
# (empirically derived mean sentiment intensity rating increase for
# exclamation points)
ep_amplifier = ep_count * 0.292
return ep_amplifier
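# Hedged usage sketch showing the exclamation-point amplifier and its cap at four marks.
print(_amplify_ep("Great!"))        # 1 * 0.292 = 0.292
print(_amplify_ep("Great!!!!!!"))   # capped at 4 -> 1.168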
|
8f78a5f24aa22b5f2b4927131bfccf22ccc69ff3
| 3,646,379
|
def inline_singleton_lists(dsk):
""" Inline lists that are only used once
>>> d = {'b': (list, 'a'),
... 'c': (f, 'b', 1)} # doctest: +SKIP
>>> inline_singleton_lists(d) # doctest: +SKIP
{'c': (f, (list, 'a'), 1)}
Pairs nicely with lazify afterwards
"""
dependencies = dict((k, get_dependencies(dsk, k)) for k in dsk)
dependents = reverse_dict(dependencies)
keys = [k for k, v in dsk.items() if istask(v) and v
and v[0] is list
and len(dependents[k]) == 1]
return inline(dsk, keys, inline_constants=False)
|
a4c2a8b6d96d0bfac8e9ba88a4bed301c3054f0a
| 3,646,380
|
def vegasflowplus_sampler(*args, **kwargs):
"""Convenience wrapper for sampling random numbers
Parameters
----------
`integrand`: tf.function
`n_dim`: number of dimensions
`n_events`: number of events per iteration
`training_steps`: number of training_iterations
Returns
-------
`sampler`: a reference to the generate_random_array method of the integrator class
"""
return sampler(VegasFlowPlus, *args, **kwargs)
|
1b53d83bd010a8113640858d46d66c9c0ef76ff8
| 3,646,381
|
def remove_recalculated_sectors(df, prefix='', suffix=''):
"""Return df with Total gas (sum of all sectors) removed
"""
    idx = recalculated_row_idx(df, prefix=prefix, suffix=suffix)
return df[~idx]
|
54272933f72d45cf555f76086c809eba14713242
| 3,646,382
|
def unparse_headers(hdrs):
"""Parse a dictionary of headers to a string.
Args:
hdrs: A dictionary of headers.
Returns:
The headers as a string that can be used in an NNTP POST.
"""
return "".join([unparse_header(n, v) for n, v in hdrs.items()]) + "\r\n"
|
7c06127752d0c6be19894703ba95f2e827e89b8f
| 3,646,383
|
import numpy as np
def modify_natoms(row, BBTs, fg):
    """This function takes a row of a pandas data frame and calculates the new number of atoms
    based on the atom difference indicated in its functional groups.
    BBTs : list of instances of the BBT class
    fg : instance of the Parameters class (fg parameters)
    returns : n_atoms (int)"""
n_atoms = row['N_ATOMS']
for i in BBTs[row['BBT']].BBT:
n_atoms += fg.par[i]['atom_dif']
if n_atoms < 1:
return np.nan
return n_atoms
|
2c2df3d2859d33128f982b936011c73bafb723bc
| 3,646,384
|
def recreate_cursor(collection, cursor_id, retrieved, batch_size):
"""
    Creates and returns a Cursor object based on an existing cursor on the
    server. If cursor_id is invalid, the returned cursor will raise
OperationFailure on read. If batch_size is -1, then all remaining documents
on the cursor are returned.
"""
if cursor_id == 0:
return None
cursor_info = {'id': cursor_id, 'firstBatch': []}
_logger.info(
"collection: {0} cursor_info: {1} retrieved {2} batch_size {3}"
.format(collection, cursor_id, retrieved, batch_size))
cursor = CommandCursor(collection, cursor_info, 0,
retrieved=retrieved)
cursor.batch_size(batch_size)
return cursor
|
1a4987715e35f1cf09ac3046c36c752289797ee6
| 3,646,385
|
def nut00b(date1, date2):
"""
Wrapper for ERFA function ``eraNut00b``.
Parameters
----------
date1 : double array
date2 : double array
Returns
-------
dpsi : double array
deps : double array
Notes
-----
The ERFA documentation is below.
- - - - - - - - - -
e r a N u t 0 0 b
- - - - - - - - - -
Nutation, IAU 2000B model.
Given:
date1,date2 double TT as a 2-part Julian Date (Note 1)
Returned:
dpsi,deps double nutation, luni-solar + planetary (Note 2)
Notes:
1) The TT date date1+date2 is a Julian Date, apportioned in any
convenient way between the two arguments. For example,
JD(TT)=2450123.7 could be expressed in any of these ways,
among others:
date1 date2
2450123.7 0.0 (JD method)
2451545.0 -1421.3 (J2000 method)
2400000.5 50123.2 (MJD method)
2450123.5 0.2 (date & time method)
The JD method is the most natural and convenient to use in
cases where the loss of several decimal digits of resolution
is acceptable. The J2000 method is best matched to the way
the argument is handled internally and will deliver the
optimum resolution. The MJD method and the date & time methods
are both good compromises between resolution and convenience.
2) The nutation components in longitude and obliquity are in radians
and with respect to the equinox and ecliptic of date. The
obliquity at J2000.0 is assumed to be the Lieske et al. (1977)
value of 84381.448 arcsec. (The errors that result from using
this function with the IAU 2006 value of 84381.406 arcsec can be
neglected.)
The nutation model consists only of luni-solar terms, but
includes also a fixed offset which compensates for certain long-
period planetary terms (Note 7).
3) This function is an implementation of the IAU 2000B abridged
nutation model formally adopted by the IAU General Assembly in
2000. The function computes the MHB_2000_SHORT luni-solar
nutation series (Luzum 2001), but without the associated
corrections for the precession rate adjustments and the offset
between the GCRS and J2000.0 mean poles.
4) The full IAU 2000A (MHB2000) nutation model contains nearly 1400
terms. The IAU 2000B model (McCarthy & Luzum 2003) contains only
77 terms, plus additional simplifications, yet still delivers
results of 1 mas accuracy at present epochs. This combination of
accuracy and size makes the IAU 2000B abridged nutation model
suitable for most practical applications.
The function delivers a pole accurate to 1 mas from 1900 to 2100
(usually better than 1 mas, very occasionally just outside
1 mas). The full IAU 2000A model, which is implemented in the
function eraNut00a (q.v.), delivers considerably greater accuracy
at current dates; however, to realize this improved accuracy,
corrections for the essentially unpredictable free-core-nutation
(FCN) must also be included.
5) The present function provides classical nutation. The
MHB_2000_SHORT algorithm, from which it is adapted, deals also
with (i) the offsets between the GCRS and mean poles and (ii) the
adjustments in longitude and obliquity due to the changed
precession rates. These additional functions, namely frame bias
and precession adjustments, are supported by the ERFA functions
eraBi00 and eraPr00.
6) The MHB_2000_SHORT algorithm also provides "total" nutations,
comprising the arithmetic sum of the frame bias, precession
adjustments, and nutation (luni-solar + planetary). These total
nutations can be used in combination with an existing IAU 1976
precession implementation, such as eraPmat76, to deliver GCRS-
to-true predictions of mas accuracy at current epochs. However,
for symmetry with the eraNut00a function (q.v. for the reasons),
the ERFA functions do not generate the "total nutations"
directly. Should they be required, they could of course easily
be generated by calling eraBi00, eraPr00 and the present function
and adding the results.
7) The IAU 2000B model includes "planetary bias" terms that are
fixed in size but compensate for long-period nutations. The
amplitudes quoted in McCarthy & Luzum (2003), namely
Dpsi = -1.5835 mas and Depsilon = +1.6339 mas, are optimized for
the "total nutations" method described in Note 6. The Luzum
(2001) values used in this ERFA implementation, namely -0.135 mas
and +0.388 mas, are optimized for the "rigorous" method, where
frame bias, precession and nutation are applied separately and in
that order. During the interval 1995-2050, the ERFA
implementation delivers a maximum error of 1.001 mas (not
including FCN).
References:
Lieske, J.H., Lederle, T., Fricke, W., Morando, B., "Expressions
for the precession quantities based upon the IAU /1976/ system of
astronomical constants", Astron.Astrophys. 58, 1-2, 1-16. (1977)
Luzum, B., private communication, 2001 (Fortran code
MHB_2000_SHORT)
McCarthy, D.D. & Luzum, B.J., "An abridged model of the
precession-nutation of the celestial pole", Cel.Mech.Dyn.Astron.
85, 37-49 (2003)
Simon, J.-L., Bretagnon, P., Chapront, J., Chapront-Touze, M.,
Francou, G., Laskar, J., Astron.Astrophys. 282, 663-683 (1994)
Copyright (C) 2013-2017, NumFOCUS Foundation.
Derived, with permission, from the SOFA library. See notes at end of file.
"""
dpsi, deps = ufunc.nut00b(date1, date2)
return dpsi, deps
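# Hedged usage sketch: assumes the standalone pyerfa package, whose erfa.nut00b wraps
# the same ERFA routine; the two-part Julian Date below is an arbitrary TT epoch.
if __name__ == "__main__":
    import erfa
    dpsi, deps = erfa.nut00b(2400000.5, 53736.0)
    print(dpsi, deps)   # nutation in longitude and obliquity, radians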
|
a5235543aca0d6de6e79878ac3db1d208d237a0d
| 3,646,386
|
import numpy as np
import pandas as pd
def Join_Factors(*factor_data, merge_names=None, new_name=None, weight=None, style='SAST'):
    """Merge factors into a single factor by weighted sum. Weights are renormalised
    over the non-missing factors only.
    Parameters:
    ===========
    factor_data: dataframe or tuple of dataframes
    merge_names: list
        Names of the factors to merge; must be a subset of the columns of factor_data.
    new_name: str
        Name of the merged factor.
    weight: list or None
        Weights of the factors to merge; equal weights are used when None.
    style : str, 'SAST' or 'AST'
        Layout of the attribute, symbol and time dimensions in factor_data.
        SAST (Stack Attribute-Symbol-Time) is the most common: the index is a
        Time-Symbol MultiIndex and the columns are attributes. AST
        (Attribute-Symbol-Time) uses time as the index and symbols as the columns.
    """
def nansum(a, w):
nanind = np.isfinite(a)
if np.sum(nanind) == 0.0:
return np.nan
return np.sum(a[nanind] * w[nanind]) / np.sum(w[nanind])
if new_name is None:
new_name = 'new'
if isinstance(merge_names, str):
merge_names = [merge_names]
if len(factor_data) == 1:
if merge_names is None:
factor_values = factor_data[0].values
else:
factor_values = factor_data[0][merge_names].values
elif style == 'SAST':
factor_data = align_dataframes(*factor_data)
        factor_values = np.hstack([x.values for x in factor_data])
else:
factor_data = align_dataframes(*factor_data, axis='both')
        factor_values = np.stack([x.values for x in factor_data])
nfactors = factor_values.shape[1] if factor_values.ndim == 2 else factor_values.shape[0]
if weight is None:
weight = np.asarray([1.0 / nfactors] * nfactors)
else:
weight = np.asarray(weight) / np.sum(weight)
if factor_values.ndim == 2:
weight_array = np.tile(weight, (factor_values.shape[0],1))
na_ind = np.isnan(factor_values)
weight_array[na_ind] = 0.0
weight_array = weight_array / weight_array.sum(axis=1)[:, np.newaxis]
new_values = np.nansum(factor_values * weight_array, axis=1)
new_values[np.all(na_ind, axis=1)] = np.nan
return pd.DataFrame(new_values, index=factor_data[0].index, columns=[new_name])
else:
new_values = np.apply_along_axis(nansum, 0, factor_values, w=weight)
return pd.DataFrame(new_values, index=factor_data[0].index, columns=factor_data[0].columns)
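# Hedged usage sketch: merges two factor columns of one SAST-style DataFrame with
# equal weights; the dates, symbols and factor values are toy data.
if __name__ == "__main__":
    idx = pd.MultiIndex.from_product(
        [pd.to_datetime(["2021-01-04", "2021-01-05"]), ["000001.SZ", "600000.SH"]],
        names=["date", "symbol"])
    factors = pd.DataFrame({"momentum": [0.1, np.nan, 0.3, 0.4],
                            "value": [0.2, 0.6, np.nan, 0.8]}, index=idx)
    print(Join_Factors(factors, merge_names=["momentum", "value"], new_name="composite"))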
|
95db1eda297cb8cb05a1db9b1fae9c25a034685f
| 3,646,387
|
from pathlib import Path
def _check_for_file_changes(filepath: Path, config: Config) -> bool:
"""Returns True if a file was modified in a working dir."""
# Run 'git add' to avoid false negatives, as 'git diff --staged' is used for
# detection. This is important when there are external factors that impact the
# committing process (like pre-commit).
_call_git(config, "add", [filepath.as_posix()])
git_diff_out = _get_git_output(config, "diff", ["--staged", filepath.as_posix()])
# If 'git diff' output is empty, the file wasn't modified.
return git_diff_out != b""
|
c99da7e993e74f7dbe5789c48832afc59638762c
| 3,646,388
|
import time
import xbmcgui
def wait_or_cancel(proc, title, message):
    """
    Display a status dialog while the process is running and allow the user to cancel.
    :param proc: subprocess object
    :param title: title for the status dialog
    :param message: message for the status dialog
    :return: (process exit code, stdout output or None)
    """
    pDialog = xbmcgui.DialogProgress()
    pDialog.create(title, "")
    while proc and proc.poll() is None and not pDialog.iscanceled():
        pDialog.update(50, message)
        time.sleep(0.5)  # avoid busy-waiting while the process runs
    # Defaults guard against communicate() failing before the variables are set
    exitcode = 1
    stdout = None
    try:
        if not pDialog.iscanceled():
            msg = proc.communicate()[0]
            exitcode = proc.returncode
            stdout = msg
            if exitcode == 0:
                pDialog.update(100, "Complete!")
                time.sleep(3)
            else:
                xbmcgui.Dialog().ok(
                    "Error during {desc}".format(desc=title.lower()), msg)
        else:
            proc.terminate()
    except Exception:
        pass
    pDialog.close()
    return (exitcode, stdout)
|
8b60e459523933ee205210d4761b6b7d9d8acbfb
| 3,646,389
|
def getg_PyInteractiveBody_one_in_two_out():
"""Return a graph that has a PyInteractiveBody with one input
and two outputs.
"""
@dl.Interactive(
[("num", dl.Int(dl.Size(32)))],
[('num_out', dl.Int(dl.Size(32))), ('val_out', dl.Bool())]
)
def interactive_func(node: dl.PythonNode):
for _ in range(2):
num = node.receive("num")
print(f"received num: {num}")
node.send(num_out=None, val_out=False)
node.send(num_out=14, val_out=False)
s0 = dl.lib.StateSaver(bool, condition=lambda x: x)
s1 = dl.lib.StateSaver(int, verbose=True)
with dl.DeltaGraph() as graph:
int_func = interactive_func.call(4)
s0.save_and_exit_if(int_func.val_out)
s1.save_and_exit(int_func.num_out)
return graph
|
31af32a5ece2f4c76635a8f37a0ac644c5f0e364
| 3,646,390
|
import tensorflow as tf  # TF1-style tf.layers API is used below
def batch_norm_relu(inputs, is_training):
"""Performs a batch normalization followed by a ReLU."""
# We set fused=True for a performance boost.
inputs = tf.layers.batch_normalization(
inputs=inputs,
axis=FLAGS.input_layout.find('C'),
momentum=FLAGS.batch_norm_decay,
epsilon=FLAGS.batch_norm_epsilon,
center=True,
scale=True,
training=is_training,
fused=FLAGS.use_fused_batchnorm)
return tf.nn.relu(inputs)
|
ab771b9d8747bc27d747dd9dce42a6bc9a1d59d3
| 3,646,391
|
def knn(points, p, k):
"""
Calculates the k nearest neighbours of a point.
:param points: list of points
:param p: reference point
:param k: amount of neighbours
:return: list of k neighbours
"""
return sorted(points, key=lambda x: distance(p, x))[:k]
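# Hedged usage sketch: the entry does not show distance(), so a Euclidean helper is
# assumed here; the 2-D points are chosen purely for illustration.
import math

def distance(a, b):
    return math.hypot(a[0] - b[0], a[1] - b[1])

print(knn([(0, 0), (1, 1), (5, 5), (2, 2)], (0, 0), k=2))   # [(0, 0), (1, 1)]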
|
e1a806cd4c16b5ecbf66301406dafeb2b12c46db
| 3,646,392
|
def ruleset_detail(request, slug):
"""
    View that returns the ruleset identified by its slug in JSON format.
    :param request: WSGI request from the user
    :param slug: slug of the requested ruleset
    :return: Metadata of the requested ruleset in JSON format.
"""
# try to fetch ruleset from database
try:
ruleset = Ruleset.objects.get(slug=slug)
except Ruleset.DoesNotExist:
return HttpResponse(status=404)
if request.method == 'GET':
serializer = RulesetSerializer(ruleset)
return JsonResponse(serializer.data)
|
a122a2e20641a13d6a934c0261f199ff304ae622
| 3,646,393
|
import requests
import json
def send_slack_notification(message):
"""
Send slack notification
Arguments:
message {string} -- Slack notification message
Returns:
response {Response} -- Http response object
"""
response = requests.post(
SLACK_WEBHOOK,
data=json.dumps(
{
"text": message,
"username": USERNAME,
"channel": CHANNEL,
"icon_emoji": ICON_EMOJI,
}
),
)
return response
|
6c5f0e51c1bfce19ff9a4aec77c1e4c98cd359fa
| 3,646,394
|
import argparse
def options_handler():
"""Validates and parses script arguments.
Returns:
Namespace: Parsed arguments object.
"""
parser = argparse.ArgumentParser(description="Downloads XSOAR packs as zip and their latest docker images as tar.")
parser.add_argument('-p', '--packs',
help="A list of pack names as they appear in https://xsoar.pan.dev/marketplaceEither provided "
"via a path to a file that contains the packs list (separated by new lines) or "
"a string of comma separated packs (e.g. Base,AutoFocus)",
required=False)
parser.add_argument('-o', '--output_path',
help="The path where the files will be saved to.",
required=False, default=".")
parser.add_argument('-sp', '--skip_packs',
help="Don't download packs.",
required=False, action='store_true')
parser.add_argument('-sd', '--skip_docker',
help="Don't download docker images.",
required=False, action='store_true')
parser.add_argument('--insecure',
help="Skip certificate validation.", dest='feature', action='store_true')
parser.set_defaults(skip_packs=False, skip_docker=False, insecure=False)
return parser.parse_args()
|
d3a650d58d0444981b010e230cb5111245a00bc7
| 3,646,395
|
def method_detect(method: str):
"""Detects which method to use and returns its object"""
if method in POSTPROCESS_METHODS:
if method == "rtb-bnb":
return RemovingTooTransparentBordersHardAndBlurringHardBorders()
elif method == "rtb-bnb2":
return RemovingTooTransparentBordersHardAndBlurringHardBordersTwo()
else:
return None
else:
return False
|
cb1dafba5a7c225c093ab602c6e383cb7f499bba
| 3,646,396
|
def approve_pipelines_for_publishing(pipeline_ids): # noqa: E501
"""approve_pipelines_for_publishing
# noqa: E501
:param pipeline_ids: Array of pipeline IDs to be approved for publishing.
:type pipeline_ids: List[]
:rtype: None
"""
return util.invoke_controller_impl()
|
585d4972955e240f146c3d06d5a181dcad36d111
| 3,646,397
|
from nltk.corpus import reuters
def get_x(document_id, word2wid, corpus_termfrequency_vector):
"""
Get the feature vector of a document.
Parameters
----------
document_id : int
word2wid : dict
corpus_termfrequency_vector : list of int
Returns
-------
list of int
"""
word_list = list(reuters.words(document_id))
word_count = float(len(word_list))
assert word_count > 0
document_tf_vec = get_termfrequency_vector(word2wid, word_list)
x = []
for i, wd_count in enumerate(document_tf_vec):
x.append(wd_count / (word_count * corpus_termfrequency_vector[i]))
return x
|
fca6e5a6071a6b48b83effb37d3b77a88ddf4046
| 3,646,398
|
def process_chain_of_trust(host: str, image: Image, req_delegations: list):
"""
Processes the whole chain of trust, provided by the notary server (`host`)
for any given `image`. The 'root', 'snapshot', 'timestamp', 'targets' and
potentially 'targets/releases' are requested in this order and afterwards
validated, also according to the `policy_rule`.
    Returns the signed image targets, which contain the digests.
    Raises `NotFoundException` should no required delegations be present in
the trust data, or no image targets be found.
"""
tuf_roles = ["root", "snapshot", "timestamp", "targets"]
trust_data = {}
key_store = KeyStore()
# get all trust data and collect keys (from root and targets), as well as
# hashes (from snapshot and timestamp)
for role in tuf_roles:
trust_data[role] = get_trust_data(host, image, TUFRole(role))
key_store.update(trust_data[role])
# if the 'targets.json' has delegation roles defined, get their trust data
# as well
if trust_data["targets"].has_delegations():
for delegation in trust_data["targets"].get_delegations():
trust_data[delegation] = get_trust_data(host, image, TUFRole(delegation))
# validate all trust data's signatures, expiry dates and hashes
for role in trust_data:
trust_data[role].validate(key_store)
# validate needed delegations
if req_delegations:
if trust_data["targets"].has_delegations():
delegations = trust_data["targets"].get_delegations()
req_delegations_set = set(req_delegations)
delegations_set = set(delegations)
delegations_set.discard("targets/releases")
# make an intersection between required delegations and actually
# present ones
if not req_delegations_set.issubset(delegations_set):
missing = list(req_delegations_set - delegations_set)
raise NotFoundException(
"could not find delegation roles {} in trust data.".format(
str(missing)
)
)
else:
raise NotFoundException("could not find any delegations in trust data.")
# if certain delegations are required, then only take the targets fields of the
# required delegation JSON's. otherwise take the targets field of the targets JSON, as
# long as no delegations are defined in the targets JSON. should there be delegations
# defined in the targets JSON the targets field of the releases JSON will be used.
if req_delegations:
image_targets = [
trust_data[target_role].signed.get("targets", {})
for target_role in req_delegations
]
else:
targets_key = (
"targets/releases" if trust_data["targets"].has_delegations() else "targets"
)
image_targets = [trust_data[targets_key].signed.get("targets", {})]
if not any(image_targets):
raise NotFoundException("could not find any image digests in trust data.")
return image_targets
|
391024aeaa814f3159c8f45a925afce105b7b339
| 3,646,399
|