def check_seal(item):
"""
    Given a message object, use the "seal" attribute (a cryptographic
    signature proving the provenance of the message) to check that it is valid.
    Returns a boolean indicating validity.
"""
try:
item_dict = to_dict(item)
raw_sig = item_dict['seal']
signature = binascii.unhexlify(raw_sig.encode('ascii'))
key = rsa.PublicKey.load_pkcs1(item_dict['sender'].encode('ascii'))
del item_dict['seal']
del item_dict['message']
root_hash = _get_hash(item_dict).hexdigest()
        # rsa.verify() raises on mismatch, so a successful call here means the seal is valid.
        return rsa.verify(root_hash.encode('ascii'), signature, key)
    except Exception:
        # Any failure (missing fields, malformed key, verification error) is treated as invalid.
        return False
| 14,700
|
def plot_win_prob(times, diff, end_lim, probs, team_abr, bools):
""" This function plots the win probability and
score differential for the game
@param times (list): list containing actual_times
and times. times contains all of the times at
which win probability was calculated
@param diff (list): List of score differentials
corresponding to all times in actual_times
@param end_lim (int): Time at which the last win
probability value is calculated
@param probs (list): List of win probability
lists (probs_home and probs_away). probs_home
contains all of the home win probability
values for all times in the times list.
probs_away is the same, but for win probability
for the away team
    @param team_abr (list): List containing the
home team abbreviation in the first index
and the away team abbreviation in the
second index
@param bools (list): List of booleans controlling
which figures are plotted
Returns:
- fig (matplotlib.figure.Figure): Figure
containing score differential and/or
win probability. None if all of the
booleans are False
"""
actual_times, times = times
probs_home, probs_away = probs
plot_diff, plot_home, plot_away = bools
plt.rcParams["figure.figsize"] = (20,6)
# Score differential
if plot_diff:
fig, pltting = \
plot_score_differential(actual_times,
diff,
end_lim)
else:
        fig, ax = plt.subplots()
        pltting = ax
    # Quarter delineation
for normal_q in range(0,4):
pltting.plot([2880-normal_q*12*60, 2880-normal_q*12*60],
[0,1], 'gray')
    # OT delineation
for ot in range(0,10):
pltting.plot([-ot*5*60, -ot*5*60],
[0,1], 'gray')
# Win probability
if plot_home:
pltting.plot(times, probs_home, 'blue', label=team_abr[0])
if plot_away:
pltting.plot(times, probs_away, 'orange', label=team_abr[-1])
pltting.set_xlim(2880, end_lim)
pltting.set_ylim(0.0, 1.0)
pltting.set_title("Win Probability")
plt.legend(loc='best')
plt.show()
return fig
| 14,701
|
def add_office():
"""Given that i am an admin i should be able to add a political office
When i visit ../api/v2/offices endpoint using POST method"""
if is_admin() is not True:
return is_admin()
errors = []
try:
        if not request.get_json():
            errors.append(
                make_response(jsonify({'status': 409, "message": "missing input data"}), 409))
office_data = request.get_json()
check_missingfields = validate.missing_value_validator(['name', 'type'], office_data)
if check_missingfields is not True:
return check_missingfields
check_emptyfield = validate.empty_string_validator(['name', 'type'], office_data)
if check_emptyfield is not True:
return check_emptyfield
check_if_text_only = validate.text_arrayvalidator(['name', 'type'], office_data)
if check_if_text_only is not True:
return check_if_text_only
office_name = office_data['name']
office_type = office_data['type']
if len(errors) > 0:
for e in errors:
return e
res = office.add_office(office_name, office_type)
return res
except Exception as e:
return make_response(jsonify({'message': "something went wrong " + str(e.args[0]), 'status': 400}), 400)
| 14,702
|
def gen(iterable):
"""
gen(iter: iterable)
    accept an iterable object; return a generator
implement:
for e in iterable: yield e
"""
for e in iterable:
yield e
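# A minimal usage sketch for gen() above (added for illustration; it assumes
# nothing beyond the function itself).
def _example_gen():
    g = gen([1, 2, 3])
    assert next(g) == 1
    assert list(g) == [2, 3]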
| 14,703
|
def main(input_filepath, output_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
cycle3 = pd.read_csv(Path(input_filepath) / FILE_NAME)
cycle3 = preprocess(cycle3)
cycle3.to_csv(Path(output_filepath) / 'cycle3.csv')
| 14,704
|
def send_message(
mg: mailgun.MailGun,
templates: t.Tuple[str, str],
contact_name: str,
contact_email: str,
sender: str,
reply_to: str,
sponsorship_package: t.Optional[Path],
dry_run: bool,
) -> bool:
"""
Send an individual email and report if it was successful
:param mg: the MailGun instance
:param templates: the text and html templates respectively
:param contact_name: the name of the contact at the company
:param contact_email: the email of the contact at the company
:param sender: the name of the person sending the email
:param reply_to: the email which replies are directed to
:param sponsorship_package: an optional file for the sponsorship package
:param dry_run: whether to actually send the email
:return: whether the sending was successful
"""
text, html = templates
# Format the sender email
sender_email = f"{sender[0]}{sender[sender.index(' ') + 1:].replace('-', '')}@{mg.domain}".lower()
# Get and format the contact email(s)
pairs = getaddresses([contact_email.replace(" ", "")])
emails = []
for _, email in pairs:
if email == "":
logger.error(f'invalid email address found in "{contact_email}"')
return False
emails.append(f"{contact_name} <{email.lower()}>")
# Print out the content on dry runs
if dry_run:
click.echo(
f"To: {', '.join(emails)}\n"
f"From: {sender} <{sender_email}>\n"
f"Subject: WaffleHacks Sponsorship Opportunity\n"
f"Reply To: {reply_to}\n\n\n"
f"{text}",
file=open(f"./dry-run-out/{contact_name} - {uuid4()}", "w"),
)
return True
try:
# Open the package if necessary
files = []
if sponsorship_package:
files.append(sponsorship_package.open("rb"))
mg.send(
from_=f"{sender} <{sender_email}>",
to=emails,
subject="WaffleHacks Sponsorship Opportunity",
text=text,
html=html,
files=files,
headers={"Reply-To": reply_to},
)
except mailgun.MailGunException as e:
logger.error(f"failed to send message: {e}")
return False
return True
| 14,705
|
def UABD(cpu_context: ProcessorContext, instruction: Instruction):
"""Unsigned absolute difference (vector form)"""
logger.debug("%s instruction not currently implemented.", instruction.mnem)
| 14,706
|
def test_var_length_list_of_lists():
"""Check that length of dict <= number of lists when passed a list of lists."""
    plots = make_qqplot(data_lists, print_plots=False)
assert len(plots.keys()) <= len(data_lists)
| 14,707
|
def test_default():
""" Tests default metadata cube generated """
cube = generate_metadata(MANDATORY_ATTRIBUTE_DEFAULTS)
assert cube.name() == NAME_DEFAULT
assert cube.standard_name == NAME_DEFAULT
assert cube.units == UNITS_DEFAULT
assert cube.ndim == NDIMS_DEFAULT
assert cube.shape == (ENSEMBLE_MEMBERS_DEFAULT, NPOINTS_DEFAULT, NPOINTS_DEFAULT)
spatial_grid_values = SPATIAL_GRID_ATTRIBUTE_DEFAULTS[SPATIAL_GRID_DEFAULT]
assert cube.coords()[0].name() == "realization"
assert cube.coords()[1].name() == spatial_grid_values["y"]
assert cube.coords()[2].name() == spatial_grid_values["x"]
for axis in ("y", "x"):
assert cube.coord(axis=axis).units == spatial_grid_values["units"]
assert cube.coord(axis=axis).coord_system == spatial_grid_values["coord_system"]
assert np.diff(cube.coord(axis=axis).points)[0] == pytest.approx(
spatial_grid_values["grid_spacing"]
)
assert np.count_nonzero(cube.data) == 0
assert iris_time_to_datetime(cube.coord("time"))[0] == TIME_DEFAULT
assert cube.coord("time").bounds is None
assert (
iris_time_to_datetime(cube.coord("forecast_reference_time"))[0] == FRT_DEFAULT
)
assert cube.coord("forecast_period").points == FORECAST_PERIOD_DEFAULT
assert cube.attributes == MANDATORY_ATTRIBUTE_DEFAULTS
| 14,708
|
def multi_class5_classification_dataset_sparse_labels() -> tf.data.Dataset:
"""
TensorFlow dataset instance with multi-class sparse labels (5 classes)
:return: Multi-class sparse (labels) classification dataset
"""
# Create features
X = tf.random.normal(shape=(100, 3))
    # Create multi-class sparse labels (integer class indices in [0, 5))
y = tf.random.uniform(minval=0, maxval=5, dtype=tf.int32, shape=(100,))
return tf.data.Dataset.from_tensor_slices((X, y))
| 14,709
|
def skipIfNAN(proteinPath):
""" Test if there is a NAN (not a number) in the lists """
overlapArrayWhole = None
overlapArrayInterface = None
overlapTApproxWhole = None
overlapTApproxInterface = None
try:
overlapArrayWhole = np.loadtxt(proteinPath+"overlapArrayWhole.txt")
except IOError:
pass
try:
overlapArrayInterface = np.loadtxt(proteinPath+"overlapArrayInterface.txt")
except IOError:
pass
try:
overlapTApproxWhole = np.loadtxt(proteinPath+"overlapTApproxWhole.txt")
except IOError:
pass
try:
overlapTApproxInterface = np.loadtxt(proteinPath+"overlapTApproxInterface.txt")
except IOError:
pass
    if overlapArrayWhole is not None and np.isnan(overlapArrayWhole).any():
        print("skipped")
        return True
    if overlapArrayInterface is not None and np.isnan(overlapArrayInterface).any():
        print("skipped")
        return True
    if overlapTApproxWhole is not None and np.isnan(overlapTApproxWhole).any():
        print("skipped")
        return True
    if overlapTApproxInterface is not None and np.isnan(overlapTApproxInterface).any():
        print("skipped")
        return True
return False
| 14,710
|
def alignment_view(request, project_uid, alignment_group_uid):
"""View of a single AlignmentGroup.
"""
project = get_object_or_404(Project, owner=request.user.get_profile(),
uid=project_uid)
alignment_group = get_object_or_404(AlignmentGroup,
reference_genome__project=project, uid=alignment_group_uid)
# Initial javascript data.
init_js_data = json.dumps({
'project': adapt_model_instance_to_frontend(project),
'alignment_group': adapt_model_instance_to_frontend(alignment_group)
})
context = {
'project': project,
'tab_root': TAB_ROOT__DATA,
'alignment_group': alignment_group,
'experiment_sample_to_alignment_list_json': adapt_model_to_frontend(
ExperimentSampleToAlignment,
{'alignment_group': alignment_group}),
'init_js_data': init_js_data,
'flag_genome_finishing_enabled': int(settings.FLAG__GENOME_FINISHING_ENABLED)
}
return render(request, 'alignment.html', context)
| 14,711
|
async def get_latest_disclosure(compass_id: int, api: ci.CompassInterface = Depends(ci_user)) -> Optional[ci.MemberDisclosure]:
"""Gets the latest disclosure for the member given by `compass_id`."""
logger.debug(f"Getting /{{compass_id}}/latest-disclosure for {api.user.membership_number}")
async with error_handler:
return api.people.latest_disclosure(compass_id)
| 14,712
|
def get_matching_string(matches, inputText, limit=0.99):
"""Return the matching string with all of the license IDs matched with the input license text if none matches then it returns empty string.
Arguments:
matches {dictionary} -- Contains the license IDs(which matched with the input text) with their respective sorensen dice score as valus.
limit {float} -- limit at which we will consider the match as a perfect match.
inputText {string} -- license text input by the user.
Returns:
string -- matching string containing the license IDs that actually matched else returns empty string.
"""
if not matches:
matchingString = 'There is not enough confidence threshold for the text to match against the SPDX License database.'
return matchingString
elif 1.0 in matches.values() or all(limit < score for score in matches.values()):
matchingString = 'The following license ID(s) match: ' + ", ".join(matches.keys())
return matchingString
else:
for licenseID in matches:
listedLicense = getListedLicense(licenseID)
isTextStandard = checkTextStandardLicense(listedLicense, inputText)
if not isTextStandard:
matchingString = 'The following license ID(s) match: ' + licenseID
return matchingString
else:
return ''
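# A minimal usage sketch for get_matching_string() above, added for illustration.
# It only exercises the two branches that do not call the SPDX helpers
# (getListedLicense / checkTextStandardLicense), so no external data is needed.
def _example_get_matching_string():
    assert get_matching_string({}, "some text").startswith("There is not enough")
    out = get_matching_string({"MIT": 1.0}, "some text")
    assert out == "The following license ID(s) match: MIT"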
| 14,713
|
def get_review(annotation):
"""
Get annotation's review (if exists).
"""
try:
review = Comment.objects.get(annotation=annotation)
return review
except Comment.DoesNotExist:
return None
| 14,714
|
def load_grid(grdfiles, blocks, dimpart, nsigma, **kwargs):
"""Setup a `grid` by reading `grdfiles` on `blocks`
"""
ncgrid = nct.MDataset(grdfiles, blocks, dimpart, **kwargs)
# dummy time, to be updated later
time = ma.Marray(np.arange(10), dims=tdims)
lat = nct.readmarray(ncgrid, "lat_rho", hdims)
lon = nct.readmarray(ncgrid, "lon_rho", hdims)
if ncgrid.halow > 0:
halow = ncgrid.halow
# extend lon-lat on the halow=1 to infer correctly @ f-points
fill_halo(lat, halow)
fill_halo(lon, halow)
depth = nct.readmarray(ncgrid, "h", hdims)
angle = nct.readmarray(ncgrid, "angle", hdims)
mask = nct.readmarray(ncgrid, "mask_rho", hdims)
pm = nct.readmarray(ncgrid, "pm", hdims)
pn = nct.readmarray(ncgrid, "pn", hdims)
f = nct.readmarray(ncgrid, "f", hdims)
sigma = ma.Marray((np.arange(nsigma)+0.5)/nsigma, dims=vdims)
coords = {"t": time, "sigma": sigma, "eta": lat, "xi": lon}
return gr.Grid(coords, dims, depth=depth, angle=angle, mask=mask, pm=pm, pn=pn, f=f, **kwargs)
| 14,715
|
def with_patch_inspect(f):
"""decorator for monkeypatching inspect.findsource"""
def wrapped(*args, **kwargs):
save_findsource = inspect.findsource
save_getargs = inspect.getargs
inspect.findsource = findsource
inspect.getargs = getargs
try:
return f(*args, **kwargs)
finally:
inspect.findsource = save_findsource
inspect.getargs = save_getargs
return wrapped
| 14,716
|
def apply_hux_f_model(r_initial, dr_vec, dp_vec, r0=30 * 695700, alpha=0.15, rh=50 * 695700, add_v_acc=True,
omega_rot=(2 * np.pi) / (25.38 * 86400)):
"""Apply 1d upwind model to the inviscid burgers equation.
r/phi grid. return and save all radial velocity slices.
:param r_initial: 1d array, initial condition (vr0). units = (km/sec).
:param dr_vec: 1d array, mesh spacing in r. units = (km)
:param dp_vec: 1d array, mesh spacing in p. units = (radians)
:param alpha: float, hyper parameter for acceleration (default = 0.15).
:param rh: float, hyper parameter for acceleration (default r=50*695700). units: (km)
:param r0: float, initial radial location. units = (km).
:param add_v_acc: bool, True will add acceleration boost.
:param omega_rot: differential rotation.
:return: velocity matrix dimensions (nr x np)
"""
v = np.zeros((len(dr_vec) + 1, len(dp_vec) + 1)) # initialize array vr.
v[0, :] = r_initial
if add_v_acc:
v_acc = alpha * (v[0, :] * (1 - np.exp(-r0 / rh)))
v[0, :] = v_acc + v[0, :]
for i in range(len(dr_vec)):
for j in range(len(dp_vec) + 1):
if j == len(dp_vec): # force periodicity
v[i + 1, j] = v[i + 1, 0]
else:
if (omega_rot * dr_vec[i]) / (dp_vec[j] * v[i, j]) > 1:
print(dr_vec[i] - dp_vec[j] * v[i, j] / omega_rot)
print(i, j) # courant condition
frac1 = (v[i, j + 1] - v[i, j]) / v[i, j]
frac2 = (omega_rot * dr_vec[i]) / dp_vec[j]
v[i + 1, j] = v[i, j] + frac1 * frac2
return v
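# A minimal usage sketch for apply_hux_f_model() above: a constant 400 km/s
# inner boundary on a small synthetic r/phi grid (illustrative numbers only;
# needs only numpy, which the function already uses).
def _example_apply_hux_f_model():
    nphi, nr = 128, 50
    dp_vec = np.full(nphi - 1, 2 * np.pi / nphi)   # phi spacing, radians
    dr_vec = np.full(nr - 1, 695700.0)             # one solar radius per step, km
    v_initial = np.full(nphi, 400.0)               # vr at r0, km/s
    v = apply_hux_f_model(v_initial, dr_vec, dp_vec)
    assert v.shape == (nr, nphi)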
| 14,717
|
def requires_request_arg(method):
"""
Helper function to handle deprecation of old ActionMenuItem API where get_url, is_show,
get_context and render_html all accepted both 'request' and 'parent_context' as arguments
"""
try:
# see if this is a pre-2.15 get_url method that takes both request and context kwargs
inspect.signature(method).bind({})
except TypeError:
return True
else:
return False
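# A minimal usage sketch for requires_request_arg() above (hypothetical classes,
# added for illustration): a method taking only 'parent_context' binds a single
# positional dict, while the old two-argument form does not.
def _example_requires_request_arg():
    class OldItem:
        def get_url(self, request, parent_context):
            return "/"
    class NewItem:
        def get_url(self, parent_context):
            return "/"
    assert requires_request_arg(OldItem().get_url) is True
    assert requires_request_arg(NewItem().get_url) is False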
| 14,718
|
def show_output_to_df(
show_output: str,
spark_session: SparkSession,
default_data_type: str = 'string'
):
"""
Takes a string containing the output of a Spark DataFrame.show() call and
"rehydrates" it into a new Spark DataFrame instance. Example input:
+--------+--------+
|column_a|column_b|
+--------+--------+
|value 1a|value 1b|
|value 2a|value 2b|
+--------+--------+
Optionally, row delimiters can be omitted, and comment lines can be present
(whether or not row delimiters are provided):
|column_a|column_b|
|value 1a|value 1b|
# This is a comment that gets ignored.
|value 2a|value 2b|
Optionally, data types can be specified in a second header line, prefixed
with the DATA_TYPE_START_INDICATOR ("["):
+-------------+----------+------------+-------------------+-----------+
|string_column|int_column|float_column|timestamp_column |bool_column|
[string |int |float |timestamp |boolean ]
+-------------+----------+------------+-------------------+-----------+
|one |1 |1.1 |2018-01-01 00:00:00|true |
|two |2 |2.2 |2018-01-02 12:34:56|false |
+-------------+----------+------------+-------------------+-----------+
:param show_output: A string that resembles the output of a call to
DataFrame.show()
:param spark_session: A SparkSession used to create the new DataFrame instance
:param default_data_type: The default data type that will be used for all
columns for which the data type is not specified in the data type
declaration line
:return: A DataFrame containing the values represented in the input string
"""
if not show_output:
raise ValueError('show_output is required.')
rows = []
column_names = None
types = None
# Added a schema because createDataFrame() does introspection otherwise and
# sometimes gets it wrong with int/bigint and nulls.
schema = None
for line in show_output.strip().splitlines():
line = line.strip()
if not line.startswith(tuple(f'|{DATA_TYPE_START_INDICATOR}')):
continue
line_parts = line.split('|')[1:-1]
values = [part.strip() for part in line_parts]
if column_names is None:
column_names = values
continue
if line.startswith(DATA_TYPE_START_INDICATOR):
if types is None:
line = line.replace(DATA_TYPE_START_INDICATOR, '|', 1)\
.rstrip(f'{DATA_TYPE_END_INDICATOR}|') + '|'
types = [part.strip() for part in line.split('|')[1:-1]]
types = [data_type if len(data_type) > 0 else default_data_type
for data_type in types]
continue
else:
raise ValueError('Cannot have more than one data type declaration line.')
if types is None:
types = [default_data_type] * len(column_names)
_cast_types(values, types)
row_dict = dict(zip(column_names, values))
rows.append(Row(**row_dict))
if types is None:
# This can happen if data types are not specified and no data rows are
# provided.
types = [default_data_type] * len(column_names)
schema = _get_schema(column_names, types)
# Return a DataFrame with the columns in the original order:
return spark_session.createDataFrame(rows, schema=schema).select(column_names)
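# A minimal usage sketch for show_output_to_df() above, added for illustration.
# It assumes a SparkSession is supplied by the caller and that the helpers and
# constants the function relies on (_cast_types, _get_schema,
# DATA_TYPE_START_INDICATOR, DATA_TYPE_END_INDICATOR) live in this module.
def _example_show_output_to_df(spark_session: SparkSession):
    df = show_output_to_df(
        "|column_a|column_b|\n"
        "|value 1a|value 1b|\n"
        "|value 2a|value 2b|",
        spark_session,
    )
    assert df.columns == ["column_a", "column_b"]
    assert df.count() == 2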
| 14,719
|
def Schwefel(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB Schwefel function."""
del seed
dim = len(arr)
bernoulli_arr = np.array([pow(-1, i + 1) for i in range(dim)])
x_opt = 4.2096874633 / 2.0 * bernoulli_arr
x_hat = 2.0 * (bernoulli_arr * arr) # Element-wise multiplication
z_hat = np.zeros([dim, 1])
z_hat[0, 0] = x_hat[0]
for i in range(1, dim):
z_hat[i, 0] = x_hat[i] + 0.25 * (x_hat[i - 1] - 2 * abs(x_opt[i - 1]))
x_opt.shape = (dim, 1)
z_vec = 100 * (
np.matmul(LambdaAlpha(10, dim), z_hat - 2 * abs(x_opt)) + 2 * abs(x_opt))
total = sum([z * math.sin(abs(z)**0.5) for z in z_vec.flat])
return -(total / (100.0 * dim)) + 4.189828872724339 + 100 * Fpen(z_vec / 100)
| 14,720
|
def article_idx_to_words_row(article_idx):
"""
Given a tuple with an article and an index, return a Row with the
    index and a list of the words in the article.
    The words in the article are normalized, by removing all
    non-'a-z|A-Z' characters.
    Any stop words (words of two characters or fewer) are ignored.
:param article_idx: tuple
:type article_idx: tuple(defoe.papers.article.Article, int)
:return: Row
:rtype: pyspark.sql.Row
"""
article, idx = article_idx
words = []
for word in article.words:
normalized_word = query_utils.normalize(word)
if len(word) > 2: # Anything less is a stop word
words.append(normalized_word)
return Row(idx=idx, words=words)
| 14,721
|
def get_device_strategy(device, half=False, XLA=False, verbose=True):
"""
    Returns the distributed strategy object, the tune policy and the number of replicas.
Parameters
----------
    device : str
        Possible values are "TPU", "GPU", "CPU"
    half : bool
        Whether to enable mixed (bfloat16) precision when running on a TPU
    XLA : bool
        Whether to enable XLA (accelerated linear algebra) compilation when running on a TPU
    verbose : bool
        Whether to print the output messages or not
Returns
-------
tf.distribute.TPUStrategy
The distributed strategy object
int
The auto tune constant
int
Number of TPU cores, to adjust batch size and learning rate
tf.distribute.cluster_resolver.TPUClusterResolver
The tpu object
"""
device = device.upper()
v = tf.__version__
tpu = None
if device == "TPU":
_log("connecting to TPU...", verbose)
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
_log('Running on TPU ' + tpu.master(), verbose)
except ValueError:
_log("Could not connect to TPU", verbose)
tpu = None
if tpu:
try:
_log("initializing TPU ...", verbose)
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.TPUStrategy(tpu) if v >= '2.3.0' else tf.distribute.experimental.TPUStrategy(
tpu)
_log("TPU initialized", verbose)
if half:
from tensorflow.keras.mixed_precision import experimental as mixed_precision
policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
mixed_precision.set_policy(policy)
print('Mixed precision enabled')
if XLA:
tf.config.optimizer.set_jit(True)
print('Accelerated Linear Algebra enabled')
            except Exception:
_log("failed to initialize TPU", verbose)
device = "GPU"
else:
device = "GPU"
if device != "TPU":
_log("Using default strategy for CPU and single GPU", verbose)
strategy = tf.distribute.get_strategy()
if device == "GPU":
_log("Num GPUs Available: " + str(len(tf.config.experimental.list_physical_devices('GPU') if v < '2.1.0' else
tf.config.list_physical_devices('GPU'))), verbose)
tune = tf.data.experimental.AUTOTUNE
replicas = strategy.num_replicas_in_sync
_log(f'REPLICAS: {replicas}', verbose)
return strategy, tune, replicas, tpu
| 14,722
|
def make_mask(
pois_gdf,
link_gdf,
):
"""
:param pois_gdf:
:param link_gdf:
:return:
"""
mask = np.array([])
enum = np.array([])
return mask, enum
| 14,723
|
def SEMIMINUS(r1, r2):
"""aka NOT MATCHING
(macro)"""
return MINUS(r1, SEMIJOIN(r1, r2))
| 14,724
|
def send(url: str, webhookname: str, messages: List[str]) -> None:
"""SQS経由でWebhookにメッセージを送る.
Args:
url (str): SQSのURL
webhookname (str): Webhook名
messages (List[str]): メッセージのリスト
"""
logger.info(f'sqs.send: sending {len(messages)} message(s).')
splited = (messages[idx:idx + 10] for idx in range(0, len(messages), 10))
for msgs in splited:
msg_dicts = ({'webhookname': webhookname,
'message': msg} for msg in msgs)
entries = [{'Id': str(i), 'MessageBody': json.dumps(d)}
for i, d in enumerate(msg_dicts)]
client.send_message_batch(QueueUrl=url, Entries=entries)
logger.info(
f'sqs.send: send_message_batch sent {len(entries)} message(s).')
logger.info(f'sqs.send: sent {len(messages)} message(s).')
| 14,725
|
def Ineg_wrapper(valS, valI):
"""
Function used to wrap Inequalities into a suitable form for optimisation
valS > valI --> Inequality is satisfied
valS and valI can be float or 1d array
"""
epsilon = 1e-6
top = 1e3
ecart = valI - valS
if ecart < epsilon:
out = np.exp(ecart) * epsilon / np.exp(epsilon)
elif ecart > top:
out = np.log(ecart) * top / np.log(top)
else:
out = ecart
return out
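# A minimal usage sketch for Ineg_wrapper() above with scalar inputs (the
# if/elif branches compare scalars, so floats are the safe case to show).
def _example_ineg_wrapper():
    assert Ineg_wrapper(1.0, 2.0) == 1.0   # gap in the middle range is returned unchanged
    assert Ineg_wrapper(2.0, 1.0) < 1e-6   # a satisfied inequality is strongly damped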
| 14,726
|
def preprocess_imgs(set_name, img_size):
"""
    Resize and apply VGG-16 preprocessing
"""
set_new = []
for img in set_name:
img = cv2.resize(
img,
dsize=img_size,
interpolation=cv2.INTER_CUBIC
)
set_new.append(tf.keras.applications.vgg16.preprocess_input(img))
return np.array(set_new)
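# A minimal usage sketch for preprocess_imgs() above with two random uint8
# "images" (needs numpy, cv2 and tensorflow, exactly as the function does).
def _example_preprocess_imgs():
    imgs = [np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8) for _ in range(2)]
    out = preprocess_imgs(imgs, img_size=(224, 224))
    assert out.shape == (2, 224, 224, 3)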
| 14,727
|
def get_shapes(galsim_img, center):
""" Get shapes
    This function computes the moments of an image, then returns the sigma of the
window function used (size of the object) and the amplitude
(flux of the object).
Parameters
---------
galsim_img : galsim.image.Image
Galsim.image object containing the image.
center : tuple
Center of the object (x, y).
Returns
-------
sigma : float
        Sigma of the window function, or -1 if an error occurred.
    amp : float
        Moments amplitude, or -1 if an error occurred.
"""
shapes = galsim.hsm.FindAdaptiveMom(galsim_img,
guess_centroid=galsim.PositionD(center),
strict=False)
if shapes.error_message == '':
return shapes.moments_sigma, shapes.moments_amp
else:
return -1, -1
| 14,728
|
def consolidate_mouse_probes(data_containers, filename_or_fileobj, object_name='mouse_data_frame', poobah_column='poobah_pval', poobah_sig=0.05):
""" these probes have 'Multi'|'Random' in `design` col of mouse manifest. used to populate 'mouse_probes.pkl'.
pre v1.4.6: ILLUMINA_MOUSE specific probes (starting with 'rp' for repeat sequence or 'mu' for murine, 'uk' for unknown-experimental)
stored as data_container.mouse_data_frame.
saves as a dataframe just like controls:
a dict of dataframes like processed.csv format, but only mouse probes.
keys are sample_ids -- values are dataframes"""
out = dict()
for idx,sample in enumerate(data_containers):
sample_id = f"{sample.sample.sentrix_id}_{sample.sample.sentrix_position}"
data_frame = getattr(sample, object_name)
data_frame = data_frame.round({'noob_meth':0, 'noob_unmeth':0, 'm_value':3, 'beta_value':3, 'cm_value':3,
'meth':0, 'unmeth':0, 'poobah_pval':3})
out[sample_id] = data_frame
if is_file_like(filename_or_fileobj):
pickle.dump(out, filename_or_fileobj)
else: #except TypeError: # File must have a write attribute
with open(filename_or_fileobj, 'wb') as f:
pickle.dump(out, f)
return
| 14,729
|
def test_field_map_ctf():
"""Test that field mapping can be done with CTF data."""
raw = read_raw_fif(raw_ctf_fname).crop(0, 1)
raw.apply_gradient_compensation(3)
events = make_fixed_length_events(raw, duration=0.5)
evoked = Epochs(raw, events).average()
evoked.pick_channels(evoked.ch_names[:50]) # crappy mapping but faster
# smoke test
make_field_map(evoked, trans=trans_fname, subject='sample',
subjects_dir=subjects_dir)
| 14,730
|
def ascending_super_operator(hamAB, hamBA, w_isometry, v_isometry, unitary,
refsym):
"""
ascending super operator for a modified binary MERA
ascends 'hamAB' and 'hamBA' up one layer
Args:
hamAB (tf.Tensor): local Hamiltonian on the A-B lattice
hamBA (tf.Tensor): local Hamiltonian on the B-A lattice
w_isometry (tf.Tensor): MERA isometry
v_isometry (tf.Tensor): MERA isometry
        unitary (tf.Tensor): MERA disentangler
refsym (bool): if true, enforce reflection symmetry
Returns:
hamABout (tf.Tensor): ascended Hamiltonian on A-B lattice
hamBAout (tf.Tensor): ascended Hamiltonian on B-A lattice
"""
indList1 = [[6, 4, 1, 2], [1, 3, -3], [6, 7, -1], [2, 5, 3, 9],
[4, 5, 7, 10], [8, 9, -4], [8, 10, -2]]
indList2 = [[3, 4, 1, 2], [5, 6, -3], [5, 7, -1], [1, 2, 6, 9],
[3, 4, 7, 10], [8, 9, -4], [8, 10, -2]]
indList3 = [[5, 7, 2, 1], [8, 9, -3], [8, 10, -1], [4, 2, 9, 3],
[4, 5, 10, 6], [1, 3, -4], [7, 6, -2]]
indList4 = [[3, 6, 2, 5], [2, 1, -3], [3, 1, -1], [5, 4, -4], [6, 4, -2]]
hamBAout = tn.ncon([
hamAB, w_isometry,
tf.conj(w_isometry), unitary,
tf.conj(unitary), v_isometry,
tf.conj(v_isometry)
], indList1)
if refsym:
hamBAout = hamBAout + tf.transpose(hamBAout, (1, 0, 3, 2))
else:
hamBAout = hamBAout + tn.ncon([
hamAB, w_isometry,
tf.conj(w_isometry), unitary,
tf.conj(unitary), v_isometry,
tf.conj(v_isometry)
], indList3)
hamBAout = hamBAout + tn.ncon([
hamBA, w_isometry,
tf.conj(w_isometry), unitary,
tf.conj(unitary), v_isometry,
tf.conj(v_isometry)
], indList2)
hamABout = tn.ncon([
hamBA, v_isometry,
tf.conj(v_isometry), w_isometry,
tf.conj(w_isometry)
], indList4)
return hamABout, hamBAout
| 14,731
|
def filteredhash(repo, maxrev):
"""build hash of filtered revisions in the current repoview.
Multiple caches perform up-to-date validation by checking that the
tiprev and tipnode stored in the cache file match the current repository.
However, this is not sufficient for validating repoviews because the set
of revisions in the view may change without the repository tiprev and
tipnode changing.
This function hashes all the revs filtered from the view and returns
that SHA-1 digest.
"""
cl = repo.changelog
if not cl.filteredrevs:
return None
key = cl._filteredrevs_hashcache.get(maxrev)
if not key:
revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
if revs:
s = hashutil.sha1()
for rev in revs:
s.update(b'%d;' % rev)
key = s.digest()
cl._filteredrevs_hashcache[maxrev] = key
return key
| 14,732
|
def test_NodeBipartite_observation(solving_model):
"""Observation of NodeBipartite is a type with array attributes."""
obs = make_obs(O.NodeBipartite(), solving_model)
assert isinstance(obs, O.NodeBipartiteObs)
assert_array(obs.column_features, ndim=2)
assert_array(obs.row_features, ndim=2)
assert_array(obs.edge_features.values)
assert_array(obs.edge_features.indices, ndim=2, dtype=np.uint64)
| 14,733
|
def filter_blast_by_amplicon(blast_hits, min_amplicon_len, max_amplicon_len):
"""
    Filter primers by the putative amplicon that would be generated.
    If the amplicon size is outside the min/max range, then the primers are not legitimate off-targets.
"""
logging.info('Filtering to only hits producing a legitimate amplicon...')
nonspec_primers = set()
for primer_id,d in blast_hits.items():
status = {'no_amp' : 0, 'hit' : 0, 'wrong_strand' : 0}
for saccver,dd in d.items():
if primer_id in nonspec_primers:
break
# hits for primer pair?
try:
_,_ = dd['f'],dd['r']
except KeyError:
status['no_amp'] += 1
continue
# calc amplicon size of any expanded fwd-rev pair
for x in dd['f']:
if primer_id in nonspec_primers:
break
for y in dd['r']:
amp_len = calc_amp_len(x[0], x[1], y[0], y[1])
if (x[2] != y[2] and amp_len >= min_amplicon_len
and amp_len <= max_amplicon_len):
# legit hit: different strand & amplicon_len w/in size range
nonspec_primers.add(primer_id)
status['hit'] += 1
break
elif (x[2] == y[2] and amp_len >= min_amplicon_len
and amp_len <= max_amplicon_len):
# same strand, but correct amplicon size
status['wrong_strand'] += 1
# summary
msg = ' Primer {}: legit amplicon: {}, no amplicon: {}'
logging.info(msg.format(primer_id, status['hit'], status['no_amp']))
# summary
msg = ' No. of primers producing a legit non-target amplicon: {}'
logging.info(msg.format(len(nonspec_primers)))
return nonspec_primers
| 14,734
|
def cropImage(img):
"""
Crop the screen for only the relevant inventory section.
Args:
img (ndarray): The image of the Warframe inventory.
Returns:
ndarray: The image of only the inventory section containing items.
"""
#TODO: Allow user to manually define inventory section instead of hard cropping.
img = img[200:950, 80:1380]
return img
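# A minimal usage sketch for cropImage() above with a synthetic 1080p
# "screenshot"; the hard-coded crop assumes at least 950x1380 pixels.
def _example_crop_image():
    import numpy as np  # assumed available; cropImage itself only slices the array
    screenshot = np.zeros((1080, 1920, 3), dtype=np.uint8)
    assert cropImage(screenshot).shape == (750, 1300, 3)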
| 14,735
|
def determine_channel(channel_as_text):
"""Determine which channel the review is for according to the channel
parameter as text, and whether we should be in content-review only mode."""
if channel_as_text == 'content':
# 'content' is not a real channel, just a different review mode for
# listed add-ons.
content_review = True
channel = 'listed'
else:
content_review = False
# channel is passed in as text, but we want the constant.
channel = amo.CHANNEL_CHOICES_LOOKUP.get(
channel_as_text, amo.RELEASE_CHANNEL_LISTED)
return channel, content_review
| 14,736
|
def parse_workflow_args(input: List[str] = None) -> argparse.Namespace:
"""Parses command-line style flags for the workflow.
All unknown args are discarded to allow multiple parses on args.
Args:
input: An optional list of strings in the style of sys.argv. Will
default to argparse's interpretation of sys.argv if omitted.
Returns:
An argparse Namespace with the parsed, known arguments.
"""
parser = argparse.ArgumentParser(description='LiteX SoC')
parser.add_argument('--build', action='store_true', help='Build bitstream')
parser.add_argument('--load', action='store_true', help='Load bitstream')
parser.add_argument('--toolchain',
help=('Specify toolchain for implementing '
'gateware (\'vivado\' or \'symbiflow\')'))
parser.add_argument('--sys-clk-freq', type=float,
help='System clock frequency')
builder_args(parser)
soc_core_args(parser)
vivado_build_args(parser)
parser.add_argument('--with-ethernet',
action='store_true',
help='Enable Ethernet support')
parser.add_argument('--with-etherbone',
action='store_true',
help='Enable Etherbone support')
parser.add_argument('--with-mapped-flash',
action='store_true',
help='Add litespi SPI flash')
parser.add_argument("--with-spi-sdcard",
action="store_true",
help="Enable SPI-mode SDCard support")
parser.add_argument("--with-video-framebuffer",
action="store_true",
help="Enable Video Framebuffer (HDMI)")
parser.add_argument('--target',
default='digilent_arty',
help='Specify target board')
parser.set_defaults(csr_csv='csr.csv',
uart_name='serial',
uart_baudrate=921600,
cpu_variant='full+cfu+debug',
with_etherbone=False)
# Return only the known args
if input:
return parser.parse_known_args(input)[0]
else:
return parser.parse_known_args()[0]
| 14,737
|
def get_device_config(device_name, dnac_jwt_token):
"""
This function will get the configuration file for the device with the name {device_name}
:param device_name: device hostname
:param dnac_jwt_token: DNA C token
:return: configuration file
"""
device_id = get_device_id_name(device_name, dnac_jwt_token)
url = DNAC_URL + '/api/v1/network-device/' + device_id + '/config'
header = {'content-type': 'application/json', 'x-auth-token': dnac_jwt_token}
response = requests.get(url, headers=header, verify=False)
config_json = response.json()
config_file = config_json['response']
return config_file
| 14,738
|
def release_not_found(pp, release_task, nick):
"""
Return a deferred that when fired returns a message about this missing
release.
:param pp: txproductpages.Connection object
:param release_task: ReleaseTask object
:param nick: str, user who asked about this release_task.
"""
template = '{nick}, I could not find release {release} in {pp} .'
try:
# Note: upcoming_releases() does not capture releases without a GA date
# set (ie. early in the cycle), or releases in the maintenance phase
# (z-streams). It's not perfect, but at least it's something.
upcoming = yield pp.upcoming_releases(release_task.product)
except ProductPagesException:
upcoming = ()
if len(upcoming) == 1:
suggestion = release_to_text(upcoming[0])
template += ' Maybe you meant "%s"?' % suggestion
if len(upcoming) > 1:
template += ' Upcoming %s releases:' % release_task.product
for release in upcoming:
template += ' "%s"' % release_to_text(release)
product_url = pp.product_url(release_task.product)
message = template.format(nick=nick,
release=release_task.shortname,
pp=product_url)
defer.returnValue(message)
| 14,739
|
def get_ideas():
"""
Gets all ideas from mongo
"""
return find('ideas')
| 14,740
|
def uppercase_dtype(dtype):
""" Convert a dtype to upper case. A helper function.
Do not use.
"""
pairs = dict([(key.upper(), dtype.fields[key]) for key in dtype.names])
dtype = numpy.dtype(pairs)
return dtype
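# A minimal usage sketch for uppercase_dtype() above (numpy only).
def _example_uppercase_dtype():
    dt = numpy.dtype([("ra", "f8"), ("dec", "f8")])
    assert uppercase_dtype(dt).names == ("RA", "DEC")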
| 14,741
|
def test_no_header_without_setting():
"""Make sure that the endpoint has no authorization header if either username or password is missing"""
client1 = PublicationDataApi("no-url", "dummy", None)
client2 = PublicationDataApi("no-url", None, "abc")
client3 = PublicationDataApi("no-url", None, None)
headers = [ client.endpoint.base_headers for client in [client1, client2, client3] ]
assert 'Authorization' not in headers[0]
assert 'Authorization' not in headers[1]
assert 'Authorization' not in headers[2]
| 14,742
|
def download_files_by_name(accession, file_name, ftp_download_enabled, input_folder, output_folder):
"""
    This function downloads files from FTP or copies them from the file system
"""
raw_files = Files()
logging.info("accession: " + accession)
if ftp_download_enabled:
logging.info("Data will be download from ftp")
raw_files.download_file_from_ftp_by_name(accession, file_name, output_folder)
else:
logging.info("Data will be copied from file system " + output_folder)
raw_files.copy_file_from_dir_by_name(accession, file_name, input_folder)
| 14,743
|
async def async_setup(hass, config):
"""Set up the sisyphus component."""
from sisyphus_control import Table
tables = hass.data.setdefault(DATA_SISYPHUS, {})
table_configs = config.get(DOMAIN)
session = async_get_clientsession(hass)
async def add_table(host, name=None):
"""Add platforms for a single table with the given hostname."""
table = await Table.connect(host, session)
if name is None:
name = table.name
tables[name] = table
_LOGGER.debug("Connected to %s at %s", name, host)
hass.async_create_task(async_load_platform(
hass, 'light', DOMAIN, {
CONF_NAME: name,
}, config
))
hass.async_create_task(async_load_platform(
hass, 'media_player', DOMAIN, {
CONF_NAME: name,
CONF_HOST: host,
}, config
))
if isinstance(table_configs, dict): # AUTODETECT_SCHEMA
for ip_address in await Table.find_table_ips(session):
await add_table(ip_address)
else: # TABLES_SCHEMA
for conf in table_configs:
await add_table(conf[CONF_HOST], conf[CONF_NAME])
async def close_tables(*args):
"""Close all table objects."""
tasks = [table.close() for table in tables.values()]
if tasks:
await asyncio.wait(tasks)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, close_tables)
return True
| 14,744
|
def mlp_prior(input_dim: int, zdim: int = 2) -> Dict[str, jnp.array]:
"""Priors over weights and biases in the default Bayesian MLP"""
hdim = [64, 32]
def _bnn_prior(task_dim: int):
w1 = sample_weights("w1", input_dim, hdim[0], task_dim)
b1 = sample_biases("b1", hdim[0], task_dim)
w2 = sample_weights("w2", hdim[0], hdim[1], task_dim)
b2 = sample_biases("b2", hdim[1], task_dim)
w3 = sample_weights("w3", hdim[1], zdim, task_dim)
b3 = sample_biases("b3", zdim, task_dim)
return {"w1": w1, "b1": b1, "w2": w2, "b2": b2, "w3": w3, "b3": b3}
return _bnn_prior
| 14,745
|
def main():
"""Main entry point."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--apk', dest='apk_path', type=str, default=None,
help='Path to an APK to install on the device.')
parser.add_argument('--refresh-bins', action='store_true', default=False,
help='Pass this flag to have the test run adb sync')
parser.add_argument('--serial', '-s', type=str, default=None,
help='Serial number of device to test against')
parser.add_argument(
'--test-filter', default=TEST_FILTER_ALL,
choices=[TEST_FILTER_ALL, TEST_FILTER_JAVA, TEST_FILTER_NATIVE])
parser.add_argument('--verbose', '-v', action='store_true', default=False)
args = parser.parse_args()
run_test(args.test_filter in (TEST_FILTER_ALL, TEST_FILTER_NATIVE),
args.test_filter in (TEST_FILTER_ALL, TEST_FILTER_JAVA),
apk_path=args.apk_path, refresh_binaries=args.refresh_bins,
device_serial=args.serial, verbose=args.verbose)
| 14,746
|
def get_biggest_spread_by_symbol(exchanges, symbol):
"""Get biggest spread by symbol."""
ask_exchange_id = ""
min_ask_price = 99999999
bid_exchange_id = ""
max_bid_price = 0
for exchange_id in exchanges:
        exchange = getattr(ccxt, exchange_id)()  # instantiate the exchange client by name
try:
order_book = exchange.fetch_order_book(symbol)
bid_price = (
order_book["bids"][0][0] if len(order_book["bids"]) > 0 else None
)
ask_price = (
order_book["asks"][0][0] if len(order_book["asks"]) > 0 else None
)
if ask_price < min_ask_price:
ask_exchange_id = exchange_id
min_ask_price = ask_price
if bid_price > max_bid_price:
bid_exchange_id = exchange_id
max_bid_price = bid_price
increase_percentage = (bid_price - ask_price) / ask_price * 100
if increase_percentage >= 1:
return ask_exchange_id, min_ask_price, bid_exchange_id, max_bid_price
except Exception as e:
# pass
print(e)
print("{0} - There is an error!".format(exchange_id))
min_ask_price += 0.235
max_bid_price -= 0.235
return ask_exchange_id, min_ask_price, bid_exchange_id, max_bid_price
| 14,747
|
def gmag_filename(dates, stations):
"""Create a list of tuples for downloading: remote_file, local_file"""
prefs = pyspedas.get_spedas_prefs()
if 'themis_remote' in prefs:
remote_path = prefs['themis_remote']
else:
raise NameError('remote_path is not found in spd_prefs_txt.py')
if 'data_dir' in prefs:
data_dir = prefs['data_dir']
if ('data_dir_unix' in prefs) and (os.name != 'nt'):
data_dir = os.path.expanduser(prefs['data_dir_unix'])
else:
raise NameError('data_dir is not found in spd_prefs.txt')
dates = pyspedas.get_dates(dates)
file_list = []
probe = 'thg'
level = 'l2'
instrument = 'mag'
version = '1'
if stations[0] == 'idx':
level = 'l1'
for sdate in dates:
year = sdate[0:4]
month = sdate[5:7]
day = sdate[8:10]
for station in stations:
# file_dir = 'tha/l2/fgm/2015/'
if station == 'idx':
level = 'l1'
file_dir = probe + '/' + level + '/' + instrument + '/' \
+ station + '/' + year
filename = probe + '_' + level + '_' + station + '_' + year \
+ month + day + '_v0' + version + '.cdf'
elif check_greenland(station):
# thg/greenland_gmag/l2
file_dir = probe + '/greenland_gmag/' + level + '/' \
+ station + '/' + year
filename = probe + '_' + level + '_' + instrument + '_' \
+ station + '_' + year + month + day + '_v0' \
+ version + '.cdf'
else:
# thg/l2/mag/
file_dir = probe + '/' + level + '/' + instrument + '/' \
+ station + '/' + year
filename = probe + '_' + level + '_' + instrument + '_' \
+ station + '_' + year + month + day + '_v0' \
+ version + '.cdf'
file_dir_local = os.path.join(probe, level, instrument,
station, year)
# thg_l2_mag_amd_20170109_v01.cdf
remote_file = remote_path + '/' + file_dir + '/' + filename
local_file = os.path.join(data_dir, file_dir_local, filename)
file_list.append((remote_file, local_file))
return file_list
| 14,748
|
def always_mocked_kubernetes_client(mocker):
"""
We do not test the Kubernetes client, so everything there should be mocked.
Also, no external calls must be made under any circumstances.
"""
mocker.patch('kubernetes.watch')
mocker.patch('kubernetes.client')
| 14,749
|
def test_compile_hourly_statistics_unchanged(hass_recorder):
"""Test compiling hourly statistics, with no changes during the hour."""
hass = hass_recorder()
recorder = hass.data[DATA_INSTANCE]
setup_component(hass, "sensor", {})
zero, four, states = record_states(hass)
hist = history.get_significant_states(hass, zero, four)
assert dict(states) == dict(hist)
recorder.do_adhoc_statistics(period="hourly", start=four)
wait_recording_done(hass)
stats = statistics_during_period(hass, four)
assert stats == {
"sensor.test1": [
{
"statistic_id": "sensor.test1",
"start": process_timestamp_to_utc_isoformat(four),
"mean": 20.0,
"min": 20.0,
"max": 20.0,
"last_reset": None,
"state": None,
"sum": None,
}
]
}
| 14,750
|
def count_ends(d):
"""
count number of consecutive letters
:param d:
:return:
"""
con=0
for i in range(len(d)-1):
if d[i][-1] == d[i+1][-1]:
con+=1
print("{} consecutive letters".format(con))
| 14,751
|
def print_results(results):
"""Prints `results` (the results of validation) to stdout.
Args:
results: A list of FileValidationResults instances.
"""
for file_result in sorted(results, key=operator.attrgetter("filepath")):
print_horizontal_rule()
print_level(logger.info, "[-] Results for: %s", 0, file_result.filepath)
if file_result.is_valid:
marker = _GREEN + "[+]"
verdict = "Valid"
log_func = logger.info
else:
marker = _RED + "[X]"
verdict = "Invalid"
log_func = logger.error
print_level(log_func, "%s STIX JSON: %s", 0, marker, verdict)
for object_result in file_result.object_results:
if object_result.warnings:
print_warning_results(object_result, 1)
if object_result.errors:
print_schema_results(object_result, 1)
if file_result.fatal:
print_fatal_results(file_result.fatal, 1)
| 14,752
|
def update_user():
"""User update route
:return: action status
"""
if 'data' in request.json:
data = request.json['data']
if ('profile' in data) and ('theme' in data['profile']):
current_user.profile.theme = data['profile']['theme']
services.db.session.commit()
return jsonify({
'status': 'success',
'message': 'User profile updated successfully.'
})
| 14,753
|
def on_deck(elements: List[int], all_vars):
"""all of the elements must be within the deck"""
rules = []
for element in elements:
var = all_vars[element - 1]
rules.append(var >= 1)
rules.append(var <= 52)
return rules
| 14,754
|
def transform_data_to_dictionary(elements):
"""Parses each element in the list and parses it in a dictionary
Args:
elements (list): list of html elements
Returns:
dictionary: treated information.
"""
url_informations = {}
for n in range(0, len(elements), 2):
url_informations[clean_names(elements[n].text)] = elements[n+1]
return url_informations
| 14,755
|
def postprocess_new(u, x, lr_min, lr_max, num_itr, rho=0.0, with_l1=False,s=math.log(9.0)):
"""
:param u: utility matrix, u is assumed to be symmetric, in batch
:param x: RNA sequence, in batch
:param lr_min: learning rate for minimization step
:param lr_max: learning rate for maximization step (for lagrangian multiplier)
:param num_itr: number of iterations
:param rho: sparsity coefficient
:param with_l1:
:return:
"""
m = constraint_matrix_batch(x).float()
# u with threshold
# equivalent to sigmoid(u) > 0.9
# u = (u > math.log(9.0)).type(torch.FloatTensor) * u
u = soft_sign(u - s) * u
# initialization
a_hat = (torch.sigmoid(u)) * soft_sign(u - s).detach()
lmbd = F.relu(torch.sum(contact_a(a_hat, m), dim=-1) - 1).detach()
# gradient descent
for t in range(num_itr):
grad_a = (lmbd * soft_sign(torch.sum(contact_a(a_hat, m), dim=-1) - 1)).unsqueeze_(-1).expand(u.shape) - u / 2
grad = a_hat * m * (grad_a + torch.transpose(grad_a, -1, -2))
a_hat -= lr_min * grad
lr_min = lr_min * 0.99
if with_l1:
a_hat = F.relu(torch.abs(a_hat) - rho * lr_min)
lmbd_grad = F.relu(torch.sum(contact_a(a_hat, m), dim=-1) - 1)
lmbd += lr_max * lmbd_grad
lr_max = lr_max * 0.99
# print
# if t % 20 == 19:
# n1 = torch.norm(lmbd_grad)
# grad_a = (lmbd * soft_sign(torch.sum(contact_a(a_hat, m), dim=-1) - 1)).unsqueeze_(-1).expand(u.shape) - u / 2
# grad = a_hat * m * (grad_a + torch.transpose(grad_a, -1, -2))
# n2 = torch.norm(grad)
# print([t, 'norms', n1, n2, aug_lagrangian(u, m, a_hat, lmbd), torch.sum(contact_a(a_hat, u))])
a = a_hat * a_hat
a = (a + torch.transpose(a, -1, -2)) / 2
a = a * m
return a
| 14,756
|
def _assert_zone_state(hass, mode, hvac, current_temp, target_temp, preset, action):
"""Assert zone climate state."""
state = hass.states.get("climate.zone_1")
assert hass.states.is_state("climate.zone_1", hvac)
assert state.attributes["current_temperature"] == current_temp
assert state.attributes["max_temp"] == Zone.MAX_TARGET_TEMP
assert state.attributes["min_temp"] == Zone.MIN_TARGET_HEATING_TEMP
assert state.attributes["temperature"] == target_temp
assert state.attributes["hvac_action"] == action
assert state.attributes["preset_mode"] == preset
expected_modes = {HVAC_MODE_OFF, HVAC_MODE_AUTO, HVAC_MODE_FAN_ONLY}
zone = SystemManagerMock.data.get("get_zones")[0]
if zone.cooling:
expected_modes.update({HVAC_MODE_COOL})
assert set(state.attributes["hvac_modes"]) == expected_modes
| 14,757
|
def upgrade(ctx, config_file_path, skip_config_decryption):
"""Upgrade existing CSE installation/entities to match CSE 3.1.1.
\b
- Add CSE, RDE version, Legacy mode info to VCD's extension data for CSE
- Register defined entities schema of CSE k8s clusters with VCD
- Create placement compute policies used by CSE
- Remove old sizing compute policies created by CSE 2.6 and below
- Currently installed templates that are no longer compliant with
new CSE template cookbook will not be recognized by CSE 3.1. Admins can
safely delete them once new templates are installed and the existing
clusters are upgraded to newer template revisions.
- Update existing CSE k8s cluster's to match CSE 3.1 k8s clusters.
- Upgrading legacy clusters would require new template creation supported
by CSE 3.1.1
- legacy_mode is config property that is used to configure CSE with
desired version of VCD.
- Set legacy_mode=true if CSE 3.1 is configured with VCD whose maximum
supported api_version < 35.
- Set legacy_mode=false if CSE 3.1 is configured with VCD whose maximum
supported api_version >= 35.
- NOTE: legacy_mode=true is a valid condition for CSE 3.1 configured with
VCD whose maximum supported api_version >= 35. However, it is strongly
recommended to set the property to false to leverage
the new functionality.
- When legacy_mode=true, supported template information are based on
remote-template-cookbook version "1.0.0".
- When legacy_mode=false, supported template information are based on
remote-template-cookbook version "2.0.0".
"""
# NOTE: For CSE 3.0, if `enable_tkg_plus` in the config is set to false,
# an exception is thrown if
# 1. If there is an existing TKG+ template
# 2. If remote template cookbook contains a TKG+ template.
SERVER_CLI_LOGGER.debug(f"Executing command: {ctx.command_path}")
console_message_printer = utils.ConsoleMessagePrinter()
utils.check_python_version(console_message_printer)
password = None
if not skip_config_decryption:
password = os.getenv('CSE_CONFIG_PASSWORD') or utils.prompt_text(
PASSWORD_FOR_CONFIG_DECRYPTION_MSG,
color='green',
hide_input=True
)
try:
config = config_validator.get_validated_config(
config_file_name=config_file_path,
pks_config_file_name='',
skip_config_decryption=skip_config_decryption,
decryption_password=password,
log_wire_file=INSTALL_WIRELOG_FILEPATH,
logger_debug=INSTALL_LOGGER,
msg_update_callback=console_message_printer
)
configure_cse.upgrade_cse(
config_file_name=config_file_path,
config=config,
msg_update_callback=console_message_printer
)
except Exception as err:
SERVER_CLI_LOGGER.error(str(err))
console_message_printer.error(str(err))
sys.exit(1)
finally:
# block the process to let telemetry handler to finish posting data to
# VAC. HACK!!!
time.sleep(3)
| 14,758
|
def test_orion(provider, keyid, secret, token, imageid, subscription, tenant, projectid,
instancetype, user, localpath, region, zone, sriov, kernel):
"""
Run Orion test.
:param provider Service provider to be used e.g. azure, aws, gce.
:param keyid: user key for executing remote connection
:param secret: user secret for executing remote connection
:param token: GCE refresh token obtained with gcloud sdk
:param subscription: Azure specific subscription id
:param tenant: Azure specific tenant id
:param projectid: GCE specific project id
:param imageid: AWS OS AMI image id or
Azure image references offer and sku: e.g. 'UbuntuServer#16.04.0-LTS'.
:param instancetype: AWS instance resource type e.g 'd2.4xlarge' or
Azure hardware profile vm size e.g. 'Standard_DS14_v2'.
:param user: remote ssh user for the instance
:param localpath: localpath where the logs should be downloaded, and the
default path for other necessary tools
:param region: EC2 region to connect to
:param zone: EC2 zone where other resources should be available
:param sriov: Enable or disable SR-IOV
:param kernel: custom kernel name provided in localpath
"""
disk_size = 0
if provider == constants.AWS:
disk_size = 100
elif provider == constants.AZURE:
disk_size = 513
elif provider == constants.GCE:
# pd-ssd 30iops/gb => 167GB = 5010 iops
disk_size = 167
connector, vm_ips, device, ssh_client = setup_env(provider=provider, vm_count=1,
test_type=constants.VM_DISK,
disk_size=disk_size, raid=False,
keyid=keyid, secret=secret,
token=token, subscriptionid=subscription,
tenantid=tenant, projectid=projectid,
imageid=imageid, instancetype=instancetype,
user=user, localpath=localpath,
region=region, zone=zone, sriov=sriov,
kernel=kernel)
results_path = None
try:
if all(client for client in ssh_client.values()):
current_path = os.path.dirname(os.path.realpath(__file__))
ssh_client[1].put_file(os.path.join(localpath, 'orion_linux_x86-64.gz'),
'/tmp/orion_linux_x86-64.gz')
ssh_client[1].put_file(os.path.join(current_path, 'tests', 'run_orion.sh'),
'/tmp/run_orion.sh')
ssh_client[1].run('chmod +x /tmp/run_orion.sh')
ssh_client[1].run("sed -i 's/\r//' /tmp/run_orion.sh")
cmd = '/tmp/run_orion.sh {}'.format(device)
log.info('Running command {}'.format(cmd))
ssh_client[1].run(cmd, timeout=constants.TIMEOUT * 4)
results_path = os.path.join(localpath, 'orion{}_{}.zip'.format(str(time.time()),
instancetype))
ssh_client[1].get_file('/tmp/orion.zip', results_path)
except Exception as e:
log.error(e)
raise
finally:
if connector:
connector.teardown()
if results_path:
upload_results(localpath=localpath, table_name='Perf_{}_Orion'.format(provider),
results_path=results_path, parser=OrionLogsReader,
test_case_name='{}_Orion_perf_tuned'.format(provider),
host_type=shortcut.host_type(provider), instance_size=instancetype,
disk_setup='1 x SSD {}GB'.format(disk_size))
| 14,759
|
def parse_api_error(response):
"""
Parse the error-message from the API Response.
    Assumes that a check for the presence of an error was done beforehand.
:param response: Dict of the request response ([imdata][0][....])
:type response: ``dict``
:returns: Parsed Error-Text
:rtype: ``str``
"""
if "error" in response["imdata"][0]:
return (
"API-Errorcode "
+ str(response["imdata"][0]["error"]["attributes"]["code"])
+ ": "
+ str(response["imdata"][0]["error"]["attributes"]["text"])
)
else:
return "Unparseable: " + str(response)
| 14,760
|
def test_dataset_pubtabnet_returns_image() -> None:
"""
test dataset pubtabnet returns image
"""
# Arrange
pubtabnet = Pubtabnet()
pubtabnet.dataflow.get_workdir = get_test_path # type: ignore
pubtabnet.dataflow.annotation_files = {"all": "test_file.jsonl"}
df = pubtabnet.dataflow.build()
# Act
df_list = collect_datapoint_from_dataflow(df)
assert len(df_list) == 3
| 14,761
|
def set_api_file(name, file):
"""Set an URL generator populated with data from `file`.
Use `file` to populate a new `DocUrlGenerator` instance and register it
as `name`.
:Parameters:
`name` : `str`
the name of the generator to be registered
`file` : `str` or file
the file to parse populate the URL generator
"""
generator = DocUrlGenerator()
generator.load_index(file)
register_api(name, generator)
| 14,762
|
def test_check_ensemble_build_no_lc():
"""[Utils] check_ensemble_build : raises error on no LC."""
lc = clone(LAYER_CONTAINER)
del lc.stack
np.testing.assert_raises(AttributeError, check_ensemble_build, lc)
| 14,763
|
def access_settings(service, groupId, settings):
"""Retrieves a group's settings and updates the access permissions to it.
Args:
service: object service for the Group Settings API.
groupId: string identifier of the group@domain.
settings: dictionary key-value pairs of properties of group.
"""
# Get the resource 'group' from the set of resources of the API.
# The Group Settings API has only one resource 'group'.
group = service.groups()
# Retrieve the group properties
g = group.get(groupUniqueId=groupId).execute()
print('\nGroup properties for group %s\n' % g['name'])
pprint.pprint(g)
# If dictionary is empty, return without updating the properties.
if not settings.keys():
print('\nGive access parameters to update group access permissions\n')
return
body = {}
# Settings might contain null value for some keys(properties).
# Extract the properties with values and add to dictionary body.
    for key in settings:
if settings[key] is not None:
body[key] = settings[key]
# Update the properties of group
g1 = group.update(groupUniqueId=groupId, body=body).execute()
print('\nUpdated Access Permissions to the group\n')
pprint.pprint(g1)
| 14,764
|
def test_breadth_first_traversal(testing_bst):
"""test that values will come out correctly for a breath first traversal"""
answer = []
def do_this(current):
answer.append(current.val.val)
breadth_first_traversal(testing_bst, do_this)
assert answer == [20, 17, 21, 16, 18, 22]
| 14,765
|
def test_instantiate():
"""Test whether UncertaintyQuantificationBase fails to instantiate."""
    if sys.version_info[0] == 2:
d = UncertaintyQuantificationBase()
else:
# abstract base class type error not raised
# in python 3.
raise (TypeError)
| 14,766
|
def create_tasks(
tasks: List[Union[Task, Dict]],
*,
pool: Union[Pool, Training, Dict, str, None] = None,
toloka_conn_id: str = 'toloka_default',
additional_args: Optional[Dict] = None,
) -> None:
"""Create a list of tasks for a given pool.
Args:
        tasks: List of either `Task` objects or task configurations.
        pool: Allows setting the tasks' pool if it's not present in the tasks themselves.
May be either a `Pool` or `Training` object or config or a pool_id value.
toloka_conn_id: Airflow connection with toloka credentials.
additional_args: Any other args presented in `toloka.client.task.CreateTasksParameters`.
"""
toloka_hook = TolokaHook(toloka_conn_id=toloka_conn_id)
toloka_client = toloka_hook.get_conn()
if additional_args is None:
additional_args = {}
tasks = [structure_from_conf(task, Task) for task in tasks]
if pool is not None:
try:
pool_id = extract_id(pool, Pool)
except Exception:
pool_id = extract_id(pool, Training)
for task in tasks:
task.pool_id = pool_id
tasks = toloka_client.create_tasks(tasks, **additional_args)
logger.info(f'Tasks: {tasks} created')
| 14,767
|
def login_manual_user_device(username: str, password: str, mac_address: str) -> Union[str, Token]:
"""Try to login by username and password. A token for auto-login is returned"""
possible_user = User.get_by_username(username)
if possible_user is None:
fail_msg = f"No user with username: {username}."
else:
user = possible_user
if not pwd_context.verify(password, user.password):
fail_msg = f"Wrong password"
else:
token, device_id = _add_update_device(user.id, mac_address)
_set_user_authenticated(user.id, device_id)
client_logger_security().info(f"Successfully logged in manual: device_id={device_id}, user_id={user.user_id}, "
f"token={token}")
return token
client_logger_security().info(f"Failed to login manual: {fail_msg}")
return "Wrong username or password"
| 14,768
|
def corrector_new(Ybus, Ibus, Sbus, V0, pv, pq, lam0, Sxfr, Vprv, lamprv, z, step, parametrization, tol, max_it,
verbose, max_it_internal=10):
"""
Solves the corrector step of a continuation power flow using a full Newton method
with selected parametrization scheme.
solves for bus voltages and lambda given the full system admittance
matrix (for all buses), the complex bus power injection vector (for
all buses), the initial vector of complex bus voltages, and column
vectors with the lists of bus indices for the swing bus, PV buses, and
PQ buses, respectively. The bus voltage vector contains the set point
for generator (including ref bus) buses, and the reference angle of the
swing bus, as well as an initial guess for remaining magnitudes and
angles.
    Returns the final complex voltages, a flag which indicates whether it
    converged or not, the number of iterations performed, the final lambda
    and the final error.
:param Ybus: Admittance matrix (CSC sparse)
:param Ibus: Bus current injections
:param Sbus: Bus power injections
:param V0: Bus initial voltages
:param pv: list of pv nodes
:param pq: list of pq nodes
:param lam0: initial value of lambda (loading parameter)
:param Sxfr: [delP+j*delQ] transfer/loading vector for all buses
:param Vprv: final complex V corrector solution from previous continuation step
:param lamprv: final lambda corrector solution from previous continuation step
:param z: normalized predictor for all buses
:param step: continuation step size
    :param parametrization: parametrization scheme used for the continuation
    :param tol: convergence tolerance
    :param max_it: maximum number of Newton iterations
    :param verbose: print progress information
    :param max_it_internal: maximum number of damping (mu) iterations per Newton step
    :return: V, converged, i, lam, error
"""
"""
# CPF_CORRECTOR Solves the corrector step of a continuation power flow using a
# full Newton method with selected parametrization scheme.
# [V, CONVERGED, I, LAM] = CPF_CORRECTOR(YBUS, SBUS, V0, REF, PV, PQ, ...
# LAM0, SXFR, VPRV, LPRV, Z, STEP, parametrization, MPOPT)
# solves for bus voltages and lambda given the full system admittance
# matrix (for all buses), the complex bus power injection vector (for
# all buses), the initial vector of complex bus voltages, and column
# vectors with the lists of bus indices for the swing bus, PV buses, and
# PQ buses, respectively. The bus voltage vector contains the set point
# for generator (including ref bus) buses, and the reference angle of the
# swing bus, as well as an initial guess for remaining magnitudes and
# angles. MPOPT is a MATPOWER options struct which can be used to
# set the termination tolerance, maximum number of iterations, and
# output options (see MPOPTION for details). Uses default options if
# this parameter is not given. Returns the final complex voltages, a
# flag which indicates whether it converged or not, the number
# of iterations performed, and the final lambda.
#
# The extra continuation inputs are LAM0 (initial predicted lambda),
# SXFR ([delP+j*delQ] transfer/loading vector for all buses), VPRV
# (final complex V corrector solution from previous continuation step),
# LAMPRV (final lambda corrector solution from previous continuation step),
# Z (normalized predictor for all buses), and STEP (continuation step size).
# The extra continuation output is LAM (final corrector lambda).
#
# See also RUNCPF.
# MATPOWER
# Copyright (c) 1996-2015 by Power System Engineering Research Center (PSERC)
# by Ray Zimmerman, PSERC Cornell,
# Shrirang Abhyankar, Argonne National Laboratory,
# and Alexander Flueck, IIT
#
# Modified by Alexander J. Flueck, Illinois Institute of Technology
# 2001.02.22 - corrector.m (ver 1.0) based on newtonpf.m (MATPOWER 2.0)
#
# Modified by Shrirang Abhyankar, Argonne National Laboratory
# (Updated to be compatible with MATPOWER version 4.1)
#
# $Id: cpf_corrector.m 2644 2015-03-11 19:34:22Z ray $
#
# This file is part of MATPOWER.
# Covered by the 3-clause BSD License (see LICENSE file for details).
# See http://www.pserc.cornell.edu/matpower/ for more info.
"""
# initialize
converged = False
i = 0
V = V0
Va = angle(V)
Vm = np.abs(V)
dVa = np.zeros_like(Va)
dVm = np.zeros_like(Vm)
lam = lam0 # set lam to initial lam0
# set up indexing for updating V
npv = len(pv)
npq = len(pq)
pvpq = r_[pv, pq]
nj = npv + npq * 2
nb = len(V) # number of buses
j1 = 1
'''
# MATLAB code
j2 = npv # j1:j2 - V angle of pv buses
j3 = j2 + 1
j4 = j2 + npq # j3:j4 - V angle of pq buses
j5 = j4 + 1
j6 = j4 + npq # j5:j6 - V mag of pq buses
j7 = j6 + 1
j8 = j6 + 1 # j7:j8 - lambda
'''
# j1:j2 - V angle of pv buses
j1 = 0
j2 = npv
# j3:j4 - V angle of pq buses
j3 = j2
j4 = j2 + npq
# j5:j6 - V mag of pq buses
j5 = j4
j6 = j4 + npq
j7 = j6
j8 = j6 + 1
# evaluate F(x0, lam0), including Sxfr transfer/loading
mismatch = V * conj(Ybus * V) - Sbus - lam * Sxfr
# F = r_[mismatch[pvpq].real, mismatch[pq].imag]
# evaluate P(x0, lambda0)
P = cpf_p(parametrization, step, z, V, lam, Vprv, lamprv, pv, pq, pvpq)
# augment F(x,lambda) with P(x,lambda)
F = r_[mismatch[pvpq].real, mismatch[pq].imag, P]
# check tolerance
last_error = linalg.norm(F, Inf)
error = 1e20
if last_error < tol:
converged = True
if verbose:
print('\nConverged!\n')
# do Newton iterations
while not converged and i < max_it:
# update iteration counter
i += 1
# evaluate Jacobian
J = Jacobian(Ybus, V, Ibus, pq, pvpq)
dF_dlam = -r_[Sxfr[pvpq].real, Sxfr[pq].imag]
dP_dV, dP_dlam = cpf_p_jac(parametrization, z, V, lam, Vprv, lamprv, pv, pq, pvpq)
# augment J with real/imag - Sxfr and z^T
'''
J = [ J dF_dlam
dP_dV dP_dlam ]
'''
J = vstack([hstack([J, dF_dlam.reshape(nj, 1)]),
hstack([dP_dV, dP_dlam])], format="csc")
# compute update step
dx = -spsolve(J, F)
# reassign the solution vector
if npv:
dVa[pv] = dx[j1:j2]
if npq:
dVa[pq] = dx[j3:j4]
dVm[pq] = dx[j5:j6]
# update lambda
lam += dx[j7:j8][0]
# reset mu
mu_ = 1.0
print('iter', i)
it = 0
Vm = np.abs(V)
Va = np.angle(V)
while error >= last_error and it < max_it_internal:
# update voltage the Newton way (mu=1)
Vm_new = Vm + mu_ * dVm
Va_new = Va + mu_ * dVa
V_new = Vm_new * exp(1j * Va_new)
print('\t', mu_, error, last_error)
# evaluate F(x, lam)
mismatch = V_new * conj(Ybus * V_new) - Sbus - lam * Sxfr
# evaluate P(x, lambda)
P = cpf_p(parametrization, step, z, V_new, lam, Vprv, lamprv, pv, pq, pvpq)
# compose the mismatch vector
F = r_[mismatch[pv].real,
mismatch[pq].real,
mismatch[pq].imag,
P]
# check for convergence
error = linalg.norm(F, Inf)
# modify mu
mu_ *= 0.25
it += 1
V = V_new.copy()
last_error = error
        if verbose:
            print('\n%3d %10.3e' % (i, error))
        if error < tol:
            converged = True
            if verbose:
                print("\nNewton's method corrector converged in ", i, ' iterations.\n')
    if verbose:
        if not converged:
            print('\nNewton method corrector did not converge in ', i, ' iterations.\n')
return V, converged, i, lam, error
| 14,769
|
def retrieve_files(dir, suffix='png|jpg'):
""" retrive files with specific suffix under dir and sub-dirs recursively
"""
def retrieve_files_recursively(dir, file_lst):
for d in sorted(os.listdir(dir)):
dd = osp.join(dir, d)
if osp.isdir(dd):
retrieve_files_recursively(dd, file_lst)
else:
if osp.splitext(d)[-1].lower() in ['.' + s for s in suffix]:
file_lst.append(dd)
if not dir:
return []
if isinstance(suffix, str):
suffix = suffix.split('|')
file_lst = []
retrieve_files_recursively(dir, file_lst)
file_lst.sort()
return file_lst
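# Hedged usage sketch (added for illustration): assumes retrieve_files() above
# is in scope; './data/frames' is a placeholder directory.
image_paths = retrieve_files('./data/frames', suffix='png|jpg')
print('found %d image files' % len(image_paths))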
| 14,770
|
def clique_ring(n_cluster=3, n_in_cluster=5):
"""Get adjacency matrix for cluster domain used by Schapiro et al 2013.
Args:
n_cluster: number of clusters, connected in a ring.
n_in_cluster: number of nodes in each cluster. Each node is connected to all
other nodes in cluster, except the edge connecting the two nodes with
outgoing edges is severed.
Returns:
adjmat: adjacency matrix
xy: xy coordinates of each state for plotting. Obtained by arranging nodes
within a cluster into evenly spaced circles, and then arranging those
clusters evenly around a circle.
labels: (n_state) array containing cluster index of each state
"""
n_state = n_cluster * n_in_cluster
clq, _, _ = clique(n_in_cluster)
clq[0, n_in_cluster-1] = 0
clq[n_in_cluster-1, 0] = 0
adj = clq
for i in range(n_cluster-1):
adj = block_diag(adj, clq)
for i in range(n_cluster):
i_curr = np.mod(i * n_in_cluster-1, n_state)
i_next = np.mod(i * n_in_cluster, n_state)
adj[i_curr, i_next] = 1
adj[i_next, i_curr] = 1
# get xy
clu_ind = np.repeat(np.arange(0, n_cluster).reshape(-1, 1),
n_in_cluster, axis=0).reshape(-1)
ang_clu = clu_ind * 1.0 / n_cluster * 2 * np.pi
x_clu = np.cos(ang_clu).reshape(-1, 1) * 2
y_clu = np.sin(ang_clu).reshape(-1, 1) * 2
offset = np.pi - ang_clu - np.pi/n_in_cluster # turn clusters toward center
ang_in_clu = np.linspace(0, 2*np.pi, n_in_cluster+1)[:n_in_cluster]
ang_in_clus = np.stack([ang_in_clu]*n_cluster).reshape(-1)
ang_in_clus = ang_in_clus - offset
x_in_clu = np.cos(ang_in_clus).reshape(-1, 1)
y_in_clu = np.sin(ang_in_clus).reshape(-1, 1)
# get cluster labels
labels = np.concatenate([np.ones(n_in_cluster) * i for i in range(n_cluster)])
return adj, np.concatenate([x_clu+x_in_clu, y_clu+y_in_clu], axis=1), labels
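# Hedged usage sketch (added for illustration): builds the 3-cluster ring used
# above and checks the expected shapes. Assumes clique_ring() is in scope.
adj, xy, labels = clique_ring(n_cluster=3, n_in_cluster=5)
assert adj.shape == (15, 15)   # n_cluster * n_in_cluster states
assert xy.shape == (15, 2)     # one (x, y) coordinate per state
assert labels.shape == (15,)   # cluster index of each state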
| 14,771
|
def get_halfnormal_mean_from_scale(scale: float) -> float:
"""Returns the mean of the half-normal distribition."""
# https://en.wikipedia.org/wiki/Half-normal_distribution
return scale * np.sqrt(2) / np.sqrt(np.pi)
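# Quick numeric check (added for illustration): for scale = 1.0 the half-normal
# mean is sqrt(2 / pi), roughly 0.798. Assumes the function above is in scope.
import numpy as np
assert abs(get_halfnormal_mean_from_scale(1.0) - np.sqrt(2.0 / np.pi)) < 1e-12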
| 14,772
|
def cal_pr(y_hat, y_score):
"""
calculate the precision and recall curve
:param y_hat: ground-truth label, [n_sample]
:param y_score: predicted similarity score, [n_sample]
    :return: precision, recall and threshold arrays
"""
thresholds = np.arange(1, -0.001, -0.001)
fps, tps = cal_binary_cls_curve(y_hat, y_score, thresholds)
pos_idx = tps > 0
tps = tps[pos_idx]
fps = fps[pos_idx]
thresholds = thresholds[pos_idx]
precision = tps / (tps + fps)
recall = tps / np.sum(y_hat)
return precision, recall, thresholds
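# Hedged usage sketch (added for illustration): assumes cal_pr() and its helper
# cal_binary_cls_curve() are importable; the labels and scores are toy values.
import numpy as np
y_true = np.array([1, 0, 1, 1, 0])
y_pred_score = np.array([0.9, 0.4, 0.75, 0.6, 0.2])
precision, recall, thresholds = cal_pr(y_true, y_pred_score)
print(precision[-1], recall[-1])  # at the lowest threshold recall reaches 1.0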
| 14,773
|
def alert_source_create(context, values):
"""Create an alert source."""
return IMPL.alert_source_create(context, values)
| 14,774
|
def node_class_new(Node : list, name, extend, method) -> None:
"""
Node_class
/ | \
name extend method
"""
Node.append(["node_class", name, extend, method])
| 14,775
|
def sanitise_utf8(s):
"""Ensure an 8-bit string is utf-8.
s -- 8-bit string (or None)
Returns the sanitised string. If the string was already valid utf-8, returns
the same object.
This replaces bad characters with ascii question marks (I don't want to use
a unicode replacement character, because if this function is doing anything
then it's likely that there's a non-unicode setup involved somewhere, so it
probably wouldn't be helpful).
"""
if s is None:
return None
try:
s.decode("utf-8")
except UnicodeDecodeError:
return (s.decode("utf-8", 'replace')
.replace(u"\ufffd", u"?")
.encode("utf-8"))
else:
return s
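# Quick illustrative check (added): invalid byte sequences are replaced with
# ASCII question marks, while already-valid utf-8 passes through unchanged.
assert sanitise_utf8(b'caf\xc3\xa9') == b'caf\xc3\xa9'  # valid utf-8, untouched
assert sanitise_utf8(b'caf\xe9') == b'caf?'             # bad byte replaced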
| 14,776
|
def do_show_etf_chart():
""" ETF charts
"""
period_item = st.session_state.get("period", "daily")
period = PERIOD_DICT[period_item]
for sect in st.session_state.get("selected_sectors", DEFAULT_SECTORS):
st.subheader(sect)
for k,v in etf_dict[sect].items():
st.image(f"https://finviz.com/chart.ashx?t={k}&p={period}")
st.markdown(f" [{k}]({_finviz_chart_url(k, period)}) : {v} ", unsafe_allow_html=True)
# don't know how to get futures chart img
| 14,777
|
def trans_r2xy(r, phi, r_e, phi_e):
"""r,phi -> x,y """
x = np.array(r) * np.cos(phi)
y = np.array(r) * np.sin(phi)
err = np.array(
[polar_err(i, j, k, l) for i, j, k, l in zip(r, phi, r_e, phi_e)]
)
return x, y, err[:, 0], err[:, 1]
| 14,778
|
def ldpc_bp_decode(llr_vec, ldpc_code_params, decoder_algorithm, n_iters):
"""
LDPC Decoder using Belief Propagation (BP).
Parameters
----------
llr_vec : 1D array of float
Received codeword LLR values from the channel.
ldpc_code_params : dictionary
Parameters of the LDPC code.
decoder_algorithm: string
Specify the decoder algorithm type.
SPA for Sum-Product Algorithm
MSA for Min-Sum Algorithm
n_iters : int
Max. number of iterations of decoding to be done.
Returns
-------
dec_word : 1D array of 0's and 1's
The codeword after decoding.
out_llrs : 1D array of float
LLR values corresponding to the decoded output.
"""
n_cnodes = ldpc_code_params['n_cnodes']
n_vnodes = ldpc_code_params['n_vnodes']
max_cnode_deg = ldpc_code_params['max_cnode_deg']
max_vnode_deg = ldpc_code_params['max_vnode_deg']
cnode_adj_list = ldpc_code_params['cnode_adj_list']
cnode_vnode_map = ldpc_code_params['cnode_vnode_map']
vnode_adj_list = ldpc_code_params['vnode_adj_list']
vnode_cnode_map = ldpc_code_params['vnode_cnode_map']
cnode_deg_list = ldpc_code_params['cnode_deg_list']
vnode_deg_list = ldpc_code_params['vnode_deg_list']
dec_word = np.zeros(n_vnodes, int)
    out_llrs = np.zeros(n_vnodes)  # LLRs are real-valued, so use a float array
cnode_msgs = np.zeros(n_cnodes*max_cnode_deg)
vnode_msgs = np.zeros(n_vnodes*max_vnode_deg)
_limit_llr_v = np.vectorize(_limit_llr)
if decoder_algorithm == 'SPA':
check_node_update = sum_product_update
elif decoder_algorithm == 'MSA':
check_node_update = min_sum_update
else:
raise NameError('Please input a valid decoder_algorithm string.')
# Initialize vnode messages with the LLR values received
for vnode_idx in range(n_vnodes):
start_idx = vnode_idx*max_vnode_deg
offset = vnode_deg_list[vnode_idx]
vnode_msgs[start_idx : start_idx+offset] = llr_vec[vnode_idx]
# Main loop of Belief Propagation (BP) decoding iterations
for iter_cnt in range(n_iters):
continue_flag = 0
# Check Node Update
for cnode_idx in range(n_cnodes):
check_node_update(cnode_idx, cnode_adj_list, cnode_deg_list, cnode_msgs,
vnode_msgs, cnode_vnode_map, max_cnode_deg, max_vnode_deg)
# Variable Node Update
for vnode_idx in range(n_vnodes):
# Compute sum of all incoming messages at the variable node
start_idx = vnode_idx*max_vnode_deg
offset = vnode_deg_list[vnode_idx]
cnode_list = vnode_adj_list[start_idx:start_idx+offset]
cnode_list_msgs = cnode_msgs[cnode_list*max_cnode_deg + vnode_cnode_map[start_idx:start_idx+offset]]
msg_sum = np.sum(cnode_list_msgs)
# Compute messages on outgoing edges using the incoming message sum
vnode_msgs[start_idx:start_idx+offset] = _limit_llr_v(llr_vec[vnode_idx] + msg_sum -
cnode_list_msgs)
# Update output LLRs and decoded word
out_llrs[vnode_idx] = llr_vec[vnode_idx] + msg_sum
if out_llrs[vnode_idx] > 0:
dec_word[vnode_idx] = 0
else:
dec_word[vnode_idx] = 1
# Compute if early termination using parity check matrix
for cnode_idx in range(n_cnodes):
p_sum = 0
for i in range(cnode_deg_list[cnode_idx]):
p_sum ^= dec_word[cnode_adj_list[cnode_idx*max_cnode_deg + i]]
if p_sum != 0:
continue_flag = 1
break
# Stop iterations
if continue_flag == 0:
break
return dec_word, out_llrs
| 14,779
|
def test_atomic_g_day_max_exclusive_1_nistxml_sv_iv_atomic_g_day_max_exclusive_2_5(mode, save_output, output_format):
"""
Type atomic/gDay is restricted by facet maxExclusive with value ---25.
"""
assert_bindings(
schema="nistData/atomic/gDay/Schema+Instance/NISTSchema-SV-IV-atomic-gDay-maxExclusive-2.xsd",
instance="nistData/atomic/gDay/Schema+Instance/NISTXML-SV-IV-atomic-gDay-maxExclusive-2-5.xml",
class_name="NistschemaSvIvAtomicGDayMaxExclusive2",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 14,780
|
def edit_style_formats(style_format_id, **kwargs):
"""Create or edit styles formats.
:param style_format_id: identifier of a specific style format
"""
if request.method == "POST":
args = request.get_json()
errors = StyleFormatsSchema().validate(args)
if errors:
return abort(400, str(errors))
style_format = data.create_style_format(**args)
return jsonify(StyleFormatsSchema().dump(style_format)), 201
if request.method == "DELETE":
data.delete_style_format(style_format_id)
return {'message': 'deleted'}, 204
if request.method == "PUT":
args = request.get_json()
errors = StyleFormatsMetadataSchema().validate(args)
if errors:
return abort(400, str(errors))
style_format = data.update_style_format(style_format_id, **args)
return jsonify(StyleFormatsSchema().dump(style_format)), 200
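# Hedged client-side sketch (added for illustration): exercising the view above
# with the 'requests' library. The base URL, route layout and payload field are
# placeholders; the real route and schema come from the app this view is
# registered in.
import requests
BASE = 'http://localhost:5000/style_formats'
created = requests.post(BASE, json={'name': 'GeoJSON'})           # expect 201
updated = requests.put(f'{BASE}/1', json={'name': 'GeoJSON v2'})  # expect 200
deleted = requests.delete(f'{BASE}/1')                            # expect 204
print(created.status_code, updated.status_code, deleted.status_code)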
| 14,781
|
def restore_snapshots():
""" Restore snapshot into correct directories.
Returns:
True on success, False otherwise.
"""
logging.info("Restoring Cassandra snapshots.")
for directory in CASSANDRA_DATA_SUBDIRS:
data_dir = "{0}/{1}/{2}/".format(APPSCALE_DATA_DIR, "cassandra",
directory)
logging.debug("Restoring in dir {0}".format(data_dir))
for path, _, filenames in os.walk(data_dir):
for filename in filenames:
logging.debug("Restoring: {0}".format(filename))
if not filename:
logging.warn("skipping...")
continue
full_path = "{0}/{1}".format(path, filename)
new_full_path = "{0}/../../{1}".format(path, filename)
logging.debug("{0} -> {1}".format(full_path, new_full_path))
# Move the files up into the data directory.
if not backup_recovery_helper.rename(full_path, new_full_path):
logging.error("Error while moving Cassandra snapshot in place. "
"Aborting restore...")
return False
logging.info("Done restoring Cassandra snapshots.")
return True
| 14,782
|
def volume_to_vtk(volelement, origin=(0.0, 0.0, 0.0)):
"""Convert the volume element to a VTK data object.
Args:
volelement (:class:`omf.volume.VolumeElement`): The volume element to
convert
"""
output = volume_grid_geom_to_vtk(volelement.geometry, origin=origin)
shp = get_volume_shape(volelement.geometry)
# Add data to output
for data in volelement.data:
arr = data.array.array
arr = np.reshape(arr, shp).flatten(order='F')
output[data.name] = arr
return output
| 14,783
|
def test_fn014_detail(api_client, project):
"""The gear detail object should return 5 basic elements - the
project code, gear code, gear description, start date and end
date.
"""
prj_cd = project.prj_cd
gr = "GL01"
eff = "038"
url = reverse(
"fn_portal_api:fn014-detail", kwargs={"prj_cd": prj_cd, "gr": gr, "eff": eff}
)
response = api_client.get(url)
assert response.status_code == status.HTTP_200_OK
print(response.data)
expected_keys = {
"gear",
"eff",
"mesh",
"grlen",
"grht",
"grwid",
"grcol",
"grmat",
"gryarn",
"grknot",
"eff_des",
"slug",
}
assert set(response.data.keys()) == expected_keys
expected = {
"eff": "038",
"mesh": 38,
"grlen": 100,
}
for k, v in expected.items():
        assert response.data[k] == v
| 14,784
|
def import_post_office_csv():
"""データベースに日本郵便Webサイトの郵便番号CSVデータを格納"""
post_office_csv = PostOfficeCSV()
factory = AreaAddressFactory()
for row in post_office_csv.lists:
factory.create(**row)
db = DB()
try:
service = AreaAddressService(db)
for area_address in factory.items:
service.create(area_address)
db.commit()
except (DatabaseError, DataError) as e:
db.rollback()
        print(e)
finally:
db.close()
| 14,785
|
def plot_heat_capacity(Cv, dCv, temperature_list, file_name="heat_capacity.pdf"):
"""
Given an array of temperature-dependent heat capacity values and the uncertainties in their estimates, this function plots the heat capacity curve.
:param Cv: The heat capacity data to plot.
:type Cv: List( float )
    :param dCv: The uncertainties in the heat capacity data
    :type dCv: List( float )
    :param temperature_list: The temperatures at which the heat capacity was estimated
    :type temperature_list: List( float )
    :param file_name: The name/path of the file where plotting output will be written, default = "heat_capacity.pdf"
:type file_name: str
"""
figure = pyplot.figure(1)
Tunit = temperature_list.unit
Cvunit = Cv.unit
temperature_list = np.array(temperature_list)
Cv = np.array(Cv)
dCv = np.array(dCv)
pyplot.errorbar(temperature_list, Cv, yerr=dCv, figure=figure)
pyplot.xlabel(f"Temperature ({Tunit})")
pyplot.ylabel(f"C$_v$ ({Cvunit})")
pyplot.title("Heat capacity as a function of T")
pyplot.savefig(file_name)
pyplot.close()
return
| 14,786
|
def _in_delta(value, target_value, delta) -> bool:
"""
Check if value is equal to target value within delta
"""
return abs(value - target_value) < delta
| 14,787
|
def getpar(key, file='DATA/Par_file', sep='=', cast=str):
""" Reads parameter from SPECFEM parfile
"""
val = None
with open(file, 'r') as f:
# read line by line
for line in f:
if find(line, key) == 0:
# read key
key, val = _split(line, sep)
if not key:
continue
# read val
val, _ = _split(val, '#')
val.strip()
break
if val:
if cast == float:
val = val.replace('d', 'e')
return cast(val)
else:
print 'Not found in parameter file: %s\n' % key
raise Exception
| 14,788
|
def replace_links(text: str, replace, site: 'pywikibot.site.BaseSite') -> str:
"""Replace wikilinks selectively.
The text is searched for a link and on each link it replaces the text
depending on the result for that link. If the result is just None it skips
that link. When it's False it unlinks it and just inserts the label. When
it is a Link instance it'll use the target, section and label from that
Link instance. If it's a Page instance it'll use just the target from the
replacement and the section and label from the original link.
If it's a string and the replacement was a sequence it converts it into a
Page instance. If the replacement is done via a callable it'll use it like
unlinking and directly replace the link with the text itself. It only
supports unicode when used by the callable and bytes are not allowed.
If either the section or label should be used the replacement can be a
function which returns a Link instance and copies the value which should
    remain.
.. versionchanged:: 7.0
`site` parameter is mandatory
:param text: the text in which to replace links
:param replace: either a callable which reacts like described above.
The callable must accept four parameters link, text, groups, rng and
allows for user interaction. The groups are a dict containing 'title',
'section', 'label' and 'linktrail' and the rng are the start and end
position of the link. The 'label' in groups contains everything after
the first pipe which might contain additional data which is used in
File namespace for example.
Alternatively it can be a sequence containing two items where the first
must be a Link or Page and the second has almost the same meaning as
the result by the callable. It'll convert that into a callable where
the first item (the Link or Page) has to be equal to the found link and
in that case it will apply the second value from the sequence.
:type replace: sequence of pywikibot.Page/pywikibot.Link/str or
callable
:param site: a Site object to use. It should match the origin or
target site of the text
:raises TypeError: missing positional argument 'site'
:raises ValueError: Wrong site type
:raises ValueError: Wrong replacement number
:raises ValueError: Wrong replacement types
"""
def to_link(source):
"""Return the link from source when it's a Page otherwise itself."""
if isinstance(source, pywikibot.Page):
return source._link
if isinstance(source, str):
return pywikibot.Link(source, site)
return source
def replace_callable(link, text, groups, rng):
if replace_list[0] == link:
return replace_list[1]
return None
def check_classes(replacement):
"""Normalize the replacement into a list."""
if not isinstance(replacement, (pywikibot.Page, pywikibot.Link)):
raise ValueError('The replacement must be None, False, '
'a sequence, a Link or a str but '
'is "{}"'.format(type(replacement)))
def title_section(link) -> str:
title = link.title
if link.section:
title += '#' + link.section
return title
if not isinstance(site, pywikibot.site.BaseSite):
raise ValueError('The "site" argument must be a BaseSite not {}.'
.format(type(site).__name__))
if isinstance(replace, Sequence):
if len(replace) != 2:
raise ValueError('When used as a sequence, the "replace" '
'argument must contain exactly 2 items.')
replace_list = [to_link(replace[0]), replace[1]]
if not isinstance(replace_list[0], pywikibot.Link):
raise ValueError(
'The original value must be either str, Link or Page '
'but is "{}"'.format(type(replace_list[0])))
if replace_list[1] is not False and replace_list[1] is not None:
if isinstance(replace_list[1], str):
replace_list[1] = pywikibot.Page(site, replace_list[1])
            check_classes(replace_list[1])
replace = replace_callable
linktrail = site.linktrail()
link_pattern = re.compile(
r'\[\[(?P<title>.*?)(#(?P<section>.*?))?(\|(?P<label>.*?))?\]\]'
r'(?P<linktrail>{})'.format(linktrail))
extended_label_pattern = re.compile(r'(.*?\]\])({})'.format(linktrail))
linktrail = re.compile(linktrail)
curpos = 0
# This loop will run until we have finished the current page
while True:
m = link_pattern.search(text, pos=curpos)
if not m:
break
# Ignore links to sections of the same page
if not m.group('title').strip():
curpos = m.end()
continue
# Ignore interwiki links
if (site.isInterwikiLink(m.group('title').strip())
and not m.group('title').strip().startswith(':')):
curpos = m.end()
continue
groups = m.groupdict()
if groups['label'] and '[[' in groups['label']:
# TODO: Work on the link within the label too
# A link within a link, extend the label to the ]] after it
extended_match = extended_label_pattern.search(text, pos=m.end())
if not extended_match:
# TODO: Unclosed link label, what happens there?
curpos = m.end()
continue
groups['label'] += groups['linktrail'] + extended_match.group(1)
groups['linktrail'] = extended_match.group(2)
end = extended_match.end()
else:
end = m.end()
start = m.start()
# Since this point the m variable shouldn't be used as it may not
# contain all contents
del m
try:
link = pywikibot.Link.create_separated(
groups['title'], site, section=groups['section'],
label=groups['label'])
except (SiteDefinitionError, InvalidTitleError):
# unrecognized iw prefix or invalid title
curpos = end
continue
# Check whether the link found should be replaced.
# Either None, False or tuple(Link, bool)
new_link = replace(link, text, groups.copy(), (start, end))
if new_link is None:
curpos = end
continue
# The link looks like this:
# [[page_title|new_label]]new_linktrail
page_title = groups['title']
new_label = groups['label']
if not new_label:
# or like this: [[page_title]]new_linktrail
new_label = page_title
        # remove leading ":" from the link text
if new_label[0] == ':':
new_label = new_label[1:]
new_linktrail = groups['linktrail']
if new_linktrail:
new_label += new_linktrail
if new_link is False:
# unlink - we remove the section if there's any
assert isinstance(new_label, str), 'link text must be str.'
new_link = new_label
if isinstance(new_link, str):
# Nothing good can come out of the fact that bytes is returned so
# force unicode
text = text[:start] + new_link + text[end:]
# Make sure that next time around we will not find this same hit.
curpos = start + len(new_link)
continue
if isinstance(new_link, bytes):
raise ValueError('The result must be str and not bytes.')
# Verify that it's either Link, Page or str
check_classes(new_link)
# Use section and label if it's a Link and not otherwise
if isinstance(new_link, pywikibot.Link):
is_link = True
else:
new_link = new_link._link
is_link = False
new_title = new_link.canonical_title()
# Make correct langlink if needed
if new_link.site != site:
new_title = ':' + new_link.site.code + ':' + new_title
if is_link:
# Use link's label
new_label = new_link.anchor
must_piped = new_label is not None
new_section = new_link.section
else:
must_piped = True
new_section = groups['section']
if new_section:
new_title += '#' + new_section
if new_label is None:
new_label = new_title
# Parse the link text and check if it points to the same page
parsed_new_label = pywikibot.Link(new_label, new_link.site)
try:
parsed_new_label.parse()
except InvalidTitleError:
pass
else:
parsed_link_title = title_section(parsed_new_label)
new_link_title = title_section(new_link)
# compare title, but only with parts if linktrail works
if not linktrail.sub('',
parsed_link_title[len(new_link_title):]):
# TODO: This must also compare everything that was used as a
# prefix (in case insensitive)
must_piped = (
not parsed_link_title.startswith(new_link_title)
or parsed_new_label.namespace != new_link.namespace)
if must_piped:
new_text = '[[{}|{}]]'.format(new_title, new_label)
else:
new_text = '[[{}]]{}'.format(new_label[:len(new_title)],
new_label[len(new_title):])
text = text[:start] + new_text + text[end:]
# Make sure that next time around we will not find this same hit.
curpos = start + len(new_text)
return text
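# Hedged usage sketch (added for illustration): unlink every wikilink pointing
# at a given title, keeping only its label text. Requires a working pywikibot
# configuration (network access); the site and the title are placeholders.
import pywikibot
def unlink_sandbox(link, text, groups, rng):
    # False -> unlink (keep only the label); None -> leave the link untouched.
    return False if link.title == 'Sandbox' else None
site = pywikibot.Site('en', 'wikipedia')
new_text = replace_links('See [[Sandbox|the sandbox]] page.', unlink_sandbox, site)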
| 14,789
|
def create_ok_response() -> flask.Response:
"""Creates a 200 OK response.
:return: flask.Response.
"""
ok_body: Dict[str, str] = {"status": "OK"}
return make_response(jsonify(ok_body), HTTP_200_OK)
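# Hedged usage sketch (added for illustration): wiring the helper above into a
# minimal health-check route. Assumes create_ok_response() and its module-level
# imports are in scope; the app object and route path are placeholders.
app = flask.Flask(__name__)
@app.route('/health')
def health() -> flask.Response:
    return create_ok_response()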
| 14,790
|
async def test_camera_off(hass):
"""Test the camera turn off service."""
await setup_platform(hass, CAMERA_DOMAIN)
with patch("abodepy.AbodeCamera.privacy_mode") as mock_capture:
await hass.services.async_call(
CAMERA_DOMAIN,
"turn_off",
{ATTR_ENTITY_ID: "camera.test_cam"},
blocking=True,
)
await hass.async_block_till_done()
mock_capture.assert_called_once_with(True)
| 14,791
|
def compile_ui_if_needed(ui_file_path: str, ignore_mtime: bool=False):
"""
The following will dynamically compile the Qt Designer '.ui' file
given by C{ui_file_path}, and import and load the generated Python
module. The generated module will have the name:
C{ui_file_path} + '_ui.py'
    The '.ui' file will only be compiled if it's more recent than a
previous instance of the generated module, or if this generated
module doesn't exist at all (perhaps it's the first compilation).
@param ui_file_path: The file path for the Qt Designer 'ui' file.
@param ignore_mtime: If True, modification times for the ui file
and the corresponding generated modules will be ignored and the
ui file will ALWAYS be COMPILED.
@returns The imported and reloaded C{module} object.
"""
if not os.path.exists(ui_file_path):
raise ValueError(f"Can't find UI file {ui_file_path}")
#:
if not ui_file_path.endswith('.ui'):
raise ValueError(f"UI file path ('{ui_file_path}') must end in '.ui'!")
#:
    # Note: str.strip('.ui') would strip any of the characters '.', 'u', 'i'
    # from both ends, so slice the extension off instead.
    gen_module_name = ui_file_path[:-len('.ui')] + '_ui'
    gen_module_path = gen_module_name + '.py'
    ui_mtime = os.path.getmtime(ui_file_path)
    if (
        not os.path.exists(gen_module_path) or
        (not ignore_mtime and ui_mtime > os.path.getmtime(gen_module_path))
    ):
print(f"Compiling '{ui_file_path}' to '{gen_module_path}'.", file=sys.stderr)
run_proc(['pyside6-uic', '-o', gen_module_path, ui_file_path])
print(f"Loading '{gen_module_name}' module", file=sys.stderr)
#:
# We want to make sure that the module is up to date, whether it
# was imported before or not. import_module won't really import the
# module if the module was imported before (it just returns the module
# object). OTOH, reload won't reload if the module wasn't imported
    # before. That's why we need to import and then do a reload.
invalidate_caches()
return reload_module(import_module(gen_module_name))
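# Hedged usage sketch (added for illustration): compile and import a Qt
# Designer file, then instantiate the generated form class. 'main_window.ui'
# and 'Ui_MainWindow' are placeholders for whatever the .ui file actually
# defines; pyside6-uic must be available on PATH.
ui_module = compile_ui_if_needed('main_window.ui')
form = ui_module.Ui_MainWindow()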
| 14,792
|
def test_len_drops_symlinks_from_the_job_count_to_avoid_double_counting(proj):
"""
Test that __len__ has the correct behaviour when symlinks
are present
"""
symlink = proj.basepath / "AutoPick" / "autopick2"
symlink.symlink_to(proj.basepath / "AutoPick" / "job006")
sym_autopick = proj.autopick
assert len(sym_autopick) == 3
symlink.unlink()
| 14,793
|
def pickleExists(name):
"""
Returns True if there is a pickle with name in cache, False otherwise. Used to prevent
cache misses
:param name: Name to look for in cache
:return: True on hit, False on miss
"""
fileNames = [f for f in os.listdir(PICKLE_DIR) if name in f]
    return len(fileNames) > 0
| 14,794
|
def main():
"""
This handles the command line input of the runner
(it's most often called by evennia.py)
"""
parser = OptionParser(usage="%prog [options] start",
description="This runner should normally *not* be called directly - it is called automatically from the evennia.py main program. It manages the Evennia game server and portal processes an hosts a threaded loop to restart the Server whenever it is stopped (this constitues Evennia's reload mechanism).")
parser.add_option('-s', '--noserver', action='store_true',
dest='noserver', default=False,
help='Do not start Server process')
parser.add_option('-p', '--noportal', action='store_true',
dest='noportal', default=False,
help='Do not start Portal process')
parser.add_option('-i', '--iserver', action='store_true',
dest='iserver', default=False,
help='output server log to stdout instead of logfile')
parser.add_option('-d', '--iportal', action='store_true',
dest='iportal', default=False,
help='output portal log to stdout. Does not make portal a daemon.')
parser.add_option('-S', '--profile-server', action='store_true',
dest='sprof', default=False,
help='run server under cProfile')
parser.add_option('-P', '--profile-portal', action='store_true',
dest='pprof', default=False,
help='run portal under cProfile')
options, args = parser.parse_args()
if not args or args[0] != 'start':
# this is so as to avoid runner.py be accidentally launched manually.
parser.print_help()
sys.exit()
# set up default project calls
server_argv = [TWISTED_BINARY,
'--nodaemon',
'--logfile=%s' % SERVER_LOGFILE,
'--pidfile=%s' % SERVER_PIDFILE,
'--python=%s' % SERVER_PY_FILE]
portal_argv = [TWISTED_BINARY,
'--logfile=%s' % PORTAL_LOGFILE,
'--pidfile=%s' % PORTAL_PIDFILE,
'--python=%s' % PORTAL_PY_FILE]
# Profiling settings (read file from python shell e.g with
# p = pstats.Stats('server.prof')
sprof_argv = ['--savestats',
'--profiler=cprofile',
'--profile=server.prof']
pprof_argv = ['--savestats',
'--profiler=cprofile',
'--profile=portal.prof']
# Server
pid = get_pid(SERVER_PIDFILE)
if pid and not options.noserver:
print "\nEvennia Server is already running as process %(pid)s. Not restarted." % {'pid': pid}
options.noserver = True
if options.noserver:
server_argv = None
else:
set_restart_mode(SERVER_RESTART, "shutdown")
if options.iserver:
# don't log to server logfile
del server_argv[2]
print "\nStarting Evennia Server (output to stdout)."
else:
if CYCLE_LOGFILES:
cycle_logfile(SERVER_LOGFILE)
print "\nStarting Evennia Server (output to server logfile)."
if options.sprof:
server_argv.extend(sprof_argv)
print "\nRunning Evennia Server under cProfile."
# Portal
pid = get_pid(PORTAL_PIDFILE)
if pid and not options.noportal:
print "\nEvennia Portal is already running as process %(pid)s. Not restarted." % {'pid': pid}
options.noportal = True
if options.noportal:
portal_argv = None
else:
if options.iportal:
# make portal interactive
portal_argv[1] = '--nodaemon'
set_restart_mode(PORTAL_RESTART, True)
print "\nStarting Evennia Portal in non-Daemon mode (output to stdout)."
else:
if CYCLE_LOGFILES:
cycle_logfile(PORTAL_LOGFILE)
cycle_logfile(HTTP_LOGFILE)
set_restart_mode(PORTAL_RESTART, False)
print "\nStarting Evennia Portal in Daemon mode (output to portal logfile)."
if options.pprof:
portal_argv.extend(pprof_argv)
print "\nRunning Evennia Portal under cProfile."
# Windows fixes (Windows don't support pidfiles natively)
if os.name == 'nt':
if server_argv:
del server_argv[-2]
if portal_argv:
del portal_argv[-2]
# Start processes
start_services(server_argv, portal_argv)
| 14,795
|
def procs() -> None:
""" Entry point for liftoff-procs.
"""
opts = parse_options()
display_procs(get_running_liftoffs(opts.experiment, opts.results_path))
| 14,796
|
def format_timestamp(timestamp):
"""Formats an UTC timestamp into a date string.
>>> format_timestamp("2014-04-08T12:41:34+0100")
'Tue, 08 Apr 2014 12:41:34'
"""
t = iso8601.parse_date(timestamp).timetuple()
return time.strftime("%a, %d %b %Y %H:%M:%S", t)
| 14,797
|
def _flat(xvals):
"""
Function for flat surface y=0, with boundary conditions
Parameters
----------
xvals : np.array
x-values of the surface.
Returns
-------
yvals : np.array
y-Values of the initialized surface.
"""
yvals = np.zeros_like(xvals)
return yvals
| 14,798
|
def regularized_laplacian(weights, labels, alpha):
"""Uses the laplacian graph to smooth the labels matrix by "propagating" labels
Args:
weights: Tensor of shape (batch, n, n)
labels: Tensor of shape (batch, n, n_classes)
        alpha: Scalar, acts as a smoothing factor
    Returns:
        Tuple of the propagated labels, of shape (batch, n, n_classes), and the
        (batch, n, n) propagator matrix
"""
n = weights.shape[1]
diag = torch.diag_embed(torch.sum(weights, dim=2))
laplacian = diag - weights
identity = torch.eye(n, dtype=laplacian.dtype, device=laplacian.device)[None, :, :]
propagator = torch.inverse(identity + alpha * laplacian)
return _propagate(labels, propagator), propagator
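# Hedged illustration (added): the smoothing that `_propagate` is assumed to
# perform is a batched matmul of the propagator with the label matrix. Shapes
# follow the docstring; the weights and labels below are random toy data.
import torch
batch, n, n_classes, alpha = 2, 6, 3, 0.5
weights = torch.rand(batch, n, n)
weights = 0.5 * (weights + weights.transpose(1, 2))  # symmetric affinities
labels = torch.rand(batch, n, n_classes)
laplacian = torch.diag_embed(weights.sum(dim=2)) - weights
propagator = torch.inverse(torch.eye(n)[None, :, :] + alpha * laplacian)
smoothed = torch.bmm(propagator, labels)  # (batch, n, n_classes)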
| 14,799
|