| content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
|---|---|---|
from flask_babel import gettext
def delete(page_id):
"""Delete a page."""
page = _get_page(page_id)
page_name = page.name
site_id = page.site_id
success, event = page_service.delete_page(page.id, initiator_id=g.user.id)
if not success:
flash_error(
gettext('Page "%(name)s" could not be deleted.', name=page_name)
)
return url_for('.view_current_version', page_id=page.id)
flash_success(gettext('Page "%(name)s" has been deleted.', name=page_name))
page_signals.page_deleted.send(None, event=event)
return url_for('.index_for_site', site_id=site_id)
|
9c858d19b27f42e71d6aa19ae636e282925f0492
| 3,649,900
|
def drift_var():
"""
Concept drift:
1. n_drifts
2. concept_sigmoid_spacing (None for sudden)
3. incremental [True] or gradual [False]
4. recurring [True] or non-recurring [False]
"""
return [(10, None, False, False), (10, 5, False, False), (10, 5, True, False)]
|
34f2c55f928a16cca8c52307853ab32f56ecd954
| 3,649,901
|
def get_generators(matrix):
"""
Given a matrix in H-rep, gets the v-rep
Turns out, the code is the same as get_inequalities,
since lrs determines the directions based on the input.
Left like this for readability.
"""
return get_inequalities(matrix)
|
ab5c2059544842d5010cae1211acc7da9e021994
| 3,649,902
|
def num_instances(diff, flag=False):
    """returns the number of times the mother and daughter have
    palindromic ages in their lives, given the difference in age.
    If flag==True, prints the details."""
daughter = 0
count = 0
while True:
mother = daughter + diff
if are_reversed(daughter, mother) or are_reversed(daughter, mother+1):
count = count + 1
if flag:
                print(daughter, mother)
if mother > 120:
break
daughter = daughter + 1
return count
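# Hedged usage note (not part of the original snippet): are_reversed(a, b) is
# assumed to be True when the decimal digits of one age, read in reverse, give
# the other (e.g. 12 and 21). num_instances(18) would then count how many times
# a daughter and a mother 18 years apart have palindromic ages at the same time.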
|
84d39159c594b25aabfc9efceef0d13ebc15a817
| 3,649,903
|
import inspect
def get_dipy_workflows(module):
"""Search for DIPY workflow class.
Parameters
----------
module : object
module object
Returns
-------
l_wkflw : list of tuple
        This is a list of tuples, each containing 2 elements:
        Workflow name, Workflow class obj
Examples
--------
>>> from dipy.workflows import align # doctest: +SKIP
>>> get_dipy_workflows(align) # doctest: +SKIP
"""
return [
(m, obj)
for m, obj in inspect.getmembers(module)
if inspect.isclass(obj)
and issubclass(obj, module.Workflow)
and m not in SKIP_WORKFLOWS_LIST
]
|
a119d6defd6c741777c3fa2f1add6bc700357dbd
| 3,649,904
|
import numpy as np
import pymc3 as pm
def azel_fit(coo_ref, coo_meas, nsamp=2000, ntune=2000, target_accept=0.95, random_seed=8675309):
"""
Fit full az/el pointing model using PyMC3. The terms are analogous to those used by TPOINT(tm). This fit includes
the eight normal terms used in `~pytelpoint.transform.azel` with additional terms, az_sigma and el_sigma, that
    describe the intrinsic scatter.
Parameters
----------
coo_ref : `~astropy.coordinates.SkyCoord` instance
Reference coordinates
coo_meas : `~astropy.coordinates.SkyCoord` instance
Measured coordinates
nsamp : int (default: 2000)
Number of inference samples
ntune : int (default: 2000)
Number of burn-in samples
target_accept : float (default: 0.95)
Sets acceptance probability target for determining step size
random_seed : int (default: 8675309)
Seed number for random number generator
Returns
-------
idata : `~arviz.InferenceData`
Inference data from the pointing model
"""
pointing_model = pm.Model()
deg2rad = np.pi / 180
with pointing_model:
# az/el are the astrometric reference values. az_raw/el_raw are the observed encoder values.
az = pm.Data('az', coo_ref.az)
el = pm.Data('el', coo_ref.alt)
az_raw = pm.Data('az_raw', coo_meas.az)
el_raw = pm.Data('el_raw', coo_meas.alt)
ia = pm.Normal('ia', 1200., 100)
ie = pm.Normal('ie', 0., 50.)
an = pm.Normal('an', 0., 20.)
aw = pm.Normal('aw', 0., 20.)
ca = pm.Normal('ca', 0., 30.)
npae = pm.Normal('npae', 0., 30.)
tf = pm.Normal('tf', 0., 50.)
tx = pm.Normal('tx', 0., 20.)
az_sigma = pm.HalfNormal('az_sigma', sigma=1.)
el_sigma = pm.HalfNormal('el_sigma', sigma=1.)
daz = -ia
daz -= an * pm.math.sin(deg2rad * az) * pm.math.tan(deg2rad * el)
daz -= aw * pm.math.cos(deg2rad * az) * pm.math.tan(deg2rad * el)
daz -= ca / pm.math.cos(deg2rad * el)
daz -= npae * pm.math.tan(deg2rad * el)
dalt = ie
dalt -= an * pm.math.cos(deg2rad * az)
dalt += aw * pm.math.sin(deg2rad * az)
dalt -= tf * pm.math.cos(deg2rad * el)
dalt -= tx / pm.math.tan(deg2rad * el)
_ = pm.Normal('azerr', mu=0., sigma=az_sigma/3600, observed=pm.math.cos(deg2rad * el) * (az - (az_raw + daz/3600.)))
_ = pm.Normal('elerr', mu=0., sigma=el_sigma/3600, observed=el - (el_raw + dalt/3600.))
idata = pm.sample(
nsamp,
tune=ntune,
target_accept=target_accept,
return_inferencedata=True,
random_seed=random_seed
)
return idata
|
22c3989049933b55643d11bfb2aebeb4c629ed60
| 3,649,905
|
from pathlib import Path
import os
def repository_path(relative_path: str) -> Path:
"""
Resolve `relative_path` relative to the root of the repository.
"""
    return Path(os.path.join(REPOSITORY_ROOT, relative_path)).resolve()
|
5228678023ef53d8b8ae55e8b3a7050b8db96115
| 3,649,906
|
def geojson_to_labels(geojson_dict, crs_transformer, extent=None):
"""Convert GeoJSON to ObjectDetectionLabels object.
If extent is provided, filter out the boxes that lie "more than a little
bit" outside the extent.
Args:
geojson_dict: dict in GeoJSON format
crs_transformer: used to convert map coords in geojson to pixel coords
in labels object
extent: Box in pixel coords
Returns:
ObjectDetectionLabels
"""
features = geojson_dict['features']
boxes = []
class_ids = []
scores = []
def polygon_to_label(polygon, crs_transformer):
polygon = [crs_transformer.map_to_pixel(p) for p in polygon]
xmin, ymin = np.min(polygon, axis=0)
xmax, ymax = np.max(polygon, axis=0)
boxes.append(Box(ymin, xmin, ymax, xmax))
properties = feature['properties']
class_ids.append(properties['class_id'])
scores.append(properties.get('score', 1.0))
for feature in features:
geom_type = feature['geometry']['type']
coordinates = feature['geometry']['coordinates']
if geom_type == 'MultiPolygon':
for polygon in coordinates:
polygon_to_label(polygon[0], crs_transformer)
elif geom_type == 'Polygon':
polygon_to_label(coordinates[0], crs_transformer)
else:
raise Exception(
"Geometries of type {} are not supported in object detection \
labels.".format(geom_type))
if len(boxes):
boxes = np.array([box.npbox_format() for box in boxes], dtype=float)
class_ids = np.array(class_ids)
scores = np.array(scores)
labels = ObjectDetectionLabels(boxes, class_ids, scores=scores)
else:
labels = ObjectDetectionLabels.make_empty()
if extent is not None:
labels = ObjectDetectionLabels.get_overlapping(
labels, extent, ioa_thresh=0.8, clip=True)
return labels
|
d8e0ed7034796235c6311d47eb234bfd0f38e68a
| 3,649,907
|
def processed_transcript(df):
"""
    Cleans the transcript table by splitting the value field, replacing NaN values, and dropping extra columns
PARAMETERS:
transcript dataframe
RETURNS:
Cleaned transcript dataframe
"""
    # expand the dictionary into columns (reward, amount, offer id) from the value field
df['offer_id'] = df['value'].apply(lambda x: x.get('offer_id'))
df['offer id'] = df['value'].apply(lambda x: x.get('offer id'))
df['reward'] = df['value'].apply(lambda x: x.get('reward'))
df['amount'] = df['value'].apply(lambda x: x.get('amount'))
#move 'offer id' values into 'offer_id'
    df['offer_id'] = df.apply(lambda x: x['offer id'] if x['offer_id'] is None else x['offer_id'], axis=1)
#drop 'offer id' column
df.drop(['offer id' , 'value'] , axis=1, inplace=True)
#replace nan
df.fillna(0 , inplace=True)
return df
|
452668d6d9616ca382f7968e0ac4dd52658be9f6
| 3,649,908
|
import six
def _TestSuiteName(dash_json_dict):
"""Extracts a test suite name from Dashboard JSON.
The dashboard JSON may contain a field "test_suite_name". If this is not
present or it is None, the dashboard will fall back to using "benchmark_name"
in the "chart_data" dict.
"""
name = None
if dash_json_dict.get('test_suite_name'):
name = dash_json_dict['test_suite_name']
else:
try:
name = dash_json_dict['chart_data']['benchmark_name']
except KeyError as e:
six.raise_from(
BadRequestError('Could not find test suite name. ' + str(e)), e)
_ValidateNameString(name, 'test_suite_name')
return name
|
1b2e6cbd820bde3b24be5cca107e49ea2dabc732
| 3,649,909
|
def create_default_prior(name, default_priors_file=None):
"""Make a default prior for a parameter with a known name.
Parameters
----------
name: str
Parameter name
default_priors_file: str, optional
If given, a file containing the default priors.
    Returns
    -------
    prior: Prior
        Default prior distribution for that parameter; if unknown, None is
        returned.
"""
if default_priors_file is None:
logger.debug(
"No prior file given.")
prior = None
else:
default_priors = PriorDict(filename=default_priors_file)
if name in default_priors.keys():
prior = default_priors[name]
else:
logger.debug(
"No default prior found for variable {}.".format(name))
prior = None
return prior
|
e0ddbaf10e0288ba939637cb1b2ce685756c966b
| 3,649,910
|
def clean_data(list_in):
"""
Inputs:
list_in - filtered list of ticket orders
Outputs:
Return list of tuples, each tuple contains
(last name, first name, note,[tickets])
"""
notes_list = []
data_out = []
for row in list_in:
trimmed_row = row[row.index('Purchaser Name: ')+16:]
name = trimmed_row[:trimmed_row.index('<br/>')].strip().title()
first_name = name[:name.rindex(' ')] #get first name
last_name = name[name.rindex(' '):] #get last name
trimmed_row = trimmed_row[len(name+'<br/>')+1:]
if 'Special Instructions:' in row: #get notes
note = trimmed_row[22:trimmed_row.index('<br/>')]
trimmed_row = trimmed_row[trimmed_row.index('<br/>')+5:]
notes_list.append((last_name,first_name,note))
else:
note = ''
orders = trimmed_row.split('<br/>')
tickets = []
for order in orders: #get ticket orders
if ('Membership Dues' in order) or ('Donation' in order):
continue
else:
tickets.append(order)
data_out.append([last_name, first_name, note, tickets])
# print(last_name, first_name,note,tickets)
# print()
data_out.sort(key=lambda item: item[1]) #sort by first name (to break last name ties)
data_out.sort(key=lambda item: item[0]) #sort by last name
# for idx, note in enumerate(notes_list): #optional print of all notes
# print(idx,note)
return data_out
|
f2cdf17895d1661e40b64f3fcc9ff92558f53bdd
| 3,649,911
|
def adfuller(
vdf,
column: str,
ts: str,
by: list = [],
p: int = 1,
with_trend: bool = False,
regresults: bool = False,
):
"""
---------------------------------------------------------------------------
Augmented Dickey Fuller test (Time Series stationarity).
Parameters
----------
vdf: vDataFrame
input vDataFrame.
column: str
Input vcolumn to test.
ts: str
        vcolumn used as timeline. It will be used to order the data. It can be
        a numerical or date-like (date, datetime, timestamp...) vcolumn.
by: list, optional
vcolumns used in the partition.
p: int, optional
Number of lags to consider in the test.
with_trend: bool, optional
Add a trend in the Regression.
regresults: bool, optional
If True, the full regression results are returned.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
def critical_value(alpha, N, with_trend):
if not (with_trend):
if N <= 25:
if alpha == 0.01:
return -3.75
elif alpha == 0.10:
return -2.62
elif alpha == 0.025:
return -3.33
else:
return -3.00
elif N <= 50:
if alpha == 0.01:
return -3.58
elif alpha == 0.10:
return -2.60
elif alpha == 0.025:
return -3.22
else:
return -2.93
elif N <= 100:
if alpha == 0.01:
return -3.51
elif alpha == 0.10:
return -2.58
elif alpha == 0.025:
return -3.17
else:
return -2.89
elif N <= 250:
if alpha == 0.01:
return -3.46
elif alpha == 0.10:
return -2.57
elif alpha == 0.025:
return -3.14
else:
return -2.88
elif N <= 500:
if alpha == 0.01:
return -3.44
elif alpha == 0.10:
return -2.57
elif alpha == 0.025:
return -3.13
else:
return -2.87
else:
if alpha == 0.01:
return -3.43
elif alpha == 0.10:
return -2.57
elif alpha == 0.025:
return -3.12
else:
return -2.86
else:
if N <= 25:
if alpha == 0.01:
return -4.38
elif alpha == 0.10:
return -3.24
elif alpha == 0.025:
return -3.95
else:
return -3.60
elif N <= 50:
if alpha == 0.01:
return -4.15
elif alpha == 0.10:
return -3.18
elif alpha == 0.025:
return -3.80
else:
return -3.50
elif N <= 100:
if alpha == 0.01:
return -4.04
elif alpha == 0.10:
return -3.15
elif alpha == 0.025:
return -3.73
else:
                    return -3.45
elif N <= 250:
if alpha == 0.01:
return -3.99
elif alpha == 0.10:
return -3.13
elif alpha == 0.025:
return -3.69
else:
return -3.43
elif N <= 500:
if alpha == 0.01:
                    return -3.98
elif alpha == 0.10:
return -3.13
elif alpha == 0.025:
return -3.68
else:
return -3.42
else:
if alpha == 0.01:
return -3.96
elif alpha == 0.10:
return -3.12
elif alpha == 0.025:
return -3.66
else:
return -3.41
check_types(
[
("ts", ts, [str],),
("column", column, [str],),
("p", p, [int, float],),
("by", by, [list],),
("with_trend", with_trend, [bool],),
("regresults", regresults, [bool],),
],
vdf=["vdf", vdf],
)
columns_check([ts, column] + by, vdf)
ts = vdf_columns_names([ts], vdf)[0]
column = vdf_columns_names([column], vdf)[0]
by = vdf_columns_names(by, vdf)
schema = vdf._VERTICAPY_VARIABLES_["schema_writing"]
if not (schema):
schema = "public"
name = "{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_{}".format(
schema, gen_name([column]).upper()
)
relation_name = "{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}".format(
schema, gen_name([column]).upper()
)
try:
vdf._VERTICAPY_VARIABLES_["cursor"].execute(
"DROP MODEL IF EXISTS {}".format(name)
)
vdf._VERTICAPY_VARIABLES_["cursor"].execute(
"DROP VIEW IF EXISTS {}".format(relation_name)
)
except:
pass
lag = [
"LAG({}, 1) OVER ({}ORDER BY {}) AS lag1".format(
column, "PARTITION BY {}".format(", ".join(by)) if (by) else "", ts
)
]
lag += [
"LAG({}, {}) OVER ({}ORDER BY {}) - LAG({}, {}) OVER ({}ORDER BY {}) AS delta{}".format(
column,
i,
"PARTITION BY {}".format(", ".join(by)) if (by) else "",
ts,
column,
i + 1,
"PARTITION BY {}".format(", ".join(by)) if (by) else "",
ts,
i,
)
for i in range(1, p + 1)
]
lag += [
"{} - LAG({}, 1) OVER ({}ORDER BY {}) AS delta".format(
column, column, "PARTITION BY {}".format(", ".join(by)) if (by) else "", ts
)
]
query = "CREATE VIEW {} AS SELECT {}, {} AS ts FROM {}".format(
relation_name,
", ".join(lag),
"TIMESTAMPDIFF(SECOND, {}, MIN({}) OVER ())".format(ts, ts)
if vdf[ts].isdate()
else ts,
vdf.__genSQL__(),
)
vdf._VERTICAPY_VARIABLES_["cursor"].execute(query)
model = LinearRegression(name, vdf._VERTICAPY_VARIABLES_["cursor"])
model.fit(
relation_name,
["ts"] + ["lag1"] + ["delta{}".format(i) for i in range(1, p + 1)],
"delta",
)
coef = model.coef_
vdf._VERTICAPY_VARIABLES_["cursor"].execute("DROP MODEL IF EXISTS {}".format(name))
vdf._VERTICAPY_VARIABLES_["cursor"].execute(
"DROP VIEW IF EXISTS {}".format(relation_name)
)
if regresults:
return coef
coef = coef.transpose()
DF = coef.values["lag1"][0] / (max(coef.values["lag1"][1], 1e-99))
p_value = coef.values["lag1"][3]
count = vdf.shape()[0]
result = tablesample(
{
"index": [
"ADF Test Statistic",
"p_value",
"# Lags used",
"# Observations Used",
"Critical Value (1%)",
"Critical Value (2.5%)",
"Critical Value (5%)",
"Critical Value (10%)",
"Stationarity (alpha = 1%)",
],
"value": [
DF,
p_value,
p,
count,
critical_value(0.01, count, with_trend),
critical_value(0.025, count, with_trend),
critical_value(0.05, count, with_trend),
critical_value(0.10, count, with_trend),
DF < critical_value(0.01, count, with_trend) and p_value < 0.01,
],
}
)
return result
|
8f78b2128c981af15a84ac94f54435da4aee0c6c
| 3,649,912
|
from typing import Union
from typing import List
from typing import Dict
from typing import Optional
def get_routes_bend180(
ports: Union[List[Port], Dict[str, Port]],
bend: ComponentOrFactory = bend_euler,
cross_section: CrossSectionFactory = strip,
bend_port1: Optional[str] = None,
bend_port2: Optional[str] = None,
**kwargs,
) -> Routes:
"""Returns routes made by 180 degree bends.
Args:
ports: List or dict of ports
bend: function for bend
        cross_section: cross-section factory for the bend
        bend_port1: optional name of the bend port connected to each input port
        bend_port2: optional name of the bend port used as the route output port
**kwargs: bend settings
"""
ports = list(ports.values()) if isinstance(ports, dict) else ports
bend = bend(angle=180, cross_section=cross_section, **kwargs)
bend_ports = bend.get_ports_list()
bend_port1 = bend_port1 or bend_ports[0].name
bend_port2 = bend_port2 or bend_ports[1].name
references = [bend.ref() for port in ports]
references = [ref.connect(bend_port1, port) for port, ref in zip(ports, references)]
ports = [ref.ports[bend_port2] for i, ref in enumerate(references)]
lengths = [bend.info.length] * len(ports)
return Routes(references=references, ports=ports, lengths=lengths)
|
f5ec1539a04c0c9eee9184d190e265af4e187ef0
| 3,649,913
|
import json
from urllib.request import urlopen
import pandas as pd
def list_datasets(github_repo="Ouranosinc/xclim-testdata", branch="main"):
"""Return a DataFrame listing all xclim test datasets available on the GitHub repo for the given branch.
The result includes the filepath, as passed to `open_dataset`, the file size (in KB) and the html url to the file.
This uses an unauthenticated call to GitHub's REST API, so it is limited to 60 requests per hour (per IP).
    A single call of this function triggers one request per subdirectory, so use it sparingly.
"""
res = urlopen( # nosec
f"https://api.github.com/repos/{github_repo}/contents?ref={branch}"
)
base = json.loads(res.read().decode())
records = []
for folder in base:
if folder["path"].startswith(".") or folder["size"] > 0:
# drop hidden folders and other files.
continue
res = urlopen(folder["url"]) # nosec
listing = json.loads(res.read().decode())
for file in listing:
if file["path"].endswith(".nc"):
records.append(
{
"name": file["path"],
"size": file["size"] / 2**10,
"url": file["html_url"],
}
)
df = pd.DataFrame.from_records(records).set_index("name")
print(f"Found {len(df)} datasets.")
return df
|
199c56efcb105d9ff043f2a7c1ef51857a8b9b77
| 3,649,914
|
import logging
def make_layerwise_projection_unshrink(*, server_state_type,
client_update_output_type,
server_update_fn, server_model_fn,
client_model_fn, shrink_unshrink_info):
"""Creates an unshrink function which unshrinks by unprojecting weight matrices corresponding to make_layerwise_projection_shrink.
Args:
server_state_type: the type of server_state.
client_update_output_type: the type of client_outputs.
server_update_fn: a function which evolves the server_state.
server_model_fn: a `tf.keras.Model' which specifies the server-side model.
client_model_fn: a `tf.keras.Model' which specifies the client-side model.
shrink_unshrink_info: an object specifying how the shrink and unshrink
operations are performed.
Returns:
    The corresponding unshrink function.
"""
left_mask = shrink_unshrink_info.left_mask
right_mask = shrink_unshrink_info.right_mask
tf.debugging.assert_equal(len(left_mask), len(right_mask))
tf.debugging.assert_equal(
len(left_mask), len(get_model_weights(server_model_fn()).trainable))
tf.debugging.assert_equal(
len(left_mask), len(get_model_weights(client_model_fn()).trainable))
build_projection_matrix = shrink_unshrink_info.build_projection_matrix
federated_server_state_type = tff.type_at_server(server_state_type)
federated_client_outputs_type = tff.type_at_clients(client_update_output_type)
@tff.tf_computation(client_update_output_type)
def unproject_client_weights_fn(client_output):
whimsy_server_weights = get_model_weights(server_model_fn()).trainable
whimsy_client_weights = get_model_weights(client_model_fn()).trainable
left_maskval_to_projmat_dict = create_left_maskval_to_projmat_dict(
client_output.round_num //
shrink_unshrink_info.new_projection_dict_decimate,
whimsy_server_weights,
whimsy_client_weights,
left_mask,
right_mask,
build_projection_matrix=build_projection_matrix)
return unproject_client_weights(client_output, left_maskval_to_projmat_dict,
left_mask, right_mask)
@tff.tf_computation
def reshape_a(client_ouput_weight_delta):
whimsy_server_weights = get_model_weights(server_model_fn()).trainable
return tf.nest.map_structure(lambda a, b: tf.reshape(a, tf.shape(b)),
client_ouput_weight_delta,
whimsy_server_weights)
@tff.federated_computation(federated_server_state_type,
federated_client_outputs_type)
def unshrink(server_state, client_outputs):
client_outputs = tff.federated_map(unproject_client_weights_fn,
client_outputs)
my_weights_delta = tff.federated_map(reshape_a,
client_outputs.weights_delta)
round_model_delta = tff.federated_mean(
my_weights_delta, weight=client_outputs.client_weight)
logging.info("finished computing mean")
return tff.federated_map(server_update_fn,
(server_state, round_model_delta))
return unshrink
|
315e7096a45047abbb23bb444f13065f9c87dfc5
| 3,649,915
|
import json
def embed_terms(args, classes, dest, use_cache=True, path_to_json='ebd_cache.json'):
"""
Embeds class strings into word representations.
:param args
:param classes: (list of str) topic classes
:param dest: (str) path to destination file
:param path_to_json: (str) path to json file containing word embeddings
:return: dict {newsgroup class (int id) : embedded vector (nparray of float)}
"""
if use_cache:
with open(dest) as json_file:
return classes, json.load(json_file)
# Not using cache: extract vectors from global set
with open(path_to_json) as json_file:
mappings = json.load(json_file)
# Cache topic reps
cache = dict(zip(classes, [mappings[topic] for topic in classes]))
with open(dest, 'w') as fp:
        json.dump(cache, fp)
    return classes, cache
|
8521b4828907c0083492b0d03848aeeb452d17e6
| 3,649,916
|
def wf_paths(reachable):
"""
Construct all well-formed paths satisfying a given condition.
The condition is as follows: all the paths have height equal to
the ceiling of log_2(`reachable` + 1). `reachable` is interpreted
as a bitfield, with 1 meaning that the corresponding leaf on the
floor of the path should be reachable from the root, 0 meaning the
opposite.
This function has been used to count well-formed paths and guess
the link with Gelfand-Zetlin polytopes.
"""
if reachable <= 0:
raise ValueError
elif reachable == 1:
return [Path([])]
else:
floors = [reachable & 1]
reachable >>= 1
left = 2; right = 4
while reachable > 1:
if reachable & 1:
floors = [f | left for f in floors] + [f | right for f in floors]
left <<= 2; right <<= 2
reachable >>= 1
floors = [f | left for f in floors]
paths = []
for f in floors:
paths.extend([p.cat(f) for p in wf_paths(_h4(f))])
return paths
|
86b0a2e6408a8257e201f21058459aea4aceac00
| 3,649,917
|
import rarfile
from io import BytesIO
def get_imagemodel_in_rar(rar_path, mode):
    """Collect and return the name, width, and height of each image file inside the archive at rar_path."""
image_models = []
with rarfile.RarFile(rar_path) as rf:
for name in rf.namelist():
if is_hidden_or_trash(name):
continue
if is_extensions_allow_image(name):
model = BaseImageModel()
model._name = name
app.logger.info("fileName: " + name)
if mode == "1":
try:
                    with rf.open(name) as f:
                        data = BytesIO(f.read())
size = get_image_size_from_bytes(data)
model._width = size[0]
model._height = size[1]
except Exception:
app.logger.error("Can not getting width, height >> " + name)
image_models.append(model)
return image_models
|
ea94406e17b66bbbf0288b8f0cb03cdd723a2d63
| 3,649,918
|
def run_single(i,threshold_area_fraction,death_to_birth_rate_ratio,domain_size_multiplier,return_history=False):
"""run a single voronoi tessellation model simulation"""
rates = (DEATH_RATE,DEATH_RATE/death_to_birth_rate_ratio)
rand = np.random.RandomState()
history = lib.run_simulation(simulation,L,TIMESTEP,TIMEND,rand,progress_on=False,
init_time=INIT_TIME,til_fix=False,save_areas=True,
return_events=False,save_cell_histories=False,N_limit=MAX_POP_SIZE,
domain_size_multiplier=domain_size_multiplier,rates=rates,threshold_area_fraction=threshold_area_fraction)
return number_proliferating_neighbours_distribution(history,threshold_area_fraction,death_to_birth_rate_ratio)
|
b4ef11ae873f69b472a2c41c2f5d33e88ed1169a
| 3,649,919
|
def matrixmult (A, B):
"""Matrix multiplication function
This function returns the product of a matrix multiplication given two matrices.
Let the dimension of the matrix A be: m by n,
let the dimension of the matrix B be: p by q,
    multiplication is only possible if n = p,
thus creating a matrix of m by q size.
Parameters
----------
A : list
First matrix, in a 2D array format.
B : list
Second matrix, in a 2D array format.
Returns
-------
C : list
The product of the matrix multiplication.
Examples
--------
>>> from .pycgmStatic import matrixmult
>>> A = [[11,12,13],[14,15,16]]
>>> B = [[1,2],[3,4],[5,6]]
>>> matrixmult(A, B)
[[112, 148], [139, 184]]
"""
    C = [[0 for col in range(len(B[0]))] for row in range(len(A))]
for i in range(len(A)):
for j in range(len(B[0])):
for k in range(len(B)):
C[i][j] += A[i][k]*B[k][j]
return C
|
98065981c8047d927bacb07877dbf173ba379159
| 3,649,920
|
def TorsLattice(data = None, *args, **kwargs):
"""
Construct a lattice of torsion classes from various forms of input data
This raises an error if the constructed lattice is not semidistributive,
since the lattice of torsion classes is semidistributive.
INPUT:
- ``data``, ``*args``, ``**kwargs`` -- data and options that will
be passed down to :func:`LatticePoset` to construct a poset that is
also a lattice.
OUTPUT:
An instance of :class:`FiniteTorsLattice`
"""
if isinstance(data, FiniteTorsLattice) and not args and not kwargs:
return data
L = LatticePoset(data, *args, **kwargs)
if not L.is_semidistributive():
raise ValueError("This lattice is not semidistributive.")
return FiniteTorsLattice(L)
|
e07cfef58b2927b8e3c660ee20cc0c6bc365fa4b
| 3,649,921
|
def get_accumulated_report(trigger_id, mission='fermi'):
"""
Return the last value for each keyword on the summary page for a given trigger_id
:param trigger_id:
:param mission: 'fermi' or 'swift'
:return:
"""
if 'fermi' in mission:
site = fermi_grb_site
elif 'swift' in mission:
site = swift_grb_site
else:
        print("Unknown mission {0}".format(mission))
sys.exit()
html = urllib.urlopen(site.format(trigger_id))
rs = page_to_reports(html)
fs = None
for r in rs:
fs = report_to_fields(r, fs)
return fs
|
de77dc845a48d6572b2ff9229eed57e7fd55b38c
| 3,649,922
|
import torch
from torch.nn import functional as F
def sample(model, x, steps, temperature=1.0, sample=False, top_k=None):
"""
take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in
the sequence, feeding the predictions back into the model each time. Clearly the sampling
has quadratic complexity unlike an RNN that is only linear, and has a finite context window
of block_size, unlike an RNN that has an infinite context window.
"""
block_size = model.get_block_size()
model.eval()
for k in range(steps):
x_cond = x if x.size(1) <= block_size else x[:, -block_size:] # crop context if needed
logits, _ = model(x_cond)
# pluck the logits at the final step and scale by temperature
logits = logits[:, -1, :] / temperature
# optionally crop probabilities to only the top k options
if top_k is not None:
logits = top_k_logits(logits, top_k)
# apply softmax to convert to probabilities
probs = F.softmax(logits, dim=-1)
# sample from the distribution or take the most likely
if sample:
ix = torch.multinomial(probs, num_samples=1)
else:
_, ix = torch.topk(probs, k=1, dim=-1)
# append to the sequence and continue
x = torch.cat((x, ix), dim=1)
return x
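# Hedged usage sketch (assumes a trained minGPT-style `model` and that
# top_k_logits is available, as in the surrounding codebase):
# context = torch.tensor([[0, 1, 2]], dtype=torch.long)   # shape (b=1, t=3)
# out = sample(model, context, steps=20, temperature=0.9, sample=True, top_k=10)
# `out` then has shape (1, 23): the original context followed by 20 new tokens.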
|
c63ab2c001b7c88568d12d836da65abb368a8f31
| 3,649,923
|
def get_numbers(number, size, *, fg=DEFAULT_FGCHARACTER, bg=DEFAULT_BGCHARACTER):
"""Creates a shape of numbers.
Positional arguments:
number - number to print.
size - size of the shape.
Keyword arguments:
fg - foreground character.
bg - background character.
"""
_validate_positive_params(number+1,size)
width = int(size+1)
height = int(size*2+1)
x = range(width)
y = range(height)
# https://en.wikipedia.org/wiki/Seven-segment_display
l = [
f"y == {size*2} and x<={size}", # A
f"x == {size} and y>{size} and y<={size*2}", # B
f"x == {size} and y<={size}", # C
f"y == 0 and x<={size}", # D
f"x == 0 and y<={size}", # E
f"x == 0 and y>{size} and y<={size*2}", # F
f"y == {size} and x<={size}", # G
]
numbers = [
{l[0],l[1],l[2],l[3],l[4],l[5] }, # 0
{ l[1],l[2] }, # 1
{l[0],l[1], l[3],l[4], l[6]}, # 2
{l[0],l[1],l[2],l[3], l[6]}, # 3
{ l[1],l[2], l[5],l[6]}, # 4
{l[0], l[2],l[3], l[5],l[6]}, # 5
{l[0], l[2],l[3],l[4],l[5],l[6]}, # 6
{l[0],l[1],l[2] }, # 7
{l[0],l[1],l[2],l[3],l[4],l[5],l[6]}, # 8
{l[0],l[1],l[2],l[3], l[5],l[6]}, # 9
]
res = ""
for digit in str(number):
feqs = numbers[int(digit)]
s_digit = _make_shape(x, y, feqs, [], fg=fg, bg=bg)
if res:
new_res = ""
for i,j in zip(res.split("\n"),s_digit.split("\n")):
if i and j:
new_res += i+" "+j+'\n'
res=new_res
else:
res = s_digit
return res
|
1cc992796f7118cbc0b19938ece0f87ed146a0d2
| 3,649,924
|
import os
import sys
def config_from_env(key, config_schema=None):
"""Read config from a file path in os.env.
Args:
        key (str) : Key of the environment variable that holds the config file path
config_schema (trafaret): Trafaret object that defines the schema of the config.
If None, then trafaret validation is not used.
Example:
```
import trafaret as tr
config_schema = tr.Dict({
tr.Key('project_name'):
tr.Dict({
'db_path': tr.String(),
'username': tr.String(),
'password': tr.String(),
}),
})
```
Trafaret docs: http://trafaret.readthedocs.io/en/latest/
Return:
config json
"""
filepath = os.getenv(key, default=None)
if not filepath:
sys.stderr.write("Passed key does not exist: {0}".format(key))
raise AttributeError('Key {} does not exist in environment.'.format(key))
return config_from_path(filepath, config_schema)
|
cdeb2f32a1de803f2d2fb92451b9833e1718e5ec
| 3,649,925
|
def to_cmyk(r: int, g: int, b: int) -> _cmyk:
"""
Takes RGB values 0->255 and returns their values
in the CMYK namespace.
https://www.rapidtables.com/convert/color/rgb-to-cmyk.html
"""
    r, g, b = to_float(r, g, b)
    k = 1 - max(r, g, b)
    if k == 1:
        # Pure black would divide by zero below.
        return (0.0, 0.0, 0.0, 1.0)
    c = (1 - r - k) / (1 - k)
    m = (1 - g - k) / (1 - k)
    y = (1 - b - k) / (1 - k)
    return (c, m, y, k)
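# Worked example (assumes to_float simply divides each channel by 255):
# pure red gives k = 0, so to_cmyk(255, 0, 0) == (0.0, 1.0, 1.0, 0), while
# pure black takes the early return above: to_cmyk(0, 0, 0) == (0.0, 0.0, 0.0, 1.0).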
|
804f12c944ba0c0a740ca94c3e622b061db57dc5
| 3,649,926
|
def GetHostsInClusters(datacenter, clusterNames=[], connectionState=None):
"""
Return list of host objects from given cluster names.
@param datacenter: datacenter object
@type datacenter: Vim.Datacenter
@param clusterNames: cluster name list
@type clusterNames: string[]
@param connectionState: host connection state ("connected", "disconnected", "notResponding"), None means all states.
    @type connectionState: string
"""
if len(clusterNames) == 0:
clusterObjs = GetAllClusters(datacenter)
else:
clusterObjs = GetClusters(datacenter, clusterNames)
hostObjs = []
    if connectionState is None:
hostObjs = [h for cl in clusterObjs for h in cl.host]
else:
hostObjs = [h for cl in clusterObjs for h in cl.host if h.runtime.connectionState == connectionState]
return hostObjs
|
c9722212e239eaec930da34dac2b5c82d45178fe
| 3,649,927
|
def get_gfa_targets(tiles, gfafile, faintlim=99, gaiadr="dr2"):
"""Returns a list of tables of GFA targets on each tile
Args:
tiles: table with columns TILEID, RA, DEC; or Tiles object
        gfafile: path to a FITS file of candidate GFA targets with columns RA, DEC
        faintlim: Gaia G magnitude limit beyond which targets are flagged as too faint
gaiadr: string, must be either "dr2" or "edr3" (default to "dr2")
MAY NOT BE FULLY IMPLEMENTED
Returns:
list of tables (one row per input tile) with the subset of targets
that are covered by GFAs on each tile. Each table has additional
`GFA_LOC` column indicating 0-9 which GFA was covered.
Note that a given target could be covered by GFAs on more than one tile.
Output is a list of astropy Tables; inputs can be numpy structured arrays
or astropy Tables
"""
log = Logger.get()
tm = Timer()
tm.start()
# Convert tiles to vanilla numpy array if needed
if isinstance(tiles, Tiles):
tx = np.zeros(len(tiles.ra),
dtype=[("RA", "f8"), ("DEC", "f8"), ("TILEID", "i4")])
tx["RA"] = tiles.ra
tx["DEC"] = tiles.dec
tx["TILEID"] = tiles.id
tiles = tx
# Load potential GFA targets and GFA locations
targets = fitsio.read(gfafile)
gfa = desimodel.focalplane.gfa.GFALocations(scale=2)
# Pre-filter what GFA targets cover what tiles with some buffer.
# find_points_in_tiles returns a list of lists;
# convert to dictionary of lists keyed by tileid
log.info("Finding overlap of {} GFA targets on {} tiles".format(
len(targets), len(tiles)))
gfa_tile_indices = dict()
ii = desimodel.footprint.find_points_in_tiles(
tiles, targets["RA"], targets["DEC"], radius=1.8)
for i, tileid in enumerate(tiles["TILEID"]):
gfa_tile_indices[tileid] = ii[i]
gfa_targets = list()
log.info("Generating GFA targets tables")
for telra, teldec, tileid in zip(tiles["RA"], tiles["DEC"],
tiles["TILEID"]):
tmp = gfa.targets_on_gfa(telra, teldec,
targets[gfa_tile_indices[tileid]])
t = Table(tmp)
# Rename some columns for downstream clarity and consistency
for oldname, newname in [
("TYPE", "MORPHTYPE"),
("RA", "TARGET_RA"),
("DEC", "TARGET_DEC"),
("RA_IVAR", "TARGET_RA_IVAR"),
("DEC_IVAR", "TARGET_DEC_IVAR")]:
if oldname in t.colnames:
t.rename_column(oldname, newname)
# Select which targets are good for ETC / GUIDE / FOCUS
# 0 == good
flag = np.zeros(len(t), dtype="i2")
#- Not PSF-like
isPSF = (t["MORPHTYPE"] == "PSF ") | (t["MORPHTYPE"] == "GPSF") | (t["MORPHTYPE"] == "PSF")
flag[~isPSF] |= 2**0
#- Not Isolated
if len(tmp) > 1:
notIsolated = ~isolated(tmp['RA'], tmp['DEC'])
flag[notIsolated] |= 2**1
#- Questionable astrometry / proper motion
tych = (0 < t['REF_ID'])
tych &= ( t['REF_ID'] < 1e10)
flag[tych] |= 2**2
#- Too faint
faint = t['GAIA_PHOT_G_MEAN_MAG'] > faintlim
flag[faint] |= 2**3
# AR not passing the Gaia AEN criterion (PM correction done for AEN targets only)
g = t["GAIA_PHOT_G_MEAN_MAG"]
aen = t["GAIA_ASTROMETRIC_EXCESS_NOISE"]
isaen = np.logical_or(
(g <= 19.0) * (aen < 10.0 ** 0.5),
(g >= 19.0) * (aen < 10.0 ** (0.5 + 0.2 * (g - 19.0))),
)
flag[~isaen] |= 2**4
if len(flag)-np.count_nonzero(flag) == 0:
log.error("ERROR: no good GFA targets for "
"ETC/GUIDE/FOCUS on tile {}".format(tileid))
t["ETC_FLAG"] = flag
t["GUIDE_FLAG"] = flag
t["FOCUS_FLAG"] = flag
# patch in Gaia-based synthetic r flux for use by ETC
t["FLUX_R"] = gaia_synth_r_flux(t, gaiadr=gaiadr)
gfa_targets.append(t)
tm.stop()
tm.report(" Identifying GFA targets")
return gfa_targets
|
aa8c5a42babca87d26ad93538035734db54574f8
| 3,649,928
|
from sympy import hyper, meijerg, S
def meijerint_indefinite(f, x):
"""
Compute an indefinite integral of ``f`` by rewriting it as a G function.
Examples
========
>>> from sympy.integrals.meijerint import meijerint_indefinite
>>> from sympy import sin
>>> from sympy.abc import x
>>> meijerint_indefinite(sin(x), x)
-cos(x)
"""
results = []
for a in sorted(_find_splitting_points(f, x) | {S(0)}, key=default_sort_key):
res = _meijerint_indefinite_1(f.subs(x, x + a), x)
if not res:
continue
res = res.subs(x, x - a)
if _has(res, hyper, meijerg):
results.append(res)
else:
return res
if f.has(HyperbolicFunction):
_debug('Try rewriting hyperbolics in terms of exp.')
rv = meijerint_indefinite(
_rewrite_hyperbolics_as_exp(f), x)
if rv:
if not type(rv) is list:
return collect(factor_terms(rv), rv.atoms(exp))
results.extend(rv)
if results:
return next(ordered(results))
|
5f4669476aee46ecf359c8d0de59acbe0762cb3d
| 3,649,929
|
from typing import Tuple
from typing import Union
def fit_size(
img: IMG,
size: Tuple[int, int],
mode: FitSizeMode = FitSizeMode.INCLUDE,
direction: FitSizeDir = FitSizeDir.CENTER,
bg_color: Union[str, float, Tuple[float, ...]] = (255, 255, 255, 0),
) -> IMG:
"""
    Resize the image to the given size, cropping any excess and filling any shortfall with the given color
    :params
        * ``img``: the image to adjust
        * ``size``: the desired image size
        * ``mode``: FitSizeMode.INSIDE means the image must fit inside the given size, with the shortfall filled with the given color; FitSizeMode.INCLUDE means the image must cover the given size, with the excess cropped
        * ``direction``: where the image is anchored while resizing; defaults to centered, FitSizeDir.CENTER
        * ``bg_color``: background color used with FitSizeMode.INSIDE
"""
return cut_size(limit_size(img, size, mode), size, direction, bg_color)
|
6cc33cb8c3fff4edec3bf15978f8cedc056a5e0c
| 3,649,930
|
import os
import csv
import sys
def load_iot_config(ini: dict):
    """Load the IoT topics configuration CSV referenced by the 'iot' section of `ini`."""
if not verify_params(ini, 'iot', ['file', 'filedir']):
return {}
# Set file directory
iot_params = ini['iot']
filedir = iot_params['filedir']
filedir = filedir.strip(" ")
if filedir == "":
# HOME directory set
filedir = os.environ['HOME']
logger.info("IoT configuration dir: '{}'".format(filedir))
iot_config = iot_params['file']
filepath = os.path.join(filedir, iot_config)
logger.info("IoT config filepath: '{}'".format(filepath))
if not os.path.exists(filepath):
logger.error("IoT config file '{}' not found".format(filepath))
return {}
# Load configuration data
topics = dict()
fieldnames = ["topic", "where", "h", "x", "y", "unit", "notes"]
try:
csvfd = open(filepath, "r", newline='')
reader = csv.DictReader(csvfd, fieldnames=fieldnames,
delimiter=";")
for row in reader:
topic = row['topic'].strip(" ")
if topic == 'topic':
logger.debug("Skipping header")
continue
if topic[0] == "#":
logger.warning("Line {} commented out".format(reader.line_num))
continue
if topic in topics.keys():
                logger.warning("Topic '{}' at line {} skipped because it is duplicated".format(topic, reader.line_num))
continue
# Remove 'topic' key and assign remaining to topics dict
row.pop('topic')
topics[topic] = row
logger.debug("Topic '{}' added: {}".format(topic, topics[topic]))
except:
logger.error("Loading IoT topics failed")
logger.error("Reason: {}".format(sys.exc_info()))
return {}
return topics
|
15121a76fc54b57c296fc7fb13c88911b9df15c4
| 3,649,931
|
import os
import sys
def supports_colour():
"""
Return True if the running system's terminal supports colour,
and False otherwise.
Adapted from https://github.com/django/django/blob/master/django/core/management/color.py
"""
def vt_codes_enabled_in_windows_registry():
"""
Check the Windows Registry to see if VT code handling has been enabled
by default, see https://superuser.com/a/1300251/447564.
"""
        try:
            # winreg is only available on Windows.
            import winreg
except ImportError:
return False
else:
reg_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, 'Console')
try:
reg_key_value, _ = winreg.QueryValueEx(
reg_key, 'VirtualTerminalLevel')
except FileNotFoundError:
return False
else:
return reg_key_value == 1
# isatty is not always implemented, #6223.
is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
return is_a_tty and (
sys.platform != 'win32' or
HAS_COLORAMA or
'ANSICON' in os.environ or
# Windows Terminal supports VT codes.
'WT_SESSION' in os.environ or
# Microsoft Visual Studio Code's built-in terminal supports colors.
os.environ.get('TERM_PROGRAM') == 'vscode' or
vt_codes_enabled_in_windows_registry()
)
|
d567b7818c314d345a30f10dffd99c7a3b411c3e
| 3,649,932
|
def next_code(value: int, mul: int = 252533, div: int = 33554393) -> int:
"""
Returns the value of the next code given the value of the current code
The first code is `20151125`.
After that, each code is generated by taking the previous one, multiplying it by `252533`,
and then keeping the remainder from dividing that value by `33554393`
"""
return (value * mul) % div
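# Worked example of the recurrence described in the docstring: starting from
# the first code, 20151125, the next one is (20151125 * 252533) % 33554393.
# >>> next_code(20151125)
# 31916031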
|
a9e5183e405574cc56a138a244f14de08ea68d00
| 3,649,933
|
import os
import subprocess
import shlex
import sys
def main() -> int:
"""Runs a program specified by command-line arguments."""
args = argument_parser().parse_args()
if not args.command or args.command[0] != '--':
return 1
env = os.environ.copy()
# Command starts after the "--".
command = args.command[1:]
if args.args_file is not None:
empty = True
for line in args.args_file:
empty = False
command.append(line.strip())
if args.skip_empty_args and empty:
return 0
if args.env_file is not None:
for line in args.env_file:
apply_env_var(line, env)
# Apply command-line overrides at a higher priority than the env file.
for string in args.env:
apply_env_var(string, env)
if args.capture_output:
output_args = {'stdout': subprocess.PIPE, 'stderr': subprocess.STDOUT}
else:
output_args = {}
process = subprocess.run(command, env=env, **output_args) # type: ignore
if process.returncode != 0 and args.capture_output:
_LOG.error('')
_LOG.error('Command failed with exit code %d in GN build.',
process.returncode)
_LOG.error('')
_LOG.error('Build target:')
_LOG.error('')
_LOG.error(' %s', args.target)
_LOG.error('')
_LOG.error('Full command:')
_LOG.error('')
_LOG.error(' %s', ' '.join(shlex.quote(arg) for arg in command))
_LOG.error('')
_LOG.error('Process output:')
print(flush=True)
sys.stdout.buffer.write(process.stdout)
print(flush=True)
_LOG.error('')
return process.returncode
|
62b01ad68221afb0e272c63a1747c3ef1bf02034
| 3,649,934
|
def read_csv_to_lol(full_path, sep=";"):
"""
Read csv file into lists of list.
    Make sure to have an empty line at the bottom
"""
with open(full_path, 'r') as ff:
# read from CSV
data = ff.readlines()
# New line at the end of each line is removed
data = [i.replace("\n", "") for i in data]
# Creating lists of list
data = [i.split(sep) for i in data]
return data
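# Hedged usage sketch: a file whose contents are "a;b\nc;d\n" is returned as
# [['a', 'b'], ['c', 'd']]; pass sep="," for comma-separated files.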
|
e53c46c6a8eabaece788111530fbf859dd23133f
| 3,649,935
|
import pandas as pd
def read_experiment(path):
"""
Discovers CSV files an experiment produced and construct columns
for the experiment's conditions from the sub-directory structure.
Args:
path: path to the experiment's results.
Returns:
pd.DataFrame
"""
objects = list(path.rglob('*.csv'))
data = []
path_split = _recursive_split(path)
for obj in objects:
obj_path_split = _recursive_split(obj)
if len(obj_path_split) - len(path_split) > 7:
raise Exception("Path depth too long! Provide path to actual experiment or one of its sub-directories.")
data.append(obj_path_split)
df = pd.DataFrame(data=data)
columns = ["experiment", "imputer", "task", "missing_type", "missing_fraction", "strategy", "file_or_dir", "detail_file"]
auto_columns = []
for i in range(df.shape[1] - len(columns)):
auto_columns.append(f"col{i}")
df.columns = auto_columns + columns
df.drop(auto_columns, axis=1, inplace=True)
df["path"] = objects
df["detail_file"] = df["detail_file"].fillna("")
return df.reset_index(drop=True)
|
e9797fb71a0e9ba89e211fd0d079d5040e3a4639
| 3,649,936
|
from bs4 import BeautifulSoup
from selenium import webdriver
def single_keyword_search(keyword):
"""
    Fetches the Google search results for `keyword` as HTML and returns only
    the elements with class='r', which correspond to the regular result entries.
    Args:
        keyword (String): the keyword to search for on Google
    Returns:
        title_list (bs4.element.ResultSet): the regular result entries (class='r') found in the Google search results
"""
URL = 'https://www.google.com/search?q=' +keyword_preprocessing(keyword)
driver = webdriver.Chrome("C:/Users/ksg/py_tutorial/chromedriver.exe")
driver.implicitly_wait(1)
driver.get(URL)
driver.implicitly_wait(2)
html = driver.page_source
soup = BeautifulSoup(html, 'html.parser')
title_list = soup.find_all(name='div',attrs={'class':'r'})
return title_list
|
45909878da1c135c4dc6b1209c98ffc5e7e21b29
| 3,649,937
|
def partial_list(ys, xs, specified_shapes=None):
"""
Args:
ys: A list of tensors. Each tensor will be differentiated with the partial_nd
        xs: A Tensor to be used for differentiation, or a list of tensors to be used for differentiation with the same length as ys
specified_shapes: A list of specified dynamical shapes of ys. The first element of each shape is dummy as None or -1.
"""
    assert (len(ys) > 0) and (len(xs) > 0), "ys and xs must both be non-empty"
if specified_shapes is None:
if len(xs) == 1:
return [partial_nd(y, xs) for y in ys]
else:
return [partial_nd(y, x) for (y,x) in zip(ys,xs)]
else:
if len(xs) == 1:
            return [partial_nd(y, xs, specified_shape) for (y, specified_shape) in zip(ys, specified_shapes)]
else:
return [partial_nd(y, x, specified_shape) for (y,x,specified_shape) in zip(ys,xs,specified_shapes)]
|
24b0d2583f21cd4497e1c38d79643e44eaab693e
| 3,649,938
|
def _GuessBrowserName(bisect_bot):
"""Returns a browser name string for Telemetry to use."""
default = 'release'
browser_map = namespaced_stored_object.Get(_BOT_BROWSER_MAP_KEY)
if not browser_map:
return default
for bot_name_prefix, browser_name in browser_map:
if bisect_bot.startswith(bot_name_prefix):
return browser_name
return default
|
b6b0fedd238aff07bfa46c61e9d792087b647a13
| 3,649,939
|
import functools
import collections
import numpy as np
import theano
import theano.tensor as T
import lasagne
def compile_train_function(network, batch_size, learning_rate):
"""Compiles the training function.
Args:
network: The network instance.
batch_size: The training batch size.
learning_rate: The learning rate.
Returns:
The update function that takes a batch of images and targets and updates the
network weights.
"""
learning_rate = np.float32(learning_rate)
input_var = network.input_layers[0].input_var
target_var = T.ftensor4()
# Loss function
loss_fn = functools.partial(
losses.bootstrapped_xentropy,
targets=target_var,
batch_size=batch_size,
multiplier=BOOTSTRAP_MULTIPLIER
)
# Update function
lr = theano.shared(learning_rate)
update_fn = functools.partial(lasagne.updates.adam, learning_rate=lr)
pylogging.info("Compile SGD updates")
gd_step = hybrid_training.compile_gd_step(
network, loss_fn, [input_var, target_var], update_fn)
reduce_lr = theano.function(
inputs=[],
updates=collections.OrderedDict([
(lr, T.maximum(np.float32(5e-5), lr / np.float32(1.25)))
])
)
def _compute_update(imgs, targets, update_counter):
if (update_counter + 1) % REDUCE_LR_INTERVAL == 0:
reduce_lr()
loss = gd_step(imgs, targets)
return loss
return _compute_update
|
57778a2428d4348f6594d04ec35bc821a4fd8122
| 3,649,940
|
import json
import sys
import os
def json_loads(data):
"""Load json data, allowing - to represent stdin."""
if data is None:
return ""
if data == "-":
return json.load(sys.stdin)
elif os.path.exists(data):
with open(data, 'r') as handle:
return json.load(handle)
else:
return json.loads(data)
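# Hedged usage sketch, one call per branch above:
# json_loads('-')            -> JSON parsed from stdin
# json_loads('config.json')  -> parsed contents of that file, if the path exists
# json_loads('{"a": 1}')     -> {'a': 1}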
|
f5bdad826578108adccc32ca93ecd474954bfb7d
| 3,649,941
|
def filter_values(freq, values, nthOct: int = 3):
"""
Filters the given values into nthOct bands.
Parameters
----------
freq : ndarray
Array containing the frequency axis.
values : ndarray
Array containing the magnitude values to be filtered.
nthOct : int, optional
Fractional octave bands that the absorption will be filtered to.
Returns
-------
bands : ndarray
An array containing the center frequencies of the available bands.
result : ndarray
An array containing the filtered values in the available bands.
"""
bands = fractional_octave_frequencies(nthOct=nthOct) # [band_min, band_center, band_max]
bands = bands[np.argwhere((bands[:, 1] >= min(freq)) & (bands[:, 1] <= max(freq)))[:, 0]]
idx = np.array([np.argwhere((freq >= bands[a, 0]) & (freq <= bands[a, 2])) for a in
np.arange(0, len(bands))], dtype=object)
result = np.array([np.sum(values[idx[a]]) / len(idx[a]) for a in np.arange(0, len(bands))], dtype=object)
result = np.nan_to_num(result)
return bands[:, 1], result.astype(values.dtype)
|
2a1b270049f1c2869fa03d7bc2a4f64658646b7a
| 3,649,942
|
def create_project(request):
"""View to create new project"""
user = request.user
if user.is_annotator:
error = ErrorMessage(header="Access denied", message="Only admin and managers can create projects")
return render(request, 'error.html', {'error':error})
if request.method == "POST":
form = ProjectCreateForm(request.POST)
if form.is_valid():
project = form.save(commit=False)
project.manager = user
project.save()
return redirect('projects_list')
else:
form = ProjectCreateForm()
return render(request, 'projects/create.html', {'form': form})
|
34d6def496c9ddac99710425a9550be7fa8eba58
| 3,649,943
|
from typing import Optional
import pathlib
def limit(
observed_CLs: np.ndarray,
expected_CLs: np.ndarray,
poi_values: np.ndarray,
figure_path: Optional[pathlib.Path] = None,
close_figure: bool = False,
) -> mpl.figure.Figure:
"""Draws observed and expected CLs values as function of the parameter of interest.
Args:
observed_CLs (np.ndarray): observed CLs values
expected_CLs (np.ndarray): expected CLs values, including 1 and 2 sigma bands
poi_values (np.ndarray): parameter of interest values used in scan
figure_path (Optional[pathlib.Path], optional): path where figure should be
saved, or None to not save it, defaults to None
close_figure (bool, optional): whether to close each figure immediately after
saving it, defaults to False (enable when producing many figures to avoid
memory issues, prevents rendering in notebooks)
Returns:
matplotlib.figure.Figure: the CLs figure
"""
fig, ax = plt.subplots()
xmin = min(poi_values)
xmax = max(poi_values)
# line through CLs = 0.05
ax.hlines(
0.05,
xmin=xmin,
xmax=xmax,
linestyle="dashdot",
color="red",
label=r"CL$_S$ = 5%",
)
# 1 and 2 sigma bands
ax.fill_between(
poi_values,
expected_CLs[:, 0],
expected_CLs[:, 4],
color="yellow",
label=r"expected CL$_S$ $\pm 2\sigma$",
)
ax.fill_between(
poi_values,
expected_CLs[:, 1],
expected_CLs[:, 3],
color="limegreen",
label=r"expected CL$_S$ $\pm 1\sigma$",
)
# expected CLs
ax.plot(
poi_values,
expected_CLs[:, 2],
"--",
color="black",
label=r"expected CL$_S$",
)
# observed CLs values
ax.plot(poi_values, observed_CLs, "o-", color="black", label=r"observed CL$_S$")
# increase font sizes
for item in (
[ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()
):
item.set_fontsize("large")
# minor ticks
for axis in [ax.xaxis, ax.yaxis]:
axis.set_minor_locator(mpl.ticker.AutoMinorLocator())
ax.legend(frameon=False, fontsize="large")
ax.set_xlabel(r"$\mu$")
ax.set_ylabel(r"$\mathrm{CL}_{s}$")
ax.set_xlim([xmin, xmax])
ax.set_ylim([0, 1])
ax.tick_params(axis="both", which="major", pad=8)
ax.tick_params(direction="in", top=True, right=True, which="both")
fig.tight_layout()
utils._save_and_close(fig, figure_path, close_figure)
return fig
|
85bf753844083dcfbea8273cabe6cf7c0513c6d9
| 3,649,944
|
def get_genlu_code(cursor, label):
"""Find or create the code for this label."""
if label not in GENLU_CODES:
cursor.execute("SELECT max(id) from general_landuse")
row = cursor.fetchone()
newval = 0 if row[0] is None else row[0] + 1
LOG.debug("Inserting new general landuse code: %s [%s]", newval, label)
cursor.execute(
"INSERT into general_landuse(id, label) values (%s, %s)",
(newval, label),
)
GENLU_CODES[label] = newval
return GENLU_CODES[label]
|
a5bcfef36601a87648c21c93386b58afd7563fd7
| 3,649,945
|
def make_predictions(clf_object,predictors_str,data_source):
"""make_predictions comes up with predictions
from given input data
Input:
clf_object
object
constructed classification model
predictors_str
nd str array
string array containing names
of predictors
data_source
ndarray
source of data
either from valid
or test
Output:
preds
ndarray
prediction classes based on
given input data
"""
preds = clf_object.predict(data_source[predictors_str])
return preds
|
ed5f29e65ddf3d7f7081b89e6f747925de944567
| 3,649,946
|
def no_span_nodes(tree, debug=False, root_id=None):
"""Return True, iff there is no span node in the given ParentedTree."""
assert isinstance(tree, ParentedTree)
if root_id is None:
root_id = tree.root_id
span_label = debug_root_label('span', debug=debug, root_id=root_id)
if tree.label() == span_label:
return False
for node in tree:
        if isinstance(node, ParentedTree):
if node.label() == span_label:
return False
subtree_is_okay = no_span_nodes(node, debug=debug, root_id=root_id)
if not subtree_is_okay:
return False
return True
|
0baf0b47071b1f7faaaa5cd08bffbdc38d534afe
| 3,649,947
|
def get_token_annualized(address, days):
"""Return annualized returns for a specific token.
Args:
days [int]: Days ago for which to display annualized returns.
address [str]: Ethereum token address.
Return:
dict: Annualized returns for a specified token.
key [str]: Days annualized.
value [str]: Annualized returns.
"""
url = f"{config.URLS['annualized_returns']}/{address}"
response = api_call(url, params={'daysBack': days, 'key': POOLS_KEY})
return response
|
734b64fdce65d069eebd5fd62270b24fd2d27100
| 3,649,948
|
import random
import string
def generate_random_string(N):
"""
Generate a random string
Parameters
-------------
N
length of the string
Returns
-------------
random_string
Random string
"""
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(N))
|
3e2e672140e18546260a0882fa6cf06073bdf8e7
| 3,649,949
|
import re
def extract_charm_name_from_url(charm_url):
"""Extract the charm name from the charm url.
E.g. Extract 'heat' from local:bionic/heat-12
:param charm_url: Name of model to query.
:type charm_url: str
:returns: Charm name
:rtype: str
"""
charm_name = re.sub(r'-[0-9]+$', '', charm_url.split('/')[-1])
return charm_name.split(':')[-1]
|
9905d6b5c7a2f5047bc939d1b6e23d128ee8984d
| 3,649,950
|
def class_name(service_name: str) -> str:
"""Map service name to .pyi class name."""
return f"Service_{service_name}"
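# Example: class_name("compute") returns "Service_compute".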
|
b4bed8a677f9eedfcd66d6d37078075b0967ea20
| 3,649,951
|
def interesting_columns(df):
"""Returns non-constant column names of a dataframe."""
return sorted(set(df.columns) - set(constant_columns(df)))
|
84e548e806bcfd9031d620d3c02f942f60fa53cc
| 3,649,952
|
def liberty_str(s):
"""
>>> liberty_str("hello")
'"hello"'
>>> liberty_str('he"llo')
Traceback (most recent call last):
...
ValueError: '"' is not allow in the string: 'he"llo'
>>> liberty_str(1.0)
'"1.0000000000"'
>>> liberty_str(1)
'"1.0000000000"'
>>> liberty_str([])
Traceback (most recent call last):
...
ValueError: [] is not a string
>>> liberty_str(True)
Traceback (most recent call last):
...
ValueError: True is not a string
"""
try:
if isinstance(s, (int, float)):
s = liberty_float(s)
except ValueError:
pass
if not isinstance(s, str):
raise ValueError("%r is not a string" % s)
if '"' in s:
raise ValueError("'\"' is not allow in the string: %r" % s)
return '"'+s+'"'
|
2bc56be42a062668f94c9cc88baa94f5f73feaa3
| 3,649,953
|
import warnings
import numpy as np
def is_valid_dm(D, tol=0.0, throw=False, name="D", warning=False):
"""
Return True if input array is a valid distance matrix.
Distance matrices must be 2-dimensional numpy arrays.
They must have a zero-diagonal, and they must be symmetric.
Parameters
----------
D : ndarray
The candidate object to test for validity.
tol : float, optional
The distance matrix should be symmetric. `tol` is the maximum
difference between entries ``ij`` and ``ji`` for the distance
metric to be considered symmetric.
throw : bool, optional
An exception is thrown if the distance matrix passed is not valid.
name : str, optional
The name of the variable to checked. This is useful if
throw is set to True so the offending variable can be identified
in the exception message when an exception is thrown.
warning : bool, optional
Instead of throwing an exception, a warning message is
raised.
Returns
-------
valid : bool
True if the variable `D` passed is a valid distance matrix.
Notes
-----
Small numerical differences in `D` and `D.T` and non-zeroness of
the diagonal are ignored if they are within the tolerance specified
by `tol`.
"""
D = np.asarray(D, order='c')
valid = True
try:
s = D.shape
if len(D.shape) != 2:
if name:
raise ValueError(('Distance matrix \'%s\' must have shape=2 '
'(i.e. be two-dimensional).') % name)
else:
raise ValueError('Distance matrix must have shape=2 (i.e. '
'be two-dimensional).')
if tol == 0.0:
if not (D == D.T).all():
if name:
raise ValueError(('Distance matrix \'%s\' must be '
'symmetric.') % name)
else:
raise ValueError('Distance matrix must be symmetric.')
if not (D[range(0, s[0]), range(0, s[0])] == 0).all():
if name:
raise ValueError(('Distance matrix \'%s\' diagonal must '
'be zero.') % name)
else:
raise ValueError('Distance matrix diagonal must be zero.')
else:
if not (D - D.T <= tol).all():
if name:
raise ValueError(('Distance matrix \'%s\' must be '
'symmetric within tolerance %5.5f.')
% (name, tol))
else:
raise ValueError('Distance matrix must be symmetric within'
' tolerance %5.5f.' % tol)
if not (D[range(0, s[0]), range(0, s[0])] <= tol).all():
if name:
raise ValueError(('Distance matrix \'%s\' diagonal must be'
' close to zero within tolerance %5.5f.')
% (name, tol))
else:
                    raise ValueError(('Distance matrix diagonal must be'
                                      ' close to zero within tolerance %5.5f.')
                                     % tol)
except Exception as e:
if throw:
raise
if warning:
warnings.warn(str(e))
valid = False
return valid
|
e21163a5d68fa5cf1c2e0bbed34276c7e5a6b851
| 3,649,954
|
from types import SimpleNamespace
import scipy.linalg
import numpy as np
def solve_gradwavefront(data, excludeself=False, predict_at=None,
fix_covar=False, **kw):
"""Find turbulent contributions to measured fiber positions.
Assumes that the turbulent contributions can be modeled as the
gradient of a wavefront error. i.e., they are curl free.
Args:
data : ndarray containing measured positions and residuals
from expected locations
excludeself : bool
do not use this fiber when computing the turbulence
affecting this fiber.
**kw : additional keywords passed to solve_covar
Returns:
xturb : turbulent contributions in x direction
yturb : turbulent contributions in y direction
res : output from scipy.optimize.minimize describing best fit
covariance matrix
"""
if predict_at is not None and excludeself:
raise ValueError('predict_at does not make sense in combination with '
'excludeself')
if not fix_covar:
covar, res = solve_covar(data, lossfun=loss_gradwavefront,
covarfun=make_covar_gradwavefront, **kw)
else:
res = SimpleNamespace()
res.x = [5e-3, 5e-3, 100]
if kw.get('rq', False):
res.x = res.x + [2]
covar = make_covar_gradwavefront(data, res.x, **kw)
dvec = np.concatenate([data['dx'], data['dy']])
if not excludeself:
if predict_at:
# K(X*, X)(K(X, X) + C_n)^-1 y
# Rasmussen & Williams algorithm 2.1
chol, low = scipy.linalg.cho_factor(covar, check_finite=False,
overwrite_a=True)
covarpred = make_covar_gradwavefront_nonoise(
data['x'], data['y'], predict_at[0], predict_at[1],
res.x, **kw)
alpha = scipy.linalg.cho_solve((chol, low), dvec)
turb = np.dot(covarpred, alpha)
xturb, yturb = turb[:len(predict_at[0])], turb[len(predict_at[0]):]
else:
# remove measurement noise contribution to covar
cninv = np.eye(len(dvec))*res.x[0]**(-2)
covar -= np.eye(len(dvec))*res.x[0]**2
cpcninv = np.dot(covar, cninv)
aa = cpcninv+np.eye(len(dvec))
turb = np.linalg.solve(aa, np.dot(cpcninv, dvec))
xturb, yturb = turb[:len(data)], turb[len(data):]
else:
# Rasmussen & Williams 5.12
kinv = np.linalg.inv(covar)
turb = dvec - kinv.dot(dvec)/np.diag(kinv)
xturb, yturb = turb[:len(data)], turb[len(data):]
return xturb, yturb, res
|
94733f5cf073aa44752fa395fe24f66fc9524049
| 3,649,955
|
from typing import Any
def str_to_py(value: str):
"""Convert an string value to a native python type."""
rv: Any
if is_boolean_state(value):
rv = get_boolean(value)
elif is_integer(value):
rv = get_integer(value)
elif is_float(value):
rv = get_float(value)
else:
rv = value
return rv
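# Hedged usage sketch (added). Assuming the helper predicates used above
# (is_boolean_state, is_integer, is_float) behave as their names suggest,
# typical conversions would look like:
#
#     str_to_py("42")     # -> 42 (int)
#     str_to_py("3.14")   # -> 3.14 (float)
#     str_to_py("true")   # -> True (bool), if is_boolean_state accepts it
#     str_to_py("hello")  # -> "hello" (unchanged string)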
|
b0a5d0fbe573be6d9d961ac5c3c1895fd82539eb
| 3,649,956
|
from typing import Dict
from typing import Any
from typing import Tuple
import json
import requests
def action(request: Dict[str, Any]) -> Tuple[str, int]:
"""Triggered from Slack action via an HTTPS endpoint.
Args:
request (dict): Request payload.
"""
if request.method != 'POST':
return 'Only POST requests are accepted', 405
print('Triggered Slack action.')
form = json.loads(request.form.get('payload', ''))
_verify_web_hook(form)
response_url = form.get('response_url')
if not response_url:
return 'No response URL!', 405
action_to_perform = form.get('actions')[0].get('value')
in_office = action_to_perform == 'response_yes'
_set_information(in_office)
today = _now().strftime('%Y-%m-%d')
status_to_response = {
True: f'{DOG_NAME} will be in the office today ({today}). :dog:',
False: f'{DOG_NAME} will not be in the office today ({today}). :no_entry_sign:',
}
response_text = f'Thanks for the response! I noted that {status_to_response[in_office]}'
response = _format_slack_message(response_text)
print(f'Replying with "{response_text}".')
response_obj = requests.post(
response_url,
data=json.dumps(response),
headers={'Content-Type': 'application/json'}
)
print(f'Slack POST request status code: "{response_obj.status_code}".')
return '', 200
|
fdf24beee3e5dc929f575883114987419967b2e9
| 3,649,957
|
def inmemory():
"""Returns an xodb database backed by an in-memory xapian
database. Does not support spelling correction.
"""
return open(xapian.inmemory_open(), spelling=False, inmem=True)
|
ed67dd9bd7d70c5aab33c963dfed4e2103f5cfd1
| 3,649,958
|
def all(iterable: object) -> bool:
"""all."""
for element in iterable:
if not element:
return False
return True
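# Tiny self-check (added for illustration); mirrors the behaviour of the built-in all():
if __name__ == "__main__":
    assert all([1, True, "x"]) is True
    assert all([1, 0, 3]) is False
    assert all([]) is True  # vacuously true, like the built-in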
|
130a93230538122f35e29a6ec4ad5fca0efd835b
| 3,649,959
|
def ind2slice(Is):
"""Convert boolean and integer index arrays to slices.
Integer and boolean arrays are converted to slices that span the selected elements, but may include additional
elements. If possible, the slices are stepped.
Arguments
---------
Is : tuple
tuple of indices (slice, integer array, boolean array, or single integer)
Returns
-------
Js : tuple
tuple of slices
"""
if isinstance(Is, tuple):
return tuple(_ind2slice(I) for I in Is)
else:
return _ind2slice(Is)
|
6be6a82750f9f73b2008c528ff192b07b8e0a784
| 3,649,960
|
def cleanup_mediawiki(text):
"""Modify mediawiki markup to make it pandoc ready.
Long term this needs to be highly configurable on a site-by-site
basis, but for now I'll put local hacks here.
Returns tuple: cleaned up text, list of any categories
"""
# This tag was probably setup via SyntaxHighlight GeSHi for biopython.org's wiki
#
# <python>
# import antigravity
# </python>
#
# Replacing it with the following makes pandoc happy,
#
# <source lang=python>
# import antigravity
# </source>
#
# Conversion by pandoc to GitHub Flavour Markdown gives:
#
# ``` python
# import antigravity
# ```
#
# Which is much nicer.
#
# =================================================
#
# I may have been misled by old links, but right now I don't
# think there is an easy way to get a table-of-contents with
# (GitHub Flavoured) Markdown which works on GitHub pages.
#
# Meanwhile the MediaWiki __TOC__ etc get left in the .md
# so I'm just going to remove them here.
#
new = []
categories = []
languages = [
"python", "perl", "sql", "bash", "ruby", "java", "xml", "haskell"
]
for line in text.split("\n"):
# line is already unicode
line = line.replace(b"\xe2\x80\x8e".decode("utf-8"),
"") # LEFT-TO-RIGHT
# TODO - Would benefit from state tracking (for tag mismatches)
for lang in languages:
# Easy case <python> etc
if line.lower().startswith("<%s>" % lang):
line = (("<source lang=%s\n" % lang) +
line[len(lang) + 2:]).strip()
# Also cope with <python id=example> etc:
elif line.startswith("<%s " % lang) and ">" in line:
line = (("<source lang=%s " % lang) +
line[len(lang) + 2:]).strip()
# Want to support <python>print("Hello world")</python>
# where open and closing tags are on the same line:
if line.rstrip() == "</%s>" % lang:
line = "</source>"
elif line.rstrip().endswith("</%s>" % lang):
line = line.replace("</%s>" % lang, "\n</source>")
undiv = un_div(line)
if undiv in ["__TOC__", "__FORCETOC__", "__NOTOC__"]:
continue
elif undiv.startswith("[[Image:") and undiv.endswith("]]"):
# Markdown image wrapped in a div does not render on Github Pages,
# remove the div and any attempt at styling it (e.g. alignment)
line = undiv
# Look for any category tag, usually done as a single line:
if "[[Category:" in line:
tag = line[line.index("[[Category:") + 11:]
tag = tag[:tag.index("]]")]
assert ("[[Category:%s]]" %
tag) in line, "Infered %r from %s" % (tag, line)
categories.append(tag)
line = line.replace("[[Category:%s]]" % tag, "").strip()
if not line:
continue
# Special case fix for any category links,
# See https://github.com/jgm/pandoc/issues/2849
if "[[:Category:" in line:
line = line.replace("[[:Category:", "[[Category%3A")
if "[[User:" in line:
line = line.replace("[[User:", "[[User%3A")
new.append(line)
return "\n".join(new), categories
|
c4f210d60e847740f4b36ed321cfd4f95b88b39f
| 3,649,961
|
import re
def select_devices(devices):
""" 选择设备 """
device_count = len(devices)
print("Device list:")
print("0) All devices")
for i, d in enumerate(devices, start=1):
print("%d) %s\t%s" % (i, d['serial'], d['model']))
print("q) Exit this operation")
selected = input("\nselect: ")
nums = None
if selected == '0':
nums = range(0, device_count)
elif selected == 'q':
print("Exit this operation")
exit(-1)
else:
nums = []
for i in re.split(r'[\s+,]', selected):
if i.isdigit():
seq = int(i) - 1
if 0 <= seq < device_count:
nums.append(seq)
continue
print("error input: %s, retry again\n" % i)
return select_devices(devices)
return nums
|
91c405c8a198deb01e8abecc592ac2286dc712fd
| 3,649,962
|
def Fill( h ):
"""fill every empty value in histogram with
previous value.
"""
new_h = []
x,v = h[0]
    if isinstance(v, (list, tuple)):
l = len(v)
previous_v = [0] * l
else:
previous_v = 0
for x, v in h:
        if isinstance(v, (list, tuple)):
for i in range(0,l):
if v[i] == 0:
v[i] = previous_v[i]
else:
previous_v[i] = v[i]
else:
if v == 0:
v = previous_v
else:
previous_v = v
new_h.append( (x, v) )
return new_h
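# Small worked example (added): gaps (zeros) are filled with the last
# non-zero value seen.
if __name__ == "__main__":
    h = [(0, 2), (1, 0), (2, 5), (3, 0)]
    assert Fill(h) == [(0, 2), (1, 2), (2, 5), (3, 5)]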
|
429a4d723c38692e8ee0ebaea2fda1ec008cded6
| 3,649,963
|
from re import L
def pendulum(theta, S, mg, drag) -> ps.Composition:
"""Draw a free body animation of a pendulum.
params:
theta: the angle from the vertical at which the pendulum is.
S: the force exerted toward the pivot.
mg: the force owing to gravity.
drag: the force acting against the motion of the pendulum.
return: A composition of the pendulum
"""
a = theta
P = ps.Point(W / 2, 0.9 * H) # rotation point
path = ps.Arc(P, L, -ps.Angle(np.pi / 2), a)
mass_pt = path.end
rod = ps.Line(P, mass_pt)
theta = ps.AngularDimension(
r"$\theta$", P + ps.Point(0, -L / 4), P + (mass_pt - P).unit_vector * (L / 4), P
)
theta.extension_lines = False
mass = ps.Circle(mass_pt, L / 30.0).set_fill_color(ps.Style.Color.BLUE)
rod_vec = rod.end - rod.start
length = ps.LinearDimension("$L$", mass_pt, P)
# Displace length indication
length = length.translate(ps.Point(-np.cos(a), -np.sin(a)) * (L / 15.0))
length.style.line_width = 0.1
gravity_start = ps.Point(0.8 * L, 0)
gravity = ps.Gravity(P + gravity_start, L / 3)
dashed_thin_black_line = ps.Style()
dashed_thin_black_line.line_style = ps.Style.LineStyle.DASHED
dashed_thin_black_line.line_color = ps.Style.Color.BLACK
dashed_thin_black_line.line_width = 1.0
path.style = dashed_thin_black_line
vertical = ps.Line(rod.start, rod.start + ps.Point(0, -L))
vertical.style = dashed_thin_black_line
rod.style = dashed_thin_black_line
comp = ps.Composition(
{
"body": mass,
"rod": rod,
"vertical": vertical,
"theta": theta,
"path": path,
"g": gravity,
# "L": length,
}
)
magnitude = 1.2 * L / 6 # length of a unit force in figure
force = mg # constant (scaled eq: about 1)
force *= magnitude
mg_force = (
ps.Force(
"$mg$",
mass_pt,
mass_pt + ps.Point(0, 1) * force,
text_position=ps.TextPosition.END,
)
if force != 0
else None
)
force = S
force *= magnitude
rod_force = (
ps.Force(
"S",
mass_pt,
mass_pt - rod_vec.unit_vector * force,
text_position=ps.TextPosition.END,
)
if force != 0
else None
)
force = drag
force *= magnitude
air_force = (
ps.Force(
"",
mass_pt,
mass_pt - rod_vec.normal * force,
)
if force != 0
else None
)
x0y0 = ps.Text("$(x_0,y_0)$", P + ps.Point(-0.4, -0.1))
ir = ps.Force(
r"$\mathbf{i}_r$",
P,
P + rod_vec.unit_vector * (L / 10),
text_position=ps.TextPosition.END,
# spacing=ps.Point(0.015, 0)
)
ith = ps.Force(
r"$\mathbf{i}_{\theta}$",
P,
P + rod_vec.normal * (L / 10),
text_position=ps.TextPosition.END,
# spacing=ps.Point(0.02, 0.005)
)
body_diagram = ps.Composition(
{
"mg": mg_force,
"S": rod_force,
"air": air_force,
"ir": ir,
"ith": ith,
"origin": x0y0,
}
)
comp = comp.merge(body_diagram)
return comp
|
9dd7918b00bae82600d4bb064461f7b6e58e9fb2
| 3,649,964
|
def get_items():
"""Fetches items from `INITIAL_OFFSET` in batches of `PAGINATION_OFFSET` until there are no
more """
offset = INITIAL_OFFSET
items = []
while True:
batch = get_page_of_items(JSON_ENDPOINT.format(offset))
if not batch:
break
items.extend(batch)
offset += PAGINATION_OFFSET
return items
|
b0950ee8eb291cceb1f0871e918b5aff26b6c2ab
| 3,649,965
|
from itertools import combinations_with_replacement
def tcombinations_with_replacement(iterable, r):
"""
>>> tcombinations_with_replacement('ABCD', 0)
((),)
>>> tcombinations_with_replacement('ABCD', 1)
(('A',), ('B',), ('C',), ('D',))
>>> tcombinations_with_replacement('ABCD', 2)
(('A', 'A'), ('A', 'B'), ('A', 'C'), ('A', 'D'), ('B', 'B'), ('B', 'C'), ('B', 'D'), ('C', 'C'), ('C', 'D'), ('D', 'D'))
>>> tcombinations_with_replacement('ABCD', 3)
(('A', 'A', 'A'), ('A', 'A', 'B'), ('A', 'A', 'C'), ('A', 'A', 'D'), ('A', 'B', 'B'), ('A', 'B', 'C'), ('A', 'B', 'D'), ('A', 'C', 'C'), ('A', 'C', 'D'), ('A', 'D', 'D'), ('B', 'B', 'B'), ('B', 'B', 'C'), ('B', 'B', 'D'), ('B', 'C', 'C'), ('B', 'C', 'D'), ('B', 'D', 'D'), ('C', 'C', 'C'), ('C', 'C', 'D'), ('C', 'D', 'D'), ('D', 'D', 'D'))
>>> tcombinations_with_replacement('ABCD', 4)
(('A', 'A', 'A', 'A'), ('A', 'A', 'A', 'B'), ('A', 'A', 'A', 'C'), ('A', 'A', 'A', 'D'), ('A', 'A', 'B', 'B'), ('A', 'A', 'B', 'C'), ('A', 'A', 'B', 'D'), ('A', 'A', 'C', 'C'), ('A', 'A', 'C', 'D'), ('A', 'A', 'D', 'D'), ('A', 'B', 'B', 'B'), ('A', 'B', 'B', 'C'), ('A', 'B', 'B', 'D'), ('A', 'B', 'C', 'C'), ('A', 'B', 'C', 'D'), ('A', 'B', 'D', 'D'), ('A', 'C', 'C', 'C'), ('A', 'C', 'C', 'D'), ('A', 'C', 'D', 'D'), ('A', 'D', 'D', 'D'), ('B', 'B', 'B', 'B'), ('B', 'B', 'B', 'C'), ('B', 'B', 'B', 'D'), ('B', 'B', 'C', 'C'), ('B', 'B', 'C', 'D'), ('B', 'B', 'D', 'D'), ('B', 'C', 'C', 'C'), ('B', 'C', 'C', 'D'), ('B', 'C', 'D', 'D'), ('B', 'D', 'D', 'D'), ('C', 'C', 'C', 'C'), ('C', 'C', 'C', 'D'), ('C', 'C', 'D', 'D'), ('C', 'D', 'D', 'D'), ('D', 'D', 'D', 'D'))
>>> tcombinations_with_replacement('ABC', 4)
(('A', 'A', 'A', 'A'), ('A', 'A', 'A', 'B'), ('A', 'A', 'A', 'C'), ('A', 'A', 'B', 'B'), ('A', 'A', 'B', 'C'), ('A', 'A', 'C', 'C'), ('A', 'B', 'B', 'B'), ('A', 'B', 'B', 'C'), ('A', 'B', 'C', 'C'), ('A', 'C', 'C', 'C'), ('B', 'B', 'B', 'B'), ('B', 'B', 'B', 'C'), ('B', 'B', 'C', 'C'), ('B', 'C', 'C', 'C'), ('C', 'C', 'C', 'C'))
"""
return tuple(combinations_with_replacement(iterable, r))
|
432c826751bcfc1aa7bfef36cd25a198e6fe7b72
| 3,649,966
|
def discreteFiber(c, s, B=I3, ndiv=120, invert=False, csym=None, ssym=None):
"""
Generate symmetrically reduced discrete orientation fiber.
Parameters
----------
c : TYPE
DESCRIPTION.
s : TYPE
DESCRIPTION.
B : TYPE, optional
DESCRIPTION. The default is I3.
ndiv : TYPE, optional
DESCRIPTION. The default is 120.
invert : TYPE, optional
DESCRIPTION. The default is False.
csym : TYPE, optional
DESCRIPTION. The default is None.
ssym : TYPE, optional
DESCRIPTION. The default is None.
Raises
------
RuntimeError
DESCRIPTION.
Returns
-------
retval : TYPE
DESCRIPTION.
"""
ztol = cnst.sqrt_epsf
# arg handling for c
if hasattr(c, '__len__'):
if hasattr(c, 'shape'):
assert c.shape[0] == 3, \
'scattering vector must be 3-d; yours is %d-d' \
% (c.shape[0])
if len(c.shape) == 1:
c = c.reshape(3, 1)
elif len(c.shape) > 2:
raise RuntimeError(
'incorrect arg shape; must be 1-d or 2-d, yours is %d-d'
% (len(c.shape))
)
else:
# convert list input to array and transpose
if len(c) == 3 and isscalar(c[0]):
c = asarray(c).reshape(3, 1)
else:
c = asarray(c).T
else:
raise RuntimeError('input must be array-like')
# arg handling for s
if hasattr(s, '__len__'):
if hasattr(s, 'shape'):
assert s.shape[0] == 3, \
'scattering vector must be 3-d; yours is %d-d' \
% (s.shape[0])
if len(s.shape) == 1:
s = s.reshape(3, 1)
elif len(s.shape) > 2:
raise RuntimeError(
'incorrect arg shape; must be 1-d or 2-d, yours is %d-d'
% (len(s.shape)))
else:
# convert list input to array and transpose
if len(s) == 3 and isscalar(s[0]):
s = asarray(s).reshape(3, 1)
else:
s = asarray(s).T
else:
raise RuntimeError('input must be array-like')
nptc = c.shape[1]
npts = s.shape[1]
c = unitVector(dot(B, c)) # turn c hkls into unit vector in crys frame
s = unitVector(s) # convert s to unit vector in samp frame
retval = []
for i_c in range(nptc):
dupl_c = tile(c[:, i_c], (npts, 1)).T
ax = s + dupl_c
anrm = columnNorm(ax).squeeze() # should be 1-d
okay = anrm > ztol
nokay = okay.sum()
if nokay == npts:
ax = ax / tile(anrm, (3, 1))
else:
nspace = nullSpace(c[:, i_c].reshape(3, 1))
hperp = nspace[:, 0].reshape(3, 1)
if nokay == 0:
ax = tile(hperp, (1, npts))
else:
ax[:, okay] = ax[:, okay] / tile(anrm[okay], (3, 1))
                ax[:, ~okay] = tile(hperp, (1, npts - nokay))
q0 = vstack([zeros(npts), ax])
# find rotations
# note: the following line fixes bug with use of arange
# with float increments
phi = arange(0, ndiv) * (2*pi/float(ndiv))
qh = quatOfAngleAxis(phi, tile(c[:, i_c], (ndiv, 1)).T)
        # the fibers, arranged as (npts, 4, ndiv)
qfib = dot(
quatProductMatrix(qh, mult='right'),
q0
).transpose(2, 1, 0)
if csym is not None:
retval.append(
toFundamentalRegion(
qfib.squeeze(),
crysSym=csym,
sampSym=ssym
)
)
else:
retval.append(fixQuat(qfib).squeeze())
return retval
|
fd6c19886b16f715f472d85fb48d6289a42cd105
| 3,649,967
|
import random
def random_organism(invalid_data):
"""
Generate Random Organism
return: string containing "organism" name from CanCOGeN vocabulary.
"""
return random.choice(covid19_vocab_dict.get('organism')), global_valid_data
|
9319d04ddf874c43d84489c2b20c52cee334a8c1
| 3,649,968
|
import matplotlib.pyplot as plt
import seaborn as sns
def fate_bias(adata,
group,
basis='umap',
fate_bias_df=None,
figsize=(6, 4),
save_show_or_return='show',
save_kwargs={},
**cluster_maps_kwargs
):
"""Plot the lineage (fate) bias of cells states whose vector field trajectories are predicted.
This function internally calls `dyn.tl.fate_bias` to calculate fate bias dataframe. You can also visualize the data
    frame via pandas styling (https://pandas.pydata.org/pandas-docs/stable/user_guide/style.html), for example:
>>> df = dyn.tl.fate_bias(adata)
>>> df.style.background_gradient(cmap='viridis')
Parameters
----------
adata: :class:`~anndata.AnnData`
AnnData object that contains the predicted fate trajectories in the `uns` attribute.
group: `str`
The column key that corresponds to the cell type or other group information for quantifying the bias of cell
state.
basis: `str` or None (default: `None`)
The embedding data space that cell fates were predicted and cell fates will be quantified.
fate_bias_df: `pandas.DataFrame` or None (default: `None`)
The DataFrame that stores the fate bias information, calculated via fate_bias_df = dyn.tl.fate_bias(adata).
figsize: `None` or `[float, float]` (default: None)
The width and height of a figure.
save_show_or_return: {'show', 'save', 'return'} (default: `show`)
Whether to save, show or return the figure.
save_kwargs: `dict` (default: `{}`)
            A dictionary that will be passed to the save_fig function. By default it is an empty dictionary and the save_fig function
will use the {"path": None, "prefix": 'fate_bias', "dpi": None, "ext": 'pdf', "transparent": True, "close":
True, "verbose": True} as its parameters. Otherwise you can provide a dictionary that properly modify those keys
according to your needs.
cluster_maps_kwargs:
Additional arguments passed to sns.clustermap.
Returns
-------
        Nothing, but plots a heatmap that shows the fate bias of each cell state toward each of the cell groups.
"""
fate_bias = fate_bias_pd(adata, group=group, basis=basis) if fate_bias_df is None else fate_bias_df
if 'confidence' in fate_bias.keys():
fate_bias.set_index([fate_bias.index, fate_bias.confidence], inplace=True)
ax = sns.clustermap(fate_bias, col_cluster=True, row_cluster=True, figsize=figsize, yticklabels=False,
**cluster_maps_kwargs)
if save_show_or_return == "save":
s_kwargs = {"path": None, "prefix": 'fate_bias', "dpi": None,
"ext": 'pdf', "transparent": True, "close": True, "verbose": True}
s_kwargs = update_dict(s_kwargs, save_kwargs)
save_fig(**s_kwargs)
elif save_show_or_return == "show":
plt.tight_layout()
plt.show()
elif save_show_or_return == "return":
return ax
|
d1246bbcca11439c6577e7fd3f3fb635aaa2a9f7
| 3,649,969
|
import numpy as np
def comput_mean_ndcg(df, k):
    """
    Input: rating_info dataframe with columns (user_id, movie_id, rating, pred).
    Output: array of per-user NDCG@k scores (take .mean() for the average).
    For each user, collect their true and predicted ratings as arrays and
    score them with ndcg().
    """
#df.insert(df.shape[1], 'pred', pred)
#print(df.groupby('user_id'))
piece = dict(list(df.groupby('user_id')))
ndcg_list = []
for user in df["user_id"].unique():
user_rating = piece[user]["rating"].values
user_pred_rating = piece[user]['pred'].values
ndcg_score = ndcg(user_pred_rating, user_rating, k)
ndcg_list.append(ndcg_score)
ndcg_list = np.array(ndcg_list)
#return ndcg_list.mean()
return ndcg_list
|
70bcb630b7df9940a76499f34a769bd2dca90283
| 3,649,970
|
from typing import List
def to_im_list(IMs: List[str]):
"""Converts a list of string to IM Objects"""
return [IM.from_str(im) for im in IMs]
|
9694500ae2f5fa7c203100a5a1756fe71af862cc
| 3,649,971
|
def ref_ellipsoid(refell, UNITS='MKS'):
"""
Computes parameters for a reference ellipsoid
Arguments
---------
refell: reference ellipsoid name
Keyword arguments
-----------------
UNITS: output units
MKS: meters, kilograms, seconds
CGS: centimeters, grams, seconds
"""
if refell.upper() in ('CLK66','NAD27'):
#-- Clarke 1866
a_axis = 6378206.4#-- [m] semimajor axis of the ellipsoid
flat = 1.0/294.9786982#-- flattening of the ellipsoid
elif refell.upper() in ('GRS80','NAD83'):
#-- Geodetic Reference System 1980
#-- North American Datum 1983
        a_axis = 6378137.0#-- [m] semimajor axis of the ellipsoid
        flat = 1.0/298.257222101#-- flattening of the ellipsoid
GM = 3.986005e14#-- [m^3/s^2] Geocentric Gravitational Constant
elif (refell.upper() == 'GRS67'):
#-- Geodetic Reference System 1967
#-- International Astronomical Union (IAU ellipsoid)
a_axis = 6378160.0#-- [m] semimajor axis of the ellipsoid
flat = 1.0/298.247167427#-- flattening of the ellipsoid
GM = 3.98603e14#-- [m^3/s^2] Geocentric Gravitational Constant
omega = 7292115.1467e-11#-- angular velocity of the Earth [rad/s]
elif (refell.upper() == 'WGS72'):
#-- World Geodetic System 1972
a_axis = 6378135.0#-- [m] semimajor axis of the ellipsoid
flat = 1.0/298.26#-- flattening of the ellipsoid
elif (refell.upper() == 'WGS84'):
#-- World Geodetic System 1984
a_axis = 6378137.0#-- [m] semimajor axis of the ellipsoid
flat = 1.0/298.257223563#-- flattening of the ellipsoid
elif (refell.upper() == 'ATS77'):
#-- Quasi-earth centred ellipsoid for ATS77
a_axis = 6378135.0#-- [m] semimajor axis of the ellipsoid
flat = 1.0/298.257#-- flattening of the ellipsoid
elif (refell.upper() == 'KRASS'):
#-- Krassovsky (USSR)
a_axis = 6378245.0#-- [m] semimajor axis of the ellipsoid
flat = 1.0/298.3#-- flattening of the ellipsoid
elif (refell.upper() == 'INTER'):
#-- International
a_axis = 6378388.0#-- [m] semimajor axis of the ellipsoid
flat = 1/297.0#-- flattening of the ellipsoid
elif (refell.upper() == 'MAIRY'):
#-- Modified Airy (Ireland 1965/1975)
a_axis = 6377340.189#-- [m] semimajor axis of the ellipsoid
flat = 1/299.3249646#-- flattening of the ellipsoid
elif (refell.upper() == 'TOPEX'):
#-- TOPEX/POSEIDON ellipsoid
a_axis = 6378136.3#-- [m] semimajor axis of the ellipsoid
flat = 1.0/298.257#-- flattening of the ellipsoid
GM = 3.986004415e14#-- [m^3/s^2]
elif (refell.upper() == 'EGM96'):
#-- EGM 1996 gravity model
a_axis = 6378136.3#-- [m] semimajor axis of the ellipsoid
flat = 1.0/298.256415099#-- flattening of the ellipsoid
GM = 3.986004415e14#-- [m^3/s^2]
elif (refell.upper() == 'HGH80'):
#-- Hughes 1980 Ellipsoid used in some NSIDC data
a_axis = 6378273.0#-- [m] semimajor axis of the ellipsoid
flat = 1.0/298.279411123064#-- flattening of the ellipsoid
else:
raise ValueError('Incorrect reference ellipsoid Name')
if refell.upper() not in ('GRS80','GRS67','NAD83','TOPEX','EGM96'):
#-- for ellipsoids not listing the Geocentric Gravitational Constant
GM = 3.986004418e14#-- [m^3/s^2]
    if refell.upper() not in ('GRS67',):
#-- for ellipsoids not listing the angular velocity of the Earth
omega = 7292115e-11#-- [rad/s]
#-- convert units to CGS
if (UNITS == 'CGS'):
        a_axis *= 100.0
        GM *= 1e6#-- [m^3/s^2] -> [cm^3/s^2]
#-- DERIVED PARAMETERS:
#-- mean radius of the Earth having the same volume
    #-- (4pi/3)R^3 = (4pi/3)(a^2)b = (4pi/3)(a^3)(1 - f)
rad_e = a_axis*(1.0 -flat)**(1.0/3.0)
#-- semiminor axis of the ellipsoid
b_axis = (1.0 -flat)*a_axis#-- [m]
#-- Ratio between ellipsoidal axes
ratio = (1.0 -flat)
#-- Polar radius of curvature
pol_rad=a_axis/(1.0 -flat)
#-- Linear eccentricity
lin_ecc = np.sqrt((2.0*flat - flat**2)*a_axis**2)
#-- first numerical eccentricity
ecc1 = lin_ecc/a_axis
#-- second numerical eccentricity
ecc2 = lin_ecc/b_axis
#-- m parameter [omega^2*a^2*b/(GM)]
#-- p. 70, Eqn.(2-137)
mp = omega**2*((1 -flat)*a_axis**3)/GM
#-- q, q_0
#-- p. 67, Eqn.(2-113)
q = 0.5*((1.0 + 3.0/(ecc2**2))*np.arctan(ecc2)-3.0/ecc2)
q_0 = 3*(1.0 +1.0/(ecc2**2))*(1.0 -1.0/ecc2*np.arctan(ecc2))-1.0
#-- J_2 p. 75 Eqn.(2-167), p. 76 Eqn.(2-172)
j_2 = (ecc1**2)*(1.0 - 2.0*mp*ecc2/(15.0*q))/3.0
#-- Normalized C20 terms.
#-- p. 60, Eqn.(2-80)
C20 = -j_2/np.sqrt(5.0)
#-- Normal gravity at the equator.
#-- p. 71, Eqn.(2-141)
ga = GM/(a_axis*b_axis)*(1.0 -mp -mp*ecc2*q_0/(6.0*q))
#-- Normal gravity at the pole.
#-- p. 71, Eqn.(2-142)
gb = GM/(a_axis**2.0)*(1.0 +mp*ecc2*q_0/(3.0*q))
#-- ratio between gravity at pole versus gravity at equator
dk = b_axis*gb/(a_axis*ga) - 1.0
#-- Normal potential at the ellipsoid
#-- p. 68, Eqn.(2-123)
U0 = GM/lin_ecc*np.arctan(ecc2)+(1.0/3.0)*omega**2*a_axis**2
#-- Surface area of the reference ellipsoid [m^2]
area = np.pi*a_axis**2.*(2.+((1.-ecc1**2)/ecc1)*np.log((1.+ecc1)/(1.-ecc1)))
#-- Volume of the reference ellipsoid [m^3]
vol = (4.0*np.pi/3.0)*(a_axis**3.0)*(1.0-ecc1**2.0)**0.5
return {'a':a_axis, 'b':b_axis, 'f':flat, 'rad_p':pol_rad, 'rad_e':rad_e,
'ratio':ratio, 'GM':GM, 'omega':omega, 'C20':C20, 'J2':j_2, 'U0':U0,
'dk':dk, 'norm_a':ga, 'norm_b':gb, 'mp':mp, 'q':q, 'q0':q_0,
'ecc':lin_ecc, 'ecc1':ecc1,'ecc2':ecc2, 'area':area, 'volume':vol}
|
8f5032af1375d758445ed139bc4a2d6989f15dc5
| 3,649,972
|
def is_number(s):
"""
Check if it is a number.
Args:
s: The variable that needs to be checked.
Returns:
bool: True if float, False otherwise.
"""
try:
float(s)
return True
except ValueError:
return False
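# Quick self-check (added for illustration):
if __name__ == "__main__":
    assert is_number("3.14") is True
    assert is_number("-7") is True
    assert is_number("abc") is False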
|
071aeac26a5a907caf1764dc20d7de1c6408714b
| 3,649,973
|
import time
import pickle
import json
def index():
"""
搜索提示功能
根据输入的值自动联想,支持中文,英文,英文首字母
:return: response
"""
start_time = time.time()
# 输入词转小写
wd = request.args.get('wd').lower()
user_id = request.args.get('user_id')
if user_id and user_id != 'None':
print(user_id)
print(type(user_id))
if not wd:
return make_response("""queryList({"q":"","p":false,"bs":"","csor":"0","status":770,"s":[]});""")
# 搜索词(支持中文,英文,英文首字母)
s = wd
# result = search_script_conf.get_tips_word(search_script_conf.sug, search_script_conf.data, s)
# # print('前缀:',result)
global TREE
if TREE is None:
# 第一次为空,需要在接口中加载一次已经生成好的字典树,pickle.loads这一步耗时接近1s
temp = r.get('tree')
TREE = pickle.loads(temp)
# 内容中有字典树,直接获取
suggest = get_tips_word(TREE[0], TREE[1], s)
print('前缀:', suggest)
data_top = {}
if len(suggest) > 0:
# 从redis获取热度值
heat_list = r_decode.hmget("hot_word_heat", suggest)
_map = dict(zip(suggest, heat_list))
# 按照热度值排序
data = dict(sorted(_map.items(), key=lambda x: int(x[1]), reverse=True))
print("热度值排序:", data)
# TODO 获取个性化搜索结果展示
suggest = list(data.keys())[:15]
data_top = {i: data[i] for i in suggest}
response = make_response(
"""queryList({'q':'""" + wd + """','p':false,'s':""" + str(suggest) + """});""")
response.headers['Content-Type'] = 'text/javascript; charset=utf-8'
end_time = time.time()
# 记录日志
ret = dict()
ret['code'] = 200
ret['msg'] = "ok"
ret['search_word'] = wd
ret['search_suggest'] = suggest
ret['heat_rank'] = data_top
ret['search_type'] = 'search_suggest'
ret['gmt_created'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
ret['user_id'] = ''
ret['platformCode'] = ''
ret['total_time'] = end_time - start_time
info(json.dumps(ret, ensure_ascii=False))
return response
|
7ab07d20dea10c6bd16d899c8c9298f02bd9f17e
| 3,649,974
|
import itertools
def combineSets(listOfSets):
"""
Combines sets of strings by taking the cross product of the sets and \
concatenating the elements in the resulting tuples
:param listOfSets: 2-D list of strings
:returns: a list of strings
"""
totalCrossProduct = ['']
for i in range(len(listOfSets)):
currentProduct = []
for crossProduct in itertools.product(totalCrossProduct, listOfSets[i]):
currentProduct.append((crossProduct[0].strip() + ' ' + crossProduct[1].strip()).strip())
totalCrossProduct = currentProduct
return totalCrossProduct
|
26a383d224716fd8f4cf8589607e2df1ccb82a7e
| 3,649,975
|
def list_all_connections(pg_id='root', descendants=True):
"""
Lists all connections for a given Process Group ID
Args:
pg_id (str): ID of the Process Group to retrieve Connections from
descendants (bool): True to recurse child PGs, False to not
Returns:
(list): List of ConnectionEntity objects
"""
return list_all_by_kind('connections', pg_id, descendants)
|
6df326ff521f175b3ccfe4b1d2488328fe6e6213
| 3,649,976
|
import os
def find_file(fname):
"""
Return the full file name (path plus name) to file fname.
fname is the name of the file to find.
If the file fname is not found, then simply return None.
"""
for d in get_cli_search_dirs():
full_filename = os.path.join(d, fname)
if os.path.exists(full_filename):
return full_filename
raise Exception('[WARN] Could not find {}; skipping.'.format(fname))
|
6892d0fb6349c9029d557daa25b4a394a38a0311
| 3,649,977
|
def _GetTombstoneData(device, tombstone_file):
"""Retrieve the tombstone data from the device
Args:
device: An instance of DeviceUtils.
tombstone_file: the tombstone to retrieve
Returns:
A list of lines
"""
return device.old_interface.GetProtectedFileContents(
'/data/tombstones/' + tombstone_file)
|
99322ea3d67e150f4433c713159eb7bc8069271f
| 3,649,978
|
import time
def _strTogYear(v):
"""Test gYear value
@param v: the literal string
@return v
@raise ValueError: invalid value
"""
try:
time.strptime(v+"-01-01", "%Y-%m-%d")
return v
    except Exception:
raise ValueError("Invalid gYear %s" % v)
|
a65e04c2d3790d3d55bbc8788d6802e1aae1b78c
| 3,649,979
|
import numpy as np
def aca_full_pivoting(A, epsilon):
"""ACA with full pivoting as in the lecture
Takes in a matrix, and returns the CUR decomposition
"""
# R0 = A
Rk = A.copy()
I_list = []
J_list = []
while frobenius_norm(Rk) > epsilon*frobenius_norm(A):
i, j = np.unravel_index(np.argmax(np.abs(Rk), axis=None), Rk.shape)
I_list.append(i)
J_list.append(j)
delta = Rk[i, j]
u = Rk[:, j]
v = Rk[i, :].T / delta
Rk = Rk - np.outer(u, v)
R = A[I_list, :]
U = np.linalg.inv(A[I_list, :][:, J_list])
C = A[:, J_list]
return C, U, R
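# Hedged usage sketch (added). Assumes `frobenius_norm` (not shown in this
# record) is equivalent to np.linalg.norm(X, 'fro'):
#
#     rng = np.random.default_rng(0)
#     A = rng.normal(size=(50, 3)) @ rng.normal(size=(3, 40))  # rank-3 matrix
#     C, U, R = aca_full_pivoting(A, epsilon=1e-8)
#     np.allclose(A, C @ U @ R)  # expected True for a low-rank matrix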
|
96bcfd4b8cb560904efc4ab6cfac6473d8dafe47
| 3,649,980
|
from typing import List
import os
import matplotlib.pyplot as plt
from wordcloud import WordCloud
def get_word_cloud(words: List[str], max_words=500, image_path=None, image_name=None):
"""
Create a word cloud based on a set of words.
Args:
words (List[str]):
List of words to be included in the word cloud.
max_words (int):
Maximum number of words to be included in the word cloud.
image_path (str):
Path to the image file where to save the word cloud.
image_name (str):
Name of the image where to save the word cloud.
"""
# change the value to black
def black_color_func(
word, font_size, position, orientation, random_state=None, **kwargs
):
return "hsl(0,100%, 1%)"
# set the wordcloud background color to white
# set width and height to higher quality, 3000 x 2000
wordcloud = WordCloud(
font_path="/Library/Fonts/Arial Unicode.ttf",
background_color="white",
width=3000,
height=2000,
max_words=max_words,
).generate(" ".join(words))
# set the word color to black
wordcloud.recolor(color_func=black_color_func)
# set the figsize
plt.figure(figsize=[15, 10])
# plot the wordcloud
plt.imshow(wordcloud, interpolation="bilinear")
# remove plot axes
plt.axis("off")
if image_path is not None and image_name is not None:
# save the image
plt.savefig(os.path.join(image_path, image_name), bbox_inches="tight")
|
3ccbfe615d08f68ea7160848c254b7354d509a83
| 3,649,981
|
import os
import fnmatch
def get_data_sets(cnn_n_input, data_directory="data_set/", n_data_sets=5):
"""
Retrieve data and partition it into n_data_sets for cross-validation.
"""
print("Partitioning data into", str(n_data_sets), "splits.")
# Get list of labels
list_labels = extract_data.get_labels(data_directory + "labels.txt")
n_labels = len(list_labels)
# Dictionary that gives labels ID
label_to_int = dict()
for i in range(n_labels):
label_to_int[list_labels[i]] = i
# Dictionary that will count how many times each label appears
count_labels = dict()
# Data partitions : (time series, labels)
data_partition = [(list(), list()) for _ in range(n_data_sets)]
# Loop over data_set directory
files = [f for f in os.listdir(data_directory) if fnmatch.fnmatch(f, "*_label.txt")]
for file in files:
# Get label
label = extract_data.extract_label_from_txt(data_directory + file)[1]
# Increment label count
if label in count_labels:
count_labels[label] += 1
else:
count_labels[label] = 1
# Label_id
label_id = label_to_int[label]
# Get time series (data)
data = extract_data.extract_data_from_txt(data_directory + "MIN " + file.split('_')[0] + ".txt")\
.Value.values.astype(dtype="uint16", copy=False)
# Split data into samples
data = create_samples(data, cnn_n_input)
# Create labels
labels = [label_id] * len(data)
# Append to partition
data_partition[count_labels[label] % n_data_sets][0].extend(data) # Add data
data_partition[count_labels[label] % n_data_sets][1].extend(labels) # Add labels
print("--\nBuilding types inventory :")
print(count_labels)
print("--\nNumber of samples in each split :")
for x in data_partition:
print('\t' + str(len(x[0])))
return data_partition
|
3adcc276d3e69b98650728d70fe27d57d12c8349
| 3,649,982
|
import os
def _get_pyo_codes(fmt='', dtype='int16', file_out=''):
"""Convert file and data formats to int codes, e.g., wav int16 -> (0, 0).
"""
if not fmt:
dot_ext = os.path.splitext(file_out)[1]
fmt = dot_ext.lower().strip('.')
if fmt in pyo_formats:
file_fmt = pyo_formats[fmt]
else:
msg = 'format `{0}` not supported'.format(file_out)
raise PyoFormatException(msg)
if fmt in ['sd2', 'flac']:
ok_dfmt = {'int16': 0, 'int24': 1}
else:
ok_dfmt = pyo_dtype
if dtype in ok_dfmt:
data_fmt = pyo_dtype[dtype]
else:
msg = 'data format `{0}` not supported for `{1}`'.format(
dtype, file_out)
raise PyoFormatException(msg)
return file_fmt, data_fmt
|
12ff9d453542dfd77459bd5424ef65c9328bfaf5
| 3,649,983
|
def catch_gpu_memory_error( f ):
"""
Decorator that calls the function `f` and catches any GPU memory
error, during the execution of f.
If a memory error occurs, this decorator prints a corresponding message
and aborts the simulation (using MPI abort if needed)
"""
# Redefine the original function by calling it within a try/except
def g(*args, **kwargs):
try:
return f(*args, **kwargs)
except OutOfMemoryError as e:
handle_cuda_memory_error( e, f.__name__ )
# Decorator: return the new function
return(g)
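# Hedged usage sketch (added). OutOfMemoryError and handle_cuda_memory_error
# are module-level names not included in this record, so this is illustrative only:
#
#     @catch_gpu_memory_error
#     def push_particles_to_gpu(species):
#         ...  # any GPU allocation in here is guarded
#
#     # If the body raises OutOfMemoryError, handle_cuda_memory_error() prints
#     # a message (including the function name) and aborts the run.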
|
1201236b7d2217fcfc3fcb95905f8f4e2f89af06
| 3,649,984
|
def horizontal_tail_planform_raymer(horizontal_stabilizer, wing, l_ht,c_ht):
"""Adjusts reference area before calling generic wing planform function to compute wing planform values.
Assumptions:
None
Source:
Raymer
Inputs:
horizontal_stabilizer [SUAVE data structure]
wing [SUAVE data structure] (should be the main wing)
l_ht [m] length from wing mean aerodynamic chord (MAC) to horizontal stabilizer MAC
c_ht [-] horizontal tail coefficient (Raymer specific) .5 = Sailplane, .5 = homebuilt,
.7 = GA single engine, .8 = GA twin engine .5 = agricultural, .9 = twin turboprop,
.7 = flying boat, .7 = jet trainer, .4 = jet fighter, 1. = military cargo/bomber,
1. = jet transport
Outputs:
    horizontal_stabilizer.areas.reference     [m^2]
Other changes to horizontal_stabilizer (see wing_planform)
Properties Used:
N/A
"""
horizontal_stabilizer.areas.reference = wing.chords.mean_aerodynamic*c_ht*wing.areas.reference/l_ht
wing_planform(horizontal_stabilizer)
return 0
|
860a020e3e2b06943df2689bd54707a051fb30b2
| 3,649,985
|
from improved_permissions.roles import ALLOW_MODE
def inherit_check(role_s, permission):
"""
Check if the role class has the following
permission in inherit mode.
"""
role = get_roleclass(role_s)
if role.inherit is True:
if role.get_inherit_mode() == ALLOW_MODE:
            return permission in role.inherit_allow
        return permission not in role.inherit_deny
return False
|
5dbaa7afee9802ea1eda4cec869dd44395faf0e5
| 3,649,986
|
import random
def giveHint(indexValue, myBoard):
"""Return a random matching card given the index of a card
and a game board"""
validMatches = []
card = myBoard[indexValue]
for c in myBoard:
if (card[0] == c[0]) and (myBoard.index(c) != indexValue):
validMatches.append(myBoard.index(c))
return random.choice(validMatches)
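# Small worked example (added): cards are (symbol, ...) tuples and the board
# index of a matching card is returned.
if __name__ == "__main__":
    board = [("A", 1), ("B", 2), ("A", 3), ("B", 4)]
    assert giveHint(0, board) == 2  # the only other "A" card is at index 2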
|
e578f40e7d7e2e17ddac53f9cfdc219e47c861cd
| 3,649,987
|
async def make_getmatch_embed(data):
"""Generate the embed description and other components for a getmatch() command.
As with its parent, remember that this currently does not support non team-vs.
`data` is expected to be the output of `get_individual_match_data()`.
The following `dict` is returned:
```
{
"embed_description": str,
"footer": str,
"embed_color": int (as color hex),
}
```
"""
scores = data["individual_scores"]
team_1_score_strings = []
team_2_score_strings = []
for individual_score in scores:
#at first i thought doing this would make the actual score_string more readable
#now i'm not very sure
player_name = individual_score["user_name"]
score_val = individual_score["score"]
maxcombo = individual_score["combo"]
accuracy = individual_score["accuracy"]
count_300 = individual_score["hits"]["300_count"]
count_100 = individual_score["hits"]["100_count"]
count_50 = individual_score["hits"]["50_count"]
count_miss = individual_score["hits"]["miss_count"]
accuracy = '{:.2%}'.format(accuracy)
score_val = "{:,}".format(score_val)
maxcombo = "{:,}".format(maxcombo)
score_string = (f'**{player_name}** - {score_val} ({maxcombo}x) ({accuracy} - {count_300}/{count_100}/{count_50}/{count_miss})')
team_1_score_strings.append(score_string) if individual_score["team"] == "1" else team_2_score_strings.append(score_string)
team_1_score_string = "\n".join(team_1_score_strings)
team_2_score_string = "\n".join(team_2_score_strings)
winner_string = {
"Blue": f"Blue team wins by {'{:,}'.format(data['score_difference'])}!",
"Red": f"Red team wins by {'{:,}'.format(data['score_difference'])}!",
"Tie": "Tie!"}
winner_color = {
"Blue": 0x0000FF,
"Red": 0xFF0000,
"Tie": 0x808080}
embed_desc = (
f'**{winner_string[data["winner"]]}**\n\n'
f'__Blue Team__ ({"{:,}".format(data["team_1_score"])} points, {"{:,}".format(data["team_1_score_avg"])} average)\n'
f'{team_1_score_string}\n\n'
f'__Red Team__ ({"{:,}".format(data["team_2_score"])} points, {"{:,}".format(data["team_2_score_avg"])} average)\n'
f'{team_2_score_string}')
#footer stuff
scoring_types = {
'0': 'Score',
'1': 'Accuracy',
'2': 'Combo',
'3': 'Score v2'}
team_types = {
'0': 'Head-to-head',
'1': 'Tag Co-op',
'2': 'Team VS',
'3': 'Tag Team VS'}
play_modes = {
'0': 'osu!',
'1': 'Taiko',
'2': 'CTB',
'3': 'osu!mania'}
embed_footer = (f'Played at {data["start_time"]} UTC | '
f'Win condition: {scoring_types[data["scoring_type"]]} | '
f'{team_types[data["team_type"]]} | '
f'{play_modes[data["play_mode"]]}')
final = {
"embed_description": embed_desc,
"footer": embed_footer,
"embed_color": winner_color[data["winner"]],
}
return final
|
c37e0d6ee948259e4ad898d3cafb8e13b6452d80
| 3,649,988
|
def allreduceCommunicate_op(node, comm):
"""Make a new instance of AllReduceCommunicateOp and call the instance.
Parameters:
----
node : Node
The Node to do allreduce
Returns:
----
A new Node instance created by Op.
"""
return AllReduceCommunicateOp(node, comm)
|
5096a9014ae349e39c2d59de77845221ffdddb10
| 3,649,989
|
import tensorflow as tf
def reduce_fn(state, values):
"""tf.data.Dataset-friendly implementation of mean and variance."""
k, n, ex, ex2 = state
# If this is the first iteration, we pick the first value to be 'k',
# which helps with precision - we assume that k is close to an average
# value and calculate mean and variance with respect to that.
k = tf.cond(tf.equal(n, 0), lambda: values[0], lambda: k)
sum_v = tf.reduce_sum(values, axis=0)
sum_v2 = tf.reduce_sum(tf.square(values), axis=0)
ones = tf.ones_like(values, dtype=tf.int32)
batch_size = tf.reduce_sum(ones, axis=0)
batch_size_f = tf.cast(batch_size, tf.float32)
ex = 0 + sum_v - tf.multiply(batch_size_f, k)
ex2 = 0 + sum_v2 + tf.multiply(
batch_size_f, (tf.square(k) -
tf.multiply(tf.multiply(2.0, k), sum_v)))
return (k, n + batch_size, ex, ex2)
|
473bb8cae3e898f3a166250fbdb805ad55aaaea9
| 3,649,990
|
import numpy as np
def winged_edge(
face_features: np.ndarray,
edge_features: np.ndarray,
coedge_features: np.ndarray,
coedge_to_next: np.ndarray,
coedge_to_mate: np.ndarray,
coedge_to_face: np.ndarray,
coedge_to_edge: np.ndarray,
):
"""Create graph according to the `winged edge` configuration."""
coedge_to_prev = np.zeros_like(coedge_to_next)
for (from_ix, to_ix) in enumerate(coedge_to_next):
coedge_to_prev[to_ix] = from_ix
faces_num = face_features.shape[0]
edges_num = edge_features.shape[0]
coedges_num = coedge_features.shape[0]
face_to_node = np.arange(faces_num)
edge_to_node = np.arange(edges_num) + faces_num
coedge_to_node = np.arange(coedges_num) + (faces_num + edges_num)
edges = []
# Faces
_f(coedge_to_face, coedge_to_node, face_to_node, edges)
_mf(coedge_to_mate, coedge_to_node, coedge_to_face, face_to_node, edges)
# Edges
_e(coedge_to_edge, coedge_to_node, edge_to_node, edges)
_ne(coedge_to_next, coedge_to_node, coedge_to_edge, edges)
_pe(coedge_to_prev, coedge_to_node, coedge_to_edge, edges)
_mne(coedge_to_next, coedge_to_node, coedge_to_mate, coedge_to_edge, edges)
_mpe(coedge_to_prev, coedge_to_node, coedge_to_mate, coedge_to_edge, edges)
# CoEdges
_i(coedges_num, coedge_to_node, edges)
_m(coedge_to_mate, coedge_to_node, edges)
_n(coedge_to_next, coedge_to_node, edges)
_p(coedge_to_prev, coedge_to_node, edges)
_mn(coedge_to_next, coedge_to_node, coedge_to_mate, edges)
_mp(coedge_to_prev, coedge_to_node, coedge_to_mate, edges)
return _create_graph(face_features, edge_features, coedge_features, edges)
|
8f023d4e6133b044c435737e49ae768c83f089ca
| 3,649,991
|
from decimal import Decimal
def dollar_format(dollars):
"""
Args:
dollars (any): A dollar value (Any value that can be turned into a float can be used - int, Decimal, str, etc.)
Returns:
str: The formatted string
"""
decimal_dollars = Decimal(dollars)
if decimal_dollars < 0:
return "-${:,.2f}".format(-decimal_dollars)
else:
return "${:,.2f}".format(decimal_dollars)
|
d9f8a9195a92af39df9754e14bae723060c335b1
| 3,649,992
|
from typing import Callable
from typing import Any
from functools import update_wrapper
def check_aea_project(
f: Callable, check_aea_version: bool = True, check_finger_prints: bool = False
) -> Callable:
"""
Check the consistency of the project as a decorator.
- try to load agent configuration file
- iterate over all the agent packages and check for consistency.
"""
def wrapper(*args: Any, **kwargs: Any) -> Callable:
_check_aea_project(
args,
check_aea_version=check_aea_version,
check_finger_prints=check_finger_prints,
)
return f(*args, **kwargs)
return update_wrapper(wrapper, f)
|
31d909116613be819b61be16160bd72227462853
| 3,649,993
|
def find_closest_cross(wire1_path, wire2_path):
"""
Compare the coordinates of two wire paths to find the crossing point
closest (Manhattan Distance) to the origin (0,0).
Returns a list of crossing points, the closest crossing point and its distance to the start point
"""
best_result = -1
crossing_list = []
for i in range(len(wire1_path)):
if wire1_path[i] in wire2_path and wire1_path[i] != [0,0]:
test_result = abs(wire1_path[i][0]) + abs(wire1_path[i][1])
crossing_list.append(wire1_path[i])
if best_result == -1:
best_cross = wire1_path[i][:]
best_result = test_result
elif test_result < best_result:
best_cross = wire1_path[i][:]
best_result = test_result
return crossing_list, best_cross, best_result
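# Small worked example (added): paths are lists of [x, y] steps starting at [0, 0].
if __name__ == "__main__":
    wire1 = [[0, 0], [1, 0], [1, 1], [1, 2]]
    wire2 = [[0, 0], [0, 1], [1, 1], [2, 1]]
    crossings, closest, dist = find_closest_cross(wire1, wire2)
    assert crossings == [[1, 1]] and closest == [1, 1] and dist == 2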
|
c91c5db3bb09cdfc74c4c71c92bf46274eb8d88c
| 3,649,994
|
import re
def add_signature_source(service, **_):
"""
Add a signature source for a given service
Variables:
service => Service to which we want to add the source to
Arguments:
None
Data Block:
{
"uri": "http://somesite/file_to_get", # URI to fetch for parsing the rules
"name": "signature_file.yar", # Name of the file we will parse the rules as
"username": null, # Username used to get to the URI
"password": null, # Password used to get to the URI
"header": { # Header sent during the request to the URI
"X_TOKEN": "SOME RANDOM TOKEN" # Exemple of header
},
"private_key": null, # Private key used to get to the URI
"pattern": "^*.yar$" # Regex pattern use to get appropriate files from the URI
}
Result example:
{"success": True/False} # if the operation succeeded of not
"""
try:
data = request.json
except (ValueError, KeyError):
return make_api_response({"success": False},
err="Invalid source object data",
status_code=400)
# Ensure data source doesn't have spaces in name
data['name'] = re.sub('[^0-9a-zA-Z_]+', '', data['name'].replace(" ", "_"))
# Ensure private_key (if any) ends with a \n
if data.get('private_key', None) and not data['private_key'].endswith("\n"):
data['private_key'] += "\n"
service_data = STORAGE.get_service_with_delta(service, as_obj=False)
if not service_data.get('update_config', {}).get('generates_signatures', False):
return make_api_response({"success": False},
err="This service does not generate alerts therefor "
"you cannot add a source to get the alerts from.",
status_code=400)
current_sources = service_data.get('update_config', {}).get('sources', [])
for source in current_sources:
if source['name'] == data['name']:
return make_api_response({"success": False},
err=f"Update source name already exist: {data['name']}",
status_code=400)
current_sources.append(data)
service_delta = STORAGE.service_delta.get(service, as_obj=False)
if service_delta.get('update_config') is None:
service_delta['update_config'] = {"sources": current_sources}
else:
service_delta['update_config']['sources'] = current_sources
_reset_service_updates(service)
# Save the signature
success = STORAGE.service_delta.save(service, service_delta)
if success:
service_event_sender.send(data['name'], {
'operation': Operation.Modified,
'name': data['name']
})
return make_api_response({"success": success})
|
65526852dee90f077e0c8b52fc53e725043ffc1e
| 3,649,995
|
def edit_screen_item(self, request, form):
""" Edit a screen. """
layout = ManageScreensLayout(self, request)
if form.submitted(request):
form.update_model(self)
request.message(_('Screen modified.'), 'success')
request.app.pages_cache.flush()
return redirect(layout.manage_model_link)
if not form.errors:
form.apply_model(self)
return {
'layout': layout,
'form': form,
'title': _(
"Screen ${number}",
mapping={'number': self.number}
),
'subtitle': _('Edit screen'),
'cancel': layout.manage_model_link
}
|
456837172860c808c2347d556cb8aaa4fcf59fbb
| 3,649,996
|
from sympy import symbols, cos, sin
def get_xyz_t():
    """
    Compute, for each of X, Y and Z, the intermediate terms inside the
    inverse of the CIELAB-to-XYZ transform.
    """
c, l, h = symbols('c, l, h', real=True)
xt = (l + 16) / 116 + (c * cos(h)) / 500
yt = (l + 16) / 116
zt = (l + 16) / 116 - (c * sin(h)) / 200
xyz_t = [xt, yt, zt]
return xyz_t, c, l, h
|
e823744ada693fb525d57f5a616c89677c8ed0a5
| 3,649,997
|
async def home():
"""
Home page, welcome
Returns:
Rendered template of homepage
"""
return await render_template('home.html')
|
a981c121c64a99359adac620dfa0f58d31a63956
| 3,649,998
|
import torch
def compute_inverse_interpolation_img(weights, indices, img, b, h_i, w_i):
"""
weights: [b, h*w]
indices: [b, h*w]
img: [b, h*w, a, b, c, ...]
"""
w0, w1, w2, w3 = weights
ff_idx, cf_idx, fc_idx, cc_idx = indices
k = len(img.size()) - len(w0.size())
img_0 = w0[(...,) + (None,) * k] * img
img_1 = w1[(...,) + (None,) * k] * img
img_2 = w2[(...,) + (None,) * k] * img
img_3 = w3[(...,) + (None,) * k] * img
img_out = torch.zeros(b, h_i * w_i, *img.shape[2:]).type_as(img)
ff_idx = torch.clamp(ff_idx, min=0, max=h_i * w_i - 1)
cf_idx = torch.clamp(cf_idx, min=0, max=h_i * w_i - 1)
fc_idx = torch.clamp(fc_idx, min=0, max=h_i * w_i - 1)
cc_idx = torch.clamp(cc_idx, min=0, max=h_i * w_i - 1)
img_out.scatter_add_(1, ff_idx[(...,) + (None,) * k].expand_as(img_0), img_0)
img_out.scatter_add_(1, cf_idx[(...,) + (None,) * k].expand_as(img_1), img_1)
img_out.scatter_add_(1, fc_idx[(...,) + (None,) * k].expand_as(img_2), img_2)
img_out.scatter_add_(1, cc_idx[(...,) + (None,) * k].expand_as(img_3), img_3)
return img_out
|
6b69aa5ca372a9c8f976512191d4626919d71311
| 3,649,999
|